path: root/contrib/llvm/include
Diffstat (limited to 'contrib/llvm/include')
-rw-r--r-- contrib/llvm/include/llvm-c/Analysis.h | 65
-rw-r--r-- contrib/llvm/include/llvm-c/BitReader.h | 85
-rw-r--r-- contrib/llvm/include/llvm-c/BitWriter.h | 59
-rw-r--r-- contrib/llvm/include/llvm-c/Core.h | 2938
-rw-r--r-- contrib/llvm/include/llvm-c/Disassembler.h | 254
-rw-r--r-- contrib/llvm/include/llvm-c/ErrorHandling.h | 51
-rw-r--r-- contrib/llvm/include/llvm-c/ExecutionEngine.h | 193
-rw-r--r-- contrib/llvm/include/llvm-c/IRReader.h | 40
-rw-r--r-- contrib/llvm/include/llvm-c/Initialization.h | 55
-rw-r--r-- contrib/llvm/include/llvm-c/LinkTimeOptimizer.h | 69
-rw-r--r-- contrib/llvm/include/llvm-c/Linker.h | 55
-rw-r--r-- contrib/llvm/include/llvm-c/Object.h | 100
-rw-r--r-- contrib/llvm/include/llvm-c/OrcBindings.h | 134
-rw-r--r-- contrib/llvm/include/llvm-c/Support.h | 65
-rw-r--r-- contrib/llvm/include/llvm-c/Target.h | 290
-rw-r--r-- contrib/llvm/include/llvm-c/TargetMachine.h | 147
-rw-r--r-- contrib/llvm/include/llvm-c/Transforms/IPO.h | 81
-rw-r--r-- contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h | 90
-rw-r--r-- contrib/llvm/include/llvm-c/Transforms/Scalar.h | 158
-rw-r--r-- contrib/llvm/include/llvm-c/Transforms/Vectorize.h | 53
-rw-r--r-- contrib/llvm/include/llvm-c/Types.h | 124
-rw-r--r-- contrib/llvm/include/llvm-c/lto.h | 559
-rw-r--r-- contrib/llvm/include/llvm-c/module.modulemap | 4
-rw-r--r-- contrib/llvm/include/llvm/ADT/APFloat.h | 686
-rw-r--r-- contrib/llvm/include/llvm/ADT/APInt.h | 1915
-rw-r--r-- contrib/llvm/include/llvm/ADT/APSInt.h | 342
-rw-r--r-- contrib/llvm/include/llvm/ADT/ArrayRef.h | 384
-rw-r--r-- contrib/llvm/include/llvm/ADT/BitVector.h | 589
-rw-r--r-- contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h | 77
-rw-r--r-- contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h | 93
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseMap.h | 1074
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseMapInfo.h | 221
-rw-r--r-- contrib/llvm/include/llvm/ADT/DenseSet.h | 165
-rw-r--r-- contrib/llvm/include/llvm/ADT/DepthFirstIterator.h | 291
-rw-r--r-- contrib/llvm/include/llvm/ADT/EpochTracker.h | 99
-rw-r--r-- contrib/llvm/include/llvm/ADT/EquivalenceClasses.h | 283
-rw-r--r-- contrib/llvm/include/llvm/ADT/FoldingSet.h | 750
-rw-r--r-- contrib/llvm/include/llvm/ADT/GraphTraits.h | 106
-rw-r--r-- contrib/llvm/include/llvm/ADT/Hashing.h | 661
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableList.h | 229
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableMap.h | 408
-rw-r--r-- contrib/llvm/include/llvm/ADT/ImmutableSet.h | 1218
-rw-r--r-- contrib/llvm/include/llvm/ADT/IndexedMap.h | 85
-rw-r--r-- contrib/llvm/include/llvm/ADT/IntEqClasses.h | 88
-rw-r--r-- contrib/llvm/include/llvm/ADT/IntervalMap.h | 2156
-rw-r--r-- contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h | 288
-rw-r--r-- contrib/llvm/include/llvm/ADT/MapVector.h | 200
-rw-r--r-- contrib/llvm/include/llvm/ADT/None.h | 26
-rw-r--r-- contrib/llvm/include/llvm/ADT/Optional.h | 228
-rw-r--r-- contrib/llvm/include/llvm/ADT/PackedVector.h | 149
-rw-r--r-- contrib/llvm/include/llvm/ADT/PointerIntPair.h | 218
-rw-r--r-- contrib/llvm/include/llvm/ADT/PointerUnion.h | 474
-rw-r--r-- contrib/llvm/include/llvm/ADT/PostOrderIterator.h | 300
-rw-r--r-- contrib/llvm/include/llvm/ADT/PriorityQueue.h | 84
-rw-r--r-- contrib/llvm/include/llvm/ADT/SCCIterator.h | 245
-rw-r--r-- contrib/llvm/include/llvm/ADT/STLExtras.h | 472
-rw-r--r-- contrib/llvm/include/llvm/ADT/ScopedHashTable.h | 256
-rw-r--r-- contrib/llvm/include/llvm/ADT/SetOperations.h | 71
-rw-r--r-- contrib/llvm/include/llvm/ADT/SetVector.h | 255
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallBitVector.h | 600
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallPtrSet.h | 350
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallSet.h | 131
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallString.h | 297
-rw-r--r-- contrib/llvm/include/llvm/ADT/SmallVector.h | 954
-rw-r--r-- contrib/llvm/include/llvm/ADT/SparseBitVector.h | 904
-rw-r--r-- contrib/llvm/include/llvm/ADT/SparseMultiSet.h | 518
-rw-r--r-- contrib/llvm/include/llvm/ADT/SparseSet.h | 311
-rw-r--r-- contrib/llvm/include/llvm/ADT/Statistic.h | 186
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringExtras.h | 212
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringMap.h | 463
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringRef.h | 606
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringSet.h | 39
-rw-r--r-- contrib/llvm/include/llvm/ADT/StringSwitch.h | 166
-rw-r--r-- contrib/llvm/include/llvm/ADT/TinyPtrVector.h | 306
-rw-r--r-- contrib/llvm/include/llvm/ADT/Triple.h | 674
-rw-r--r-- contrib/llvm/include/llvm/ADT/Twine.h | 542
-rw-r--r-- contrib/llvm/include/llvm/ADT/UniqueVector.h | 107
-rw-r--r-- contrib/llvm/include/llvm/ADT/VariadicFunction.h | 331
-rw-r--r-- contrib/llvm/include/llvm/ADT/edit_distance.h | 103
-rw-r--r-- contrib/llvm/include/llvm/ADT/ilist.h | 800
-rw-r--r-- contrib/llvm/include/llvm/ADT/ilist_node.h | 123
-rw-r--r-- contrib/llvm/include/llvm/ADT/iterator.h | 246
-rw-r--r-- contrib/llvm/include/llvm/ADT/iterator_range.h | 68
-rw-r--r-- contrib/llvm/include/llvm/Analysis/AliasAnalysis.h | 1072
-rw-r--r-- contrib/llvm/include/llvm/Analysis/AliasSetTracker.h | 449
-rw-r--r-- contrib/llvm/include/llvm/Analysis/AssumptionCache.h | 183
-rw-r--r-- contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h | 223
-rw-r--r-- contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h | 90
-rw-r--r-- contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h | 1231
-rw-r--r-- contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h | 160
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CFG.h | 94
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CFGPrinter.h | 130
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CFLAliasAnalysis.h | 158
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h | 490
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CallGraph.h | 495
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CallGraphSCCPass.h | 108
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CallPrinter.h | 27
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CaptureTracking.h | 81
-rw-r--r-- contrib/llvm/include/llvm/Analysis/CodeMetrics.h | 107
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ConstantFolding.h | 113
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h | 189
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DemandedBits.h | 75
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h | 941
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h | 48
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DomPrinter.h | 30
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DominanceFrontier.h | 210
-rw-r--r-- contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h | 226
-rw-r--r-- contrib/llvm/include/llvm/Analysis/EHPersonalities.h | 94
-rw-r--r-- contrib/llvm/include/llvm/Analysis/GlobalsModRef.h | 160
-rw-r--r-- contrib/llvm/include/llvm/Analysis/IVUsers.h | 188
-rw-r--r-- contrib/llvm/include/llvm/Analysis/InlineCost.h | 128
-rw-r--r-- contrib/llvm/include/llvm/Analysis/InstructionSimplify.h | 358
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Interval.h | 150
-rw-r--r-- contrib/llvm/include/llvm/Analysis/IntervalIterator.h | 268
-rw-r--r-- contrib/llvm/include/llvm/Analysis/IntervalPartition.h | 111
-rw-r--r-- contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h | 96
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LazyCallGraph.h | 569
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LazyValueInfo.h | 90
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Lint.h | 49
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Loads.h | 62
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h | 721
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopInfo.h | 869
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h | 540
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopIterator.h | 181
-rw-r--r-- contrib/llvm/include/llvm/Analysis/LoopPass.h | 161
-rw-r--r-- contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h | 262
-rw-r--r-- contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h | 467
-rw-r--r-- contrib/llvm/include/llvm/Analysis/MemoryLocation.h | 142
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h | 102
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h | 287
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h | 123
-rw-r--r-- contrib/llvm/include/llvm/Analysis/OrderedBasicBlock.h | 66
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PHITransAddr.h | 127
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Passes.h | 108
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PostDominators.h | 117
-rw-r--r-- contrib/llvm/include/llvm/Analysis/PtrUseVisitor.h | 285
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionInfo.h | 921
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionInfoImpl.h | 927
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionIterator.h | 345
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionPass.h | 128
-rw-r--r-- contrib/llvm/include/llvm/Analysis/RegionPrinter.h | 71
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolution.h | 1381
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h | 79
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h | 312
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h | 709
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h | 78
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h | 92
-rw-r--r-- contrib/llvm/include/llvm/Analysis/SparsePropagation.h | 201
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TargetFolder.h | 270
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def | 1119
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.h | 328
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h | 968
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h | 512
-rw-r--r-- contrib/llvm/include/llvm/Analysis/Trace.h | 119
-rw-r--r-- contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h | 93
-rw-r--r-- contrib/llvm/include/llvm/Analysis/ValueTracking.h | 458
-rw-r--r-- contrib/llvm/include/llvm/Analysis/VectorUtils.h | 132
-rw-r--r-- contrib/llvm/include/llvm/AsmParser/Parser.h | 96
-rw-r--r-- contrib/llvm/include/llvm/AsmParser/SlotMapping.h | 42
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/BitCodes.h | 185
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/BitcodeWriterPass.h | 71
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/BitstreamReader.h | 519
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h | 548
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h | 502
-rw-r--r-- contrib/llvm/include/llvm/Bitcode/ReaderWriter.h | 213
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/Analysis.h | 126
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/AsmPrinter.h | 557
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/AtomicExpandUtils.h | 57
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h | 846
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h | 82
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/CallingConvLower.h | 519
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/CommandFlags.h | 380
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/DAGCombine.h | 25
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h | 214
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/DIE.h | 764
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/DIEValue.def | 47
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h | 51
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/EdgeBundles.h | 64
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/FastISel.h | 579
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/FaultMaps.h | 220
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h | 262
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/GCMetadata.h | 206
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h | 64
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/GCStrategy.h | 177
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/GCs.h | 46
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h | 939
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h | 58
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h | 98
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LexicalScopes.h | 257
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h | 38
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h | 59
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveInterval.h | 868
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h | 449
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveIntervalUnion.h | 216
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h | 147
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h | 233
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveRegMatrix.h | 148
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h | 98
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/LiveVariables.h | 311
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MIRParser/MIRParser.h | 81
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h | 430
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachORelocation.h | 56
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h | 883
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h | 71
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h | 77
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineCombinerPattern.h | 46
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h | 165
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineDominanceFrontier.h | 109
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineDominators.h | 283
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h | 647
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunction.h | 585
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h | 55
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunctionInitializer.h | 38
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h | 59
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineInstr.h | 1270
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h | 497
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h | 257
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h | 130
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h | 186
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h | 238
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h | 433
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h | 89
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineOperand.h | 754
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h | 157
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h | 86
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineRegionInfo.h | 180
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h | 995
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h | 116
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineScheduler.h | 964
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineTraceMetrics.h | 398
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/MachineValueType.h | 713
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h | 132
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h | 681
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Math.h | 429
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/ReductionRules.h | 221
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h | 94
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PBQPRAConstraint.h | 69
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ParallelCG.h | 43
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/Passes.h | 685
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h | 183
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h | 598
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h | 63
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h | 145
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegisterPressure.h | 455
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h | 186
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h | 136
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h | 427
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h | 760
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h | 293
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleDFS.h | 194
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h | 111
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h | 106
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h | 126
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAG.h | 1328
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h | 306
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 2323
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/SlotIndexes.h | 717
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/StackMaps.h | 257
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/StackProtector.h | 129
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h | 168
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/TargetSchedule.h | 190
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ValueTypes.h | 386
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/ValueTypes.td | 119
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/VirtRegMap.h | 190
-rw-r--r-- contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h | 130
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h | 367
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewOStream.h | 39
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h | 78
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/FunctionId.h | 56
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/Line.h | 124
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h | 43
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h | 68
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h | 35
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h | 176
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h | 270
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h | 57
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h | 60
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DIContext.h | 198
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h | 62
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h | 54
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h | 34
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h | 319
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h | 63
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h | 70
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h | 87
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h | 43
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h | 160
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h | 252
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h | 81
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h | 59
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h | 79
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h | 99
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h | 22
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h | 25
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h | 42
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h | 299
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h | 81
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h | 59
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h | 33
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h | 35
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h | 35
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h | 39
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h | 206
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h | 51
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h | 36
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h | 33
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h | 33
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBLineNumber.h | 36
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h | 211
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h | 61
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSourceFile.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDB.h | 26
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBContext.h | 59
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h | 38
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymDumper.h | 61
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h | 97
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h | 39
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h | 41
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h | 38
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h | 55
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h | 39
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h | 61
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h | 46
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h | 80
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h | 49
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h | 49
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h | 49
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h | 47
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h | 57
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h | 45
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h | 60
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h | 40
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h | 36
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h | 36
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h | 55
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h | 37
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h | 50
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h | 35
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h | 43
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h | 54
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h | 52
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h | 40
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h | 39
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h | 34
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h | 36
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h | 547
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h | 47
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h | 53
-rw-r--r-- contrib/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h | 105
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h | 649
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h | 53
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h | 28
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h | 122
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/JITSymbolFlags.h | 81
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h | 38
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h | 124
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/ObjectCache.h | 40
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h | 63
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h | 480
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h | 61
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h | 182
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h | 108
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h | 146
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h | 101
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h | 413
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/JITSymbol.h | 77
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/LambdaResolver.h | 62
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h | 304
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/LogicalDylib.h | 135
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h | 36
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h | 283
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h | 104
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h | 103
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/OrcMCJITReplacement.h | 38
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/RTDyldMemoryManager.h | 136
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h | 256
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h | 102
-rw-r--r-- contrib/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h | 123
-rw-r--r-- contrib/llvm/include/llvm/IR/Argument.h | 135
-rw-r--r-- contrib/llvm/include/llvm/IR/AssemblyAnnotationWriter.h | 62
-rw-r--r-- contrib/llvm/include/llvm/IR/Attributes.h | 547
-rw-r--r-- contrib/llvm/include/llvm/IR/Attributes.td | 192
-rw-r--r-- contrib/llvm/include/llvm/IR/AutoUpgrade.h | 71
-rw-r--r-- contrib/llvm/include/llvm/IR/BasicBlock.h | 341
-rw-r--r-- contrib/llvm/include/llvm/IR/CFG.h | 265
-rw-r--r-- contrib/llvm/include/llvm/IR/CallSite.h | 563
-rw-r--r-- contrib/llvm/include/llvm/IR/CallingConv.h | 178
-rw-r--r-- contrib/llvm/include/llvm/IR/Comdat.h | 66
-rw-r--r-- contrib/llvm/include/llvm/IR/Constant.h | 166
-rw-r--r-- contrib/llvm/include/llvm/IR/ConstantFolder.h | 245
-rw-r--r-- contrib/llvm/include/llvm/IR/ConstantRange.h | 289
-rw-r--r-- contrib/llvm/include/llvm/IR/Constants.h | 1294
-rw-r--r-- contrib/llvm/include/llvm/IR/DIBuilder.h | 726
-rw-r--r-- contrib/llvm/include/llvm/IR/DataLayout.h | 554
-rw-r--r-- contrib/llvm/include/llvm/IR/DebugInfo.h | 148
-rw-r--r-- contrib/llvm/include/llvm/IR/DebugInfoFlags.def | 37
-rw-r--r-- contrib/llvm/include/llvm/IR/DebugInfoMetadata.h | 2375
-rw-r--r-- contrib/llvm/include/llvm/IR/DebugLoc.h | 126
-rw-r--r-- contrib/llvm/include/llvm/IR/DerivedTypes.h | 522
-rw-r--r-- contrib/llvm/include/llvm/IR/DiagnosticInfo.h | 589
-rw-r--r-- contrib/llvm/include/llvm/IR/DiagnosticPrinter.h | 94
-rw-r--r-- contrib/llvm/include/llvm/IR/Dominators.h | 254
-rw-r--r-- contrib/llvm/include/llvm/IR/Function.h | 665
-rw-r--r-- contrib/llvm/include/llvm/IR/FunctionInfo.h | 241
-rw-r--r-- contrib/llvm/include/llvm/IR/GVMaterializer.h | 63
-rw-r--r-- contrib/llvm/include/llvm/IR/GetElementPtrTypeIterator.h | 139
-rw-r--r-- contrib/llvm/include/llvm/IR/GlobalAlias.h | 124
-rw-r--r-- contrib/llvm/include/llvm/IR/GlobalObject.h | 78
-rw-r--r-- contrib/llvm/include/llvm/IR/GlobalValue.h | 370
-rw-r--r-- contrib/llvm/include/llvm/IR/GlobalVariable.h | 177
-rw-r--r-- contrib/llvm/include/llvm/IR/IRBuilder.h | 1782
-rw-r--r-- contrib/llvm/include/llvm/IR/IRPrintingPasses.h | 94
-rw-r--r-- contrib/llvm/include/llvm/IR/InlineAsm.h | 362
-rw-r--r-- contrib/llvm/include/llvm/IR/InstIterator.h | 157
-rw-r--r-- contrib/llvm/include/llvm/IR/InstVisitor.h | 295
-rw-r--r-- contrib/llvm/include/llvm/IR/InstrTypes.h | 1633
-rw-r--r-- contrib/llvm/include/llvm/IR/Instruction.def | 225
-rw-r--r-- contrib/llvm/include/llvm/IR/Instruction.h | 553
-rw-r--r-- contrib/llvm/include/llvm/IR/Instructions.h | 4806
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicInst.h | 410
-rw-r--r-- contrib/llvm/include/llvm/IR/Intrinsics.h | 133
-rw-r--r-- contrib/llvm/include/llvm/IR/Intrinsics.td | 671
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td | 658
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td | 157
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsARM.td | 519
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsBPF.td | 24
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td | 9373
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsMips.td | 1771
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsNVVM.td | 3746
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td | 985
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsSystemZ.td | 376
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td | 22
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsX86.td | 7540
-rw-r--r-- contrib/llvm/include/llvm/IR/IntrinsicsXCore.td | 121
-rw-r--r-- contrib/llvm/include/llvm/IR/LLVMContext.h | 235
-rw-r--r-- contrib/llvm/include/llvm/IR/LegacyPassManager.h | 103
-rw-r--r-- contrib/llvm/include/llvm/IR/LegacyPassManagers.h | 508
-rw-r--r-- contrib/llvm/include/llvm/IR/LegacyPassNameParser.h | 139
-rw-r--r-- contrib/llvm/include/llvm/IR/MDBuilder.h | 164
-rw-r--r-- contrib/llvm/include/llvm/IR/Mangler.h | 56
-rw-r--r-- contrib/llvm/include/llvm/IR/Metadata.def | 123
-rw-r--r-- contrib/llvm/include/llvm/IR/Metadata.h | 1297
-rw-r--r-- contrib/llvm/include/llvm/IR/Module.h | 660
-rw-r--r-- contrib/llvm/include/llvm/IR/ModuleSlotTracker.h | 76
-rw-r--r-- contrib/llvm/include/llvm/IR/NoFolder.h | 299
-rw-r--r-- contrib/llvm/include/llvm/IR/OperandTraits.h | 160
-rw-r--r-- contrib/llvm/include/llvm/IR/Operator.h | 497
-rw-r--r-- contrib/llvm/include/llvm/IR/PassManager.h | 896
-rw-r--r-- contrib/llvm/include/llvm/IR/PassManagerInternal.h | 350
-rw-r--r-- contrib/llvm/include/llvm/IR/PatternMatch.h | 1318
-rw-r--r-- contrib/llvm/include/llvm/IR/PredIteratorCache.h | 79
-rw-r--r-- contrib/llvm/include/llvm/IR/Statepoint.h | 411
-rw-r--r-- contrib/llvm/include/llvm/IR/SymbolTableListTraits.h | 122
-rw-r--r-- contrib/llvm/include/llvm/IR/TrackingMDRef.h | 166
-rw-r--r-- contrib/llvm/include/llvm/IR/Type.h | 480
-rw-r--r-- contrib/llvm/include/llvm/IR/TypeBuilder.h | 407
-rw-r--r-- contrib/llvm/include/llvm/IR/TypeFinder.h | 79
-rw-r--r-- contrib/llvm/include/llvm/IR/Use.h | 172
-rw-r--r-- contrib/llvm/include/llvm/IR/UseListOrder.h | 56
-rw-r--r-- contrib/llvm/include/llvm/IR/User.h | 271
-rw-r--r-- contrib/llvm/include/llvm/IR/Value.def | 91
-rw-r--r-- contrib/llvm/include/llvm/IR/Value.h | 767
-rw-r--r-- contrib/llvm/include/llvm/IR/ValueHandle.h | 386
-rw-r--r-- contrib/llvm/include/llvm/IR/ValueMap.h | 398
-rw-r--r-- contrib/llvm/include/llvm/IR/ValueSymbolTable.h | 133
-rw-r--r-- contrib/llvm/include/llvm/IR/Verifier.h | 77
-rw-r--r-- contrib/llvm/include/llvm/IRReader/IRReader.h | 49
-rw-r--r-- contrib/llvm/include/llvm/InitializePasses.h | 314
-rw-r--r-- contrib/llvm/include/llvm/LTO/LTOCodeGenerator.h | 196
-rw-r--r-- contrib/llvm/include/llvm/LTO/LTOModule.h | 214
-rw-r--r-- contrib/llvm/include/llvm/LibDriver/LibDriver.h | 26
-rw-r--r-- contrib/llvm/include/llvm/LineEditor/LineEditor.h | 153
-rw-r--r-- contrib/llvm/include/llvm/LinkAllIR.h | 53
-rw-r--r-- contrib/llvm/include/llvm/LinkAllPasses.h | 199
-rw-r--r-- contrib/llvm/include/llvm/Linker/IRMover.h | 76
-rw-r--r-- contrib/llvm/include/llvm/Linker/Linker.h | 77
-rw-r--r-- contrib/llvm/include/llvm/MC/ConstantPools.h | 93
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmBackend.h | 148
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmInfo.h | 569
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h | 36
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h | 29
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmInfoELF.h | 25
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAsmLayout.h | 107
-rw-r--r-- contrib/llvm/include/llvm/MC/MCAssembler.h | 425
-rw-r--r-- contrib/llvm/include/llvm/MC/MCCodeEmitter.h | 46
-rw-r--r-- contrib/llvm/include/llvm/MC/MCCodeGenInfo.h | 51
-rw-r--r-- contrib/llvm/include/llvm/MC/MCContext.h | 605
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDirectives.h | 72
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDisassembler.h | 113
-rw-r--r-- contrib/llvm/include/llvm/MC/MCDwarf.h | 523
-rw-r--r-- contrib/llvm/include/llvm/MC/MCELFObjectWriter.h | 135
-rw-r--r-- contrib/llvm/include/llvm/MC/MCELFStreamer.h | 109
-rw-r--r-- contrib/llvm/include/llvm/MC/MCExpr.h | 572
-rw-r--r-- contrib/llvm/include/llvm/MC/MCExternalSymbolizer.h | 58
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h | 34
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFixup.h | 113
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFixupKindInfo.h | 43
-rw-r--r-- contrib/llvm/include/llvm/MC/MCFragment.h | 506
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInst.h | 205
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstBuilder.h | 74
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstPrinter.h | 108
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstrAnalysis.h | 71
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstrDesc.h | 556
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstrInfo.h | 59
-rw-r--r-- contrib/llvm/include/llvm/MC/MCInstrItineraries.h | 239
-rw-r--r-- contrib/llvm/include/llvm/MC/MCLabel.h | 57
-rw-r--r-- contrib/llvm/include/llvm/MC/MCLinkerOptimizationHint.h | 205
-rw-r--r-- contrib/llvm/include/llvm/MC/MCMachObjectWriter.h | 277
-rw-r--r-- contrib/llvm/include/llvm/MC/MCObjectFileInfo.h | 358
-rw-r--r-- contrib/llvm/include/llvm/MC/MCObjectStreamer.h | 147
-rw-r--r-- contrib/llvm/include/llvm/MC/MCObjectWriter.h | 203
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/AsmCond.h | 40
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h | 74
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h | 217
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h | 219
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h | 92
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCAsmParserUtils.h | 33
-rw-r--r-- contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h | 98
-rw-r--r-- contrib/llvm/include/llvm/MC/MCRegisterInfo.h | 689
-rw-r--r-- contrib/llvm/include/llvm/MC/MCRelocationInfo.h | 55
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSchedule.h | 237
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSection.h | 203
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionCOFF.h | 79
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionELF.h | 97
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSectionMachO.h | 91
-rw-r--r-- contrib/llvm/include/llvm/MC/MCStreamer.h | 750
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSubtargetInfo.h | 170
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbol.h | 421
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbolCOFF.h | 64
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbolELF.h | 54
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbolMachO.h | 123
-rw-r--r-- contrib/llvm/include/llvm/MC/MCSymbolizer.h | 85
-rw-r--r-- contrib/llvm/include/llvm/MC/MCTargetAsmParser.h | 222
-rw-r--r-- contrib/llvm/include/llvm/MC/MCTargetOptions.h | 72
-rw-r--r-- contrib/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h | 74
-rw-r--r-- contrib/llvm/include/llvm/MC/MCValue.h | 86
-rw-r--r-- contrib/llvm/include/llvm/MC/MCWin64EH.h | 63
-rw-r--r-- contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h | 47
-rw-r--r-- contrib/llvm/include/llvm/MC/MCWinCOFFStreamer.h | 81
-rw-r--r-- contrib/llvm/include/llvm/MC/MCWinEH.h | 84
-rw-r--r-- contrib/llvm/include/llvm/MC/MachineLocation.h | 79
-rw-r--r-- contrib/llvm/include/llvm/MC/SectionKind.h | 195
-rw-r--r-- contrib/llvm/include/llvm/MC/StringTableBuilder.h | 65
-rw-r--r-- contrib/llvm/include/llvm/MC/SubtargetFeature.h | 127
-rw-r--r-- contrib/llvm/include/llvm/MC/YAML.h | 94
-rw-r--r-- contrib/llvm/include/llvm/Object/Archive.h | 231
-rw-r--r-- contrib/llvm/include/llvm/Object/ArchiveWriter.h | 48
-rw-r--r-- contrib/llvm/include/llvm/Object/Binary.h | 192
-rw-r--r-- contrib/llvm/include/llvm/Object/COFF.h | 912
-rw-r--r-- contrib/llvm/include/llvm/Object/COFFImportFile.h | 74
-rw-r--r-- contrib/llvm/include/llvm/Object/COFFYAML.h | 223
-rw-r--r-- contrib/llvm/include/llvm/Object/ELF.h | 542
-rw-r--r-- contrib/llvm/include/llvm/Object/ELFObjectFile.h | 930
-rw-r--r-- contrib/llvm/include/llvm/Object/ELFTypes.h | 567
-rw-r--r-- contrib/llvm/include/llvm/Object/ELFYAML.h | 319
-rw-r--r-- contrib/llvm/include/llvm/Object/Error.h | 52
-rw-r--r-- contrib/llvm/include/llvm/Object/FunctionIndexObjectFile.h | 110
-rw-r--r-- contrib/llvm/include/llvm/Object/IRObjectFile.h | 74
-rw-r--r-- contrib/llvm/include/llvm/Object/MachO.h | 523
-rw-r--r-- contrib/llvm/include/llvm/Object/MachOUniversal.h | 118
-rw-r--r-- contrib/llvm/include/llvm/Object/ObjectFile.h | 459
-rw-r--r-- contrib/llvm/include/llvm/Object/RelocVisitor.h | 420
-rw-r--r-- contrib/llvm/include/llvm/Object/StackMapParser.h | 442
-rw-r--r-- contrib/llvm/include/llvm/Object/SymbolSize.h | 23
-rw-r--r-- contrib/llvm/include/llvm/Object/SymbolicFile.h | 207
-rw-r--r-- contrib/llvm/include/llvm/Option/Arg.h | 127
-rw-r--r-- contrib/llvm/include/llvm/Option/ArgList.h | 465
-rw-r--r-- contrib/llvm/include/llvm/Option/OptParser.td | 132
-rw-r--r-- contrib/llvm/include/llvm/Option/OptSpecifier.h | 41
-rw-r--r-- contrib/llvm/include/llvm/Option/OptTable.h | 174
-rw-r--r-- contrib/llvm/include/llvm/Option/Option.h | 205
-rw-r--r-- contrib/llvm/include/llvm/Pass.h | 380
-rw-r--r-- contrib/llvm/include/llvm/PassAnalysisSupport.h | 275
-rw-r--r-- contrib/llvm/include/llvm/PassInfo.h | 143
-rw-r--r-- contrib/llvm/include/llvm/PassRegistry.h | 99
-rw-r--r-- contrib/llvm/include/llvm/PassSupport.h | 250
-rw-r--r-- contrib/llvm/include/llvm/Passes/PassBuilder.h | 105
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/CoverageMapping.h | 509
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/CoverageMappingReader.h | 182
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/CoverageMappingWriter.h | 63
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/InstrProf.h | 590
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/InstrProfData.inc | 735
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/InstrProfReader.h | 390
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/InstrProfWriter.h | 61
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/SampleProf.h | 386
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/SampleProfReader.h | 409
-rw-r--r-- contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h | 134
-rw-r--r-- contrib/llvm/include/llvm/Support/AIXDataTypesFix.h | 25
-rw-r--r-- contrib/llvm/include/llvm/Support/ARMBuildAttributes.h | 229
-rw-r--r-- contrib/llvm/include/llvm/Support/ARMEHABI.h | 134
-rw-r--r-- contrib/llvm/include/llvm/Support/ARMTargetParser.def | 223
-rw-r--r-- contrib/llvm/include/llvm/Support/ARMWinEH.h | 382
-rw-r--r-- contrib/llvm/include/llvm/Support/AlignOf.h | 258
-rw-r--r-- contrib/llvm/include/llvm/Support/Allocator.h | 434
-rw-r--r-- contrib/llvm/include/llvm/Support/ArrayRecycler.h | 143
-rw-r--r-- contrib/llvm/include/llvm/Support/Atomic.h | 39
-rw-r--r-- contrib/llvm/include/llvm/Support/BlockFrequency.h | 78
-rw-r--r-- contrib/llvm/include/llvm/Support/BranchProbability.h | 219
-rw-r--r-- contrib/llvm/include/llvm/Support/CBindingWrapping.h | 47
-rw-r--r-- contrib/llvm/include/llvm/Support/COFF.h | 679
-rw-r--r-- contrib/llvm/include/llvm/Support/COM.h | 36
-rw-r--r-- contrib/llvm/include/llvm/Support/Capacity.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Support/Casting.h | 326
-rw-r--r-- contrib/llvm/include/llvm/Support/CodeGen.h | 95
-rw-r--r-- contrib/llvm/include/llvm/Support/CommandLine.h | 1755
-rw-r--r-- contrib/llvm/include/llvm/Support/Compiler.h | 448
-rw-r--r-- contrib/llvm/include/llvm/Support/Compression.h | 57
-rw-r--r-- contrib/llvm/include/llvm/Support/ConvertUTF.h | 268
-rw-r--r-- contrib/llvm/include/llvm/Support/CrashRecoveryContext.h | 203
-rw-r--r-- contrib/llvm/include/llvm/Support/DOTGraphTraits.h | 167
-rw-r--r-- contrib/llvm/include/llvm/Support/DataExtractor.h | 365
-rw-r--r-- contrib/llvm/include/llvm/Support/DataStream.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Support/DataTypes.h.in | 123
-rw-r--r-- contrib/llvm/include/llvm/Support/Debug.h | 96
-rw-r--r-- contrib/llvm/include/llvm/Support/Dwarf.def | 352
-rw-r--r-- contrib/llvm/include/llvm/Support/Dwarf.h | 700
-rw-r--r-- contrib/llvm/include/llvm/Support/DynamicLibrary.h | 105
-rw-r--r-- contrib/llvm/include/llvm/Support/ELF.h | 1288
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/AArch64.def | 147
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/ARM.def | 138
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/AVR.def | 40
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/Hexagon.def | 100
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/Mips.def | 117
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC.def | 123
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC64.def | 181
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/Sparc.def | 89
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/SystemZ.def | 67
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/i386.def | 47
-rw-r--r-- contrib/llvm/include/llvm/Support/ELFRelocs/x86_64.def | 44
-rw-r--r-- contrib/llvm/include/llvm/Support/Endian.h | 345
-rw-r--r-- contrib/llvm/include/llvm/Support/EndianStream.h | 64
-rw-r--r-- contrib/llvm/include/llvm/Support/Errc.h | 86
-rw-r--r-- contrib/llvm/include/llvm/Support/Errno.h | 34
-rw-r--r-- contrib/llvm/include/llvm/Support/ErrorHandling.h | 106
-rw-r--r-- contrib/llvm/include/llvm/Support/ErrorOr.h | 298
-rw-r--r-- contrib/llvm/include/llvm/Support/FileOutputBuffer.h | 89
-rw-r--r-- contrib/llvm/include/llvm/Support/FileSystem.h | 903
-rw-r--r-- contrib/llvm/include/llvm/Support/FileUtilities.h | 78
-rw-r--r-- contrib/llvm/include/llvm/Support/Format.h | 195
-rw-r--r-- contrib/llvm/include/llvm/Support/FormattedStream.h | 162
-rw-r--r-- contrib/llvm/include/llvm/Support/GCOV.h | 445
-rw-r--r-- contrib/llvm/include/llvm/Support/GenericDomTree.h | 779
-rw-r--r-- contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h | 291
-rw-r--r-- contrib/llvm/include/llvm/Support/GraphWriter.h | 363
-rw-r--r-- contrib/llvm/include/llvm/Support/Host.h | 74
-rw-r--r-- contrib/llvm/include/llvm/Support/JamCRC.h | 48
-rw-r--r-- contrib/llvm/include/llvm/Support/LEB128.h | 121
-rw-r--r-- contrib/llvm/include/llvm/Support/LineIterator.h | 88
-rw-r--r-- contrib/llvm/include/llvm/Support/Locale.h | 17
-rw-r--r-- contrib/llvm/include/llvm/Support/LockFileManager.h | 88
-rw-r--r-- contrib/llvm/include/llvm/Support/MD5.h | 70
-rw-r--r-- contrib/llvm/include/llvm/Support/MachO.h | 1675
-rw-r--r-- contrib/llvm/include/llvm/Support/ManagedStatic.h | 111
-rw-r--r-- contrib/llvm/include/llvm/Support/MathExtras.h | 723
-rw-r--r-- contrib/llvm/include/llvm/Support/Memory.h | 186
-rw-r--r-- contrib/llvm/include/llvm/Support/MemoryBuffer.h | 173
-rw-r--r-- contrib/llvm/include/llvm/Support/MemoryObject.h | 68
-rw-r--r-- contrib/llvm/include/llvm/Support/MipsABIFlags.h | 102
-rw-r--r-- contrib/llvm/include/llvm/Support/Mutex.h | 158
-rw-r--r-- contrib/llvm/include/llvm/Support/MutexGuard.h | 41
-rw-r--r-- contrib/llvm/include/llvm/Support/OnDiskHashTable.h | 600
-rw-r--r-- contrib/llvm/include/llvm/Support/Options.h | 120
-rw-r--r-- contrib/llvm/include/llvm/Support/Path.h | 437
-rw-r--r-- contrib/llvm/include/llvm/Support/PluginLoader.h | 37
-rw-r--r-- contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h | 92
-rw-r--r-- contrib/llvm/include/llvm/Support/PrettyStackTrace.h | 83
-rw-r--r-- contrib/llvm/include/llvm/Support/Printable.h | 52
-rw-r--r-- contrib/llvm/include/llvm/Support/Process.h | 190
-rw-r--r-- contrib/llvm/include/llvm/Support/Program.h | 192
-rw-r--r-- contrib/llvm/include/llvm/Support/RWMutex.h | 177
-rw-r--r-- contrib/llvm/include/llvm/Support/RandomNumberGenerator.h | 58
-rw-r--r-- contrib/llvm/include/llvm/Support/Recycler.h | 114
-rw-r--r-- contrib/llvm/include/llvm/Support/RecyclingAllocator.h | 77
-rw-r--r-- contrib/llvm/include/llvm/Support/Regex.h | 105
-rw-r--r-- contrib/llvm/include/llvm/Support/Registry.h | 227
-rw-r--r-- contrib/llvm/include/llvm/Support/RegistryParser.h | 55
-rw-r--r-- contrib/llvm/include/llvm/Support/SMLoc.h | 63
-rw-r--r-- contrib/llvm/include/llvm/Support/SaveAndRestore.h | 49
-rw-r--r-- contrib/llvm/include/llvm/Support/ScaledNumber.h | 899
-rw-r--r-- contrib/llvm/include/llvm/Support/Signals.h | 71
-rw-r--r-- contrib/llvm/include/llvm/Support/Solaris.h | 49
-rw-r--r-- contrib/llvm/include/llvm/Support/SourceMgr.h | 285
-rw-r--r-- contrib/llvm/include/llvm/Support/SpecialCaseList.h | 104
-rw-r--r-- contrib/llvm/include/llvm/Support/StreamingMemoryObject.h | 95
-rw-r--r-- contrib/llvm/include/llvm/Support/StringPool.h | 138
-rw-r--r-- contrib/llvm/include/llvm/Support/StringSaver.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Support/SwapByteOrder.h | 125
-rw-r--r-- contrib/llvm/include/llvm/Support/SystemUtils.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Support/TargetParser.h | 145
-rw-r--r-- contrib/llvm/include/llvm/Support/TargetRegistry.h | 1188
-rw-r--r-- contrib/llvm/include/llvm/Support/TargetSelect.h | 165
-rw-r--r-- contrib/llvm/include/llvm/Support/ThreadLocal.h | 63
-rw-r--r-- contrib/llvm/include/llvm/Support/ThreadPool.h | 136
-rw-r--r-- contrib/llvm/include/llvm/Support/Threading.h | 39
-rw-r--r-- contrib/llvm/include/llvm/Support/TimeValue.h | 386
-rw-r--r-- contrib/llvm/include/llvm/Support/Timer.h | 196
-rw-r--r-- contrib/llvm/include/llvm/Support/ToolOutputFile.h | 63
-rw-r--r-- contrib/llvm/include/llvm/Support/TrailingObjects.h | 349
-rw-r--r-- contrib/llvm/include/llvm/Support/Unicode.h | 67
-rw-r--r-- contrib/llvm/include/llvm/Support/UnicodeCharRanges.h | 108
-rw-r--r-- contrib/llvm/include/llvm/Support/UniqueLock.h | 67
-rw-r--r-- contrib/llvm/include/llvm/Support/Valgrind.h | 34
-rw-r--r-- contrib/llvm/include/llvm/Support/Watchdog.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Support/Win64EH.h | 147
-rw-r--r-- contrib/llvm/include/llvm/Support/WindowsError.h | 19
-rw-r--r-- contrib/llvm/include/llvm/Support/YAMLParser.h | 588
-rw-r--r-- contrib/llvm/include/llvm/Support/YAMLTraits.h | 1394
-rw-r--r-- contrib/llvm/include/llvm/Support/circular_raw_ostream.h | 156
-rw-r--r-- contrib/llvm/include/llvm/Support/raw_os_ostream.h | 42
-rw-r--r-- contrib/llvm/include/llvm/Support/raw_ostream.h | 530
-rw-r--r-- contrib/llvm/include/llvm/Support/thread.h | 66
-rw-r--r-- contrib/llvm/include/llvm/Support/type_traits.h | 109
-rw-r--r-- contrib/llvm/include/llvm/TableGen/Error.h | 39
-rw-r--r-- contrib/llvm/include/llvm/TableGen/Main.h | 28
-rw-r--r-- contrib/llvm/include/llvm/TableGen/Record.h | 1592
-rw-r--r-- contrib/llvm/include/llvm/TableGen/SetTheory.h | 142
-rw-r--r-- contrib/llvm/include/llvm/TableGen/StringMatcher.h | 54
-rw-r--r-- contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h | 83
-rw-r--r-- contrib/llvm/include/llvm/TableGen/TableGenBackend.h | 28
-rw-r--r-- contrib/llvm/include/llvm/Target/CostTable.h | 71
-rw-r--r-- contrib/llvm/include/llvm/Target/Target.td | 1237
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetCallingConv.h | 202
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetCallingConv.td | 177
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetFrameLowering.h | 318
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetInstrInfo.h | 1440
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h | 65
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetItinerary.td | 152
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetLowering.h | 2879
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h | 194
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetMachine.h | 311
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetOpcodes.h | 139
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetOptions.h | 292
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetRecip.h | 73
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetRegisterInfo.h | 982
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSchedule.td | 419
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSelectionDAG.td | 1150
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h | 158
-rw-r--r-- contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h | 197
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO.h | 229
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h | 35
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h | 43
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h | 94
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/LowerBitSets.h | 201
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h | 179
-rw-r--r-- contrib/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h | 34
-rw-r--r-- contrib/llvm/include/llvm/Transforms/InstCombine/InstCombine.h | 46
-rw-r--r-- contrib/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h | 116
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Instrumentation.h | 177
-rw-r--r-- contrib/llvm/include/llvm/Transforms/ObjCARC.h | 48
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar.h | 491
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar/ADCE.h | 38
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h | 39
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h | 40
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar/SROA.h | 129
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h | 46
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h | 64
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h | 297
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h | 116
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h | 34
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/Cloning.h | 266
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h | 65
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h | 126
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/CtorUtils.h | 32
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/GlobalStatus.h | 82
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h | 73
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/Local.h | 335
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h | 379
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/LoopVersioning.h | 114
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h | 64
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h | 50
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h | 178
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h | 460
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h | 71
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h | 174
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SplitModule.h | 43
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h | 152
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h | 52
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h | 45
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h | 135
-rw-r--r-- contrib/llvm/include/llvm/Transforms/Vectorize.h | 144
-rw-r--r-- contrib/llvm/include/llvm/module.modulemap | 231
-rw-r--r-- contrib/llvm/include/llvm/module.modulemap.build | 5
789 files changed, 231424 insertions, 0 deletions
diff --git a/contrib/llvm/include/llvm-c/Analysis.h b/contrib/llvm/include/llvm-c/Analysis.h
new file mode 100644
index 0000000..36dcb89
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Analysis.h
@@ -0,0 +1,65 @@
+/*===-- llvm-c/Analysis.h - Analysis Library C Interface --------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This header declares the C interface to libLLVMAnalysis.a, which           *|
+|* implements various analyses of the LLVM IR.                                *|
+|*                                                                            *|
+|* Many exotic languages can interoperate with C code but have a harder time  *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages.                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_ANALYSIS_H
+#define LLVM_C_ANALYSIS_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCAnalysis Analysis
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+typedef enum {
+ LLVMAbortProcessAction, /* verifier will print to stderr and abort() */
+ LLVMPrintMessageAction, /* verifier will print to stderr and return 1 */
+ LLVMReturnStatusAction /* verifier will just return 1 */
+} LLVMVerifierFailureAction;
+
+
+/* Verifies that a module is valid, taking the specified action if not.
+ Optionally returns a human-readable description of any invalid constructs.
+ OutMessage must be disposed with LLVMDisposeMessage. */
+LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
+ char **OutMessage);
+
+/* Verifies that a single function is valid, taking the specified action. Useful
+ for debugging. */
+LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action);
+
+/* Open up a ghostview window that displays the CFG of the current function.
+ Useful for debugging. */
+void LLVMViewFunctionCFG(LLVMValueRef Fn);
+void LLVMViewFunctionCFGOnly(LLVMValueRef Fn);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
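
As a usage sketch (an editor's illustration, not part of the diff): the verifier entry point above combines with module-construction and message-disposal routines declared in Core.h. The module here is a hypothetical empty one; any LLVMModuleRef would be checked the same way.

#include "llvm-c/Analysis.h"
#include "llvm-c/Core.h"
#include <stdio.h>

int main(void) {
  LLVMModuleRef M = LLVMModuleCreateWithName("demo"); /* hypothetical module */
  char *Err = NULL;
  /* With LLVMReturnStatusAction the call returns nonzero if M is invalid. */
  if (LLVMVerifyModule(M, LLVMReturnStatusAction, &Err))
    fprintf(stderr, "invalid module: %s\n", Err);
  LLVMDisposeMessage(Err); /* per the comment above, dispose the message */
  LLVMDisposeModule(M);
  return 0;
}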
diff --git a/contrib/llvm/include/llvm-c/BitReader.h b/contrib/llvm/include/llvm-c/BitReader.h
new file mode 100644
index 0000000..d1fc302
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/BitReader.h
@@ -0,0 +1,85 @@
+/*===-- llvm-c/BitReader.h - BitReader Library C Interface ------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This header declares the C interface to libLLVMBitReader.a, which          *|
+|* implements input of the LLVM bitcode format.                               *|
+|*                                                                            *|
+|* Many exotic languages can interoperate with C code but have a harder time  *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages.                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_BITREADER_H
+#define LLVM_C_BITREADER_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCBitReader Bit Reader
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+/* Builds a module from the bitcode in the specified memory buffer, returning a
+ reference to the module via the OutModule parameter. Returns 0 on success.
+ Optionally returns a human-readable error message via OutMessage.
+
+ This is deprecated. Use LLVMParseBitcode2. */
+LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule,
+ char **OutMessage);
+
+/* Builds a module from the bitcode in the specified memory buffer, returning a
+ reference to the module via the OutModule parameter. Returns 0 on success. */
+LLVMBool LLVMParseBitcode2(LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule);
+
+/* This is deprecated. Use LLVMParseBitcodeInContext2. */
+LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule, char **OutMessage);
+
+LLVMBool LLVMParseBitcodeInContext2(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule);
+
+/** Reads a module from the given memory buffer, returning via the OutM
+    parameter a module which performs lazy deserialization. Returns 0 on
+    success. Optionally returns a human-readable error message via OutMessage.
+    This is deprecated. Use LLVMGetBitcodeModuleInContext2. */
+LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutM, char **OutMessage);
+
+/** Reads a module from the given memory buffer, returning via the OutM
+ * parameter a module which performs lazy deserialization. Returns 0 on success. */
+LLVMBool LLVMGetBitcodeModuleInContext2(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutM);
+
+/* This is deprecated. Use LLVMGetBitcodeModule2. */
+LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
+ char **OutMessage);
+
+LLVMBool LLVMGetBitcodeModule2(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
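
A read-side sketch along the same lines (illustrative, not part of the diff; the input file name is hypothetical): load a bitcode file into a memory buffer with LLVMCreateMemoryBufferWithContentsOfFile from Core.h, then parse it with the non-deprecated LLVMParseBitcode2.

#include "llvm-c/BitReader.h"
#include "llvm-c/Core.h"
#include <stdio.h>

int main(void) {
  LLVMMemoryBufferRef Buf;
  LLVMModuleRef M;
  char *Err = NULL;
  if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Err)) {
    fprintf(stderr, "cannot read input.bc: %s\n", Err);
    LLVMDisposeMessage(Err);
    return 1;
  }
  /* The "2" variant reports failure only through its return value. */
  if (LLVMParseBitcode2(Buf, &M)) {
    fprintf(stderr, "cannot parse bitcode\n");
    LLVMDisposeMemoryBuffer(Buf);
    return 1;
  }
  LLVMDisposeMemoryBuffer(Buf); /* eager parsing leaves Buf owned by the caller */
  LLVMDisposeModule(M);
  return 0;
}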
diff --git a/contrib/llvm/include/llvm-c/BitWriter.h b/contrib/llvm/include/llvm-c/BitWriter.h
new file mode 100644
index 0000000..797d031
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/BitWriter.h
@@ -0,0 +1,59 @@
+/*===-- llvm-c/BitWriter.h - BitWriter Library C Interface ------*- C++ -*-===*\
+|*                                                                            *|
+|*                     The LLVM Compiler Infrastructure                       *|
+|*                                                                            *|
+|* This file is distributed under the University of Illinois Open Source      *|
+|* License. See LICENSE.TXT for details.                                      *|
+|*                                                                            *|
+|*===----------------------------------------------------------------------===*|
+|*                                                                            *|
+|* This header declares the C interface to libLLVMBitWriter.a, which          *|
+|* implements output of the LLVM bitcode format.                              *|
+|*                                                                            *|
+|* Many exotic languages can interoperate with C code but have a harder time  *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages.                                           *|
+|*                                                                            *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_BITWRITER_H
+#define LLVM_C_BITWRITER_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCBitWriter Bit Writer
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+/*===-- Operations on modules ---------------------------------------------===*/
+
+/** Writes a module to the specified path. Returns 0 on success. */
+int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path);
+
+/** Writes a module to an open file descriptor. Returns 0 on success. */
+int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
+ int Unbuffered);
+
+/** Deprecated for LLVMWriteBitcodeToFD. Writes a module to an open file
+ descriptor. Returns 0 on success. Closes the Handle. */
+int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int Handle);
+
+/** Writes a module to a new memory buffer and returns it. */
+LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M);
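+
+/**
+ * A round-trip sketch (illustrative; "out.bc" is a placeholder path, M is
+ * an existing module, and LLVMDisposeMemoryBuffer is declared in
+ * llvm-c/Core.h):
+ *
+ * @code
+ *   if (LLVMWriteBitcodeToFile(M, "out.bc") != 0) {
+ *     // writing failed
+ *   }
+ *   LLVMMemoryBufferRef Buf = LLVMWriteBitcodeToMemoryBuffer(M);
+ *   // ... e.g. hand Buf to LLVMParseBitcode2, then ...
+ *   LLVMDisposeMemoryBuffer(Buf);
+ * @endcode
+ */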
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/Core.h b/contrib/llvm/include/llvm-c/Core.h
new file mode 100644
index 0000000..c8fda15
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Core.h
@@ -0,0 +1,2938 @@
+/*===-- llvm-c/Core.h - Core Library C Interface ------------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMCore.a, which implements *|
+|* the LLVM intermediate representation. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_CORE_H
+#define LLVM_C_CORE_H
+
+#include "llvm-c/ErrorHandling.h"
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMC LLVM-C: C interface to LLVM
+ *
+ * This module exposes parts of the LLVM library as a C API.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup LLVMCTransforms Transforms
+ */
+
+/**
+ * @defgroup LLVMCCore Core
+ *
+ * This module provides an interface to libLLVMCore, which implements
+ * the LLVM intermediate representation as well as other related types
+ * and utilities.
+ *
+ * Many exotic languages can interoperate with C code but have a harder time
+ * with C++ due to name mangling. So in addition to C, this interface enables
+ * tools written in such languages.
+ *
+ * @{
+ */
+
+/**
+ * @defgroup LLVMCCoreTypes Types and Enumerations
+ *
+ * @{
+ */
+
+typedef enum {
+ LLVMZExtAttribute = 1<<0,
+ LLVMSExtAttribute = 1<<1,
+ LLVMNoReturnAttribute = 1<<2,
+ LLVMInRegAttribute = 1<<3,
+ LLVMStructRetAttribute = 1<<4,
+ LLVMNoUnwindAttribute = 1<<5,
+ LLVMNoAliasAttribute = 1<<6,
+ LLVMByValAttribute = 1<<7,
+ LLVMNestAttribute = 1<<8,
+ LLVMReadNoneAttribute = 1<<9,
+ LLVMReadOnlyAttribute = 1<<10,
+ LLVMNoInlineAttribute = 1<<11,
+ LLVMAlwaysInlineAttribute = 1<<12,
+ LLVMOptimizeForSizeAttribute = 1<<13,
+ LLVMStackProtectAttribute = 1<<14,
+ LLVMStackProtectReqAttribute = 1<<15,
+ LLVMAlignment = 31<<16,
+ LLVMNoCaptureAttribute = 1<<21,
+ LLVMNoRedZoneAttribute = 1<<22,
+ LLVMNoImplicitFloatAttribute = 1<<23,
+ LLVMNakedAttribute = 1<<24,
+ LLVMInlineHintAttribute = 1<<25,
+ LLVMStackAlignment = 7<<26,
+ LLVMReturnsTwice = 1 << 29,
+ LLVMUWTable = 1 << 30,
+ LLVMNonLazyBind = 1 << 31
+
+ /* FIXME: These attributes are currently not included in the C API as
+ a temporary measure until the API/ABI impact to the C API is understood
+ and the path forward agreed upon.
+ LLVMSanitizeAddressAttribute = 1ULL << 32,
+ LLVMStackProtectStrongAttribute = 1ULL<<35,
+ LLVMColdAttribute = 1ULL << 40,
+ LLVMOptimizeNoneAttribute = 1ULL << 42,
+ LLVMInAllocaAttribute = 1ULL << 43,
+ LLVMNonNullAttribute = 1ULL << 44,
+ LLVMJumpTableAttribute = 1ULL << 45,
+ LLVMConvergentAttribute = 1ULL << 46,
+ LLVMSafeStackAttribute = 1ULL << 47,
+ */
+} LLVMAttribute;
+
+typedef enum {
+ /* Terminator Instructions */
+ LLVMRet = 1,
+ LLVMBr = 2,
+ LLVMSwitch = 3,
+ LLVMIndirectBr = 4,
+ LLVMInvoke = 5,
+ /* removed 6 due to API changes */
+ LLVMUnreachable = 7,
+
+ /* Standard Binary Operators */
+ LLVMAdd = 8,
+ LLVMFAdd = 9,
+ LLVMSub = 10,
+ LLVMFSub = 11,
+ LLVMMul = 12,
+ LLVMFMul = 13,
+ LLVMUDiv = 14,
+ LLVMSDiv = 15,
+ LLVMFDiv = 16,
+ LLVMURem = 17,
+ LLVMSRem = 18,
+ LLVMFRem = 19,
+
+ /* Logical Operators */
+ LLVMShl = 20,
+ LLVMLShr = 21,
+ LLVMAShr = 22,
+ LLVMAnd = 23,
+ LLVMOr = 24,
+ LLVMXor = 25,
+
+ /* Memory Operators */
+ LLVMAlloca = 26,
+ LLVMLoad = 27,
+ LLVMStore = 28,
+ LLVMGetElementPtr = 29,
+
+ /* Cast Operators */
+ LLVMTrunc = 30,
+ LLVMZExt = 31,
+ LLVMSExt = 32,
+ LLVMFPToUI = 33,
+ LLVMFPToSI = 34,
+ LLVMUIToFP = 35,
+ LLVMSIToFP = 36,
+ LLVMFPTrunc = 37,
+ LLVMFPExt = 38,
+ LLVMPtrToInt = 39,
+ LLVMIntToPtr = 40,
+ LLVMBitCast = 41,
+ LLVMAddrSpaceCast = 60,
+
+ /* Other Operators */
+ LLVMICmp = 42,
+ LLVMFCmp = 43,
+ LLVMPHI = 44,
+ LLVMCall = 45,
+ LLVMSelect = 46,
+ LLVMUserOp1 = 47,
+ LLVMUserOp2 = 48,
+ LLVMVAArg = 49,
+ LLVMExtractElement = 50,
+ LLVMInsertElement = 51,
+ LLVMShuffleVector = 52,
+ LLVMExtractValue = 53,
+ LLVMInsertValue = 54,
+
+ /* Atomic operators */
+ LLVMFence = 55,
+ LLVMAtomicCmpXchg = 56,
+ LLVMAtomicRMW = 57,
+
+ /* Exception Handling Operators */
+ LLVMResume = 58,
+ LLVMLandingPad = 59,
+ LLVMCleanupRet = 61,
+ LLVMCatchRet = 62,
+ LLVMCatchPad = 63,
+ LLVMCleanupPad = 64,
+ LLVMCatchSwitch = 65
+} LLVMOpcode;
+
+typedef enum {
+ LLVMVoidTypeKind, /**< type with no size */
+ LLVMHalfTypeKind, /**< 16 bit floating point type */
+ LLVMFloatTypeKind, /**< 32 bit floating point type */
+ LLVMDoubleTypeKind, /**< 64 bit floating point type */
+ LLVMX86_FP80TypeKind, /**< 80 bit floating point type (X87) */
+ LLVMFP128TypeKind, /**< 128 bit floating point type (112-bit mantissa)*/
+ LLVMPPC_FP128TypeKind, /**< 128 bit floating point type (two 64-bits) */
+ LLVMLabelTypeKind, /**< Labels */
+ LLVMIntegerTypeKind, /**< Arbitrary bit width integers */
+ LLVMFunctionTypeKind, /**< Functions */
+ LLVMStructTypeKind, /**< Structures */
+ LLVMArrayTypeKind, /**< Arrays */
+ LLVMPointerTypeKind, /**< Pointers */
+ LLVMVectorTypeKind, /**< SIMD 'packed' format, or other vector type */
+ LLVMMetadataTypeKind, /**< Metadata */
+ LLVMX86_MMXTypeKind, /**< X86 MMX */
+ LLVMTokenTypeKind /**< Tokens */
+} LLVMTypeKind;
+
+typedef enum {
+ LLVMExternalLinkage, /**< Externally visible function */
+ LLVMAvailableExternallyLinkage,
+ LLVMLinkOnceAnyLinkage, /**< Keep one copy of function when linking (inline)*/
+ LLVMLinkOnceODRLinkage, /**< Same, but only replaced by something
+ equivalent. */
+ LLVMLinkOnceODRAutoHideLinkage, /**< Obsolete */
+ LLVMWeakAnyLinkage, /**< Keep one copy of function when linking (weak) */
+ LLVMWeakODRLinkage, /**< Same, but only replaced by something
+ equivalent. */
+ LLVMAppendingLinkage, /**< Special purpose, only applies to global arrays */
+ LLVMInternalLinkage, /**< Rename collisions when linking (static
+ functions) */
+ LLVMPrivateLinkage, /**< Like Internal, but omit from symbol table */
+ LLVMDLLImportLinkage, /**< Obsolete */
+ LLVMDLLExportLinkage, /**< Obsolete */
+ LLVMExternalWeakLinkage,/**< ExternalWeak linkage description */
+ LLVMGhostLinkage, /**< Obsolete */
+ LLVMCommonLinkage, /**< Tentative definitions */
+ LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */
+ LLVMLinkerPrivateWeakLinkage /**< Like LinkerPrivate, but is weak. */
+} LLVMLinkage;
+
+typedef enum {
+ LLVMDefaultVisibility, /**< The GV is visible */
+ LLVMHiddenVisibility, /**< The GV is hidden */
+ LLVMProtectedVisibility /**< The GV is protected */
+} LLVMVisibility;
+
+typedef enum {
+ LLVMDefaultStorageClass = 0,
+ LLVMDLLImportStorageClass = 1, /**< Function to be imported from DLL. */
+ LLVMDLLExportStorageClass = 2 /**< Function to be accessible from DLL. */
+} LLVMDLLStorageClass;
+
+typedef enum {
+ LLVMCCallConv = 0,
+ LLVMFastCallConv = 8,
+ LLVMColdCallConv = 9,
+ LLVMWebKitJSCallConv = 12,
+ LLVMAnyRegCallConv = 13,
+ LLVMX86StdcallCallConv = 64,
+ LLVMX86FastcallCallConv = 65
+} LLVMCallConv;
+
+typedef enum {
+ LLVMIntEQ = 32, /**< equal */
+ LLVMIntNE, /**< not equal */
+ LLVMIntUGT, /**< unsigned greater than */
+ LLVMIntUGE, /**< unsigned greater or equal */
+ LLVMIntULT, /**< unsigned less than */
+ LLVMIntULE, /**< unsigned less or equal */
+ LLVMIntSGT, /**< signed greater than */
+ LLVMIntSGE, /**< signed greater or equal */
+ LLVMIntSLT, /**< signed less than */
+ LLVMIntSLE /**< signed less or equal */
+} LLVMIntPredicate;
+
+typedef enum {
+ LLVMRealPredicateFalse, /**< Always false (always folded) */
+ LLVMRealOEQ, /**< True if ordered and equal */
+ LLVMRealOGT, /**< True if ordered and greater than */
+ LLVMRealOGE, /**< True if ordered and greater than or equal */
+ LLVMRealOLT, /**< True if ordered and less than */
+ LLVMRealOLE, /**< True if ordered and less than or equal */
+ LLVMRealONE, /**< True if ordered and operands are unequal */
+ LLVMRealORD, /**< True if ordered (no nans) */
+ LLVMRealUNO, /**< True if unordered: isnan(X) | isnan(Y) */
+ LLVMRealUEQ, /**< True if unordered or equal */
+ LLVMRealUGT, /**< True if unordered or greater than */
+ LLVMRealUGE, /**< True if unordered, greater than, or equal */
+ LLVMRealULT, /**< True if unordered or less than */
+ LLVMRealULE, /**< True if unordered, less than, or equal */
+ LLVMRealUNE, /**< True if unordered or not equal */
+ LLVMRealPredicateTrue /**< Always true (always folded) */
+} LLVMRealPredicate;
+
+typedef enum {
+ LLVMLandingPadCatch, /**< A catch clause */
+ LLVMLandingPadFilter /**< A filter clause */
+} LLVMLandingPadClauseTy;
+
+typedef enum {
+ LLVMNotThreadLocal = 0,
+ LLVMGeneralDynamicTLSModel,
+ LLVMLocalDynamicTLSModel,
+ LLVMInitialExecTLSModel,
+ LLVMLocalExecTLSModel
+} LLVMThreadLocalMode;
+
+typedef enum {
+ LLVMAtomicOrderingNotAtomic = 0, /**< A load or store which is not atomic */
+ LLVMAtomicOrderingUnordered = 1, /**< Lowest level of atomicity, guarantees
+ somewhat sane results, lock free. */
+ LLVMAtomicOrderingMonotonic = 2, /**< guarantees that if you take all the
+ operations affecting a specific address,
+ a consistent ordering exists */
+ LLVMAtomicOrderingAcquire = 4, /**< Acquire provides a barrier of the sort
+ necessary to acquire a lock to access other
+ memory with normal loads and stores. */
+ LLVMAtomicOrderingRelease = 5, /**< Release is similar to Acquire, but with
+ a barrier of the sort necessary to release
+ a lock. */
+ LLVMAtomicOrderingAcquireRelease = 6, /**< provides both an Acquire and a
+ Release barrier (for fences and
+ operations which both read and write
+ memory). */
+ LLVMAtomicOrderingSequentiallyConsistent = 7 /**< provides Acquire semantics
+ for loads and Release
+ semantics for stores.
+ Additionally, it guarantees
+ that a total ordering exists
+ between all
+ SequentiallyConsistent
+ operations. */
+} LLVMAtomicOrdering;
+
+typedef enum {
+ LLVMAtomicRMWBinOpXchg, /**< Set the new value and return the old one */
+ LLVMAtomicRMWBinOpAdd, /**< Add a value and return the old one */
+ LLVMAtomicRMWBinOpSub, /**< Subtract a value and return the old one */
+ LLVMAtomicRMWBinOpAnd, /**< And a value and return the old one */
+ LLVMAtomicRMWBinOpNand, /**< Not-And a value and return the old one */
+ LLVMAtomicRMWBinOpOr, /**< OR a value and return the old one */
+ LLVMAtomicRMWBinOpXor, /**< Xor a value and return the old one */
+ LLVMAtomicRMWBinOpMax, /**< Sets the value if it's greater than the
+ original using a signed comparison and return
+ the old one */
+ LLVMAtomicRMWBinOpMin, /**< Sets the value if it's smaller than the
+ original using a signed comparison and return
+ the old one */
+ LLVMAtomicRMWBinOpUMax, /**< Sets the value if it's greater than the
+ original using an unsigned comparison and return
+ the old one */
+ LLVMAtomicRMWBinOpUMin /**< Sets the value if it's smaller than the
+ original using an unsigned comparison and return
+ the old one */
+} LLVMAtomicRMWBinOp;
+
+typedef enum {
+ LLVMDSError,
+ LLVMDSWarning,
+ LLVMDSRemark,
+ LLVMDSNote
+} LLVMDiagnosticSeverity;
+
+/**
+ * @}
+ */
+
+void LLVMInitializeCore(LLVMPassRegistryRef R);
+
+/** Deallocate and destroy all ManagedStatic variables.
+ @see llvm::llvm_shutdown
+ @see ManagedStatic */
+void LLVMShutdown(void);
+
+/*===-- Error handling ----------------------------------------------------===*/
+
+char *LLVMCreateMessage(const char *Message);
+void LLVMDisposeMessage(char *Message);
+
+/**
+ * @defgroup LLVMCCoreContext Contexts
+ *
+ * Contexts are execution states for the core LLVM IR system.
+ *
+ * Most types are tied to a context instance. Multiple contexts can
+ * exist simultaneously. A single context is not thread safe. However,
+ * different contexts can execute on different threads simultaneously.
+ *
+ * @{
+ */
+
+typedef void (*LLVMDiagnosticHandler)(LLVMDiagnosticInfoRef, void *);
+typedef void (*LLVMYieldCallback)(LLVMContextRef, void *);
+
+/**
+ * Create a new context.
+ *
+ * Every call to this function should be paired with a call to
+ * LLVMContextDispose() or the context will leak memory.
+ */
+LLVMContextRef LLVMContextCreate(void);
+
+/**
+ * Obtain the global context instance.
+ */
+LLVMContextRef LLVMGetGlobalContext(void);
+
+/**
+ * Set the diagnostic handler for this context.
+ */
+void LLVMContextSetDiagnosticHandler(LLVMContextRef C,
+ LLVMDiagnosticHandler Handler,
+ void *DiagnosticContext);
+
+/**
+ * Set the yield callback function for this context.
+ *
+ * @see LLVMContext::setYieldCallback()
+ */
+void LLVMContextSetYieldCallback(LLVMContextRef C, LLVMYieldCallback Callback,
+ void *OpaqueHandle);
+
+/**
+ * Destroy a context instance.
+ *
+ * This should be called for every call to LLVMContextCreate() or memory
+ * will be leaked.
+ */
+void LLVMContextDispose(LLVMContextRef C);
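+
+/**
+ * The expected create/dispose pairing, as a sketch:
+ *
+ * @code
+ *   LLVMContextRef Ctx = LLVMContextCreate();
+ *   // ... build modules, types, and values tied to Ctx ...
+ *   LLVMContextDispose(Ctx);
+ * @endcode
+ */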
+
+/**
+ * Return a string representation of the DiagnosticInfo. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see DiagnosticInfo::print()
+ */
+char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI);
+
+/**
+ * Return an enum LLVMDiagnosticSeverity.
+ *
+ * @see DiagnosticInfo::getSeverity()
+ */
+LLVMDiagnosticSeverity LLVMGetDiagInfoSeverity(LLVMDiagnosticInfoRef DI);
+
+unsigned LLVMGetMDKindIDInContext(LLVMContextRef C, const char* Name,
+ unsigned SLen);
+unsigned LLVMGetMDKindID(const char* Name, unsigned SLen);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreModule Modules
+ *
+ * Modules represent the top-level structure in an LLVM program. An LLVM
+ * module is effectively a translation unit or a collection of
+ * translation units merged together.
+ *
+ * @{
+ */
+
+/**
+ * Create a new, empty module in the global context.
+ *
+ * This is equivalent to calling LLVMModuleCreateWithNameInContext with
+ * LLVMGetGlobalContext() as the context parameter.
+ *
+ * Every invocation should be paired with LLVMDisposeModule() or memory
+ * will be leaked.
+ */
+LLVMModuleRef LLVMModuleCreateWithName(const char *ModuleID);
+
+/**
+ * Create a new, empty module in a specific context.
+ *
+ * Every invocation should be paired with LLVMDisposeModule() or memory
+ * will be leaked.
+ */
+LLVMModuleRef LLVMModuleCreateWithNameInContext(const char *ModuleID,
+ LLVMContextRef C);
+/**
+ * Return an exact copy of the specified module.
+ */
+LLVMModuleRef LLVMCloneModule(LLVMModuleRef M);
+
+/**
+ * Destroy a module instance.
+ *
+ * This must be called for every created module or memory will be
+ * leaked.
+ */
+void LLVMDisposeModule(LLVMModuleRef M);
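+
+/**
+ * A sketch of the typical module lifecycle ("demo" is a placeholder
+ * module identifier):
+ *
+ * @code
+ *   LLVMContextRef Ctx = LLVMContextCreate();
+ *   LLVMModuleRef M = LLVMModuleCreateWithNameInContext("demo", Ctx);
+ *   // ... populate M, then ...
+ *   LLVMDisposeModule(M);
+ *   LLVMContextDispose(Ctx);
+ * @endcode
+ */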
+
+/**
+ * Obtain the data layout for a module.
+ *
+ * @see Module::getDataLayout()
+ */
+const char *LLVMGetDataLayout(LLVMModuleRef M);
+
+/**
+ * Set the data layout for a module.
+ *
+ * @see Module::setDataLayout()
+ */
+void LLVMSetDataLayout(LLVMModuleRef M, const char *DataLayoutStr);
+
+/**
+ * Obtain the target triple for a module.
+ *
+ * @see Module::getTargetTriple()
+ */
+const char *LLVMGetTarget(LLVMModuleRef M);
+
+/**
+ * Set the target triple for a module.
+ *
+ * @see Module::setTargetTriple()
+ */
+void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
+
+/**
+ * Dump a representation of a module to stderr.
+ *
+ * @see Module::dump()
+ */
+void LLVMDumpModule(LLVMModuleRef M);
+
+/**
+ * Print a representation of a module to a file. The ErrorMessage needs to be
+ * disposed with LLVMDisposeMessage. Returns 0 on success, 1 otherwise.
+ *
+ * @see Module::print()
+ */
+LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
+ char **ErrorMessage);
+
+/**
+ * Return a string representation of the module. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see Module::print()
+ */
+char *LLVMPrintModuleToString(LLVMModuleRef M);
+
+/**
+ * Set inline assembly for a module.
+ *
+ * @see Module::setModuleInlineAsm()
+ */
+void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
+
+/**
+ * Obtain the context to which this module is associated.
+ *
+ * @see Module::getContext()
+ */
+LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M);
+
+/**
+ * Obtain a Type from a module by its registered name.
+ */
+LLVMTypeRef LLVMGetTypeByName(LLVMModuleRef M, const char *Name);
+
+/**
+ * Obtain the number of operands for named metadata in a module.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ */
+unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name);
+
+/**
+ * Obtain the named metadata operands for a module.
+ *
+ * The passed LLVMValueRef pointer should refer to an array of
+ * LLVMValueRef at least LLVMGetNamedMetadataNumOperands long. This
+ * array will be populated with the LLVMValueRef instances. Each
+ * instance corresponds to a llvm::MDNode.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ * @see llvm::MDNode::getOperand()
+ */
+void LLVMGetNamedMetadataOperands(LLVMModuleRef M, const char* name, LLVMValueRef *Dest);
+
+/**
+ * Add an operand to named metadata.
+ *
+ * @see llvm::Module::getNamedMetadata()
+ * @see llvm::MDNode::addOperand()
+ */
+void LLVMAddNamedMetadataOperand(LLVMModuleRef M, const char* name,
+ LLVMValueRef Val);
+
+/**
+ * Add a function to a module under a specified name.
+ *
+ * @see llvm::Function::Create()
+ */
+LLVMValueRef LLVMAddFunction(LLVMModuleRef M, const char *Name,
+ LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain a Function value from a Module by its name.
+ *
+ * The returned value corresponds to a llvm::Function value.
+ *
+ * @see llvm::Module::getFunction()
+ */
+LLVMValueRef LLVMGetNamedFunction(LLVMModuleRef M, const char *Name);
+
+/**
+ * Obtain an iterator to the first Function in a Module.
+ *
+ * @see llvm::Module::begin()
+ */
+LLVMValueRef LLVMGetFirstFunction(LLVMModuleRef M);
+
+/**
+ * Obtain an iterator to the last Function in a Module.
+ *
+ * @see llvm::Module::end()
+ */
+LLVMValueRef LLVMGetLastFunction(LLVMModuleRef M);
+
+/**
+ * Advance a Function iterator to the next Function.
+ *
+ * Returns NULL if the iterator was already at the end and there are no more
+ * functions.
+ */
+LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn);
+
+/**
+ * Decrement a Function iterator to the previous Function.
+ *
+ * Returns NULL if the iterator was already at the beginning and there are
+ * no previous functions.
+ */
+LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn);
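+
+/**
+ * The forward-iteration pattern implied by these functions, sketched
+ * (M is an existing module; LLVMGetValueName is declared later in this
+ * header, printf is from stdio.h):
+ *
+ * @code
+ *   LLVMValueRef Fn;
+ *   for (Fn = LLVMGetFirstFunction(M); Fn; Fn = LLVMGetNextFunction(Fn))
+ *     printf("%s\n", LLVMGetValueName(Fn));
+ * @endcode
+ */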
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreType Types
+ *
+ * Types represent the type of a value.
+ *
+ * Types are associated with a context instance. The context internally
+ * deduplicates types so there is only 1 instance of a specific type
+ * alive at a time. In other words, a unique type is shared among all
+ * consumers within a context.
+ *
+ * A Type in the C API corresponds to llvm::Type.
+ *
+ * Types have the following hierarchy:
+ *
+ * types:
+ * integer type
+ * real type
+ * function type
+ * sequence types:
+ * array type
+ * pointer type
+ * vector type
+ * void type
+ * label type
+ * opaque type
+ *
+ * @{
+ */
+
+/**
+ * Obtain the enumerated type of a Type instance.
+ *
+ * @see llvm::Type::getTypeID()
+ */
+LLVMTypeKind LLVMGetTypeKind(LLVMTypeRef Ty);
+
+/**
+ * Whether the type has a known size.
+ *
+ * Things that don't have a size are abstract types, labels, and void.
+ *
+ * @see llvm::Type::isSized()
+ */
+LLVMBool LLVMTypeIsSized(LLVMTypeRef Ty);
+
+/**
+ * Obtain the context to which this type instance is associated.
+ *
+ * @see llvm::Type::getContext()
+ */
+LLVMContextRef LLVMGetTypeContext(LLVMTypeRef Ty);
+
+/**
+ * Dump a representation of a type to stderr.
+ *
+ * @see llvm::Type::dump()
+ */
+void LLVMDumpType(LLVMTypeRef Val);
+
+/**
+ * Return a string representation of the type. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see llvm::Type::print()
+ */
+char *LLVMPrintTypeToString(LLVMTypeRef Val);
+
+/**
+ * @defgroup LLVMCCoreTypeInt Integer Types
+ *
+ * Functions in this section operate on integer types.
+ *
+ * @{
+ */
+
+/**
+ * Obtain an integer type from a context with specified bit width.
+ */
+LLVMTypeRef LLVMInt1TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMInt8TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMInt16TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMInt32TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMInt64TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMInt128TypeInContext(LLVMContextRef C);
+LLVMTypeRef LLVMIntTypeInContext(LLVMContextRef C, unsigned NumBits);
+
+/**
+ * Obtain an integer type from the global context with a specified bit
+ * width.
+ */
+LLVMTypeRef LLVMInt1Type(void);
+LLVMTypeRef LLVMInt8Type(void);
+LLVMTypeRef LLVMInt16Type(void);
+LLVMTypeRef LLVMInt32Type(void);
+LLVMTypeRef LLVMInt64Type(void);
+LLVMTypeRef LLVMInt128Type(void);
+LLVMTypeRef LLVMIntType(unsigned NumBits);
+unsigned LLVMGetIntTypeWidth(LLVMTypeRef IntegerTy);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeFloat Floating Point Types
+ *
+ * @{
+ */
+
+/**
+ * Obtain a 16-bit floating point type from a context.
+ */
+LLVMTypeRef LLVMHalfTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 32-bit floating point type from a context.
+ */
+LLVMTypeRef LLVMFloatTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 64-bit floating point type from a context.
+ */
+LLVMTypeRef LLVMDoubleTypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain an 80-bit floating point type (X87) from a context.
+ */
+LLVMTypeRef LLVMX86FP80TypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 128-bit floating point type (112-bit mantissa) from a
+ * context.
+ */
+LLVMTypeRef LLVMFP128TypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a 128-bit floating point type (two 64-bits) from a context.
+ */
+LLVMTypeRef LLVMPPCFP128TypeInContext(LLVMContextRef C);
+
+/**
+ * Obtain a floating point type from the global context.
+ *
+ * Each maps to the similarly named InContext function above.
+ */
+LLVMTypeRef LLVMHalfType(void);
+LLVMTypeRef LLVMFloatType(void);
+LLVMTypeRef LLVMDoubleType(void);
+LLVMTypeRef LLVMX86FP80Type(void);
+LLVMTypeRef LLVMFP128Type(void);
+LLVMTypeRef LLVMPPCFP128Type(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeFunction Function Types
+ *
+ * @{
+ */
+
+/**
+ * Obtain a function type consisting of a specified signature.
+ *
+ * The function is defined as a tuple of a return Type, a list of
+ * parameter types, and whether the function is variadic.
+ */
+LLVMTypeRef LLVMFunctionType(LLVMTypeRef ReturnType,
+ LLVMTypeRef *ParamTypes, unsigned ParamCount,
+ LLVMBool IsVarArg);
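+
+/**
+ * For example, a signature for i32 (i32, i32) might be built as in this
+ * sketch ("sum" is an illustrative name; M is assumed to be a module in
+ * the global context):
+ *
+ * @code
+ *   LLVMTypeRef Params[] = { LLVMInt32Type(), LLVMInt32Type() };
+ *   LLVMTypeRef FnTy = LLVMFunctionType(LLVMInt32Type(), Params, 2, 0);
+ *   LLVMValueRef Fn = LLVMAddFunction(M, "sum", FnTy);
+ * @endcode
+ */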
+
+/**
+ * Returns whether a function type is variadic.
+ */
+LLVMBool LLVMIsFunctionVarArg(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the Type this function Type returns.
+ */
+LLVMTypeRef LLVMGetReturnType(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the number of parameters this function accepts.
+ */
+unsigned LLVMCountParamTypes(LLVMTypeRef FunctionTy);
+
+/**
+ * Obtain the types of a function's parameters.
+ *
+ * The Dest parameter should point to a pre-allocated array of
+ * LLVMTypeRef at least LLVMCountParamTypes() large. On return, the
+ * first LLVMCountParamTypes() entries in the array will be populated
+ * with LLVMTypeRef instances.
+ *
+ * @param FunctionTy The function type to operate on.
+ * @param Dest Memory address of an array to be filled with result.
+ */
+void LLVMGetParamTypes(LLVMTypeRef FunctionTy, LLVMTypeRef *Dest);
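+
+/**
+ * A sketch of the allocate-then-fill pattern this function expects
+ * (FnTy is an existing function type; malloc/free are from stdlib.h):
+ *
+ * @code
+ *   unsigned N = LLVMCountParamTypes(FnTy);
+ *   LLVMTypeRef *Params = malloc(N * sizeof(LLVMTypeRef));
+ *   LLVMGetParamTypes(FnTy, Params);
+ *   // ... inspect Params[0] .. Params[N-1], then ...
+ *   free(Params);
+ * @endcode
+ */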
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeStruct Structure Types
+ *
+ * These functions relate to LLVMTypeRef instances that correspond to
+ * llvm::StructType instances.
+ *
+ * @see llvm::StructType
+ *
+ * @{
+ */
+
+/**
+ * Create a new structure type in a context.
+ *
+ * A structure is specified by a list of inner elements/types and
+ * whether these can be packed together.
+ *
+ * @see llvm::StructType::create()
+ */
+LLVMTypeRef LLVMStructTypeInContext(LLVMContextRef C, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed);
+
+/**
+ * Create a new structure type in the global context.
+ *
+ * @see llvm::StructType::create()
+ */
+LLVMTypeRef LLVMStructType(LLVMTypeRef *ElementTypes, unsigned ElementCount,
+ LLVMBool Packed);
+
+/**
+ * Create an empty structure in a context having a specified name.
+ *
+ * @see llvm::StructType::create()
+ */
+LLVMTypeRef LLVMStructCreateNamed(LLVMContextRef C, const char *Name);
+
+/**
+ * Obtain the name of a structure.
+ *
+ * @see llvm::StructType::getName()
+ */
+const char *LLVMGetStructName(LLVMTypeRef Ty);
+
+/**
+ * Set the contents of a structure type.
+ *
+ * @see llvm::StructType::setBody()
+ */
+void LLVMStructSetBody(LLVMTypeRef StructTy, LLVMTypeRef *ElementTypes,
+ unsigned ElementCount, LLVMBool Packed);
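+
+/**
+ * Named structs are created empty and then given a body, as in this
+ * sketch ("pair" is an illustrative name; Ctx is an existing context):
+ *
+ * @code
+ *   LLVMTypeRef Pair = LLVMStructCreateNamed(Ctx, "pair");
+ *   LLVMTypeRef Elems[] = { LLVMInt32TypeInContext(Ctx),
+ *                           LLVMInt32TypeInContext(Ctx) };
+ *   LLVMStructSetBody(Pair, Elems, 2, 0); // 0: not packed
+ * @endcode
+ */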
+
+/**
+ * Get the number of elements defined inside the structure.
+ *
+ * @see llvm::StructType::getNumElements()
+ */
+unsigned LLVMCountStructElementTypes(LLVMTypeRef StructTy);
+
+/**
+ * Get the elements within a structure.
+ *
+ * The function is passed the address of a pre-allocated array of
+ * LLVMTypeRef at least LLVMCountStructElementTypes() long. After
+ * invocation, this array will be populated with the structure's
+ * elements. The objects in the destination array will have a lifetime
+ * of the structure type itself, which is the lifetime of the context it
+ * is contained in.
+ */
+void LLVMGetStructElementTypes(LLVMTypeRef StructTy, LLVMTypeRef *Dest);
+
+/**
+ * Get the type of the element at a given index in the structure.
+ *
+ * @see llvm::StructType::getTypeAtIndex()
+ */
+LLVMTypeRef LLVMStructGetTypeAtIndex(LLVMTypeRef StructTy, unsigned i);
+
+/**
+ * Determine whether a structure is packed.
+ *
+ * @see llvm::StructType::isPacked()
+ */
+LLVMBool LLVMIsPackedStruct(LLVMTypeRef StructTy);
+
+/**
+ * Determine whether a structure is opaque.
+ *
+ * @see llvm::StructType::isOpaque()
+ */
+LLVMBool LLVMIsOpaqueStruct(LLVMTypeRef StructTy);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeSequential Sequential Types
+ *
+ * Sequential types represent "arrays" of types. This is a superclass
+ * for array, vector, and pointer types.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the type of elements within a sequential type.
+ *
+ * This works on array, vector, and pointer types.
+ *
+ * @see llvm::SequentialType::getElementType()
+ */
+LLVMTypeRef LLVMGetElementType(LLVMTypeRef Ty);
+
+/**
+ * Create a fixed size array type that refers to a specific type.
+ *
+ * The created type will exist in the context that its element type
+ * exists in.
+ *
+ * @see llvm::ArrayType::get()
+ */
+LLVMTypeRef LLVMArrayType(LLVMTypeRef ElementType, unsigned ElementCount);
+
+/**
+ * Obtain the length of an array type.
+ *
+ * This only works on types that represent arrays.
+ *
+ * @see llvm::ArrayType::getNumElements()
+ */
+unsigned LLVMGetArrayLength(LLVMTypeRef ArrayTy);
+
+/**
+ * Create a pointer type that points to a defined type.
+ *
+ * The created type will exist in the context that its pointee type
+ * exists in.
+ *
+ * @see llvm::PointerType::get()
+ */
+LLVMTypeRef LLVMPointerType(LLVMTypeRef ElementType, unsigned AddressSpace);
+
+/**
+ * Obtain the address space of a pointer type.
+ *
+ * This only works on types that represent pointers.
+ *
+ * @see llvm::PointerType::getAddressSpace()
+ */
+unsigned LLVMGetPointerAddressSpace(LLVMTypeRef PointerTy);
+
+/**
+ * Create a vector type that contains a defined type and has a specific
+ * number of elements.
+ *
+ * The created type will exist in the context that its element type
+ * exists in.
+ *
+ * @see llvm::VectorType::get()
+ */
+LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount);
+
+/**
+ * Obtain the number of elements in a vector type.
+ *
+ * This only works on types that represent vectors.
+ *
+ * @see llvm::VectorType::getNumElements()
+ */
+unsigned LLVMGetVectorSize(LLVMTypeRef VectorTy);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreTypeOther Other Types
+ *
+ * @{
+ */
+
+/**
+ * Create a void type in a context.
+ */
+LLVMTypeRef LLVMVoidTypeInContext(LLVMContextRef C);
+
+/**
+ * Create a label type in a context.
+ */
+LLVMTypeRef LLVMLabelTypeInContext(LLVMContextRef C);
+
+/**
+ * Create a X86 MMX type in a context.
+ */
+LLVMTypeRef LLVMX86MMXTypeInContext(LLVMContextRef C);
+
+/**
+ * These are similar to the above functions except they operate on the
+ * global context.
+ */
+LLVMTypeRef LLVMVoidType(void);
+LLVMTypeRef LLVMLabelType(void);
+LLVMTypeRef LLVMX86MMXType(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValues Values
+ *
+ * The bulk of LLVM's object model consists of values, which comprise a very
+ * rich type hierarchy.
+ *
+ * LLVMValueRef essentially represents llvm::Value. There is a rich
+ * hierarchy of classes within this type. Depending on the instance
+ * obtained, not all APIs are available.
+ *
+ * Callers can determine the type of an LLVMValueRef by calling the
+ * LLVMIsA* family of functions (e.g. LLVMIsAArgument()). These
+ * functions are defined by a macro, so it isn't obvious which are
+ * available in the generated Doxygen documentation. Instead, look at the
+ * source definition of LLVM_FOR_EACH_VALUE_SUBCLASS and note the list
+ * of value names given. These value names also correspond to classes in
+ * the llvm::Value hierarchy.
+ *
+ * @{
+ */
+
+#define LLVM_FOR_EACH_VALUE_SUBCLASS(macro) \
+ macro(Argument) \
+ macro(BasicBlock) \
+ macro(InlineAsm) \
+ macro(User) \
+ macro(Constant) \
+ macro(BlockAddress) \
+ macro(ConstantAggregateZero) \
+ macro(ConstantArray) \
+ macro(ConstantDataSequential) \
+ macro(ConstantDataArray) \
+ macro(ConstantDataVector) \
+ macro(ConstantExpr) \
+ macro(ConstantFP) \
+ macro(ConstantInt) \
+ macro(ConstantPointerNull) \
+ macro(ConstantStruct) \
+ macro(ConstantTokenNone) \
+ macro(ConstantVector) \
+ macro(GlobalValue) \
+ macro(GlobalAlias) \
+ macro(GlobalObject) \
+ macro(Function) \
+ macro(GlobalVariable) \
+ macro(UndefValue) \
+ macro(Instruction) \
+ macro(BinaryOperator) \
+ macro(CallInst) \
+ macro(IntrinsicInst) \
+ macro(DbgInfoIntrinsic) \
+ macro(DbgDeclareInst) \
+ macro(MemIntrinsic) \
+ macro(MemCpyInst) \
+ macro(MemMoveInst) \
+ macro(MemSetInst) \
+ macro(CmpInst) \
+ macro(FCmpInst) \
+ macro(ICmpInst) \
+ macro(ExtractElementInst) \
+ macro(GetElementPtrInst) \
+ macro(InsertElementInst) \
+ macro(InsertValueInst) \
+ macro(LandingPadInst) \
+ macro(PHINode) \
+ macro(SelectInst) \
+ macro(ShuffleVectorInst) \
+ macro(StoreInst) \
+ macro(TerminatorInst) \
+ macro(BranchInst) \
+ macro(IndirectBrInst) \
+ macro(InvokeInst) \
+ macro(ReturnInst) \
+ macro(SwitchInst) \
+ macro(UnreachableInst) \
+ macro(ResumeInst) \
+ macro(CleanupReturnInst) \
+ macro(CatchReturnInst) \
+ macro(FuncletPadInst) \
+ macro(CatchPadInst) \
+ macro(CleanupPadInst) \
+ macro(UnaryInstruction) \
+ macro(AllocaInst) \
+ macro(CastInst) \
+ macro(AddrSpaceCastInst) \
+ macro(BitCastInst) \
+ macro(FPExtInst) \
+ macro(FPToSIInst) \
+ macro(FPToUIInst) \
+ macro(FPTruncInst) \
+ macro(IntToPtrInst) \
+ macro(PtrToIntInst) \
+ macro(SExtInst) \
+ macro(SIToFPInst) \
+ macro(TruncInst) \
+ macro(UIToFPInst) \
+ macro(ZExtInst) \
+ macro(ExtractValueInst) \
+ macro(LoadInst) \
+ macro(VAArgInst)
+
+/**
+ * @defgroup LLVMCCoreValueGeneral General APIs
+ *
+ * Functions in this section work on all LLVMValueRef instances,
+ * regardless of their sub-type. They correspond to functions available
+ * on llvm::Value.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the type of a value.
+ *
+ * @see llvm::Value::getType()
+ */
+LLVMTypeRef LLVMTypeOf(LLVMValueRef Val);
+
+/**
+ * Obtain the string name of a value.
+ *
+ * @see llvm::Value::getName()
+ */
+const char *LLVMGetValueName(LLVMValueRef Val);
+
+/**
+ * Set the string name of a value.
+ *
+ * @see llvm::Value::setName()
+ */
+void LLVMSetValueName(LLVMValueRef Val, const char *Name);
+
+/**
+ * Dump a representation of a value to stderr.
+ *
+ * @see llvm::Value::dump()
+ */
+void LLVMDumpValue(LLVMValueRef Val);
+
+/**
+ * Return a string representation of the value. Use
+ * LLVMDisposeMessage to free the string.
+ *
+ * @see llvm::Value::print()
+ */
+char *LLVMPrintValueToString(LLVMValueRef Val);
+
+/**
+ * Replace all uses of a value with another one.
+ *
+ * @see llvm::Value::replaceAllUsesWith()
+ */
+void LLVMReplaceAllUsesWith(LLVMValueRef OldVal, LLVMValueRef NewVal);
+
+/**
+ * Determine whether the specified constant instance is constant.
+ */
+LLVMBool LLVMIsConstant(LLVMValueRef Val);
+
+/**
+ * Determine whether a value instance is undefined.
+ */
+LLVMBool LLVMIsUndef(LLVMValueRef Val);
+
+/**
+ * Convert value instances between types.
+ *
+ * Internally, an LLVMValueRef is "pinned" to a specific type. This
+ * series of functions allows you to cast an instance to a specific
+ * type.
+ *
+ * If the cast is not valid for the specified type, NULL is returned.
+ *
+ * @see llvm::dyn_cast_or_null<>
+ */
+#define LLVM_DECLARE_VALUE_CAST(name) \
+ LLVMValueRef LLVMIsA##name(LLVMValueRef Val);
+LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST)
+
+LLVMValueRef LLVMIsAMDNode(LLVMValueRef Val);
+LLVMValueRef LLVMIsAMDString(LLVMValueRef Val);
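+
+/**
+ * For example, a sketch of branching on whether a value Val is a
+ * function:
+ *
+ * @code
+ *   LLVMValueRef Fn = LLVMIsAFunction(Val);
+ *   if (Fn) {
+ *     // Val is a llvm::Function; function APIs may be used on Fn
+ *   }
+ * @endcode
+ */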
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueUses Usage
+ *
+ * This module defines functions that allow you to inspect the uses of a
+ * LLVMValueRef.
+ *
+ * It is possible to obtain an LLVMUseRef for any LLVMValueRef instance.
+ * Each LLVMUseRef (which corresponds to a llvm::Use instance) holds a
+ * llvm::User and llvm::Value.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the first use of a value.
+ *
+ * Uses are obtained in an iterator fashion. First, call this function
+ * to obtain a reference to the first use. Then, call LLVMGetNextUse()
+ * on that instance and all subsequently obtained instances until
+ * LLVMGetNextUse() returns NULL.
+ *
+ * @see llvm::Value::use_begin()
+ */
+LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val);
+
+/**
+ * Obtain the next use of a value.
+ *
+ * This effectively advances the iterator. It returns NULL if you are on
+ * the final use and no more are available.
+ */
+LLVMUseRef LLVMGetNextUse(LLVMUseRef U);
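+
+/**
+ * The iteration idiom described above, sketched (Val is an existing
+ * value; LLVMGetUser is declared just below):
+ *
+ * @code
+ *   LLVMUseRef U;
+ *   for (U = LLVMGetFirstUse(Val); U; U = LLVMGetNextUse(U)) {
+ *     LLVMValueRef User = LLVMGetUser(U);
+ *     // ... inspect User ...
+ *   }
+ * @endcode
+ */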
+
+/**
+ * Obtain the user value for a use.
+ *
+ * The returned value corresponds to a llvm::User type.
+ *
+ * @see llvm::Use::getUser()
+ */
+LLVMValueRef LLVMGetUser(LLVMUseRef U);
+
+/**
+ * Obtain the value this use corresponds to.
+ *
+ * @see llvm::Use::get().
+ */
+LLVMValueRef LLVMGetUsedValue(LLVMUseRef U);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueUser User value
+ *
+ * Functions in this group pertain to LLVMValueRef instances that descend
+ * from llvm::User. This includes constants, instructions, and
+ * operators.
+ *
+ * @{
+ */
+
+/**
+ * Obtain an operand at a specific index in a llvm::User value.
+ *
+ * @see llvm::User::getOperand()
+ */
+LLVMValueRef LLVMGetOperand(LLVMValueRef Val, unsigned Index);
+
+/**
+ * Obtain the use of an operand at a specific index in a llvm::User value.
+ *
+ * @see llvm::User::getOperandUse()
+ */
+LLVMUseRef LLVMGetOperandUse(LLVMValueRef Val, unsigned Index);
+
+/**
+ * Set an operand at a specific index in a llvm::User value.
+ *
+ * @see llvm::User::setOperand()
+ */
+void LLVMSetOperand(LLVMValueRef User, unsigned Index, LLVMValueRef Val);
+
+/**
+ * Obtain the number of operands in a llvm::User value.
+ *
+ * @see llvm::User::getNumOperands()
+ */
+int LLVMGetNumOperands(LLVMValueRef Val);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstant Constants
+ *
+ * This section contains APIs for interacting with LLVMValueRef that
+ * correspond to llvm::Constant instances.
+ *
+ * These functions will work for any LLVMValueRef in the llvm::Constant
+ * class hierarchy.
+ *
+ * @{
+ */
+
+/**
+ * Obtain a constant value referring to the null instance of a type.
+ *
+ * @see llvm::Constant::getNullValue()
+ */
+LLVMValueRef LLVMConstNull(LLVMTypeRef Ty); /* all zeroes */
+
+/**
+ * Obtain a constant value referring to the instance of a type
+ * consisting of all ones.
+ *
+ * This is only valid for integer types.
+ *
+ * @see llvm::Constant::getAllOnesValue()
+ */
+LLVMValueRef LLVMConstAllOnes(LLVMTypeRef Ty);
+
+/**
+ * Obtain a constant value referring to an undefined value of a type.
+ *
+ * @see llvm::UndefValue::get()
+ */
+LLVMValueRef LLVMGetUndef(LLVMTypeRef Ty);
+
+/**
+ * Determine whether a value instance is null.
+ *
+ * @see llvm::Constant::isNullValue()
+ */
+LLVMBool LLVMIsNull(LLVMValueRef Val);
+
+/**
+ * Obtain a constant that is a constant pointer pointing to NULL for a
+ * specified type.
+ */
+LLVMValueRef LLVMConstPointerNull(LLVMTypeRef Ty);
+
+/**
+ * @defgroup LLVMCCoreValueConstantScalar Scalar constants
+ *
+ * Functions in this group model LLVMValueRef instances that correspond
+ * to constants referring to scalar types.
+ *
+ * For integer types, the LLVMTypeRef parameter should correspond to a
+ * llvm::IntegerType instance and the returned LLVMValueRef will
+ * correspond to a llvm::ConstantInt.
+ *
+ * For floating point types, the returned LLVMValueRef corresponds to a
+ * llvm::ConstantFP.
+ *
+ * @{
+ */
+
+/**
+ * Obtain a constant value for an integer type.
+ *
+ * The returned value corresponds to a llvm::ConstantInt.
+ *
+ * @see llvm::ConstantInt::get()
+ *
+ * @param IntTy Integer type to obtain value of.
+ * @param N The value the returned instance should refer to.
+ * @param SignExtend Whether to sign extend the produced value.
+ */
+LLVMValueRef LLVMConstInt(LLVMTypeRef IntTy, unsigned long long N,
+ LLVMBool SignExtend);
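+
+/**
+ * For example, this sketch builds the 32-bit constants 42 and -1 (the
+ * latter sign extended from the given word; global-context types are
+ * used for brevity):
+ *
+ * @code
+ *   LLVMValueRef FortyTwo = LLVMConstInt(LLVMInt32Type(), 42, 0);
+ *   LLVMValueRef MinusOne = LLVMConstInt(LLVMInt32Type(), -1, 1);
+ * @endcode
+ */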
+
+/**
+ * Obtain a constant value for an integer of arbitrary precision.
+ *
+ * @see llvm::ConstantInt::get()
+ */
+LLVMValueRef LLVMConstIntOfArbitraryPrecision(LLVMTypeRef IntTy,
+ unsigned NumWords,
+ const uint64_t Words[]);
+
+/**
+ * Obtain a constant value for an integer parsed from a string.
+ *
+ * A similar API, LLVMConstIntOfStringAndSize is also available. If the
+ * string's length is available, it is preferred to call that function
+ * instead.
+ *
+ * @see llvm::ConstantInt::get()
+ */
+LLVMValueRef LLVMConstIntOfString(LLVMTypeRef IntTy, const char *Text,
+ uint8_t Radix);
+
+/**
+ * Obtain a constant value for an integer parsed from a string with
+ * specified length.
+ *
+ * @see llvm::ConstantInt::get()
+ */
+LLVMValueRef LLVMConstIntOfStringAndSize(LLVMTypeRef IntTy, const char *Text,
+ unsigned SLen, uint8_t Radix);
+
+/**
+ * Obtain a constant value referring to a double floating point value.
+ */
+LLVMValueRef LLVMConstReal(LLVMTypeRef RealTy, double N);
+
+/**
+ * Obtain a constant for a floating point value parsed from a string.
+ *
+ * A similar API, LLVMConstRealOfStringAndSize is also available. It
+ * should be used if the input string's length is known.
+ */
+LLVMValueRef LLVMConstRealOfString(LLVMTypeRef RealTy, const char *Text);
+
+/**
+ * Obtain a constant for a floating point value parsed from a string.
+ */
+LLVMValueRef LLVMConstRealOfStringAndSize(LLVMTypeRef RealTy, const char *Text,
+ unsigned SLen);
+
+/**
+ * Obtain the zero extended value for an integer constant value.
+ *
+ * @see llvm::ConstantInt::getZExtValue()
+ */
+unsigned long long LLVMConstIntGetZExtValue(LLVMValueRef ConstantVal);
+
+/**
+ * Obtain the sign extended value for an integer constant value.
+ *
+ * @see llvm::ConstantInt::getSExtValue()
+ */
+long long LLVMConstIntGetSExtValue(LLVMValueRef ConstantVal);
+
+/**
+ * Obtain the double value for a floating point constant value.
+ * losesInfo indicates if some precision was lost in the conversion.
+ *
+ * @see llvm::ConstantFP::getDoubleValue
+ */
+double LLVMConstRealGetDouble(LLVMValueRef ConstantVal, LLVMBool *losesInfo);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantComposite Composite Constants
+ *
+ * Functions in this group operate on composite constants.
+ *
+ * @{
+ */
+
+/**
+ * Create a ConstantDataSequential and initialize it with a string.
+ *
+ * @see llvm::ConstantDataArray::getString()
+ */
+LLVMValueRef LLVMConstStringInContext(LLVMContextRef C, const char *Str,
+ unsigned Length, LLVMBool DontNullTerminate);
+
+/**
+ * Create a ConstantDataSequential with string content in the global context.
+ *
+ * This is the same as LLVMConstStringInContext except it operates on the
+ * global context.
+ *
+ * @see LLVMConstStringInContext()
+ * @see llvm::ConstantDataArray::getString()
+ */
+LLVMValueRef LLVMConstString(const char *Str, unsigned Length,
+ LLVMBool DontNullTerminate);
+
+/**
+ * Returns true if the specified constant is an array of i8.
+ *
+ * @see ConstantDataSequential::getAsString()
+ */
+LLVMBool LLVMIsConstantString(LLVMValueRef c);
+
+/**
+ * Get the given constant data sequential as a string. The length of the
+ * returned string is stored via the out parameter.
+ *
+ * @see ConstantDataSequential::getAsString()
+ */
+const char *LLVMGetAsString(LLVMValueRef c, size_t* out);
+
+/**
+ * Create an anonymous ConstantStruct with the specified values.
+ *
+ * @see llvm::ConstantStruct::getAnon()
+ */
+LLVMValueRef LLVMConstStructInContext(LLVMContextRef C,
+ LLVMValueRef *ConstantVals,
+ unsigned Count, LLVMBool Packed);
+
+/**
+ * Create a ConstantStruct in the global Context.
+ *
+ * This is the same as LLVMConstStructInContext except it operates on the
+ * global Context.
+ *
+ * @see LLVMConstStructInContext()
+ */
+LLVMValueRef LLVMConstStruct(LLVMValueRef *ConstantVals, unsigned Count,
+ LLVMBool Packed);
+
+/**
+ * Create a ConstantArray from values.
+ *
+ * @see llvm::ConstantArray::get()
+ */
+LLVMValueRef LLVMConstArray(LLVMTypeRef ElementTy,
+ LLVMValueRef *ConstantVals, unsigned Length);
+
+/**
+ * Create a non-anonymous ConstantStruct from values.
+ *
+ * @see llvm::ConstantStruct::get()
+ */
+LLVMValueRef LLVMConstNamedStruct(LLVMTypeRef StructTy,
+ LLVMValueRef *ConstantVals,
+ unsigned Count);
+
+/**
+ * Get an element at specified index as a constant.
+ *
+ * @see ConstantDataSequential::getElementAsConstant()
+ */
+LLVMValueRef LLVMGetElementAsConstant(LLVMValueRef c, unsigned idx);
+
+/**
+ * Create a ConstantVector from values.
+ *
+ * @see llvm::ConstantVector::get()
+ */
+LLVMValueRef LLVMConstVector(LLVMValueRef *ScalarConstantVals, unsigned Size);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantExpressions Constant Expressions
+ *
+ * Functions in this group correspond to APIs on llvm::ConstantExpr.
+ *
+ * @see llvm::ConstantExpr.
+ *
+ * @{
+ */
+LLVMOpcode LLVMGetConstOpcode(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMAlignOf(LLVMTypeRef Ty);
+LLVMValueRef LLVMSizeOf(LLVMTypeRef Ty);
+LLVMValueRef LLVMConstNeg(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMConstNSWNeg(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMConstNUWNeg(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMConstFNeg(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMConstNot(LLVMValueRef ConstantVal);
+LLVMValueRef LLVMConstAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNSWAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNUWAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFAdd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNSWSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNUWSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFSub(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNSWMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstNUWMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFMul(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstUDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstExactSDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFDiv(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstURem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstSRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFRem(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstAnd(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstOr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstXor(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstICmp(LLVMIntPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstFCmp(LLVMRealPredicate Predicate,
+ LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstShl(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstLShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstAShr(LLVMValueRef LHSConstant, LLVMValueRef RHSConstant);
+LLVMValueRef LLVMConstGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices, unsigned NumIndices);
+LLVMValueRef LLVMConstInBoundsGEP(LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices);
+LLVMValueRef LLVMConstTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstSExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstZExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstFPTrunc(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstFPExt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstUIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstSIToFP(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstFPToUI(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstFPToSI(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstPtrToInt(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstIntToPtr(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstBitCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstAddrSpaceCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstZExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType);
+LLVMValueRef LLVMConstSExtOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType);
+LLVMValueRef LLVMConstTruncOrBitCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType);
+LLVMValueRef LLVMConstPointerCast(LLVMValueRef ConstantVal,
+ LLVMTypeRef ToType);
+LLVMValueRef LLVMConstIntCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType,
+ LLVMBool isSigned);
+LLVMValueRef LLVMConstFPCast(LLVMValueRef ConstantVal, LLVMTypeRef ToType);
+LLVMValueRef LLVMConstSelect(LLVMValueRef ConstantCondition,
+ LLVMValueRef ConstantIfTrue,
+ LLVMValueRef ConstantIfFalse);
+LLVMValueRef LLVMConstExtractElement(LLVMValueRef VectorConstant,
+ LLVMValueRef IndexConstant);
+LLVMValueRef LLVMConstInsertElement(LLVMValueRef VectorConstant,
+ LLVMValueRef ElementValueConstant,
+ LLVMValueRef IndexConstant);
+LLVMValueRef LLVMConstShuffleVector(LLVMValueRef VectorAConstant,
+ LLVMValueRef VectorBConstant,
+ LLVMValueRef MaskConstant);
+LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList,
+ unsigned NumIdx);
+LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant,
+ LLVMValueRef ElementValueConstant,
+ unsigned *IdxList, unsigned NumIdx);
+LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty,
+ const char *AsmString, const char *Constraints,
+ LLVMBool HasSideEffects, LLVMBool IsAlignStack);
+LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB);
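+
+/**
+ * Constant expressions are built from other constants and typically fold
+ * at construction time. A sketch, using global-context types:
+ *
+ * @code
+ *   LLVMValueRef Two = LLVMConstInt(LLVMInt32Type(), 2, 0);
+ *   LLVMValueRef Three = LLVMConstInt(LLVMInt32Type(), 3, 0);
+ *   LLVMValueRef Five = LLVMConstAdd(Two, Three); // folds to i32 5
+ * @endcode
+ */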
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueConstantGlobals Global Values
+ *
+ * This group contains functions that operate on global values. Functions in
+ * this group relate to functions in the llvm::GlobalValue class tree.
+ *
+ * @see llvm::GlobalValue
+ *
+ * @{
+ */
+
+LLVMModuleRef LLVMGetGlobalParent(LLVMValueRef Global);
+LLVMBool LLVMIsDeclaration(LLVMValueRef Global);
+LLVMLinkage LLVMGetLinkage(LLVMValueRef Global);
+void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage);
+const char *LLVMGetSection(LLVMValueRef Global);
+void LLVMSetSection(LLVMValueRef Global, const char *Section);
+LLVMVisibility LLVMGetVisibility(LLVMValueRef Global);
+void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz);
+LLVMDLLStorageClass LLVMGetDLLStorageClass(LLVMValueRef Global);
+void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class);
+LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global);
+void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr);
+
+/**
+ * @defgroup LLVMCCoreValueWithAlignment Values with alignment
+ *
+ * Functions in this group only apply to values with alignment, i.e.
+ * global variables, load and store instructions.
+ */
+
+/**
+ * Obtain the preferred alignment of the value.
+ * @see llvm::AllocaInst::getAlignment()
+ * @see llvm::LoadInst::getAlignment()
+ * @see llvm::StoreInst::getAlignment()
+ * @see llvm::GlobalValue::getAlignment()
+ */
+unsigned LLVMGetAlignment(LLVMValueRef V);
+
+/**
+ * Set the preferred alignment of the value.
+ * @see llvm::AllocaInst::setAlignment()
+ * @see llvm::LoadInst::setAlignment()
+ * @see llvm::StoreInst::setAlignment()
+ * @see llvm::GlobalValue::setAlignment()
+ */
+void LLVMSetAlignment(LLVMValueRef V, unsigned Bytes);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCoreValueConstantGlobalVariable Global Variables
+ *
+ * This group contains functions that operate on global variable values.
+ *
+ * @see llvm::GlobalVariable
+ *
+ * @{
+ */
+LLVMValueRef LLVMAddGlobal(LLVMModuleRef M, LLVMTypeRef Ty, const char *Name);
+LLVMValueRef LLVMAddGlobalInAddressSpace(LLVMModuleRef M, LLVMTypeRef Ty,
+ const char *Name,
+ unsigned AddressSpace);
+LLVMValueRef LLVMGetNamedGlobal(LLVMModuleRef M, const char *Name);
+LLVMValueRef LLVMGetFirstGlobal(LLVMModuleRef M);
+LLVMValueRef LLVMGetLastGlobal(LLVMModuleRef M);
+LLVMValueRef LLVMGetNextGlobal(LLVMValueRef GlobalVar);
+LLVMValueRef LLVMGetPreviousGlobal(LLVMValueRef GlobalVar);
+void LLVMDeleteGlobal(LLVMValueRef GlobalVar);
+LLVMValueRef LLVMGetInitializer(LLVMValueRef GlobalVar);
+void LLVMSetInitializer(LLVMValueRef GlobalVar, LLVMValueRef ConstantVal);
+LLVMBool LLVMIsThreadLocal(LLVMValueRef GlobalVar);
+void LLVMSetThreadLocal(LLVMValueRef GlobalVar, LLVMBool IsThreadLocal);
+LLVMBool LLVMIsGlobalConstant(LLVMValueRef GlobalVar);
+void LLVMSetGlobalConstant(LLVMValueRef GlobalVar, LLVMBool IsConstant);
+LLVMThreadLocalMode LLVMGetThreadLocalMode(LLVMValueRef GlobalVar);
+void LLVMSetThreadLocalMode(LLVMValueRef GlobalVar, LLVMThreadLocalMode Mode);
+LLVMBool LLVMIsExternallyInitialized(LLVMValueRef GlobalVar);
+void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCoreValueConstantGlobalAlias Global Aliases
+ *
+ * This group contains function that operate on global alias values.
+ *
+ * @see llvm::GlobalAlias
+ *
+ * @{
+ */
+LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
+ const char *Name);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueFunction Function values
+ *
+ * Functions in this group operate on LLVMValueRef instances that
+ * correspond to llvm::Function instances.
+ *
+ * @see llvm::Function
+ *
+ * @{
+ */
+
+/**
+ * Remove a function from its containing module and delete it.
+ *
+ * @see llvm::Function::eraseFromParent()
+ */
+void LLVMDeleteFunction(LLVMValueRef Fn);
+
+/**
+ * Obtain the personality function attached to the function.
+ *
+ * @see llvm::Function::getPersonalityFn()
+ */
+LLVMValueRef LLVMGetPersonalityFn(LLVMValueRef Fn);
+
+/**
+ * Set the personality function attached to the function.
+ *
+ * @see llvm::Function::setPersonalityFn()
+ */
+void LLVMSetPersonalityFn(LLVMValueRef Fn, LLVMValueRef PersonalityFn);
+
+/**
+ * Obtain the ID number from a function instance.
+ *
+ * @see llvm::Function::getIntrinsicID()
+ */
+unsigned LLVMGetIntrinsicID(LLVMValueRef Fn);
+
+/**
+ * Obtain the calling convention of a function.
+ *
+ * The returned value corresponds to the LLVMCallConv enumeration.
+ *
+ * @see llvm::Function::getCallingConv()
+ */
+unsigned LLVMGetFunctionCallConv(LLVMValueRef Fn);
+
+/**
+ * Set the calling convention of a function.
+ *
+ * @see llvm::Function::setCallingConv()
+ *
+ * @param Fn Function to operate on
+ * @param CC LLVMCallConv to set calling convention to
+ */
+void LLVMSetFunctionCallConv(LLVMValueRef Fn, unsigned CC);
+
+/**
+ * Obtain the name of the garbage collector to use during code
+ * generation.
+ *
+ * @see llvm::Function::getGC()
+ */
+const char *LLVMGetGC(LLVMValueRef Fn);
+
+/**
+ * Define the garbage collector to use during code generation.
+ *
+ * @see llvm::Function::setGC()
+ */
+void LLVMSetGC(LLVMValueRef Fn, const char *Name);
+
+/**
+ * Add an attribute to a function.
+ *
+ * @see llvm::Function::addAttribute()
+ */
+void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
+
+/**
+ * Add a target-dependent attribute to a function
+ * @see llvm::AttrBuilder::addAttribute()
+ */
+void LLVMAddTargetDependentFunctionAttr(LLVMValueRef Fn, const char *A,
+ const char *V);
+
+/**
+ * Obtain an attribute from a function.
+ *
+ * @see llvm::Function::getAttributes()
+ */
+LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn);
+
+/**
+ * Remove an attribute from a function.
+ */
+void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA);
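+
+/**
+ * Attribute values are bit flags from the LLVMAttribute enumeration and
+ * may be combined, as in this sketch (Fn is an existing function):
+ *
+ * @code
+ *   LLVMAddFunctionAttr(Fn, LLVMNoUnwindAttribute | LLVMReadNoneAttribute);
+ *   LLVMAttribute Attrs = LLVMGetFunctionAttr(Fn);
+ *   if (Attrs & LLVMNoUnwindAttribute) {
+ *     // the attribute is present
+ *   }
+ * @endcode
+ */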
+
+/**
+ * @defgroup LLVMCCoreValueFunctionParameters Function Parameters
+ *
+ * Functions in this group relate to arguments/parameters on functions.
+ *
+ * Functions in this group expect LLVMValueRef instances that correspond
+ * to llvm::Function instances.
+ *
+ * @{
+ */
+
+/**
+ * Obtain the number of parameters in a function.
+ *
+ * @see llvm::Function::arg_size()
+ */
+unsigned LLVMCountParams(LLVMValueRef Fn);
+
+/**
+ * Obtain the parameters in a function.
+ *
+ * This takes a pointer to a pre-allocated array of LLVMValueRef that is
+ * at least LLVMCountParams() long. This array will be filled with
+ * LLVMValueRef instances which correspond to the parameters the
+ * function receives. Each LLVMValueRef corresponds to a llvm::Argument
+ * instance.
+ *
+ * @see llvm::Function::arg_begin()
+ */
+void LLVMGetParams(LLVMValueRef Fn, LLVMValueRef *Params);
+
+/**
+ * Obtain the parameter at the specified index.
+ *
+ * Parameters are indexed from 0.
+ *
+ * @see llvm::Function::arg_begin()
+ */
+LLVMValueRef LLVMGetParam(LLVMValueRef Fn, unsigned Index);
+
+/**
+ * Obtain the function to which this argument belongs.
+ *
+ * Unlike other functions in this group, this one takes an LLVMValueRef
+ * that corresponds to a llvm::Argument.
+ *
+ * The returned LLVMValueRef is the llvm::Function to which this
+ * argument belongs.
+ */
+LLVMValueRef LLVMGetParamParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the first parameter to a function.
+ *
+ * @see llvm::Function::arg_begin()
+ */
+LLVMValueRef LLVMGetFirstParam(LLVMValueRef Fn);
+
+/**
+ * Obtain the last parameter to a function.
+ *
+ * @see llvm::Function::arg_end()
+ */
+LLVMValueRef LLVMGetLastParam(LLVMValueRef Fn);
+
+/**
+ * Obtain the next parameter to a function.
+ *
+ * This takes an LLVMValueRef obtained from LLVMGetFirstParam() (which is
+ * actually a wrapped iterator) and obtains the next parameter from the
+ * underlying iterator.
+ */
+LLVMValueRef LLVMGetNextParam(LLVMValueRef Arg);
+
+/**
+ * Obtain the previous parameter to a function.
+ *
+ * This is the opposite of LLVMGetNextParam().
+ */
+LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg);
+
+/**
+ * Add an attribute to a function argument.
+ *
+ * @see llvm::Argument::addAttr()
+ */
+void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA);
+
+/**
+ * Remove an attribute from a function argument.
+ *
+ * @see llvm::Argument::removeAttr()
+ */
+void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA);
+
+/**
+ * Get an attribute from a function argument.
+ */
+LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg);
+
+/**
+ * Set the alignment for a function parameter.
+ *
+ * @see llvm::Argument::addAttr()
+ * @see llvm::AttrBuilder::addAlignmentAttr()
+ */
+void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueMetadata Metadata
+ *
+ * @{
+ */
+
+/**
+ * Obtain an MDString value from a context.
+ *
+ * The returned instance corresponds to the llvm::MDString class.
+ *
+ * The instance is specified by string data of a specified length. The
+ * string content is copied, so the backing memory can be freed after
+ * this function returns.
+ */
+LLVMValueRef LLVMMDStringInContext(LLVMContextRef C, const char *Str,
+ unsigned SLen);
+
+/**
+ * Obtain an MDString value from the global context.
+ */
+LLVMValueRef LLVMMDString(const char *Str, unsigned SLen);
+
+/**
+ * Obtain an MDNode value from a context.
+ *
+ * The returned value corresponds to the llvm::MDNode class.
+ */
+LLVMValueRef LLVMMDNodeInContext(LLVMContextRef C, LLVMValueRef *Vals,
+ unsigned Count);
+
+/**
+ * Obtain an MDNode value from the global context.
+ */
+LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count);
+
+/**
+ * Obtain the underlying string from an MDString value.
+ *
+ * @param V Instance to obtain string from.
+ * @param Len Memory address which will hold length of returned string.
+ * @return String data in MDString.
+ */
+const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len);
+
+/**
+ * Obtain the number of operands from an MDNode value.
+ *
+ * @param V MDNode to get number of operands from.
+ * @return Number of operands of the MDNode.
+ */
+unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V);
+
+/**
+ * Obtain the given MDNode's operands.
+ *
+ * The passed LLVMValueRef pointer should point to enough memory to hold all of
+ * the operands of the given MDNode (see LLVMGetMDNodeNumOperands) as
+ * LLVMValueRefs. This memory will be populated with the LLVMValueRefs of the
+ * MDNode's operands.
+ *
+ * @param V MDNode to get the operands from.
+ * @param Dest Destination array for operands.
+ */
+void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest);
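+
+/*
+ * For example, building a node and reading its operands back (a sketch;
+ * Ctx is an assumed context and <stdlib.h> is available):
+ *
+ *   LLVMValueRef Str = LLVMMDStringInContext(Ctx, "key", 3);
+ *   LLVMValueRef Node = LLVMMDNodeInContext(Ctx, &Str, 1);
+ *   unsigned N = LLVMGetMDNodeNumOperands(Node);
+ *   LLVMValueRef *Ops = malloc(N * sizeof(LLVMValueRef));
+ *   LLVMGetMDNodeOperands(Node, Ops);
+ *   free(Ops);
+ */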
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueBasicBlock Basic Block
+ *
+ * A basic block represents a single-entry, single-exit section of code.
+ * Basic blocks contain a list of instructions which form the body of
+ * the block.
+ *
+ * Basic blocks belong to functions. Their type is label.
+ *
+ * Basic blocks are themselves values. However, the C API models them as
+ * LLVMBasicBlockRef.
+ *
+ * @see llvm::BasicBlock
+ *
+ * @{
+ */
+
+/**
+ * Convert a basic block instance to a value type.
+ */
+LLVMValueRef LLVMBasicBlockAsValue(LLVMBasicBlockRef BB);
+
+/**
+ * Determine whether an LLVMValueRef is itself a basic block.
+ */
+LLVMBool LLVMValueIsBasicBlock(LLVMValueRef Val);
+
+/**
+ * Convert an LLVMValueRef to an LLVMBasicBlockRef instance.
+ */
+LLVMBasicBlockRef LLVMValueAsBasicBlock(LLVMValueRef Val);
+
+/**
+ * Obtain the function to which a basic block belongs.
+ *
+ * @see llvm::BasicBlock::getParent()
+ */
+LLVMValueRef LLVMGetBasicBlockParent(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the terminator instruction for a basic block.
+ *
+ * If the basic block does not have a terminator (a block without one
+ * is not well-formed IR), then NULL is returned.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::TerminatorInst.
+ *
+ * @see llvm::BasicBlock::getTerminator()
+ */
+LLVMValueRef LLVMGetBasicBlockTerminator(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the number of basic blocks in a function.
+ *
+ * @param Fn Function value to operate on.
+ */
+unsigned LLVMCountBasicBlocks(LLVMValueRef Fn);
+
+/**
+ * Obtain all of the basic blocks in a function.
+ *
+ * This operates on a function value. The BasicBlocks parameter is a
+ * pointer to a pre-allocated array of LLVMBasicBlockRef of at least
+ * LLVMCountBasicBlocks() in length. This array is populated with
+ * LLVMBasicBlockRef instances.
+ */
+void LLVMGetBasicBlocks(LLVMValueRef Fn, LLVMBasicBlockRef *BasicBlocks);
+
+/**
+ * Obtain the first basic block in a function.
+ *
+ * The returned basic block can be used as an iterator. You will likely
+ * eventually call into LLVMGetNextBasicBlock() with it.
+ *
+ * @see llvm::Function::begin()
+ */
+LLVMBasicBlockRef LLVMGetFirstBasicBlock(LLVMValueRef Fn);
+
+/**
+ * Obtain the last basic block in a function.
+ *
+ * @see llvm::Function::end()
+ */
+LLVMBasicBlockRef LLVMGetLastBasicBlock(LLVMValueRef Fn);
+
+/**
+ * Advance a basic block iterator.
+ */
+LLVMBasicBlockRef LLVMGetNextBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Go backwards in a basic block iterator.
+ */
+LLVMBasicBlockRef LLVMGetPreviousBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the basic block that corresponds to the entry point of a
+ * function.
+ *
+ * @see llvm::Function::getEntryBlock()
+ */
+LLVMBasicBlockRef LLVMGetEntryBasicBlock(LLVMValueRef Fn);
+
+/**
+ * Append a basic block to the end of a function.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMAppendBasicBlockInContext(LLVMContextRef C,
+ LLVMValueRef Fn,
+ const char *Name);
+
+/**
+ * Append a basic block to the end of a function using the global
+ * context.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMAppendBasicBlock(LLVMValueRef Fn, const char *Name);
+
+/**
+ * Insert a basic block in a function before another basic block.
+ *
+ * The function to add to is determined by the function of the
+ * passed basic block.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMInsertBasicBlockInContext(LLVMContextRef C,
+ LLVMBasicBlockRef BB,
+ const char *Name);
+
+/**
+ * Insert a basic block in a function using the global context.
+ *
+ * @see llvm::BasicBlock::Create()
+ */
+LLVMBasicBlockRef LLVMInsertBasicBlock(LLVMBasicBlockRef InsertBeforeBB,
+ const char *Name);
+
+/**
+ * Remove a basic block from a function and delete it.
+ *
+ * This deletes the basic block from its containing function and deletes
+ * the basic block itself.
+ *
+ * @see llvm::BasicBlock::eraseFromParent()
+ */
+void LLVMDeleteBasicBlock(LLVMBasicBlockRef BB);
+
+/**
+ * Remove a basic block from a function.
+ *
+ * This deletes the basic block from its containing function but keeps
+ * the basic block alive.
+ *
+ * @see llvm::BasicBlock::removeFromParent()
+ */
+void LLVMRemoveBasicBlockFromParent(LLVMBasicBlockRef BB);
+
+/**
+ * Move a basic block to before another one.
+ *
+ * @see llvm::BasicBlock::moveBefore()
+ */
+void LLVMMoveBasicBlockBefore(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+
+/**
+ * Move a basic block to after another one.
+ *
+ * @see llvm::BasicBlock::moveAfter()
+ */
+void LLVMMoveBasicBlockAfter(LLVMBasicBlockRef BB, LLVMBasicBlockRef MovePos);
+
+/**
+ * Obtain the first instruction in a basic block.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::Instruction
+ * instance.
+ */
+LLVMValueRef LLVMGetFirstInstruction(LLVMBasicBlockRef BB);
+
+/**
+ * Obtain the last instruction in a basic block.
+ *
+ * The returned LLVMValueRef corresponds to a llvm::Instruction.
+ */
+LLVMValueRef LLVMGetLastInstruction(LLVMBasicBlockRef BB);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueInstruction Instructions
+ *
+ * Functions in this group relate to the inspection and manipulation of
+ * individual instructions.
+ *
+ * In the C++ API, an instruction is modeled by llvm::Instruction. This
+ * class has a large number of descendants. llvm::Instruction is a
+ * llvm::Value and in the C API, instructions are modeled by
+ * LLVMValueRef.
+ *
+ * This group also contains sub-groups which operate on specific
+ * llvm::Instruction types, e.g. llvm::CallInst.
+ *
+ * @{
+ */
+
+/**
+ * Determine whether an instruction has any metadata attached.
+ */
+int LLVMHasMetadata(LLVMValueRef Val);
+
+/**
+ * Return metadata associated with an instruction value.
+ */
+LLVMValueRef LLVMGetMetadata(LLVMValueRef Val, unsigned KindID);
+
+/**
+ * Set metadata associated with an instruction value.
+ */
+void LLVMSetMetadata(LLVMValueRef Val, unsigned KindID, LLVMValueRef Node);
+
+/**
+ * Obtain the basic block to which an instruction belongs.
+ *
+ * @see llvm::Instruction::getParent()
+ */
+LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the instruction that occurs after the one specified.
+ *
+ * The next instruction will be from the same basic block.
+ *
+ * If this is the last instruction in a basic block, NULL will be
+ * returned.
+ */
+LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst);
+
+/**
+ * Obtain the instruction that occurred before this one.
+ *
+ * If the instruction is the first instruction in a basic block, NULL
+ * will be returned.
+ */
+LLVMValueRef LLVMGetPreviousInstruction(LLVMValueRef Inst);
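+
+/*
+ * Together with the basic block iterators above, these support walking
+ * every instruction in a function (a sketch, assuming Fn is a valid
+ * function value and NumInstructions is a caller-provided counter):
+ *
+ *   for (LLVMBasicBlockRef BB = LLVMGetFirstBasicBlock(Fn); BB;
+ *        BB = LLVMGetNextBasicBlock(BB))
+ *     for (LLVMValueRef I = LLVMGetFirstInstruction(BB); I;
+ *          I = LLVMGetNextInstruction(I))
+ *       NumInstructions++;
+ */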
+
+/**
+ * Remove and delete an instruction.
+ *
+ * The instruction specified is removed from its containing basic
+ * block and then deleted.
+ *
+ * @see llvm::Instruction::eraseFromParent()
+ */
+void LLVMInstructionEraseFromParent(LLVMValueRef Inst);
+
+/**
+ * Obtain the opcode for an individual instruction.
+ *
+ * @see llvm::Instruction::getOpcode()
+ */
+LLVMOpcode LLVMGetInstructionOpcode(LLVMValueRef Inst);
+
+/**
+ * Obtain the predicate of an instruction.
+ *
+ * This is only valid for instructions that correspond to llvm::ICmpInst
+ * or llvm::ConstantExpr whose opcode is llvm::Instruction::ICmp.
+ *
+ * @see llvm::ICmpInst::getPredicate()
+ */
+LLVMIntPredicate LLVMGetICmpPredicate(LLVMValueRef Inst);
+
+/**
+ * Obtain the float predicate of an instruction.
+ *
+ * This is only valid for instructions that correspond to llvm::FCmpInst
+ * or llvm::ConstantExpr whose opcode is llvm::Instruction::FCmp.
+ *
+ * @see llvm::FCmpInst::getPredicate()
+ */
+LLVMRealPredicate LLVMGetFCmpPredicate(LLVMValueRef Inst);
+
+/**
+ * Create a copy of 'this' instruction that is identical in all ways
+ * except the following:
+ * * The instruction has no parent
+ * * The instruction has no name
+ *
+ * @see llvm::Instruction::clone()
+ */
+LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst);
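+
+/*
+ * For example, a clone can be re-inserted through a builder (a sketch;
+ * Inst and Builder are assumed to be valid, and the builder API is
+ * declared later in this header):
+ *
+ *   LLVMValueRef Copy = LLVMInstructionClone(Inst);
+ *   LLVMInsertIntoBuilderWithName(Builder, Copy, "dup");
+ */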
+
+/**
+ * @defgroup LLVMCCoreValueInstructionCall Call Sites and Invocations
+ *
+ * Functions in this group apply to instructions that refer to call
+ * sites and invocations. These correspond to C++ types in the
+ * llvm::CallInst class tree.
+ *
+ * @{
+ */
+
+/**
+ * Set the calling convention for a call instruction.
+ *
+ * This expects an LLVMValueRef that corresponds to a llvm::CallInst or
+ * llvm::InvokeInst.
+ *
+ * @see llvm::CallInst::setCallingConv()
+ * @see llvm::InvokeInst::setCallingConv()
+ */
+void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC);
+
+/**
+ * Obtain the calling convention for a call instruction.
+ *
+ * This is the counterpart to LLVMSetInstructionCallConv(); see its
+ * documentation for usage details.
+ *
+ * @see LLVMSetInstructionCallConv()
+ */
+unsigned LLVMGetInstructionCallConv(LLVMValueRef Instr);
+
+
+void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute);
+void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index,
+ LLVMAttribute);
+void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index,
+ unsigned align);
+
+/**
+ * Obtain whether a call instruction is a tail call.
+ *
+ * This only works on llvm::CallInst instructions.
+ *
+ * @see llvm::CallInst::isTailCall()
+ */
+LLVMBool LLVMIsTailCall(LLVMValueRef CallInst);
+
+/**
+ * Set whether a call instruction is a tail call.
+ *
+ * This only works on llvm::CallInst instructions.
+ *
+ * @see llvm::CallInst::setTailCall()
+ */
+void LLVMSetTailCall(LLVMValueRef CallInst, LLVMBool IsTailCall);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueInstructionTerminator Terminators
+ *
+ * Functions in this group only apply to instructions that map to
+ * llvm::TerminatorInst instances.
+ *
+ * @{
+ */
+
+/**
+ * Return the number of successors that this terminator has.
+ *
+ * @see llvm::TerminatorInst::getNumSuccessors
+ */
+unsigned LLVMGetNumSuccessors(LLVMValueRef Term);
+
+/**
+ * Return the specified successor.
+ *
+ * @see llvm::TerminatorInst::getSuccessor
+ */
+LLVMBasicBlockRef LLVMGetSuccessor(LLVMValueRef Term, unsigned i);
+
+/**
+ * Update the specified successor to point at the provided block.
+ *
+ * @see llvm::TerminatorInst::setSuccessor
+ */
+void LLVMSetSuccessor(LLVMValueRef Term, unsigned i, LLVMBasicBlockRef block);
+
+/**
+ * Return whether a branch is conditional.
+ *
+ * This only works on llvm::BranchInst instructions.
+ *
+ * @see llvm::BranchInst::isConditional
+ */
+LLVMBool LLVMIsConditional(LLVMValueRef Branch);
+
+/**
+ * Return the condition of a branch instruction.
+ *
+ * This only works on llvm::BranchInst instructions.
+ *
+ * @see llvm::BranchInst::getCondition
+ */
+LLVMValueRef LLVMGetCondition(LLVMValueRef Branch);
+
+/**
+ * Set the condition of a branch instruction.
+ *
+ * This only works on llvm::BranchInst instructions.
+ *
+ * @see llvm::BranchInst::setCondition
+ */
+void LLVMSetCondition(LLVMValueRef Branch, LLVMValueRef Cond);
+
+/**
+ * Obtain the default destination basic block of a switch instruction.
+ *
+ * This only works on llvm::SwitchInst instructions.
+ *
+ * @see llvm::SwitchInst::getDefaultDest()
+ */
+LLVMBasicBlockRef LLVMGetSwitchDefaultDest(LLVMValueRef SwitchInstr);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreValueInstructionPHINode PHI Nodes
+ *
+ * Functions in this group only apply to instructions that map to
+ * llvm::PHINode instances.
+ *
+ * @{
+ */
+
+/**
+ * Add an incoming value to the end of a PHI list.
+ */
+void LLVMAddIncoming(LLVMValueRef PhiNode, LLVMValueRef *IncomingValues,
+ LLVMBasicBlockRef *IncomingBlocks, unsigned Count);
+
+/**
+ * Obtain the number of incoming basic blocks to a PHI node.
+ */
+unsigned LLVMCountIncoming(LLVMValueRef PhiNode);
+
+/**
+ * Obtain an incoming value to a PHI node as an LLVMValueRef.
+ */
+LLVMValueRef LLVMGetIncomingValue(LLVMValueRef PhiNode, unsigned Index);
+
+/**
+ * Obtain an incoming basic block to a PHI node as an LLVMBasicBlockRef.
+ */
+LLVMBasicBlockRef LLVMGetIncomingBlock(LLVMValueRef PhiNode, unsigned Index);
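+
+/*
+ * A typical use pairs LLVMBuildPhi() (declared below) with
+ * LLVMAddIncoming() (a sketch; Builder, ThenVal, ElseVal, ThenBB and
+ * ElseBB are assumed to exist):
+ *
+ *   LLVMValueRef Phi = LLVMBuildPhi(Builder, LLVMInt32Type(), "merge");
+ *   LLVMValueRef Vals[] = { ThenVal, ElseVal };
+ *   LLVMBasicBlockRef Blocks[] = { ThenBB, ElseBB };
+ *   LLVMAddIncoming(Phi, Vals, Blocks, 2);
+ */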
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreInstructionBuilder Instruction Builders
+ *
+ * An instruction builder represents a point within a basic block and is
+ * the exclusive means of building instructions using the C interface.
+ *
+ * @{
+ */
+
+LLVMBuilderRef LLVMCreateBuilderInContext(LLVMContextRef C);
+LLVMBuilderRef LLVMCreateBuilder(void);
+void LLVMPositionBuilder(LLVMBuilderRef Builder, LLVMBasicBlockRef Block,
+ LLVMValueRef Instr);
+void LLVMPositionBuilderBefore(LLVMBuilderRef Builder, LLVMValueRef Instr);
+void LLVMPositionBuilderAtEnd(LLVMBuilderRef Builder, LLVMBasicBlockRef Block);
+LLVMBasicBlockRef LLVMGetInsertBlock(LLVMBuilderRef Builder);
+void LLVMClearInsertionPosition(LLVMBuilderRef Builder);
+void LLVMInsertIntoBuilder(LLVMBuilderRef Builder, LLVMValueRef Instr);
+void LLVMInsertIntoBuilderWithName(LLVMBuilderRef Builder, LLVMValueRef Instr,
+ const char *Name);
+void LLVMDisposeBuilder(LLVMBuilderRef Builder);
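+
+/*
+ * A typical builder lifecycle (a sketch, assuming Fn is a function of
+ * type i32 (i32, i32) in the global context):
+ *
+ *   LLVMBuilderRef B = LLVMCreateBuilder();
+ *   LLVMBasicBlockRef Entry = LLVMAppendBasicBlock(Fn, "entry");
+ *   LLVMPositionBuilderAtEnd(B, Entry);
+ *   LLVMValueRef Sum = LLVMBuildAdd(B, LLVMGetParam(Fn, 0),
+ *                                   LLVMGetParam(Fn, 1), "sum");
+ *   LLVMBuildRet(B, Sum);
+ *   LLVMDisposeBuilder(B);
+ */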
+
+/* Metadata */
+void LLVMSetCurrentDebugLocation(LLVMBuilderRef Builder, LLVMValueRef L);
+LLVMValueRef LLVMGetCurrentDebugLocation(LLVMBuilderRef Builder);
+void LLVMSetInstDebugLocation(LLVMBuilderRef Builder, LLVMValueRef Inst);
+
+/* Terminators */
+LLVMValueRef LLVMBuildRetVoid(LLVMBuilderRef);
+LLVMValueRef LLVMBuildRet(LLVMBuilderRef, LLVMValueRef V);
+LLVMValueRef LLVMBuildAggregateRet(LLVMBuilderRef, LLVMValueRef *RetVals,
+ unsigned N);
+LLVMValueRef LLVMBuildBr(LLVMBuilderRef, LLVMBasicBlockRef Dest);
+LLVMValueRef LLVMBuildCondBr(LLVMBuilderRef, LLVMValueRef If,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Else);
+LLVMValueRef LLVMBuildSwitch(LLVMBuilderRef, LLVMValueRef V,
+ LLVMBasicBlockRef Else, unsigned NumCases);
+LLVMValueRef LLVMBuildIndirectBr(LLVMBuilderRef B, LLVMValueRef Addr,
+ unsigned NumDests);
+LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ const char *Name);
+LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
+ LLVMValueRef PersFn, unsigned NumClauses,
+ const char *Name);
+LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn);
+LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef);
+
+/* Add a case to the switch instruction */
+void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal,
+ LLVMBasicBlockRef Dest);
+
+/* Add a destination to the indirectbr instruction */
+void LLVMAddDestination(LLVMValueRef IndirectBr, LLVMBasicBlockRef Dest);
+
+/* Add a catch or filter clause to the landingpad instruction */
+void LLVMAddClause(LLVMValueRef LandingPad, LLVMValueRef ClauseVal);
+
+/* Set the 'cleanup' flag in the landingpad instruction */
+void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val);
+
+/* Arithmetic */
+LLVMValueRef LLVMBuildAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNSWAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNUWAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildSub(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNSWSub(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNUWSub(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFSub(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNSWMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNUWMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFMul(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildUDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildSDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildExactSDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFDiv(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildURem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildSRem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFRem(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildShl(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildLShr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildAShr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildAnd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildOr(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildXor(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildBinOp(LLVMBuilderRef B, LLVMOpcode Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildNeg(LLVMBuilderRef, LLVMValueRef V, const char *Name);
+LLVMValueRef LLVMBuildNSWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name);
+LLVMValueRef LLVMBuildNUWNeg(LLVMBuilderRef B, LLVMValueRef V,
+ const char *Name);
+LLVMValueRef LLVMBuildFNeg(LLVMBuilderRef, LLVMValueRef V, const char *Name);
+LLVMValueRef LLVMBuildNot(LLVMBuilderRef, LLVMValueRef V, const char *Name);
+
+/* Memory */
+LLVMValueRef LLVMBuildMalloc(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
+LLVMValueRef LLVMBuildArrayMalloc(LLVMBuilderRef, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name);
+LLVMValueRef LLVMBuildAlloca(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
+LLVMValueRef LLVMBuildArrayAlloca(LLVMBuilderRef, LLVMTypeRef Ty,
+ LLVMValueRef Val, const char *Name);
+LLVMValueRef LLVMBuildFree(LLVMBuilderRef, LLVMValueRef PointerVal);
+LLVMValueRef LLVMBuildLoad(LLVMBuilderRef, LLVMValueRef PointerVal,
+ const char *Name);
+LLVMValueRef LLVMBuildStore(LLVMBuilderRef, LLVMValueRef Val, LLVMValueRef Ptr);
+LLVMValueRef LLVMBuildGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name);
+LLVMValueRef LLVMBuildInBoundsGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ LLVMValueRef *Indices, unsigned NumIndices,
+ const char *Name);
+LLVMValueRef LLVMBuildStructGEP(LLVMBuilderRef B, LLVMValueRef Pointer,
+ unsigned Idx, const char *Name);
+LLVMValueRef LLVMBuildGlobalString(LLVMBuilderRef B, const char *Str,
+ const char *Name);
+LLVMValueRef LLVMBuildGlobalStringPtr(LLVMBuilderRef B, const char *Str,
+ const char *Name);
+LLVMBool LLVMGetVolatile(LLVMValueRef MemoryAccessInst);
+void LLVMSetVolatile(LLVMValueRef MemoryAccessInst, LLVMBool IsVolatile);
+LLVMAtomicOrdering LLVMGetOrdering(LLVMValueRef MemoryAccessInst);
+void LLVMSetOrdering(LLVMValueRef MemoryAccessInst, LLVMAtomicOrdering Ordering);
+
+/* Casts */
+LLVMValueRef LLVMBuildTrunc(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildZExt(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildSExt(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildFPToUI(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildFPToSI(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildUIToFP(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildSIToFP(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildFPTrunc(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildFPExt(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildPtrToInt(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildIntToPtr(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildBitCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildAddrSpaceCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildZExtOrBitCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildSExtOrBitCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildTruncOrBitCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildCast(LLVMBuilderRef B, LLVMOpcode Op, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildPointerCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildIntCast(LLVMBuilderRef, LLVMValueRef Val, /*Signed cast!*/
+ LLVMTypeRef DestTy, const char *Name);
+LLVMValueRef LLVMBuildFPCast(LLVMBuilderRef, LLVMValueRef Val,
+ LLVMTypeRef DestTy, const char *Name);
+
+/* Comparisons */
+LLVMValueRef LLVMBuildICmp(LLVMBuilderRef, LLVMIntPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+LLVMValueRef LLVMBuildFCmp(LLVMBuilderRef, LLVMRealPredicate Op,
+ LLVMValueRef LHS, LLVMValueRef RHS,
+ const char *Name);
+
+/* Miscellaneous instructions */
+LLVMValueRef LLVMBuildPhi(LLVMBuilderRef, LLVMTypeRef Ty, const char *Name);
+LLVMValueRef LLVMBuildCall(LLVMBuilderRef, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ const char *Name);
+LLVMValueRef LLVMBuildSelect(LLVMBuilderRef, LLVMValueRef If,
+ LLVMValueRef Then, LLVMValueRef Else,
+ const char *Name);
+LLVMValueRef LLVMBuildVAArg(LLVMBuilderRef, LLVMValueRef List, LLVMTypeRef Ty,
+ const char *Name);
+LLVMValueRef LLVMBuildExtractElement(LLVMBuilderRef, LLVMValueRef VecVal,
+ LLVMValueRef Index, const char *Name);
+LLVMValueRef LLVMBuildInsertElement(LLVMBuilderRef, LLVMValueRef VecVal,
+ LLVMValueRef EltVal, LLVMValueRef Index,
+ const char *Name);
+LLVMValueRef LLVMBuildShuffleVector(LLVMBuilderRef, LLVMValueRef V1,
+ LLVMValueRef V2, LLVMValueRef Mask,
+ const char *Name);
+LLVMValueRef LLVMBuildExtractValue(LLVMBuilderRef, LLVMValueRef AggVal,
+ unsigned Index, const char *Name);
+LLVMValueRef LLVMBuildInsertValue(LLVMBuilderRef, LLVMValueRef AggVal,
+ LLVMValueRef EltVal, unsigned Index,
+ const char *Name);
+
+LLVMValueRef LLVMBuildIsNull(LLVMBuilderRef, LLVMValueRef Val,
+ const char *Name);
+LLVMValueRef LLVMBuildIsNotNull(LLVMBuilderRef, LLVMValueRef Val,
+ const char *Name);
+LLVMValueRef LLVMBuildPtrDiff(LLVMBuilderRef, LLVMValueRef LHS,
+ LLVMValueRef RHS, const char *Name);
+LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering ordering,
+ LLVMBool singleThread, const char *Name);
+LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B, LLVMAtomicRMWBinOp op,
+ LLVMValueRef PTR, LLVMValueRef Val,
+ LLVMAtomicOrdering ordering,
+ LLVMBool singleThread);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreModuleProvider Module Providers
+ *
+ * @{
+ */
+
+/**
+ * Changes the type of M so it can be passed to FunctionPassManagers and the
+ * JIT. They take ModuleProviders for historical reasons.
+ */
+LLVMModuleProviderRef
+LLVMCreateModuleProviderForExistingModule(LLVMModuleRef M);
+
+/**
+ * Destroys the module M.
+ */
+void LLVMDisposeModuleProvider(LLVMModuleProviderRef M);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreMemoryBuffers Memory Buffers
+ *
+ * @{
+ */
+
+LLVMBool LLVMCreateMemoryBufferWithContentsOfFile(const char *Path,
+ LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage);
+LLVMBool LLVMCreateMemoryBufferWithSTDIN(LLVMMemoryBufferRef *OutMemBuf,
+ char **OutMessage);
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRange(const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName,
+ LLVMBool RequiresNullTerminator);
+LLVMMemoryBufferRef LLVMCreateMemoryBufferWithMemoryRangeCopy(const char *InputData,
+ size_t InputDataLength,
+ const char *BufferName);
+const char *LLVMGetBufferStart(LLVMMemoryBufferRef MemBuf);
+size_t LLVMGetBufferSize(LLVMMemoryBufferRef MemBuf);
+void LLVMDisposeMemoryBuffer(LLVMMemoryBufferRef MemBuf);
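+
+/*
+ * For example (a sketch; "input.bc" is an assumed file name, <stdio.h>
+ * is available, and error handling is abbreviated):
+ *
+ *   LLVMMemoryBufferRef Buf;
+ *   char *Err = NULL;
+ *   if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Err)) {
+ *     fprintf(stderr, "%s\n", Err);
+ *     LLVMDisposeMessage(Err);
+ *   } else {
+ *     size_t Size = LLVMGetBufferSize(Buf);
+ *     LLVMDisposeMemoryBuffer(Buf);
+ *   }
+ */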
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCorePassRegistry Pass Registry
+ *
+ * @{
+ */
+
+/** Return the global pass registry, for use with initialization functions.
+ @see llvm::PassRegistry::getPassRegistry */
+LLVMPassRegistryRef LLVMGetGlobalPassRegistry(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCorePassManagers Pass Managers
+ *
+ * @{
+ */
+
+/** Constructs a new whole-module pass pipeline. This type of pipeline is
+ suitable for link-time optimization and whole-module transformations.
+ @see llvm::PassManager::PassManager */
+LLVMPassManagerRef LLVMCreatePassManager(void);
+
+/** Constructs a new function-by-function pass pipeline over the module
+ provider. It does not take ownership of the module provider. This type of
+ pipeline is suitable for code generation and JIT compilation tasks.
+ @see llvm::FunctionPassManager::FunctionPassManager */
+LLVMPassManagerRef LLVMCreateFunctionPassManagerForModule(LLVMModuleRef M);
+
+/** Deprecated: Use LLVMCreateFunctionPassManagerForModule instead. */
+LLVMPassManagerRef LLVMCreateFunctionPassManager(LLVMModuleProviderRef MP);
+
+/** Initializes, executes on the provided module, and finalizes all of the
+ passes scheduled in the pass manager. Returns 1 if any of the passes
+ modified the module, 0 otherwise.
+ @see llvm::PassManager::run(Module&) */
+LLVMBool LLVMRunPassManager(LLVMPassManagerRef PM, LLVMModuleRef M);
+
+/** Initializes all of the function passes scheduled in the function pass
+ manager. Returns 1 if any of the passes modified the module, 0 otherwise.
+ @see llvm::FunctionPassManager::doInitialization */
+LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM);
+
+/** Executes all of the function passes scheduled in the function pass manager
+    on the provided function. Returns 1 if any of the passes modified the
+    function, 0 otherwise.
+ @see llvm::FunctionPassManager::run(Function&) */
+LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F);
+
+/** Finalizes all of the function passes scheduled in the function pass
+ manager. Returns 1 if any of the passes modified the module, 0 otherwise.
+ @see llvm::FunctionPassManager::doFinalization */
+LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM);
+
+/** Frees the memory of a pass pipeline. For function pipelines, does not free
+ the module provider.
+ @see llvm::PassManagerBase::~PassManagerBase. */
+void LLVMDisposePassManager(LLVMPassManagerRef PM);
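+
+/*
+ * A typical function pass pipeline run (a sketch; passes would be added
+ * via the llvm-c/Transforms headers before running, M is an assumed
+ * module, and the function iterators are declared earlier in this
+ * header):
+ *
+ *   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
+ *   LLVMInitializeFunctionPassManager(FPM);
+ *   for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
+ *        F = LLVMGetNextFunction(F))
+ *     LLVMRunFunctionPassManager(FPM, F);
+ *   LLVMFinalizeFunctionPassManager(FPM);
+ *   LLVMDisposePassManager(FPM);
+ */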
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup LLVMCCoreThreading Threading
+ *
+ * Handle the structures needed to make LLVM safe for multithreading.
+ *
+ * @{
+ */
+
+/** Deprecated: Multi-threading can only be enabled/disabled with the compile
+ time define LLVM_ENABLE_THREADS. This function always returns
+ LLVMIsMultithreaded(). */
+LLVMBool LLVMStartMultithreaded(void);
+
+/** Deprecated: Multi-threading can only be enabled/disabled with the compile
+ time define LLVM_ENABLE_THREADS. */
+void LLVMStopMultithreaded(void);
+
+/** Check whether LLVM is executing in thread-safe mode or not.
+ @see llvm::llvm_is_multithreaded */
+LLVMBool LLVMIsMultithreaded(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LLVM_C_CORE_H */
diff --git a/contrib/llvm/include/llvm-c/Disassembler.h b/contrib/llvm/include/llvm-c/Disassembler.h
new file mode 100644
index 0000000..d6cbe31c
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Disassembler.h
@@ -0,0 +1,254 @@
+/*===-- llvm-c/Disassembler.h - Disassembler Public C Interface ---*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides a public interface to a disassembler library. *|
+|* LLVM provides an implementation of this interface. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_DISASSEMBLER_H
+#define LLVM_C_DISASSEMBLER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <stddef.h>
+
+/**
+ * @defgroup LLVMCDisassembler Disassembler
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+/**
+ * An opaque reference to a disassembler context.
+ */
+typedef void *LLVMDisasmContextRef;
+
+/**
+ * The type for the operand information call back function. This is called to
+ * get the symbolic information for an operand of an instruction. Typically
+ * this is from the relocation information, symbol table, etc. That block of
+ * information is saved when the disassembler context is created and passed to
+ * the call back in the DisInfo parameter. The instruction containing operand
+ * is at the PC parameter. For some instruction sets, there can be more than
+ * one operand with symbolic information. To determine the symbolic operand
+ * information for each operand, the bytes for the specific operand in the
+ * instruction are specified by the Offset parameter and its byte widith is the
+ * size parameter. For instructions sets with fixed widths and one symbolic
+ * operand per instruction, the Offset parameter will be zero and Size parameter
+ * will be the instruction width. The information is returned in TagBuf and is
+ * Triple specific with its specific information defined by the value of
+ * TagType for that Triple. If symbolic information is returned the function
+ * returns 1, otherwise it returns 0.
+ */
+typedef int (*LLVMOpInfoCallback)(void *DisInfo, uint64_t PC,
+ uint64_t Offset, uint64_t Size,
+ int TagType, void *TagBuf);
+
+/**
+ * The initial support in LLVM MC for the most general form of a relocatable
+ * expression is "AddSymbol - SubtractSymbol + Offset". For some Darwin targets
+ * this full form is encoded in the relocation information so that AddSymbol and
+ * SubtractSymbol can be link edited independently of each other. Many other
+ * platforms only allow a relocatable expression of the form AddSymbol + Offset
+ * to be encoded.
+ *
+ * The LLVMOpInfoCallback() for the TagType value of 1 uses the struct
+ * LLVMOpInfo1. The value of the relocatable expression for the operand,
+ * including any PC adjustment, is passed in to the call back in the Value
+ * field. The symbolic information about the operand is returned using all
+ * the fields of the structure with the Offset of the relocatable expression
+ * returned in the Value field. It is possible that some symbols in the
+ * relocatable expression were assembly temporary symbols, for example
+ * "Ldata - LpicBase + constant", and only the Values of the symbols without
+ * symbol names are present in the relocation information. The VariantKind
+ * type is one of the Target specific #defines below and is used to print
+ * operands like "_foo@GOT", ":lower16:_foo", etc.
+ */
+struct LLVMOpInfoSymbol1 {
+ uint64_t Present; /* 1 if this symbol is present */
+ const char *Name; /* symbol name if not NULL */
+ uint64_t Value; /* symbol value if name is NULL */
+};
+
+struct LLVMOpInfo1 {
+ struct LLVMOpInfoSymbol1 AddSymbol;
+ struct LLVMOpInfoSymbol1 SubtractSymbol;
+ uint64_t Value;
+ uint64_t VariantKind;
+};
+
+/**
+ * The operand VariantKinds for symbolic disassembly.
+ */
+#define LLVMDisassembler_VariantKind_None 0 /* all targets */
+
+/**
+ * The ARM target VariantKinds.
+ */
+#define LLVMDisassembler_VariantKind_ARM_HI16 1 /* :upper16: */
+#define LLVMDisassembler_VariantKind_ARM_LO16 2 /* :lower16: */
+
+/**
+ * The ARM64 target VariantKinds.
+ */
+#define LLVMDisassembler_VariantKind_ARM64_PAGE 1 /* @page */
+#define LLVMDisassembler_VariantKind_ARM64_PAGEOFF 2 /* @pageoff */
+#define LLVMDisassembler_VariantKind_ARM64_GOTPAGE 3 /* @gotpage */
+#define LLVMDisassembler_VariantKind_ARM64_GOTPAGEOFF 4 /* @gotpageoff */
+#define LLVMDisassembler_VariantKind_ARM64_TLVP 5 /* @tvlppage */
+#define LLVMDisassembler_VariantKind_ARM64_TLVOFF 6 /* @tvlppageoff */
+
+/**
+ * The type for the symbol lookup function. This may be called by the
+ * disassembler for things like adding a comment for a PC plus a constant
+ * offset load instruction to use a symbol name instead of a load address value.
+ * It is passed the block of information that was saved when the disassembler
+ * context was created, along with the ReferenceValue to look up as a symbol.
+ * If no symbol is found for the ReferenceValue, NULL is returned. The
+ * ReferenceType of the instruction is passed indirectly, as is the PC of the
+ * instruction in ReferencePC. If the output reference can be determined, its
+ * type is returned indirectly in ReferenceType along with ReferenceName if
+ * any, or that is set to NULL.
+ */
+typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
+ uint64_t ReferenceValue,
+ uint64_t *ReferenceType,
+ uint64_t ReferencePC,
+ const char **ReferenceName);
+/**
+ * The reference types on input and output.
+ */
+/* No input reference type or no output reference type. */
+#define LLVMDisassembler_ReferenceType_InOut_None 0
+
+/* The input reference is from a branch instruction. */
+#define LLVMDisassembler_ReferenceType_In_Branch 1
+/* The input reference is from a PC relative load instruction. */
+#define LLVMDisassembler_ReferenceType_In_PCrel_Load 2
+
+/* The input reference is from an ARM64::ADRP instruction. */
+#define LLVMDisassembler_ReferenceType_In_ARM64_ADRP 0x100000001
+/* The input reference is from an ARM64::ADDXri instruction. */
+#define LLVMDisassembler_ReferenceType_In_ARM64_ADDXri 0x100000002
+/* The input reference is from an ARM64::LDRXui instruction. */
+#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXui 0x100000003
+/* The input reference is from an ARM64::LDRXl instruction. */
+#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXl 0x100000004
+/* The input reference is from an ARM64::ADR instruction. */
+#define LLVMDisassembler_ReferenceType_In_ARM64_ADR 0x100000005
+
+/* The output reference is to a symbol stub. */
+#define LLVMDisassembler_ReferenceType_Out_SymbolStub 1
+/* The output reference is to a symbol address in a literal pool. */
+#define LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr 2
+/* The output reference is to a cstring address in a literal pool. */
+#define LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr 3
+
+/* The output reference is to an Objective-C CoreFoundation string. */
+#define LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref 4
+/* The output reference is to an Objective-C message. */
+#define LLVMDisassembler_ReferenceType_Out_Objc_Message 5
+/* The output reference is to an Objective-C message ref. */
+#define LLVMDisassembler_ReferenceType_Out_Objc_Message_Ref 6
+/* The output reference is to an Objective-C selector ref. */
+#define LLVMDisassembler_ReferenceType_Out_Objc_Selector_Ref 7
+/* The output reference is to an Objective-C class ref. */
+#define LLVMDisassembler_ReferenceType_Out_Objc_Class_Ref 8
+
+/* The output reference is to a C++ symbol name. */
+#define LLVMDisassembler_ReferenceType_DeMangled_Name 9
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* !defined(__cplusplus) */
+
+/**
+ * Create a disassembler for the TripleName. Symbolic disassembly is supported
+ * by passing a block of information in the DisInfo parameter and specifying the
+ * TagType and callback functions as described above. These can all be passed
+ * as NULL. If successful, this returns a disassembler context. If not, it
+ * returns NULL. This function is equivalent to calling
+ * LLVMCreateDisasmCPUFeatures() with an empty CPU name and feature set.
+ */
+LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
+ int TagType, LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp);
+
+/**
+ * Create a disassembler for the TripleName and a specific CPU. Symbolic
+ * disassembly is supported by passing a block of information in the DisInfo
+ * parameter and specifying the TagType and callback functions as described
+ * above. These can all be passed as NULL. If successful, this returns a
+ * disassembler context. If not, it returns NULL. This function is equivalent
+ * to calling LLVMCreateDisasmCPUFeatures() with an empty feature set.
+ */
+LLVMDisasmContextRef LLVMCreateDisasmCPU(const char *Triple, const char *CPU,
+ void *DisInfo, int TagType,
+ LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp);
+
+/**
+ * Create a disassembler for the TripleName, a specific CPU and specific feature
+ * string. Symbolic disassembly is supported by passing a block of information
+ * in the DisInfo parameter and specifying the TagType and callback functions as
+ * described above. These can all be passed as NULL. If successful, this
+ * returns a disassembler context. If not, it returns NULL.
+ */
+LLVMDisasmContextRef
+LLVMCreateDisasmCPUFeatures(const char *Triple, const char *CPU,
+ const char *Features, void *DisInfo, int TagType,
+ LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp);
+
+/**
+ * Set the disassembler's options. Returns 1 if it can set the Options and 0
+ * otherwise.
+ */
+int LLVMSetDisasmOptions(LLVMDisasmContextRef DC, uint64_t Options);
+
+/* The option to produce marked up assembly. */
+#define LLVMDisassembler_Option_UseMarkup 1
+/* The option to print immediates as hex. */
+#define LLVMDisassembler_Option_PrintImmHex 2
+/* The option to use the other assembler printer variant */
+#define LLVMDisassembler_Option_AsmPrinterVariant 4
+/* The option to set comments on instructions */
+#define LLVMDisassembler_Option_SetInstrComments 8
+/* The option to print latency information alongside instructions */
+#define LLVMDisassembler_Option_PrintLatency 16
+
+/**
+ * Dispose of a disassembler context.
+ */
+void LLVMDisasmDispose(LLVMDisasmContextRef DC);
+
+/**
+ * Disassemble a single instruction using the disassembler context specified in
+ * the parameter DC. The bytes of the instruction are specified in the
+ * parameter Bytes, which must contain at least BytesSize bytes. The
+ * instruction is at the address specified by the PC parameter. If a valid
+ * instruction can be disassembled, its string is returned indirectly in
+ * OutString whose size is specified in the parameter OutStringSize. This
+ * function returns the number of bytes in the instruction or zero if there was
+ * no valid instruction.
+ */
+size_t LLVMDisasmInstruction(LLVMDisasmContextRef DC, uint8_t *Bytes,
+ uint64_t BytesSize, uint64_t PC,
+ char *OutString, size_t OutStringSize);
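+
+/*
+ * A typical disassembly loop (a sketch; the corresponding target and
+ * disassembler must have been initialized first via the llvm-c/Target.h
+ * routines, the triple shown is only an example, and Bytes/BytesSize
+ * are assumed to describe the code to decode):
+ *
+ *   LLVMDisasmContextRef DC =
+ *       LLVMCreateDisasm("x86_64-unknown-linux-gnu", NULL, 0, NULL, NULL);
+ *   char Insn[128];
+ *   uint64_t PC = 0;
+ *   while (PC < BytesSize) {
+ *     size_t Len = LLVMDisasmInstruction(DC, Bytes + PC, BytesSize - PC,
+ *                                        PC, Insn, sizeof(Insn));
+ *     if (Len == 0)
+ *       break;
+ *     printf("%s\n", Insn);
+ *     PC += Len;
+ *   }
+ *   LLVMDisasmDispose(DC);
+ */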
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* !defined(__cplusplus) */
+
+#endif /* !defined(LLVM_C_DISASSEMBLER_H) */
diff --git a/contrib/llvm/include/llvm-c/ErrorHandling.h b/contrib/llvm/include/llvm-c/ErrorHandling.h
new file mode 100644
index 0000000..5a80bc5
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/ErrorHandling.h
@@ -0,0 +1,51 @@
+/*===-- llvm-c/ErrorHandling.h - Error Handling C Interface -------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to LLVM's error handling mechanism. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_ERROR_HANDLING_H
+#define LLVM_C_ERROR_HANDLING_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (*LLVMFatalErrorHandler)(const char *Reason);
+
+/**
+ * Install a fatal error handler. By default, if LLVM detects a fatal error, it
+ * will call exit(1). This may not be appropriate in many contexts. For example,
+ * doing exit(1) will bypass many crash reporting/tracing system tools. This
+ * function allows you to install a callback that will be invoked prior to the
+ * call to exit(1).
+ */
+void LLVMInstallFatalErrorHandler(LLVMFatalErrorHandler Handler);
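+
+/*
+ * For example (a sketch; the handler body is illustrative and assumes
+ * <stdio.h> is available):
+ *
+ *   static void MyHandler(const char *Reason) {
+ *     fprintf(stderr, "LLVM fatal error: %s\n", Reason);
+ *   }
+ *
+ *   LLVMInstallFatalErrorHandler(MyHandler);
+ */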
+
+/**
+ * Reset the fatal error handler. This resets LLVM's fatal error handling
+ * behavior to the default.
+ */
+void LLVMResetFatalErrorHandler(void);
+
+/**
+ * Enable LLVM's built-in stack trace code. This intercepts the OS's crash
+ * signals and prints which component of LLVM you were in at the time of
+ * the crash.
+ */
+void LLVMEnablePrettyStackTrace(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/ExecutionEngine.h b/contrib/llvm/include/llvm-c/ExecutionEngine.h
new file mode 100644
index 0000000..b72a91a
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/ExecutionEngine.h
@@ -0,0 +1,193 @@
+/*===-- llvm-c/ExecutionEngine.h - ExecutionEngine Lib C Iface --*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMExecutionEngine.o, which *|
+|* implements JIT compilation and interpretation of LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_EXECUTIONENGINE_H
+#define LLVM_C_EXECUTIONENGINE_H
+
+#include "llvm-c/Types.h"
+#include "llvm-c/Target.h"
+#include "llvm-c/TargetMachine.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCExecutionEngine Execution Engine
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+void LLVMLinkInMCJIT(void);
+void LLVMLinkInInterpreter(void);
+
+typedef struct LLVMOpaqueGenericValue *LLVMGenericValueRef;
+typedef struct LLVMOpaqueExecutionEngine *LLVMExecutionEngineRef;
+typedef struct LLVMOpaqueMCJITMemoryManager *LLVMMCJITMemoryManagerRef;
+
+struct LLVMMCJITCompilerOptions {
+ unsigned OptLevel;
+ LLVMCodeModel CodeModel;
+ LLVMBool NoFramePointerElim;
+ LLVMBool EnableFastISel;
+ LLVMMCJITMemoryManagerRef MCJMM;
+};
+
+/*===-- Operations on generic values --------------------------------------===*/
+
+LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
+ unsigned long long N,
+ LLVMBool IsSigned);
+
+LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P);
+
+LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef Ty, double N);
+
+unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef);
+
+unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenVal,
+ LLVMBool IsSigned);
+
+void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal);
+
+double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal);
+
+void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal);
+
+/*===-- Operations on execution engines -----------------------------------===*/
+
+LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
+ LLVMModuleRef M,
+ char **OutError);
+
+LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
+ LLVMModuleRef M,
+ char **OutError);
+
+LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
+ LLVMModuleRef M,
+ unsigned OptLevel,
+ char **OutError);
+
+void LLVMInitializeMCJITCompilerOptions(
+ struct LLVMMCJITCompilerOptions *Options, size_t SizeOfOptions);
+
+/**
+ * Create an MCJIT execution engine for a module, with the given options. It is
+ * the responsibility of the caller to ensure that all fields in Options up to
+ * the given SizeOfOptions are initialized. It is correct to pass a smaller
+ * value of SizeOfOptions that omits some fields. The canonical way of using
+ * this is:
+ *
+ * LLVMMCJITCompilerOptions options;
+ * LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
+ * ... fill in those options you care about
+ * LLVMCreateMCJITCompilerForModule(&jit, mod, &options, sizeof(options),
+ * &error);
+ *
+ * Note that this is also correct, though possibly suboptimal:
+ *
+ * LLVMCreateMCJITCompilerForModule(&jit, mod, 0, 0, &error);
+ */
+LLVMBool LLVMCreateMCJITCompilerForModule(
+ LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
+ struct LLVMMCJITCompilerOptions *Options, size_t SizeOfOptions,
+ char **OutError);
+
+void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE);
+
+void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE);
+
+void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE);
+
+int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned ArgC, const char * const *ArgV,
+ const char * const *EnvP);
+
+LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned NumArgs,
+ LLVMGenericValueRef *Args);
+
+void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F);
+
+void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M);
+
+LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
+ LLVMModuleRef *OutMod, char **OutError);
+
+LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
+ LLVMValueRef *OutFn);
+
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
+ LLVMValueRef Fn);
+
+LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE);
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE);
+
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+ void* Addr);
+
+void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global);
+
+uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name);
+
+uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name);
+
+/*===-- Operations on memory managers -------------------------------------===*/
+
+typedef uint8_t *(*LLVMMemoryManagerAllocateCodeSectionCallback)(
+ void *Opaque, uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ const char *SectionName);
+typedef uint8_t *(*LLVMMemoryManagerAllocateDataSectionCallback)(
+ void *Opaque, uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ const char *SectionName, LLVMBool IsReadOnly);
+typedef LLVMBool (*LLVMMemoryManagerFinalizeMemoryCallback)(
+ void *Opaque, char **ErrMsg);
+typedef void (*LLVMMemoryManagerDestroyCallback)(void *Opaque);
+
+/**
+ * Create a simple custom MCJIT memory manager. This memory manager can
+ * intercept allocations in a module-oblivious way. This will return NULL
+ * if any of the passed functions are NULL.
+ *
+ * @param Opaque An opaque client object to pass back to the callbacks.
+ * @param AllocateCodeSection Allocate a block of memory for executable code.
+ * @param AllocateDataSection Allocate a block of memory for data.
+ * @param FinalizeMemory Set page permissions and flush cache. Return 0 on
+ * success, 1 on error.
+ */
+LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
+ void *Opaque,
+ LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+ LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+ LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+ LLVMMemoryManagerDestroyCallback Destroy);
+
+void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM);
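+
+/*
+ * A minimal sketch of wiring up the callbacks (my_alloc_executable() and
+ * my_alloc() are hypothetical helpers; a real manager must return
+ * suitably aligned memory that is executable for code sections):
+ *
+ *   static uint8_t *AllocCode(void *Opaque, uintptr_t Size, unsigned Align,
+ *                             unsigned SectionID, const char *Name) {
+ *     return my_alloc_executable(Size, Align);
+ *   }
+ *   static uint8_t *AllocData(void *Opaque, uintptr_t Size, unsigned Align,
+ *                             unsigned SectionID, const char *Name,
+ *                             LLVMBool IsReadOnly) {
+ *     return my_alloc(Size, Align);
+ *   }
+ *   static LLVMBool Finalize(void *Opaque, char **ErrMsg) { return 0; }
+ *   static void Destroy(void *Opaque) {}
+ *
+ *   LLVMMCJITMemoryManagerRef MM = LLVMCreateSimpleMCJITMemoryManager(
+ *       NULL, AllocCode, AllocData, Finalize, Destroy);
+ */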
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/IRReader.h b/contrib/llvm/include/llvm-c/IRReader.h
new file mode 100644
index 0000000..5b58d99
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/IRReader.h
@@ -0,0 +1,40 @@
+/*===-- llvm-c/IRReader.h - IR Reader C Interface -----------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to the IR Reader. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_IRREADER_H
+#define LLVM_C_IRREADER_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Read LLVM IR from a memory buffer and convert it into an in-memory Module
+ * object. Returns 0 on success.
+ * Optionally returns a human-readable description of any errors that
+ * occurred while parsing the IR. OutMessage must be disposed with
+ * LLVMDisposeMessage.
+ *
+ * @see llvm::ParseIR()
+ */
+LLVMBool LLVMParseIRInContext(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
+ char **OutMessage);
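+
+/*
+ * For example (a sketch; Buf is an assumed memory buffer, and the call
+ * consumes it, so it must not be disposed separately afterwards):
+ *
+ *   LLVMModuleRef M;
+ *   char *Err = NULL;
+ *   if (LLVMParseIRInContext(LLVMGetGlobalContext(), Buf, &M, &Err)) {
+ *     fprintf(stderr, "%s\n", Err);
+ *     LLVMDisposeMessage(Err);
+ *   }
+ */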
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/Initialization.h b/contrib/llvm/include/llvm-c/Initialization.h
new file mode 100644
index 0000000..90c8396
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Initialization.h
@@ -0,0 +1,55 @@
+/*===-- llvm-c/Initialization.h - Initialization C Interface ------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to LLVM initialization routines, *|
+|* which must be called before you can use the functionality provided by *|
+|* the corresponding LLVM library. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_INITIALIZATION_H
+#define LLVM_C_INITIALIZATION_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCInitialization Initialization Routines
+ * @ingroup LLVMC
+ *
+ * This module contains routines used to initialize the LLVM system.
+ *
+ * @{
+ */
+
+void LLVMInitializeCore(LLVMPassRegistryRef R);
+void LLVMInitializeTransformUtils(LLVMPassRegistryRef R);
+void LLVMInitializeScalarOpts(LLVMPassRegistryRef R);
+void LLVMInitializeObjCARCOpts(LLVMPassRegistryRef R);
+void LLVMInitializeVectorization(LLVMPassRegistryRef R);
+void LLVMInitializeInstCombine(LLVMPassRegistryRef R);
+void LLVMInitializeIPO(LLVMPassRegistryRef R);
+void LLVMInitializeInstrumentation(LLVMPassRegistryRef R);
+void LLVMInitializeAnalysis(LLVMPassRegistryRef R);
+void LLVMInitializeIPA(LLVMPassRegistryRef R);
+void LLVMInitializeCodeGen(LLVMPassRegistryRef R);
+void LLVMInitializeTarget(LLVMPassRegistryRef R);
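+
+/*
+ * For example, to register the core and analysis passes with the global
+ * registry (a sketch; LLVMGetGlobalPassRegistry() is declared in
+ * llvm-c/Core.h):
+ *
+ *   LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
+ *   LLVMInitializeCore(R);
+ *   LLVMInitializeAnalysis(R);
+ */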
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
new file mode 100644
index 0000000..8bcf599
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/LinkTimeOptimizer.h
@@ -0,0 +1,69 @@
+//===-- llvm/LinkTimeOptimizer.h - LTO Public C Interface -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header provides a C API to use the LLVM link time optimization
+// library. This is intended to be used by linkers which are C-only in
+// their implementation for performing LTO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_C_LINKTIMEOPTIMIZER_H
+#define LLVM_C_LINKTIMEOPTIMIZER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCLinkTimeOptimizer Link Time Optimization
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+ /// This provides a dummy type for pointers to the LTO object.
+ typedef void* llvm_lto_t;
+
+ /// This provides a C-visible enumerator to manage status codes.
+ /// This should map exactly onto the C++ enumerator LTOStatus.
+ typedef enum llvm_lto_status {
+ LLVM_LTO_UNKNOWN,
+ LLVM_LTO_OPT_SUCCESS,
+ LLVM_LTO_READ_SUCCESS,
+ LLVM_LTO_READ_FAILURE,
+ LLVM_LTO_WRITE_FAILURE,
+ LLVM_LTO_NO_TARGET,
+ LLVM_LTO_NO_WORK,
+ LLVM_LTO_MODULE_MERGE_FAILURE,
+ LLVM_LTO_ASM_FAILURE,
+
+ // Added C-specific error codes
+ LLVM_LTO_NULL_OBJECT
+ } llvm_lto_status_t;
+
+ /// This provides a C interface to initialize the link time optimizer. This
+ /// allows a linker to use the dlopen() interface to dynamically load
+ /// LinkTimeOptimizer. extern "C" helps, because the dlopen() interface uses
+ /// the name to find the symbol.
+ extern llvm_lto_t llvm_create_optimizer(void);
+ extern void llvm_destroy_optimizer(llvm_lto_t lto);
+
+ extern llvm_lto_status_t llvm_read_object_file
+ (llvm_lto_t lto, const char* input_filename);
+ extern llvm_lto_status_t llvm_optimize_modules
+ (llvm_lto_t lto, const char* output_filename);
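+
+ /// A minimal sketch of the intended call sequence ("input.o" and
+ /// "output.s" are assumed file names, and status checking is abbreviated):
+ ///
+ ///   llvm_lto_t lto = llvm_create_optimizer();
+ ///   if (llvm_read_object_file(lto, "input.o") == LLVM_LTO_READ_SUCCESS)
+ ///     llvm_optimize_modules(lto, "output.s");
+ ///   llvm_destroy_optimizer(lto);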
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/Linker.h b/contrib/llvm/include/llvm-c/Linker.h
new file mode 100644
index 0000000..4d9bd46
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Linker.h
@@ -0,0 +1,55 @@
+/*===-- llvm-c/Linker.h - Module Linker C Interface -------------*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to the module/file/archive linker. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_LINKER_H
+#define LLVM_C_LINKER_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This enum is provided for backwards-compatibility only. It has no effect. */
+typedef enum {
+ LLVMLinkerDestroySource = 0, /* This is the default behavior. */
+ LLVMLinkerPreserveSource_Removed = 1 /* This option has been deprecated and
+ should not be used. */
+} LLVMLinkerMode;
+
+/* Links the source module into the destination module. The source module is
+ * damaged. The only thing that can be done is to destroy it. Optionally returns a
+ * human-readable description of any errors that occurred in linking. OutMessage
+ * must be disposed with LLVMDisposeMessage. The return value is true if an
+ * error occurred, false otherwise.
+ *
+ * Note that the linker mode parameter \p Unused is no longer used, and has
+ * no effect.
+ *
+ * This function is deprecated. Use LLVMLinkModules2 instead.
+ */
+LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
+ LLVMLinkerMode Unused, char **OutMessage);
+
+/* Links the source module into the destination module. The source module is
+ * destroyed.
+ * The return value is true if an error occurred, false otherwise.
+ * Use the diagnostic handler to get any diagnostic message.
+*/
+LLVMBool LLVMLinkModules2(LLVMModuleRef Dest, LLVMModuleRef Src);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
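A short usage sketch, assuming both modules live in the same context and that
a diagnostic handler has been installed on that context (the deprecated
LLVMLinkModules entry point is deliberately avoided):

    #include "llvm-c/Linker.h"

    /* Src is consumed by the call whether or not linking succeeds; do not
       touch it afterwards. Returns 0 on success, 1 on error. */
    int link_into(LLVMModuleRef Dest, LLVMModuleRef Src) {
      return LLVMLinkModules2(Dest, Src) ? 1 : 0;
    }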
diff --git a/contrib/llvm/include/llvm-c/Object.h b/contrib/llvm/include/llvm-c/Object.h
new file mode 100644
index 0000000..a2980e8
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Object.h
@@ -0,0 +1,100 @@
+/*===-- llvm-c/Object.h - Object Lib C Iface --------------------*- C++ -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+/* */
+/* This header declares the C interface to libLLVMObject.a, which */
+/* implements object file reading and writing. */
+/* */
+/* Many exotic languages can interoperate with C code but have a harder time */
+/* with C++ due to name mangling. So in addition to C, this interface enables */
+/* tools written in such languages. */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_OBJECT_H
+#define LLVM_C_OBJECT_H
+
+#include "llvm-c/Types.h"
+#include "llvm/Config/llvm-config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCObject Object file reading and writing
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+// Opaque type wrappers
+typedef struct LLVMOpaqueObjectFile *LLVMObjectFileRef;
+typedef struct LLVMOpaqueSectionIterator *LLVMSectionIteratorRef;
+typedef struct LLVMOpaqueSymbolIterator *LLVMSymbolIteratorRef;
+typedef struct LLVMOpaqueRelocationIterator *LLVMRelocationIteratorRef;
+
+// ObjectFile creation
+LLVMObjectFileRef LLVMCreateObjectFile(LLVMMemoryBufferRef MemBuf);
+void LLVMDisposeObjectFile(LLVMObjectFileRef ObjectFile);
+
+// ObjectFile Section iterators
+LLVMSectionIteratorRef LLVMGetSections(LLVMObjectFileRef ObjectFile);
+void LLVMDisposeSectionIterator(LLVMSectionIteratorRef SI);
+LLVMBool LLVMIsSectionIteratorAtEnd(LLVMObjectFileRef ObjectFile,
+ LLVMSectionIteratorRef SI);
+void LLVMMoveToNextSection(LLVMSectionIteratorRef SI);
+void LLVMMoveToContainingSection(LLVMSectionIteratorRef Sect,
+ LLVMSymbolIteratorRef Sym);
+
+// ObjectFile Symbol iterators
+LLVMSymbolIteratorRef LLVMGetSymbols(LLVMObjectFileRef ObjectFile);
+void LLVMDisposeSymbolIterator(LLVMSymbolIteratorRef SI);
+LLVMBool LLVMIsSymbolIteratorAtEnd(LLVMObjectFileRef ObjectFile,
+ LLVMSymbolIteratorRef SI);
+void LLVMMoveToNextSymbol(LLVMSymbolIteratorRef SI);
+
+// SectionRef accessors
+const char *LLVMGetSectionName(LLVMSectionIteratorRef SI);
+uint64_t LLVMGetSectionSize(LLVMSectionIteratorRef SI);
+const char *LLVMGetSectionContents(LLVMSectionIteratorRef SI);
+uint64_t LLVMGetSectionAddress(LLVMSectionIteratorRef SI);
+LLVMBool LLVMGetSectionContainsSymbol(LLVMSectionIteratorRef SI,
+ LLVMSymbolIteratorRef Sym);
+
+// Section Relocation iterators
+LLVMRelocationIteratorRef LLVMGetRelocations(LLVMSectionIteratorRef Section);
+void LLVMDisposeRelocationIterator(LLVMRelocationIteratorRef RI);
+LLVMBool LLVMIsRelocationIteratorAtEnd(LLVMSectionIteratorRef Section,
+ LLVMRelocationIteratorRef RI);
+void LLVMMoveToNextRelocation(LLVMRelocationIteratorRef RI);
+
+
+// SymbolRef accessors
+const char *LLVMGetSymbolName(LLVMSymbolIteratorRef SI);
+uint64_t LLVMGetSymbolAddress(LLVMSymbolIteratorRef SI);
+uint64_t LLVMGetSymbolSize(LLVMSymbolIteratorRef SI);
+
+// RelocationRef accessors
+uint64_t LLVMGetRelocationOffset(LLVMRelocationIteratorRef RI);
+LLVMSymbolIteratorRef LLVMGetRelocationSymbol(LLVMRelocationIteratorRef RI);
+uint64_t LLVMGetRelocationType(LLVMRelocationIteratorRef RI);
+// NOTE: The caller takes ownership of the strings returned by the two
+// functions below.
+const char *LLVMGetRelocationTypeName(LLVMRelocationIteratorRef RI);
+const char *LLVMGetRelocationValueString(LLVMRelocationIteratorRef RI);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
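A sketch that lists the sections of an object file. The memory buffer is
assumed to come from LLVMCreateMemoryBufferWithContentsOfFile() in
llvm-c/Core.h; the object file takes ownership of it, so disposing the object
file also releases the buffer:

    #include "llvm-c/Object.h"
    #include <stdio.h>

    void dump_sections(LLVMMemoryBufferRef MemBuf) {
      LLVMObjectFileRef Obj = LLVMCreateObjectFile(MemBuf);
      if (!Obj)
        return;

      LLVMSectionIteratorRef SI = LLVMGetSections(Obj);
      while (!LLVMIsSectionIteratorAtEnd(Obj, SI)) {
        printf("%-20s %8llu bytes @ 0x%llx\n", LLVMGetSectionName(SI),
               (unsigned long long)LLVMGetSectionSize(SI),
               (unsigned long long)LLVMGetSectionAddress(SI));
        LLVMMoveToNextSection(SI);
      }
      LLVMDisposeSectionIterator(SI);
      LLVMDisposeObjectFile(Obj); /* also frees the owned buffer */
    }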
diff --git a/contrib/llvm/include/llvm-c/OrcBindings.h b/contrib/llvm/include/llvm-c/OrcBindings.h
new file mode 100644
index 0000000..f6aff91
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/OrcBindings.h
@@ -0,0 +1,134 @@
+/*===----------- llvm-c/OrcBindings.h - Orc Lib C Iface ---------*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMOrcJIT.a, which implements *|
+|* JIT compilation of LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+|* Note: This interface is experimental. It is *NOT* stable, and may be *|
+|* changed without warning. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_ORCBINDINGS_H
+#define LLVM_C_ORCBINDINGS_H
+
+#include "llvm-c/Object.h"
+#include "llvm-c/Support.h"
+#include "llvm-c/TargetMachine.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct LLVMOrcOpaqueJITStack *LLVMOrcJITStackRef;
+typedef uint32_t LLVMOrcModuleHandle;
+typedef uint64_t LLVMOrcTargetAddress;
+typedef uint64_t (*LLVMOrcSymbolResolverFn)(const char *Name,
+ void *LookupCtx);
+typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
+ void *CallbackCtx);
+
+/**
+ * Create an ORC JIT stack.
+ *
+ * The client owns the resulting stack, and must call LLVMOrcDisposeInstance()
+ * to destroy it and free its memory. The JIT stack will take ownership of the
+ * TargetMachine, which will be destroyed when the stack is destroyed. The
+ * client should not attempt to dispose of the TargetMachine itself, as that
+ * would result in a double-free.
+ */
+LLVMOrcJITStackRef LLVMOrcCreateInstance(LLVMTargetMachineRef TM);
+
+/**
+ * Mangle the given symbol.
+ * Memory will be allocated for MangledSymbol to hold the result. The client
+ * owns this memory and must free it with LLVMOrcDisposeMangledSymbol().
+ */
+void LLVMOrcGetMangledSymbol(LLVMOrcJITStackRef JITStack, char **MangledSymbol,
+ const char *Symbol);
+
+/**
+ * Dispose of a mangled symbol.
+ */
+void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
+
+/**
+ * Create a lazy compile callback.
+ */
+LLVMOrcTargetAddress
+LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
+ LLVMOrcLazyCompileCallbackFn Callback,
+ void *CallbackCtx);
+
+/**
+ * Create a named indirect call stub.
+ */
+void LLVMOrcCreateIndirectStub(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress InitAddr);
+
+/**
+ * Set the pointer for the given indirect stub.
+ */
+void LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
+ const char *StubName,
+ LLVMOrcTargetAddress NewAddr);
+
+/**
+ * Add module to be eagerly compiled.
+ */
+LLVMOrcModuleHandle
+LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
+
+/**
+ * Add module to be lazily compiled one function at a time.
+ */
+LLVMOrcModuleHandle
+LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack, LLVMModuleRef Mod,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
+
+/**
+ * Add an object file.
+ */
+LLVMOrcModuleHandle
+LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack, LLVMObjectFileRef Obj,
+ LLVMOrcSymbolResolverFn SymbolResolver,
+ void *SymbolResolverCtx);
+
+/**
+ * Remove a module set from the JIT.
+ *
+ * This works for all modules that can be added via LLVMOrcAdd*, including
+ * object files.
+ */
+void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H);
+
+/**
+ * Get symbol address from JIT instance.
+ */
+LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
+ const char *SymbolName);
+
+/**
+ * Dispose of an ORC JIT stack.
+ */
+void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
+
+#ifdef __cplusplus
+}
+#endif /* extern "C" */
+
+#endif /* LLVM_C_ORCBINDINGS_H */
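A sketch of the eager-compilation path, assuming Mod defines a function named
main and that TM came from LLVMCreateTargetMachine() (the stack takes
ownership of TM, per the comment above). The zero-returning resolver is a
placeholder; real clients resolve external symbols here. The
integer-to-function-pointer cast is the usual JIT idiom:

    #include "llvm-c/OrcBindings.h"
    #include <stdint.h>

    static uint64_t resolver(const char *Name, void *Ctx) {
      (void)Name; (void)Ctx;
      return 0; /* unknown symbol */
    }

    int jit_and_run(LLVMTargetMachineRef TM, LLVMModuleRef Mod) {
      LLVMOrcJITStackRef JIT = LLVMOrcCreateInstance(TM);
      LLVMOrcModuleHandle H =
          LLVMOrcAddEagerlyCompiledIR(JIT, Mod, resolver, NULL);

      char *Mangled;
      LLVMOrcGetMangledSymbol(JIT, &Mangled, "main");
      LLVMOrcTargetAddress Addr = LLVMOrcGetSymbolAddress(JIT, Mangled);
      LLVMOrcDisposeMangledSymbol(Mangled);

      int rc = Addr ? ((int (*)(void))(uintptr_t)Addr)() : -1;
      LLVMOrcRemoveModule(JIT, H);
      LLVMOrcDisposeInstance(JIT);
      return rc;
    }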
diff --git a/contrib/llvm/include/llvm-c/Support.h b/contrib/llvm/include/llvm-c/Support.h
new file mode 100644
index 0000000..735d1fb
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Support.h
@@ -0,0 +1,65 @@
+/*===-- llvm-c/Support.h - Support C Interface --------------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C interface to the LLVM support library. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_SUPPORT_H
+#define LLVM_C_SUPPORT_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This function permanently loads the dynamic library at the given path.
+ * It is safe to call this function multiple times for the same library.
+ *
+ * @see sys::DynamicLibrary::LoadLibraryPermanently()
+ */
+LLVMBool LLVMLoadLibraryPermanently(const char* Filename);
+
+/**
+ * This function parses the given arguments using the LLVM command line parser.
+ * Note that the only stable thing about this function is its signature; you
+ * cannot rely on any particular set of command line arguments being interpreted
+ * the same way across LLVM versions.
+ *
+ * @see llvm::cl::ParseCommandLineOptions()
+ */
+void LLVMParseCommandLineOptions(int argc, const char *const *argv,
+ const char *Overview);
+
+/**
+ * This function will search through all previously loaded dynamic
+ * libraries for the symbol \p symbolName. If it is found, the address of
+ * that symbol is returned. If not, null is returned.
+ *
+ * @see sys::DynamicLibrary::SearchForAddressOfSymbol()
+ */
+void *LLVMSearchForAddressOfSymbol(const char *symbolName);
+
+/**
+ * This function permanently adds the symbol \p symbolName with the
+ * value \p symbolValue. These symbols are searched before any
+ * libraries.
+ *
+ * @see sys::DynamicLibrary::AddSymbol()
+ */
+void LLVMAddSymbol(const char *symbolName, void *symbolValue);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
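A small sketch of the symbol facilities. The assumption that a NULL filename
maps to the running program mirrors sys::DynamicLibrary::
LoadLibraryPermanently(); my_counter is a made-up symbol name:

    #include "llvm-c/Support.h"
    #include <stdio.h>

    static int counter;

    int main(void) {
      /* Explicitly added symbols are searched before any loaded library. */
      LLVMAddSymbol("my_counter", &counter);

      /* NULL is assumed to load the running program itself; returns true
         (nonzero) on failure. */
      if (LLVMLoadLibraryPermanently(NULL))
        return 1;

      void *p = LLVMSearchForAddressOfSymbol("my_counter");
      printf("my_counter resolved to %p\n", p);
      return 0;
    }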
diff --git a/contrib/llvm/include/llvm-c/Target.h b/contrib/llvm/include/llvm-c/Target.h
new file mode 100644
index 0000000..24d2cb4
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Target.h
@@ -0,0 +1,290 @@
+/*===-- llvm-c/Target.h - Target Lib C Iface --------------------*- C++ -*-===*/
+/* */
+/* The LLVM Compiler Infrastructure */
+/* */
+/* This file is distributed under the University of Illinois Open Source */
+/* License. See LICENSE.TXT for details. */
+/* */
+/*===----------------------------------------------------------------------===*/
+/* */
+/* This header declares the C interface to libLLVMTarget.a, which */
+/* implements target information. */
+/* */
+/* Many exotic languages can interoperate with C code but have a harder time */
+/* with C++ due to name mangling. So in addition to C, this interface enables */
+/* tools written in such languages. */
+/* */
+/*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TARGET_H
+#define LLVM_C_TARGET_H
+
+#include "llvm-c/Types.h"
+#include "llvm/Config/llvm-config.h"
+
+#if defined(_MSC_VER) && !defined(inline)
+#define inline __inline
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTarget Target information
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+enum LLVMByteOrdering { LLVMBigEndian, LLVMLittleEndian };
+
+typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
+typedef struct LLVMOpaqueTargetLibraryInfotData *LLVMTargetLibraryInfoRef;
+
+/* Declare all of the target-initialization functions that are available. */
+#define LLVM_TARGET(TargetName) \
+ void LLVMInitialize##TargetName##TargetInfo(void);
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target(void);
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+
+#define LLVM_TARGET(TargetName) \
+ void LLVMInitialize##TargetName##TargetMC(void);
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+
+/* Declare all of the available assembly printer initialization functions. */
+#define LLVM_ASM_PRINTER(TargetName) \
+ void LLVMInitialize##TargetName##AsmPrinter(void);
+#include "llvm/Config/AsmPrinters.def"
+#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
+
+/* Declare all of the available assembly parser initialization functions. */
+#define LLVM_ASM_PARSER(TargetName) \
+ void LLVMInitialize##TargetName##AsmParser(void);
+#include "llvm/Config/AsmParsers.def"
+#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
+
+/* Declare all of the available disassembler initialization functions. */
+#define LLVM_DISASSEMBLER(TargetName) \
+ void LLVMInitialize##TargetName##Disassembler(void);
+#include "llvm/Config/Disassemblers.def"
+#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
+
+/** LLVMInitializeAllTargetInfos - The main program should call this function if
+ it wants access to all available targets that LLVM is configured to
+ support. */
+static inline void LLVMInitializeAllTargetInfos(void) {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllTargets - The main program should call this function if it
+ wants to link in all available targets that LLVM is configured to
+ support. */
+static inline void LLVMInitializeAllTargets(void) {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllTargetMCs - The main program should call this function if
+    it wants access to the MC layers of all targets that LLVM is configured to
+ support. */
+static inline void LLVMInitializeAllTargetMCs(void) {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+#undef LLVM_TARGET /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllAsmPrinters - The main program should call this function if
+ it wants all asm printers that LLVM is configured to support, to make them
+ available via the TargetRegistry. */
+static inline void LLVMInitializeAllAsmPrinters(void) {
+#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllAsmParsers - The main program should call this function if
+ it wants all asm parsers that LLVM is configured to support, to make them
+ available via the TargetRegistry. */
+static inline void LLVMInitializeAllAsmParsers(void) {
+#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeAllDisassemblers - The main program should call this function
+ if it wants all disassemblers that LLVM is configured to support, to make
+ them available via the TargetRegistry. */
+static inline void LLVMInitializeAllDisassemblers(void) {
+#define LLVM_DISASSEMBLER(TargetName) \
+ LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
+}
+
+/** LLVMInitializeNativeTarget - The main program should call this function to
+ initialize the native target corresponding to the host. This is useful
+ for JIT applications to ensure that the target gets linked in correctly. */
+static inline LLVMBool LLVMInitializeNativeTarget(void) {
+ /* If we have a native target, initialize it to ensure it is linked in. */
+#ifdef LLVM_NATIVE_TARGET
+ LLVM_NATIVE_TARGETINFO();
+ LLVM_NATIVE_TARGET();
+ LLVM_NATIVE_TARGETMC();
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/** LLVMInitializeNativeAsmParser - The main program should call this
+ function to initialize the parser for the native target corresponding to the
+ host. */
+static inline LLVMBool LLVMInitializeNativeAsmParser(void) {
+#ifdef LLVM_NATIVE_ASMPARSER
+ LLVM_NATIVE_ASMPARSER();
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/** LLVMInitializeNativeAsmPrinter - The main program should call this
+ function to initialize the printer for the native target corresponding to
+ the host. */
+static inline LLVMBool LLVMInitializeNativeAsmPrinter(void) {
+#ifdef LLVM_NATIVE_ASMPRINTER
+ LLVM_NATIVE_ASMPRINTER();
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/** LLVMInitializeNativeDisassembler - The main program should call this
+ function to initialize the disassembler for the native target corresponding
+ to the host. */
+static inline LLVMBool LLVMInitializeNativeDisassembler(void) {
+#ifdef LLVM_NATIVE_DISASSEMBLER
+ LLVM_NATIVE_DISASSEMBLER();
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/*===-- Target Data -------------------------------------------------------===*/
+
+/** Creates target data from a target layout string.
+ See the constructor llvm::DataLayout::DataLayout. */
+LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep);
+
+/** Adds target data information to a pass manager. This does not take ownership
+ of the target data.
+ See the method llvm::PassManagerBase::add. */
+void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM);
+
+/** Adds target library information to a pass manager. This does not take
+ ownership of the target library info.
+ See the method llvm::PassManagerBase::add. */
+void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI,
+ LLVMPassManagerRef PM);
+
+/** Converts target data to a target layout string. The string must be disposed
+ with LLVMDisposeMessage.
+ See the constructor llvm::DataLayout::DataLayout. */
+char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef TD);
+
+/** Returns the byte order of a target, either LLVMBigEndian or
+ LLVMLittleEndian.
+ See the method llvm::DataLayout::isLittleEndian. */
+enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD);
+
+/** Returns the pointer size in bytes for a target.
+ See the method llvm::DataLayout::getPointerSize. */
+unsigned LLVMPointerSize(LLVMTargetDataRef TD);
+
+/** Returns the pointer size in bytes for a target for a specified
+ address space.
+ See the method llvm::DataLayout::getPointerSize. */
+unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS);
+
+/** Returns the integer type that is the same size as a pointer on a target.
+ See the method llvm::DataLayout::getIntPtrType. */
+LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD);
+
+/** Returns the integer type that is the same size as a pointer on a target.
+ This version allows the address space to be specified.
+ See the method llvm::DataLayout::getIntPtrType. */
+LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS);
+
+/** Returns the integer type that is the same size as a pointer on a target.
+ See the method llvm::DataLayout::getIntPtrType. */
+LLVMTypeRef LLVMIntPtrTypeInContext(LLVMContextRef C, LLVMTargetDataRef TD);
+
+/** Returns the integer type that is the same size as a pointer on a target.
+ This version allows the address space to be specified.
+ See the method llvm::DataLayout::getIntPtrType. */
+LLVMTypeRef LLVMIntPtrTypeForASInContext(LLVMContextRef C, LLVMTargetDataRef TD,
+ unsigned AS);
+
+/** Computes the size of a type in bits for a target.
+ See the method llvm::DataLayout::getTypeSizeInBits. */
+unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the storage size of a type in bytes for a target.
+ See the method llvm::DataLayout::getTypeStoreSize. */
+unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the ABI size of a type in bytes for a target.
+ See the method llvm::DataLayout::getTypeAllocSize. */
+unsigned long long LLVMABISizeOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the ABI alignment of a type in bytes for a target.
+    See the method llvm::DataLayout::getABITypeAlignment. */
+unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the call frame alignment of a type in bytes for a target.
+    See the method llvm::DataLayout::getCallFrameTypeAlignment. */
+unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the preferred alignment of a type in bytes for a target.
+    See the method llvm::DataLayout::getPrefTypeAlignment. */
+unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef TD, LLVMTypeRef Ty);
+
+/** Computes the preferred alignment of a global variable in bytes for a target.
+ See the method llvm::DataLayout::getPreferredAlignment. */
+unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef TD,
+ LLVMValueRef GlobalVar);
+
+/** Computes the structure element that contains the byte offset for a target.
+ See the method llvm::StructLayout::getElementContainingOffset. */
+unsigned LLVMElementAtOffset(LLVMTargetDataRef TD, LLVMTypeRef StructTy,
+ unsigned long long Offset);
+
+/** Computes the byte offset of the indexed struct element for a target.
+    See the method llvm::StructLayout::getElementOffset. */
+unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef TD,
+ LLVMTypeRef StructTy, unsigned Element);
+
+/** Deallocates a TargetData.
+ See the destructor llvm::DataLayout::~DataLayout. */
+void LLVMDisposeTargetData(LLVMTargetDataRef TD);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
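A sketch of the data-layout queries above. The layout string is a typical
x86-64 one and purely illustrative; real code would normally obtain it from a
TargetMachine rather than hard-coding it:

    #include "llvm-c/Target.h"
    #include <stdio.h>

    int main(void) {
      if (LLVMInitializeNativeTarget())
        return 1; /* no native target was configured in */

      LLVMTargetDataRef TD =
          LLVMCreateTargetData("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
      printf("pointer size: %u bytes\n", LLVMPointerSize(TD));
      printf("byte order:   %s endian\n",
             LLVMByteOrder(TD) == LLVMLittleEndian ? "little" : "big");
      LLVMDisposeTargetData(TD);
      return 0;
    }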
diff --git a/contrib/llvm/include/llvm-c/TargetMachine.h b/contrib/llvm/include/llvm-c/TargetMachine.h
new file mode 100644
index 0000000..3037080
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/TargetMachine.h
@@ -0,0 +1,147 @@
+/*===-- llvm-c/TargetMachine.h - Target Machine Library C Interface - C++ -*-=*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to the Target and TargetMachine *|
+|* classes, which can be used to generate assembly or object files. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TARGETMACHINE_H
+#define LLVM_C_TARGETMACHINE_H
+
+#include "llvm-c/Types.h"
+#include "llvm-c/Target.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
+typedef struct LLVMTarget *LLVMTargetRef;
+
+typedef enum {
+ LLVMCodeGenLevelNone,
+ LLVMCodeGenLevelLess,
+ LLVMCodeGenLevelDefault,
+ LLVMCodeGenLevelAggressive
+} LLVMCodeGenOptLevel;
+
+typedef enum {
+ LLVMRelocDefault,
+ LLVMRelocStatic,
+ LLVMRelocPIC,
+ LLVMRelocDynamicNoPic
+} LLVMRelocMode;
+
+typedef enum {
+ LLVMCodeModelDefault,
+ LLVMCodeModelJITDefault,
+ LLVMCodeModelSmall,
+ LLVMCodeModelKernel,
+ LLVMCodeModelMedium,
+ LLVMCodeModelLarge
+} LLVMCodeModel;
+
+typedef enum {
+ LLVMAssemblyFile,
+ LLVMObjectFile
+} LLVMCodeGenFileType;
+
+/** Returns the first llvm::Target in the registered targets list. */
+LLVMTargetRef LLVMGetFirstTarget(void);
+/** Returns the next llvm::Target given a previous one (or null if there's none) */
+LLVMTargetRef LLVMGetNextTarget(LLVMTargetRef T);
+
+/*===-- Target ------------------------------------------------------------===*/
+/** Finds the target corresponding to the given name.
+    Returns NULL if no such target is registered. */
+LLVMTargetRef LLVMGetTargetFromName(const char *Name);
+
+/** Finds the target corresponding to the given triple and stores it in \p T.
+ Returns 0 on success. Optionally returns any error in ErrorMessage.
+ Use LLVMDisposeMessage to dispose the message. */
+LLVMBool LLVMGetTargetFromTriple(const char* Triple, LLVMTargetRef *T,
+ char **ErrorMessage);
+
+/** Returns the name of a target. See llvm::Target::getName */
+const char *LLVMGetTargetName(LLVMTargetRef T);
+
+/** Returns the description of a target. See llvm::Target::getDescription */
+const char *LLVMGetTargetDescription(LLVMTargetRef T);
+
+/** Returns whether the target has a JIT */
+LLVMBool LLVMTargetHasJIT(LLVMTargetRef T);
+
+/** Returns whether the target has a TargetMachine associated with it */
+LLVMBool LLVMTargetHasTargetMachine(LLVMTargetRef T);
+
+/** Returns whether the target has an ASM backend (required for emitting output) */
+LLVMBool LLVMTargetHasAsmBackend(LLVMTargetRef T);
+
+/*===-- Target Machine ----------------------------------------------------===*/
+/** Creates a new llvm::TargetMachine. See llvm::Target::createTargetMachine */
+LLVMTargetMachineRef LLVMCreateTargetMachine(LLVMTargetRef T,
+ const char *Triple, const char *CPU, const char *Features,
+ LLVMCodeGenOptLevel Level, LLVMRelocMode Reloc, LLVMCodeModel CodeModel);
+
+/** Dispose the LLVMTargetMachineRef instance generated by
+ LLVMCreateTargetMachine. */
+void LLVMDisposeTargetMachine(LLVMTargetMachineRef T);
+
+/** Returns the Target used in a TargetMachine */
+LLVMTargetRef LLVMGetTargetMachineTarget(LLVMTargetMachineRef T);
+
+/** Returns the triple used when creating this target machine. See
+ llvm::TargetMachine::getTriple. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineTriple(LLVMTargetMachineRef T);
+
+/** Returns the CPU used when creating this target machine. See
+ llvm::TargetMachine::getCPU. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
+
+/** Returns the feature string used when creating this target machine. See
+ llvm::TargetMachine::getFeatureString. The result needs to be disposed with
+ LLVMDisposeMessage. */
+char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
+
+/** Returns the llvm::DataLayout used for this llvm::TargetMachine. */
+LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T);
+
+/** Set the target machine's ASM verbosity. */
+void LLVMSetTargetMachineAsmVerbosity(LLVMTargetMachineRef T,
+ LLVMBool VerboseAsm);
+
+/** Emits an asm or object file for the given module to the filename. This
+    wraps several C++-only classes (among them a file stream). Returns any
+ error in ErrorMessage. Use LLVMDisposeMessage to dispose the message. */
+LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M,
+ char *Filename, LLVMCodeGenFileType codegen, char **ErrorMessage);
+
+/** Compile the LLVM IR stored in \p M and store the result in \p OutMemBuf. */
+LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T, LLVMModuleRef M,
+ LLVMCodeGenFileType codegen, char** ErrorMessage, LLVMMemoryBufferRef *OutMemBuf);
+
+/*===-- Triple ------------------------------------------------------------===*/
+/** Get a triple for the host machine as a string. The result needs to be
+ disposed with LLVMDisposeMessage. */
+char* LLVMGetDefaultTargetTriple(void);
+
+/** Adds the target-specific analysis passes to the pass manager. */
+void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
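A sketch of the usual emit-to-file flow for the host target. LLVMDisposeMessage()
lives in llvm-c/Core.h; the module M and the output name out.o are assumptions:

    #include "llvm-c/Core.h" /* for LLVMDisposeMessage() */
    #include "llvm-c/TargetMachine.h"
    #include <stdio.h>

    int emit_object(LLVMModuleRef M) {
      LLVMInitializeNativeTarget();
      LLVMInitializeNativeAsmPrinter();

      char *Triple = LLVMGetDefaultTargetTriple();
      LLVMTargetRef T;
      char *Err = NULL;
      if (LLVMGetTargetFromTriple(Triple, &T, &Err)) {
        fprintf(stderr, "%s\n", Err);
        LLVMDisposeMessage(Err);
        LLVMDisposeMessage(Triple);
        return 1;
      }

      LLVMTargetMachineRef TM = LLVMCreateTargetMachine(
          T, Triple, "", "", LLVMCodeGenLevelDefault, LLVMRelocDefault,
          LLVMCodeModelDefault);

      char OutPath[] = "out.o"; /* Filename is a mutable char*, not const */
      LLVMBool Failed =
          LLVMTargetMachineEmitToFile(TM, M, OutPath, LLVMObjectFile, &Err);
      if (Failed) {
        fprintf(stderr, "%s\n", Err);
        LLVMDisposeMessage(Err);
      }

      LLVMDisposeTargetMachine(TM);
      LLVMDisposeMessage(Triple);
      return Failed ? 1 : 0;
    }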
diff --git a/contrib/llvm/include/llvm-c/Transforms/IPO.h b/contrib/llvm/include/llvm-c/Transforms/IPO.h
new file mode 100644
index 0000000..3af7425
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/IPO.h
@@ -0,0 +1,81 @@
+/*===-- IPO.h - Interprocedural Transformations C Interface -----*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMIPO.a, which implements *|
+|* various interprocedural transformations of the LLVM IR. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_IPO_H
+#define LLVM_C_TRANSFORMS_IPO_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsIPO Interprocedural transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createArgumentPromotionPass function. */
+void LLVMAddArgumentPromotionPass(LLVMPassManagerRef PM);
+
+/** See llvm::createConstantMergePass function. */
+void LLVMAddConstantMergePass(LLVMPassManagerRef PM);
+
+/** See llvm::createDeadArgEliminationPass function. */
+void LLVMAddDeadArgEliminationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createFunctionAttrsPass function. */
+void LLVMAddFunctionAttrsPass(LLVMPassManagerRef PM);
+
+/** See llvm::createFunctionInliningPass function. */
+void LLVMAddFunctionInliningPass(LLVMPassManagerRef PM);
+
+/** See llvm::createAlwaysInlinerPass function. */
+void LLVMAddAlwaysInlinerPass(LLVMPassManagerRef PM);
+
+/** See llvm::createGlobalDCEPass function. */
+void LLVMAddGlobalDCEPass(LLVMPassManagerRef PM);
+
+/** See llvm::createGlobalOptimizerPass function. */
+void LLVMAddGlobalOptimizerPass(LLVMPassManagerRef PM);
+
+/** See llvm::createIPConstantPropagationPass function. */
+void LLVMAddIPConstantPropagationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createPruneEHPass function. */
+void LLVMAddPruneEHPass(LLVMPassManagerRef PM);
+
+/** See llvm::createIPSCCPPass function. */
+void LLVMAddIPSCCPPass(LLVMPassManagerRef PM);
+
+/** See llvm::createInternalizePass function. */
+void LLVMAddInternalizePass(LLVMPassManagerRef, unsigned AllButMain);
+
+/** See llvm::createStripDeadPrototypesPass function. */
+void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM);
+
+/** See llvm::createStripSymbolsPass function. */
+void LLVMAddStripSymbolsPass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
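A sketch that stacks a few of these passes on a module pass manager. The
selection and ordering are illustrative, not prescribed by the API; the pass
manager itself comes from LLVMCreatePassManager() in llvm-c/Core.h:

    #include "llvm-c/Transforms/IPO.h"

    void add_ipo_passes(LLVMPassManagerRef PM) {
      LLVMAddGlobalDCEPass(PM);          /* drop unreferenced globals     */
      LLVMAddGlobalOptimizerPass(PM);    /* simplify remaining globals    */
      LLVMAddIPSCCPPass(PM);             /* interprocedural const-prop    */
      LLVMAddDeadArgEliminationPass(PM); /* prune unused arguments        */
      LLVMAddFunctionInliningPass(PM);   /* inline w/ default threshold   */
    }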
diff --git a/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h b/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h
new file mode 100644
index 0000000..69786b3
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/PassManagerBuilder.h
@@ -0,0 +1,90 @@
+/*===-- llvm-c/Transforms/PassManagerBuilder.h - PMB C Interface --*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to the PassManagerBuilder class. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_PASSMANAGERBUILDER_H
+#define LLVM_C_TRANSFORMS_PASSMANAGERBUILDER_H
+
+#include "llvm-c/Types.h"
+
+typedef struct LLVMOpaquePassManagerBuilder *LLVMPassManagerBuilderRef;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsPassManagerBuilder Pass manager builder
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::PassManagerBuilder. */
+LLVMPassManagerBuilderRef LLVMPassManagerBuilderCreate(void);
+void LLVMPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB);
+
+/** See llvm::PassManagerBuilder::OptLevel. */
+void
+LLVMPassManagerBuilderSetOptLevel(LLVMPassManagerBuilderRef PMB,
+ unsigned OptLevel);
+
+/** See llvm::PassManagerBuilder::SizeLevel. */
+void
+LLVMPassManagerBuilderSetSizeLevel(LLVMPassManagerBuilderRef PMB,
+ unsigned SizeLevel);
+
+/** See llvm::PassManagerBuilder::DisableUnitAtATime. */
+void
+LLVMPassManagerBuilderSetDisableUnitAtATime(LLVMPassManagerBuilderRef PMB,
+ LLVMBool Value);
+
+/** See llvm::PassManagerBuilder::DisableUnrollLoops. */
+void
+LLVMPassManagerBuilderSetDisableUnrollLoops(LLVMPassManagerBuilderRef PMB,
+ LLVMBool Value);
+
+/** See llvm::PassManagerBuilder::DisableSimplifyLibCalls */
+void
+LLVMPassManagerBuilderSetDisableSimplifyLibCalls(LLVMPassManagerBuilderRef PMB,
+ LLVMBool Value);
+
+/** See llvm::PassManagerBuilder::Inliner. */
+void
+LLVMPassManagerBuilderUseInlinerWithThreshold(LLVMPassManagerBuilderRef PMB,
+ unsigned Threshold);
+
+/** See llvm::PassManagerBuilder::populateFunctionPassManager. */
+void
+LLVMPassManagerBuilderPopulateFunctionPassManager(LLVMPassManagerBuilderRef PMB,
+ LLVMPassManagerRef PM);
+
+/** See llvm::PassManagerBuilder::populateModulePassManager. */
+void
+LLVMPassManagerBuilderPopulateModulePassManager(LLVMPassManagerBuilderRef PMB,
+ LLVMPassManagerRef PM);
+
+/** See llvm::PassManagerBuilder::populateLTOPassManager. */
+void LLVMPassManagerBuilderPopulateLTOPassManager(LLVMPassManagerBuilderRef PMB,
+ LLVMPassManagerRef PM,
+ LLVMBool Internalize,
+ LLVMBool RunInliner);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
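A sketch that approximates an -O2 module pipeline through the builder. The
inline threshold of 225 is the value clang historically uses at -O2, quoted
here as an assumption rather than a guarantee:

    #include "llvm-c/Transforms/PassManagerBuilder.h"

    void populate_O2(LLVMPassManagerRef PM) {
      LLVMPassManagerBuilderRef PMB = LLVMPassManagerBuilderCreate();
      LLVMPassManagerBuilderSetOptLevel(PMB, 2);
      LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, 225);
      LLVMPassManagerBuilderPopulateModulePassManager(PMB, PM);
      LLVMPassManagerBuilderDispose(PMB);
    }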
diff --git a/contrib/llvm/include/llvm-c/Transforms/Scalar.h b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
new file mode 100644
index 0000000..c989ee8
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/Scalar.h
@@ -0,0 +1,158 @@
+/*===-- Scalar.h - Scalar Transformation Library C Interface ----*- C++ -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMScalarOpts.a, which *|
+|* implements various scalar transformations of the LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_SCALAR_H
+#define LLVM_C_TRANSFORMS_SCALAR_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsScalar Scalar transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createAggressiveDCEPass function. */
+void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM);
+
+/** See llvm::createBitTrackingDCEPass function. */
+void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM);
+
+/** See llvm::createAlignmentFromAssumptionsPass function. */
+void LLVMAddAlignmentFromAssumptionsPass(LLVMPassManagerRef PM);
+
+/** See llvm::createCFGSimplificationPass function. */
+void LLVMAddCFGSimplificationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createDeadStoreEliminationPass function. */
+void LLVMAddDeadStoreEliminationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createScalarizerPass function. */
+void LLVMAddScalarizerPass(LLVMPassManagerRef PM);
+
+/** See llvm::createMergedLoadStoreMotionPass function. */
+void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM);
+
+/** See llvm::createGVNPass function. */
+void LLVMAddGVNPass(LLVMPassManagerRef PM);
+
+/** See llvm::createIndVarSimplifyPass function. */
+void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM);
+
+/** See llvm::createInstructionCombiningPass function. */
+void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM);
+
+/** See llvm::createJumpThreadingPass function. */
+void LLVMAddJumpThreadingPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLICMPass function. */
+void LLVMAddLICMPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopDeletionPass function. */
+void LLVMAddLoopDeletionPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopIdiomPass function */
+void LLVMAddLoopIdiomPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopRotatePass function. */
+void LLVMAddLoopRotatePass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopRerollPass function. */
+void LLVMAddLoopRerollPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopUnrollPass function. */
+void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopUnswitchPass function. */
+void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM);
+
+/** See llvm::createMemCpyOptPass function. */
+void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM);
+
+/** See llvm::createPartiallyInlineLibCallsPass function. */
+void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLowerSwitchPass function. */
+void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM);
+
+/** See llvm::createPromoteMemoryToRegisterPass function. */
+void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM);
+
+/** See llvm::createReassociatePass function. */
+void LLVMAddReassociatePass(LLVMPassManagerRef PM);
+
+/** See llvm::createSCCPPass function. */
+void LLVMAddSCCPPass(LLVMPassManagerRef PM);
+
+/** See llvm::createScalarReplAggregatesPass function. */
+void LLVMAddScalarReplAggregatesPass(LLVMPassManagerRef PM);
+
+/** See llvm::createScalarReplAggregatesPass function. */
+void LLVMAddScalarReplAggregatesPassSSA(LLVMPassManagerRef PM);
+
+/** See llvm::createScalarReplAggregatesPass function. */
+void LLVMAddScalarReplAggregatesPassWithThreshold(LLVMPassManagerRef PM,
+ int Threshold);
+
+/** See llvm::createSimplifyLibCallsPass function. */
+void LLVMAddSimplifyLibCallsPass(LLVMPassManagerRef PM);
+
+/** See llvm::createTailCallEliminationPass function. */
+void LLVMAddTailCallEliminationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createConstantPropagationPass function. */
+void LLVMAddConstantPropagationPass(LLVMPassManagerRef PM);
+
+/** See llvm::demotePromoteMemoryToRegisterPass function. */
+void LLVMAddDemoteMemoryToRegisterPass(LLVMPassManagerRef PM);
+
+/** See llvm::createVerifierPass function. */
+void LLVMAddVerifierPass(LLVMPassManagerRef PM);
+
+/** See llvm::createCorrelatedValuePropagationPass function */
+void LLVMAddCorrelatedValuePropagationPass(LLVMPassManagerRef PM);
+
+/** See llvm::createEarlyCSEPass function */
+void LLVMAddEarlyCSEPass(LLVMPassManagerRef PM);
+
+/** See llvm::createLowerExpectIntrinsicPass function */
+void LLVMAddLowerExpectIntrinsicPass(LLVMPassManagerRef PM);
+
+/** See llvm::createTypeBasedAliasAnalysisPass function */
+void LLVMAddTypeBasedAliasAnalysisPass(LLVMPassManagerRef PM);
+
+/** See llvm::createScopedNoAliasAAPass function */
+void LLVMAddScopedNoAliasAAPass(LLVMPassManagerRef PM);
+
+/** See llvm::createBasicAliasAnalysisPass function */
+void LLVMAddBasicAliasAnalysisPass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
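A sketch of a common hand-rolled cleanup sequence on a function pass manager;
the ordering mirrors the classic Kaleidoscope tutorial and is a convention,
not an API requirement:

    #include "llvm-c/Transforms/Scalar.h"

    void add_scalar_passes(LLVMPassManagerRef FPM) {
      LLVMAddPromoteMemoryToRegisterPass(FPM); /* allocas -> SSA values   */
      LLVMAddEarlyCSEPass(FPM);
      LLVMAddInstructionCombiningPass(FPM);
      LLVMAddReassociatePass(FPM);
      LLVMAddGVNPass(FPM);                     /* redundancy elimination  */
      LLVMAddCFGSimplificationPass(FPM);       /* tidy the control flow   */
    }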
diff --git a/contrib/llvm/include/llvm-c/Transforms/Vectorize.h b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
new file mode 100644
index 0000000..a82ef49
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Transforms/Vectorize.h
@@ -0,0 +1,53 @@
+/*===---------------------------Vectorize.h --------------------- -*- C -*-===*\
+|*===----------- Vectorization Transformation Library C Interface ---------===*|
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header declares the C interface to libLLVMVectorize.a, which *|
+|* implements various vectorization transformations of the LLVM IR. *|
+|* *|
+|* Many exotic languages can interoperate with C code but have a harder time *|
+|* with C++ due to name mangling. So in addition to C, this interface enables *|
+|* tools written in such languages. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TRANSFORMS_VECTORIZE_H
+#define LLVM_C_TRANSFORMS_VECTORIZE_H
+
+#include "llvm-c/Types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCTransformsVectorize Vectorization transformations
+ * @ingroup LLVMCTransforms
+ *
+ * @{
+ */
+
+/** See llvm::createBBVectorizePass function. */
+void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
+
+/** See llvm::createLoopVectorizePass function. */
+void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM);
+
+/** See llvm::createSLPVectorizerPass function. */
+void LLVMAddSLPVectorizePass(LLVMPassManagerRef PM);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* defined(__cplusplus) */
+
+#endif
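Used the same way as the other transform headers; a minimal sketch:

    #include "llvm-c/Transforms/Vectorize.h"

    void add_vector_passes(LLVMPassManagerRef PM) {
      LLVMAddLoopVectorizePass(PM); /* vectorize counted loops first */
      LLVMAddSLPVectorizePass(PM);  /* then straight-line code       */
    }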
diff --git a/contrib/llvm/include/llvm-c/Types.h b/contrib/llvm/include/llvm-c/Types.h
new file mode 100644
index 0000000..1902958
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/Types.h
@@ -0,0 +1,124 @@
+/*===-- llvm-c/Types.h - C Interface Types declarations -----------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines types used by the C interface to LLVM.                  *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_TYPES_H
+#define LLVM_C_TYPES_H
+
+#include "llvm/Support/DataTypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup LLVMCSupportTypes Types and Enumerations
+ *
+ * @{
+ */
+
+typedef int LLVMBool;
+
+/* Opaque types. */
+
+/**
+ * LLVM uses a polymorphic type hierarchy which C cannot represent, therefore
+ * parameters must be passed as base types. Despite the declared types, most
+ * of the functions provided operate only on branches of the type hierarchy.
+ * The declared parameter names are descriptive and specify which type is
+ * required. Additionally, each type hierarchy is documented along with the
+ * functions that operate upon it. For more detail, refer to LLVM's C++ code.
+ * If in doubt, refer to Core.cpp, which performs parameter downcasts in the
+ * form unwrap<RequiredType>(Param).
+ */
+
+/**
+ * Used to pass regions of memory through LLVM interfaces.
+ *
+ * @see llvm::MemoryBuffer
+ */
+typedef struct LLVMOpaqueMemoryBuffer *LLVMMemoryBufferRef;
+
+/**
+ * The top-level container for all LLVM global data. See the LLVMContext class.
+ */
+typedef struct LLVMOpaqueContext *LLVMContextRef;
+
+/**
+ * The top-level container for all other LLVM Intermediate Representation (IR)
+ * objects.
+ *
+ * @see llvm::Module
+ */
+typedef struct LLVMOpaqueModule *LLVMModuleRef;
+
+/**
+ * Each value in the LLVM IR has a type, an LLVMTypeRef.
+ *
+ * @see llvm::Type
+ */
+typedef struct LLVMOpaqueType *LLVMTypeRef;
+
+/**
+ * Represents an individual value in LLVM IR.
+ *
+ * This models llvm::Value.
+ */
+typedef struct LLVMOpaqueValue *LLVMValueRef;
+
+/**
+ * Represents a basic block of instructions in LLVM IR.
+ *
+ * This models llvm::BasicBlock.
+ */
+typedef struct LLVMOpaqueBasicBlock *LLVMBasicBlockRef;
+
+/**
+ * Represents an LLVM basic block builder.
+ *
+ * This models llvm::IRBuilder.
+ */
+typedef struct LLVMOpaqueBuilder *LLVMBuilderRef;
+
+/**
+ * Interface used to provide a module to the JIT or interpreter.
+ * This is now just a synonym for llvm::Module, but the distinct type is kept
+ * for binary compatibility.
+ */
+typedef struct LLVMOpaqueModuleProvider *LLVMModuleProviderRef;
+
+/** @see llvm::PassManagerBase */
+typedef struct LLVMOpaquePassManager *LLVMPassManagerRef;
+
+/** @see llvm::PassRegistry */
+typedef struct LLVMOpaquePassRegistry *LLVMPassRegistryRef;
+
+/**
+ * Used to get the users and usees of a Value.
+ *
+ * @see llvm::Use
+ */
+typedef struct LLVMOpaqueUse *LLVMUseRef;
+
+/**
+ * @see llvm::DiagnosticInfo
+ */
+typedef struct LLVMOpaqueDiagnosticInfo *LLVMDiagnosticInfoRef;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
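These typedefs only name the opaque handles; the functions that create and
destroy them live in llvm-c/Core.h. A sketch tying a few of them together:

    #include "llvm-c/Core.h" /* creation/disposal functions */
    #include "llvm-c/Types.h"

    int main(void) {
      LLVMContextRef C = LLVMContextCreate();
      LLVMModuleRef M = LLVMModuleCreateWithNameInContext("demo", C);
      LLVMBuilderRef B = LLVMCreateBuilderInContext(C);

      /* ... build IR with B into M ... */

      LLVMDisposeBuilder(B);
      LLVMDisposeModule(M);
      LLVMContextDispose(C);
      return 0;
    }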
diff --git a/contrib/llvm/include/llvm-c/lto.h b/contrib/llvm/include/llvm-c/lto.h
new file mode 100644
index 0000000..691a0cd
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/lto.h
@@ -0,0 +1,559 @@
+/*===-- llvm-c/lto.h - LTO Public C Interface ---------------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides public interface to an abstract link time optimization*|
+|* library. LLVM provides an implementation of this interface for use with *|
+|* llvm bitcode files. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_C_LTO_H
+#define LLVM_C_LTO_H
+
+#include <stddef.h>
+#include <sys/types.h>
+
+#ifndef __cplusplus
+#if !defined(_MSC_VER)
+#include <stdbool.h>
+typedef bool lto_bool_t;
+#else
+/* MSVC in particular does not have anything like _Bool or bool in C, but we can
+ at least make sure the type is the same size. The implementation side will
+ use C++ bool. */
+typedef unsigned char lto_bool_t;
+#endif
+#else
+typedef bool lto_bool_t;
+#endif
+
+/**
+ * @defgroup LLVMCLTO LTO
+ * @ingroup LLVMC
+ *
+ * @{
+ */
+
+#define LTO_API_VERSION 17
+
+/**
+ * \since prior to LTO_API_VERSION=3
+ */
+typedef enum {
+ LTO_SYMBOL_ALIGNMENT_MASK = 0x0000001F, /* log2 of alignment */
+ LTO_SYMBOL_PERMISSIONS_MASK = 0x000000E0,
+ LTO_SYMBOL_PERMISSIONS_CODE = 0x000000A0,
+ LTO_SYMBOL_PERMISSIONS_DATA = 0x000000C0,
+ LTO_SYMBOL_PERMISSIONS_RODATA = 0x00000080,
+ LTO_SYMBOL_DEFINITION_MASK = 0x00000700,
+ LTO_SYMBOL_DEFINITION_REGULAR = 0x00000100,
+ LTO_SYMBOL_DEFINITION_TENTATIVE = 0x00000200,
+ LTO_SYMBOL_DEFINITION_WEAK = 0x00000300,
+ LTO_SYMBOL_DEFINITION_UNDEFINED = 0x00000400,
+ LTO_SYMBOL_DEFINITION_WEAKUNDEF = 0x00000500,
+ LTO_SYMBOL_SCOPE_MASK = 0x00003800,
+ LTO_SYMBOL_SCOPE_INTERNAL = 0x00000800,
+ LTO_SYMBOL_SCOPE_HIDDEN = 0x00001000,
+ LTO_SYMBOL_SCOPE_PROTECTED = 0x00002000,
+ LTO_SYMBOL_SCOPE_DEFAULT = 0x00001800,
+ LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN = 0x00002800,
+ LTO_SYMBOL_COMDAT = 0x00004000,
+ LTO_SYMBOL_ALIAS = 0x00008000
+} lto_symbol_attributes;
+
+/**
+ * \since prior to LTO_API_VERSION=3
+ */
+typedef enum {
+ LTO_DEBUG_MODEL_NONE = 0,
+ LTO_DEBUG_MODEL_DWARF = 1
+} lto_debug_model;
+
+/**
+ * \since prior to LTO_API_VERSION=3
+ */
+typedef enum {
+ LTO_CODEGEN_PIC_MODEL_STATIC = 0,
+ LTO_CODEGEN_PIC_MODEL_DYNAMIC = 1,
+ LTO_CODEGEN_PIC_MODEL_DYNAMIC_NO_PIC = 2,
+ LTO_CODEGEN_PIC_MODEL_DEFAULT = 3
+} lto_codegen_model;
+
+/** opaque reference to a loaded object module */
+typedef struct LLVMOpaqueLTOModule *lto_module_t;
+
+/** opaque reference to a code generator */
+typedef struct LLVMOpaqueLTOCodeGenerator *lto_code_gen_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Returns a printable version string of the LTO library.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern const char*
+lto_get_version(void);
+
+
+/**
+ * Returns the last error string, or NULL if the last operation was successful.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern const char*
+lto_get_error_message(void);
+
+/**
+ * Checks if a file is a loadable object file.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_module_is_object_file(const char* path);
+
+
+/**
+ * Checks if a file is a loadable object compiled for the requested target.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_module_is_object_file_for_target(const char* path,
+ const char* target_triple_prefix);
+
+
+/**
+ * Checks if a buffer is a loadable object file.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_module_is_object_file_in_memory(const void* mem, size_t length);
+
+
+/**
+ * Checks if a buffer is a loadable object compiled for the requested target.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_module_is_object_file_in_memory_for_target(const void* mem, size_t length,
+ const char* target_triple_prefix);
+
+
+/**
+ * Loads an object file from disk.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_module_t
+lto_module_create(const char* path);
+
+
+/**
+ * Loads an object file from memory.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_module_t
+lto_module_create_from_memory(const void* mem, size_t length);
+
+/**
+ * Loads an object file from memory with an extra path argument.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=9
+ */
+extern lto_module_t
+lto_module_create_from_memory_with_path(const void* mem, size_t length,
+ const char *path);
+
+/**
+ * \brief Loads an object file in its own context.
+ *
+ * Loads an object file in its own LLVMContext. This function call is
+ * thread-safe. However, modules created this way should not be merged into an
+ * lto_code_gen_t using \a lto_codegen_add_module().
+ *
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=11
+ */
+extern lto_module_t
+lto_module_create_in_local_context(const void *mem, size_t length,
+ const char *path);
+
+/**
+ * \brief Loads an object file in the codegen context.
+ *
+ * Loads an object file into the same context as \c cg. The module is safe to
+ * add using \a lto_codegen_add_module().
+ *
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=11
+ */
+extern lto_module_t
+lto_module_create_in_codegen_context(const void *mem, size_t length,
+ const char *path, lto_code_gen_t cg);
+
+/**
+ * Loads an object file from disk. The seek point of fd is not preserved.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=5
+ */
+extern lto_module_t
+lto_module_create_from_fd(int fd, const char *path, size_t file_size);
+
+/**
+ * Loads an object file from disk. The seek point of fd is not preserved.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=5
+ */
+extern lto_module_t
+lto_module_create_from_fd_at_offset(int fd, const char *path, size_t file_size,
+ size_t map_size, off_t offset);
+
+/**
+ * Frees all memory internally allocated by the module.
+ * Upon return the lto_module_t is no longer valid.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern void
+lto_module_dispose(lto_module_t mod);
+
+/**
+ * Returns the triple string under which the object module was compiled.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern const char*
+lto_module_get_target_triple(lto_module_t mod);
+
+/**
+ * Sets the triple string with which the object will be codegened.
+ *
+ * \since LTO_API_VERSION=4
+ */
+extern void
+lto_module_set_target_triple(lto_module_t mod, const char *triple);
+
+
+/**
+ * Returns the number of symbols in the object module.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern unsigned int
+lto_module_get_num_symbols(lto_module_t mod);
+
+
+/**
+ * Returns the name of the ith symbol in the object module.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern const char*
+lto_module_get_symbol_name(lto_module_t mod, unsigned int index);
+
+
+/**
+ * Returns the attributes of the ith symbol in the object module.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_symbol_attributes
+lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index);
+
+
+/**
+ * Returns the module's linker options.
+ *
+ * The linker options may consist of multiple flags. It is the linker's
+ * responsibility to split the flags using a platform-specific mechanism.
+ *
+ * \since LTO_API_VERSION=16
+ */
+extern const char*
+lto_module_get_linkeropts(lto_module_t mod);
+
+
+/**
+ * Diagnostic severity.
+ *
+ * \since LTO_API_VERSION=7
+ */
+typedef enum {
+ LTO_DS_ERROR = 0,
+ LTO_DS_WARNING = 1,
+ LTO_DS_REMARK = 3, // Added in LTO_API_VERSION=10.
+ LTO_DS_NOTE = 2
+} lto_codegen_diagnostic_severity_t;
+
+/**
+ * Diagnostic handler type.
+ * \p severity defines the severity.
+ * \p diag is the actual diagnostic.
+ * The diagnostic is not prefixed by any severity keyword, e.g., 'error: '.
+ * \p ctxt is used to pass the context set with the diagnostic handler.
+ *
+ * \since LTO_API_VERSION=7
+ */
+typedef void (*lto_diagnostic_handler_t)(
+ lto_codegen_diagnostic_severity_t severity, const char *diag, void *ctxt);
+
+/**
+ * Set a diagnostic handler and the related context (void *).
+ * This is more general than lto_get_error_message, as the diagnostic handler
+ * can be called at any time within lto.
+ *
+ * \since LTO_API_VERSION=7
+ */
+extern void lto_codegen_set_diagnostic_handler(lto_code_gen_t,
+ lto_diagnostic_handler_t,
+ void *);
+
+/**
+ * Instantiates a code generator.
+ * Returns NULL on error (check lto_get_error_message() for details).
+ *
+ * All modules added using \a lto_codegen_add_module() must have been created
+ * in the same context as the codegen.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_code_gen_t
+lto_codegen_create(void);
+
+/**
+ * \brief Instantiate a code generator in its own context.
+ *
+ * Instantiates a code generator in its own context. Modules added via \a
+ * lto_codegen_add_module() must have all been created in the same context,
+ * using \a lto_module_create_in_codegen_context().
+ *
+ * \since LTO_API_VERSION=11
+ */
+extern lto_code_gen_t
+lto_codegen_create_in_local_context(void);
+
+/**
+ * Frees the code generator and all memory it internally allocated.
+ * Upon return the lto_code_gen_t is no longer valid.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern void
+lto_codegen_dispose(lto_code_gen_t);
+
+/**
+ * Add an object module to the set of modules for which code will be generated.
+ * Returns true on error (check lto_get_error_message() for details).
+ *
+ * \c cg and \c mod must both be in the same context. See \a
+ * lto_codegen_create_in_local_context() and \a
+ * lto_module_create_in_codegen_context().
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_codegen_add_module(lto_code_gen_t cg, lto_module_t mod);
+
+/**
+ * Sets the object module for code generation. This will transfer the ownership
+ * of the module to the code generator.
+ *
+ * \c cg and \c mod must both be in the same context.
+ *
+ * \since LTO_API_VERSION=13
+ */
+extern void
+lto_codegen_set_module(lto_code_gen_t cg, lto_module_t mod);
+
+/**
+ * Sets whether debug info should be generated.
+ * Returns true on error (check lto_get_error_message() for details).
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_codegen_set_debug_model(lto_code_gen_t cg, lto_debug_model);
+
+
+/**
+ * Sets which PIC code model to generate.
+ * Returns true on error (check lto_get_error_message() for details).
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern lto_bool_t
+lto_codegen_set_pic_model(lto_code_gen_t cg, lto_codegen_model);
+
+
+/**
+ * Sets the cpu to generate code for.
+ *
+ * \since LTO_API_VERSION=4
+ */
+extern void
+lto_codegen_set_cpu(lto_code_gen_t cg, const char *cpu);
+
+
+/**
+ * Sets the location of the assembler tool to run. If not set, libLTO
+ * will use gcc to invoke the assembler.
+ *
+ * \since LTO_API_VERSION=3
+ */
+extern void
+lto_codegen_set_assembler_path(lto_code_gen_t cg, const char* path);
+
+/**
+ * Sets extra arguments that libLTO should pass to the assembler.
+ *
+ * \since LTO_API_VERSION=4
+ */
+extern void
+lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
+ int nargs);
+
+/**
+ * Adds a symbol to the list of global symbols that must exist in the final
+ * generated code. If a function is not listed there, it might be inlined into
+ * every usage and optimized away.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern void
+lto_codegen_add_must_preserve_symbol(lto_code_gen_t cg, const char* symbol);
+
+/**
+ * Writes a new object file at the specified path that contains the
+ * merged contents of all modules added so far.
+ * Returns true on error (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=5
+ */
+extern lto_bool_t
+lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path);
+
+/**
+ * Generates code for all added modules into one native object file.
+ * This calls lto_codegen_optimize then lto_codegen_compile_optimized.
+ *
+ * On success returns a pointer to a generated mach-o/ELF buffer and
+ * length set to the buffer size. The buffer is owned by the
+ * lto_code_gen_t and will be freed when lto_codegen_dispose()
+ * is called, or lto_codegen_compile() is called again.
+ * On failure, returns NULL (check lto_get_error_message() for details).
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern const void*
+lto_codegen_compile(lto_code_gen_t cg, size_t* length);
+
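+/*
+ * Example (illustrative sketch, not part of this header): the typical
+ * codegen flow, assuming `mod` was created earlier with the module API:
+ *
+ *   lto_code_gen_t cg = lto_codegen_create();
+ *   if (!cg || lto_codegen_add_module(cg, mod))
+ *     std::fprintf(stderr, "%s\n", lto_get_error_message());
+ *   size_t length = 0;
+ *   const void *obj = lto_codegen_compile(cg, &length);
+ *   if (!obj)
+ *     std::fprintf(stderr, "%s\n", lto_get_error_message());
+ *   // ... write the length-byte buffer out before disposing cg ...
+ *   lto_codegen_dispose(cg);   // also frees the returned buffer
+ */
+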
+/**
+ * Generates code for all added modules into one native object file.
+ * This calls lto_codegen_optimize and then lto_codegen_compile_optimized;
+ * instead of returning a generated mach-o/ELF buffer, it writes to a file.
+ *
+ * The name of the file is written to name. Returns true on error.
+ *
+ * \since LTO_API_VERSION=5
+ */
+extern lto_bool_t
+lto_codegen_compile_to_file(lto_code_gen_t cg, const char** name);
+
+/**
+ * Runs optimization for the merged module. Returns true on error.
+ *
+ * \since LTO_API_VERSION=12
+ */
+extern lto_bool_t
+lto_codegen_optimize(lto_code_gen_t cg);
+
+/**
+ * Generates code for the optimized merged module into one native object file.
+ * It will not run any IR optimizations on the merged module.
+ *
+ * On success returns a pointer to a generated mach-o/ELF buffer and length set
+ * to the buffer size. The buffer is owned by the lto_code_gen_t and will be
+ * freed when lto_codegen_dispose() is called, or
+ * lto_codegen_compile_optimized() is called again. On failure, returns NULL
+ * (check lto_get_error_message() for details).
+ *
+ * \since LTO_API_VERSION=12
+ */
+extern const void*
+lto_codegen_compile_optimized(lto_code_gen_t cg, size_t* length);
+
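+/*
+ * Example (illustrative sketch): the two-phase equivalent of
+ * lto_codegen_compile(), which allows work (e.g. saving the merged,
+ * optimized bitcode) between the two phases:
+ *
+ *   if (lto_codegen_optimize(cg))
+ *     std::fprintf(stderr, "%s\n", lto_get_error_message());
+ *   lto_codegen_write_merged_modules(cg, "/tmp/merged.bc");  // optional
+ *   size_t length = 0;
+ *   const void *obj = lto_codegen_compile_optimized(cg, &length);
+ */
+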
+/**
+ * Returns the runtime API version.
+ *
+ * \since LTO_API_VERSION=12
+ */
+extern unsigned int
+lto_api_version(void);
+
+/**
+ * Sets options to help debug codegen bugs.
+ *
+ * \since prior to LTO_API_VERSION=3
+ */
+extern void
+lto_codegen_debug_options(lto_code_gen_t cg, const char *);
+
+/**
+ * Initializes LLVM disassemblers.
+ * FIXME: This doesn't really belong here.
+ *
+ * \since LTO_API_VERSION=5
+ */
+extern void
+lto_initialize_disassembler(void);
+
+/**
+ * Sets whether the internalize pass should be run during optimization and
+ * code generation.
+ *
+ * \since LTO_API_VERSION=14
+ */
+extern void
+lto_codegen_set_should_internalize(lto_code_gen_t cg,
+ lto_bool_t ShouldInternalize);
+
+/**
+ * \brief Set whether to embed uselists in bitcode.
+ *
+ * Sets whether \a lto_codegen_write_merged_modules() should embed uselists in
+ * output bitcode. This should be turned on for all -save-temps output.
+ *
+ * \since LTO_API_VERSION=15
+ */
+extern void
+lto_codegen_set_should_embed_uselists(lto_code_gen_t cg,
+ lto_bool_t ShouldEmbedUselists);
+
+#ifdef __cplusplus
+}
+#endif
+
+/**
+ * @}
+ */
+
+#endif
diff --git a/contrib/llvm/include/llvm-c/module.modulemap b/contrib/llvm/include/llvm-c/module.modulemap
new file mode 100644
index 0000000..a456119
--- /dev/null
+++ b/contrib/llvm/include/llvm-c/module.modulemap
@@ -0,0 +1,4 @@
+module LLVM_C {
+ umbrella "."
+ module * { export * }
+}
diff --git a/contrib/llvm/include/llvm/ADT/APFloat.h b/contrib/llvm/include/llvm/ADT/APFloat.h
new file mode 100644
index 0000000..3fe0406
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/APFloat.h
@@ -0,0 +1,686 @@
+//===- llvm/ADT/APFloat.h - Arbitrary Precision Floating Point ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief
+/// This file declares a class to represent arbitrary precision floating point
+/// values and provide a variety of arithmetic operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APFLOAT_H
+#define LLVM_ADT_APFLOAT_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+struct fltSemantics;
+class APSInt;
+class StringRef;
+
+/// Enum that represents what fraction of the LSB the truncated bits of an fp
+/// number represent.
+///
+/// This essentially combines the roles of guard and sticky bits.
+enum lostFraction { // Example of truncated bits:
+ lfExactlyZero, // 000000
+ lfLessThanHalf, // 0xxxxx x's not all zero
+ lfExactlyHalf, // 100000
+ lfMoreThanHalf // 1xxxxx x's not all zero
+};
+
+/// \brief A self-contained host- and target-independent arbitrary-precision
+/// floating-point software implementation.
+///
+/// APFloat uses bignum integer arithmetic as provided by static functions in
+/// the APInt class. The library will work with bignum integers whose parts are
+/// any unsigned type at least 16 bits wide, but 64 bits is recommended.
+///
+/// Written for clarity rather than speed, in particular with a view to use in
+/// the front-end of a cross compiler so that target arithmetic can be correctly
+/// performed on the host. Performance should nonetheless be reasonable,
+/// particularly for its intended use. It may be useful as a base
+/// implementation for a run-time library during development of a faster
+/// target-specific one.
+///
+/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
+/// implemented operations. Currently implemented operations are add, subtract,
+/// multiply, divide, fused-multiply-add, conversion-to-float,
+/// conversion-to-integer and conversion-from-integer. New rounding modes
+/// (e.g. away from zero) can be added with three or four lines of code.
+///
+/// Four formats are built-in: IEEE single precision, double precision,
+/// quadruple precision, and x87 80-bit extended double (when operating with
+/// full extended precision). Adding a new format that obeys IEEE semantics
+/// only requires adding two lines of code: a declaration and definition of the
+/// format.
+///
+/// All operations return the status of that operation as an exception bit-mask,
+/// so multiple operations can be done consecutively with their results or-ed
+/// together. The returned status can be useful for compiler diagnostics; e.g.,
+/// inexact, underflow and overflow can be easily diagnosed on constant folding,
+/// and compiler optimizers can determine what exceptions would be raised by
+/// folding operations and optimize, or perhaps not optimize, accordingly.
+///
+/// At present, underflow tininess is detected after rounding; it should be
+/// straightforward to add support for the before-rounding case too.
+///
+/// The library reads hexadecimal floating point numbers as per C99, and
+/// correctly rounds if necessary according to the specified rounding mode.
+/// Syntax is required to have been validated by the caller. It also converts
+/// floating point numbers to hexadecimal text as per the C99 %a and %A
+/// conversions. The output precision (or alternatively the natural minimal
+/// precision) can be specified; if the requested precision is less than the
+/// natural precision the output is correctly rounded for the specified rounding
+/// mode.
+///
+/// It also reads decimal floating point numbers and correctly rounds according
+/// to the specified rounding mode.
+///
+/// Conversion to decimal text is not currently implemented.
+///
+/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
+/// signed exponent, and the significand as an array of integer parts. After
+/// normalization of a number of precision P the exponent is within the range of
+/// the format, and if the number is not denormal the P-th bit of the
+/// significand is set as an explicit integer bit. For denormals the most
+/// significant bit is shifted right so that the exponent is maintained at the
+/// format's minimum, so that the smallest denormal has just the least
+/// significant bit of the significand set. The sign of zeroes and infinities
+/// is significant; the exponent and significand of such numbers are not stored,
+/// but has a known implicit (deterministic) value: 0 for the significands, 0
+/// for zero exponent, all 1 bits for infinity exponent. For NaNs the sign and
+/// significand are deterministic, although not really meaningful, and preserved
+/// in non-conversion operations. The exponent is implicitly all 1 bits.
+///
+/// APFloat does not provide any exception handling beyond default exception
+/// handling. We represent signaling NaNs per the IEEE-754R 2008 6.2.1 "should"
+/// clause by encoding them with the first bit of the trailing significand set
+/// to 0.
+///
+/// TODO
+/// ====
+///
+/// Some features that may or may not be worth adding:
+///
+/// Binary to decimal conversion (hard).
+///
+/// Optional ability to detect underflow tininess before rounding.
+///
+/// New formats: x87 in single and double precision mode (IEEE apart from
+/// extended exponent range) (hard).
+///
+/// New operations: sqrt, IEEE remainder, C90 fmod, nexttoward.
+///
+class APFloat {
+public:
+
+ /// A signed type to represent a floating point number's unbiased exponent.
+ typedef signed short ExponentType;
+
+ /// \name Floating Point Semantics.
+ /// @{
+
+ static const fltSemantics IEEEhalf;
+ static const fltSemantics IEEEsingle;
+ static const fltSemantics IEEEdouble;
+ static const fltSemantics IEEEquad;
+ static const fltSemantics PPCDoubleDouble;
+ static const fltSemantics x87DoubleExtended;
+
+ /// A pseudo fltSemantics used to construct APFloats that cannot conflict
+ /// with anything real.
+ static const fltSemantics Bogus;
+
+ /// @}
+
+ static unsigned int semanticsPrecision(const fltSemantics &);
+ static ExponentType semanticsMinExponent(const fltSemantics &);
+ static ExponentType semanticsMaxExponent(const fltSemantics &);
+ static unsigned int semanticsSizeInBits(const fltSemantics &);
+
+ /// IEEE-754R 5.11: Floating Point Comparison Relations.
+ enum cmpResult {
+ cmpLessThan,
+ cmpEqual,
+ cmpGreaterThan,
+ cmpUnordered
+ };
+
+ /// IEEE-754R 4.3: Rounding-direction attributes.
+ enum roundingMode {
+ rmNearestTiesToEven,
+ rmTowardPositive,
+ rmTowardNegative,
+ rmTowardZero,
+ rmNearestTiesToAway
+ };
+
+ /// IEEE-754R 7: Default exception handling.
+ ///
+ /// opUnderflow or opOverflow are always returned or-ed with opInexact.
+ enum opStatus {
+ opOK = 0x00,
+ opInvalidOp = 0x01,
+ opDivByZero = 0x02,
+ opOverflow = 0x04,
+ opUnderflow = 0x08,
+ opInexact = 0x10
+ };
+
+ /// Category of internally-represented number.
+ enum fltCategory {
+ fcInfinity,
+ fcNaN,
+ fcNormal,
+ fcZero
+ };
+
+ /// Convenience enum used to construct an uninitialized APFloat.
+ enum uninitializedTag {
+ uninitialized
+ };
+
+ /// \name Constructors
+ /// @{
+
+ APFloat(const fltSemantics &); // Default construct to 0.0
+ APFloat(const fltSemantics &, StringRef);
+ APFloat(const fltSemantics &, integerPart);
+ APFloat(const fltSemantics &, uninitializedTag);
+ APFloat(const fltSemantics &, const APInt &);
+ explicit APFloat(double d);
+ explicit APFloat(float f);
+ APFloat(const APFloat &);
+ APFloat(APFloat &&);
+ ~APFloat();
+
+ /// @}
+
+ /// \brief Returns whether this instance allocated memory.
+ bool needsCleanup() const { return partCount() > 1; }
+
+ /// \name Convenience "constructors"
+ /// @{
+
+ /// Factory for Positive and Negative Zero.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getZero(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeZero(Negative);
+ return Val;
+ }
+
+ /// Factory for Positive and Negative Infinity.
+ ///
+ /// \param Negative True iff the number should be negative.
+ static APFloat getInf(const fltSemantics &Sem, bool Negative = false) {
+ APFloat Val(Sem, uninitialized);
+ Val.makeInf(Negative);
+ return Val;
+ }
+
+ /// Factory for QNaN values.
+ ///
+ /// \param Negative - True iff the NaN generated should be negative.
+ /// \param type - The unspecified fill bits for creating the NaN, 0 by
+ /// default. The value is truncated as necessary.
+ static APFloat getNaN(const fltSemantics &Sem, bool Negative = false,
+ unsigned type = 0) {
+ if (type) {
+ APInt fill(64, type);
+ return getQNaN(Sem, Negative, &fill);
+ } else {
+ return getQNaN(Sem, Negative, nullptr);
+ }
+ }
+
+ /// Factory for QNaN values.
+ static APFloat getQNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ return makeNaN(Sem, false, Negative, payload);
+ }
+
+ /// Factory for SNaN values.
+ static APFloat getSNaN(const fltSemantics &Sem, bool Negative = false,
+ const APInt *payload = nullptr) {
+ return makeNaN(Sem, true, Negative, payload);
+ }
+
+ /// Returns the largest finite number in the given semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getLargest(const fltSemantics &Sem, bool Negative = false);
+
+ /// Returns the smallest (by magnitude) finite number in the given semantics.
+ /// Might be denormalized, which implies a relative loss of precision.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallest(const fltSemantics &Sem, bool Negative = false);
+
+ /// Returns the smallest (by magnitude) normalized finite number in the given
+ /// semantics.
+ ///
+ /// \param Negative - True iff the number should be negative
+ static APFloat getSmallestNormalized(const fltSemantics &Sem,
+ bool Negative = false);
+
+ /// Returns a float which is bitcast from an all-ones integer.
+ ///
+ /// \param BitWidth - Select float type
+ /// \param isIEEE - If 128 bit number, select between PPC and IEEE
+ static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
+ /// Returns the size of the floating point number (in bits) in the given
+ /// semantics.
+ static unsigned getSizeInBits(const fltSemantics &Sem);
+
+ /// @}
+
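+ // Example (illustrative sketch, not part of this header): constructing
+ // special values with the factories above.
+ //
+ //   APFloat Zero = APFloat::getZero(APFloat::IEEEdouble, /*Negative=*/true);
+ //   APFloat Inf  = APFloat::getInf(APFloat::IEEEsingle);
+ //   APFloat NaN  = APFloat::getQNaN(APFloat::IEEEdouble);
+ //   assert(Zero.isNegZero() && Inf.isInfinity() && NaN.isNaN());
+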
+ /// Used to insert APFloat objects, or objects that contain APFloat objects,
+ /// into FoldingSets.
+ void Profile(FoldingSetNodeID &NID) const;
+
+ /// \name Arithmetic
+ /// @{
+
+ opStatus add(const APFloat &, roundingMode);
+ opStatus subtract(const APFloat &, roundingMode);
+ opStatus multiply(const APFloat &, roundingMode);
+ opStatus divide(const APFloat &, roundingMode);
+ /// IEEE remainder.
+ opStatus remainder(const APFloat &);
+ /// C fmod, or llvm frem.
+ opStatus mod(const APFloat &);
+ opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
+ opStatus roundToIntegral(roundingMode);
+ /// IEEE-754R 5.3.1: nextUp/nextDown.
+ opStatus next(bool nextDown);
+
+ /// \brief Operator+ overload which provides the default
+ /// \c rmNearestTiesToEven rounding mode and *no* error checking.
+ APFloat operator+(const APFloat &RHS) const {
+ APFloat Result = *this;
+ Result.add(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// \brief Operator- overload which provides the default
+ /// \c rmNearestTiesToEven rounding mode and *no* error checking.
+ APFloat operator-(const APFloat &RHS) const {
+ APFloat Result = *this;
+ Result.subtract(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// \brief Operator* overload which provides the default
+ /// \c rmNearestTiesToEven rounding mode and *no* error checking.
+ APFloat operator*(const APFloat &RHS) const {
+ APFloat Result = *this;
+ Result.multiply(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// \brief Operator/ overload which provides the default
+ /// \c rmNearestTiesToEven rounding mode and *no* error checking.
+ APFloat operator/(const APFloat &RHS) const {
+ APFloat Result = *this;
+ Result.divide(RHS, rmNearestTiesToEven);
+ return Result;
+ }
+
+ /// @}
+
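+ // Example (illustrative sketch): checked arithmetic via opStatus versus the
+ // unchecked operator overloads above.
+ //
+ //   APFloat X(1.0), Y(3.0);
+ //   APFloat::opStatus S = X.divide(Y, APFloat::rmNearestTiesToEven);
+ //   if (S & APFloat::opInexact) { /* 1/3 is not exactly representable */ }
+ //   APFloat Z = X + Y;   // rmNearestTiesToEven, status discarded
+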
+ /// \name Sign operations.
+ /// @{
+
+ void changeSign();
+ void clearSign();
+ void copySign(const APFloat &);
+
+ /// \brief A static helper to produce a copy of an APFloat value with its sign
+ /// copied from some other APFloat.
+ static APFloat copySign(APFloat Value, const APFloat &Sign) {
+ Value.copySign(Sign);
+ return Value;
+ }
+
+ /// @}
+
+ /// \name Conversions
+ /// @{
+
+ opStatus convert(const fltSemantics &, roundingMode, bool *);
+ opStatus convertToInteger(integerPart *, unsigned int, bool, roundingMode,
+ bool *) const;
+ opStatus convertToInteger(APSInt &, roundingMode, bool *) const;
+ opStatus convertFromAPInt(const APInt &, bool, roundingMode);
+ opStatus convertFromSignExtendedInteger(const integerPart *, unsigned int,
+ bool, roundingMode);
+ opStatus convertFromZeroExtendedInteger(const integerPart *, unsigned int,
+ bool, roundingMode);
+ opStatus convertFromString(StringRef, roundingMode);
+ APInt bitcastToAPInt() const;
+ double convertToDouble() const;
+ float convertToFloat() const;
+
+ /// @}
+
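+ // Example (illustrative sketch): converting between semantics. The
+ // `losesInfo` out-parameter reports whether the value changed.
+ //
+ //   APFloat V(1.1);                      // IEEEdouble
+ //   bool LosesInfo = false;
+ //   V.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &LosesInfo);
+ //   // LosesInfo is true: 1.1's double value is not exactly a float.
+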
+ /// The definition of equality is not straightforward for floating point, so
+ /// we won't use operator==. Use one of the following, or write whatever it
+ /// is you really mean.
+ bool operator==(const APFloat &) const = delete;
+
+ /// IEEE comparison with another floating point number (NaNs compare
+ /// unordered, 0==-0).
+ cmpResult compare(const APFloat &) const;
+
+ /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
+ bool bitwiseIsEqual(const APFloat &) const;
+
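+ // Example (illustrative sketch): compare() follows IEEE semantics (NaNs are
+ // unordered, +0 == -0) while bitwiseIsEqual() distinguishes the zeroes.
+ //
+ //   APFloat PZ = APFloat::getZero(APFloat::IEEEdouble);
+ //   APFloat NZ = APFloat::getZero(APFloat::IEEEdouble, /*Negative=*/true);
+ //   assert(PZ.compare(NZ) == APFloat::cmpEqual);
+ //   assert(!PZ.bitwiseIsEqual(NZ));
+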
+ /// Write out a hexadecimal representation of the floating point value to DST,
+ /// which must be of sufficient size, in the C99 form [-]0xh.hhhhp[+-]d.
+ /// Return the number of characters written, excluding the terminating NUL.
+ unsigned int convertToHexString(char *dst, unsigned int hexDigits,
+ bool upperCase, roundingMode) const;
+
+ /// \name IEEE-754R 5.7.2 General operations.
+ /// @{
+
+ /// IEEE-754R isSignMinus: Returns true if and only if the current value is
+ /// negative.
+ ///
+ /// This applies to zeros and NaNs as well.
+ bool isNegative() const { return sign; }
+
+ /// IEEE-754R isNormal: Returns true if and only if the current value is normal.
+ ///
+ /// This implies that the current value of the float is not zero, subnormal,
+ /// infinite, or NaN following the definition of normality from IEEE-754R.
+ bool isNormal() const { return !isDenormal() && isFiniteNonZero(); }
+
+ /// Returns true if and only if the current value is zero, subnormal, or
+ /// normal.
+ ///
+ /// This means that the value is not infinite or NaN.
+ bool isFinite() const { return !isNaN() && !isInfinity(); }
+
+ /// Returns true if and only if the float is plus or minus zero.
+ bool isZero() const { return category == fcZero; }
+
+ /// IEEE-754R isSubnormal(): Returns true if and only if the float is a
+ /// denormal.
+ bool isDenormal() const;
+
+ /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity.
+ bool isInfinity() const { return category == fcInfinity; }
+
+ /// Returns true if and only if the float is a quiet or signaling NaN.
+ bool isNaN() const { return category == fcNaN; }
+
+ /// Returns true if and only if the float is a signaling NaN.
+ bool isSignaling() const;
+
+ /// @}
+
+ /// \name Simple Queries
+ /// @{
+
+ fltCategory getCategory() const { return category; }
+ const fltSemantics &getSemantics() const { return *semantics; }
+ bool isNonZero() const { return category != fcZero; }
+ bool isFiniteNonZero() const { return isFinite() && !isZero(); }
+ bool isPosZero() const { return isZero() && !isNegative(); }
+ bool isNegZero() const { return isZero() && isNegative(); }
+
+ /// Returns true if and only if the number has the smallest possible non-zero
+ /// magnitude in the current semantics.
+ bool isSmallest() const;
+
+ /// Returns true if and only if the number has the largest possible finite
+ /// magnitude in the current semantics.
+ bool isLargest() const;
+
+ /// Returns true if and only if the number is an exact integer.
+ bool isInteger() const;
+
+ /// @}
+
+ APFloat &operator=(const APFloat &);
+ APFloat &operator=(APFloat &&);
+
+ /// \brief Overload to compute a hash code for an APFloat value.
+ ///
+ /// Note that the use of hash codes for floating point values is in general
+ /// fraught with peril. Equality is hard to define for these values. For
+ /// example, should negative and positive zero hash to different codes? Are
+ /// they equal or not? This hash value implementation specifically
+ /// emphasizes producing different codes for different inputs in order to
+ /// be used in canonicalization and memoization. As such, equality is
+ /// bitwiseIsEqual, and 0 != -0.
+ friend hash_code hash_value(const APFloat &Arg);
+
+ /// Converts this value into a decimal string.
+ ///
+ /// \param FormatPrecision The maximum number of digits of
+ /// precision to output. If there are fewer digits available,
+ /// zero padding will not be used unless the value is
+ /// integral and small enough to be expressed in
+ /// FormatPrecision digits. 0 means to use the natural
+ /// precision of the number.
+ /// \param FormatMaxPadding The maximum number of zeros to
+ /// consider inserting before falling back to scientific
+ /// notation. 0 means to always use scientific notation.
+ ///
+ /// Number Precision MaxPadding Result
+ /// ------ --------- ---------- ------
+ /// 1.01E+4 5 2 10100
+ /// 1.01E+4 4 2 1.01E+4
+ /// 1.01E+4 5 1 1.01E+4
+ /// 1.01E-2 5 2 0.0101
+ /// 1.01E-2 4 2 0.0101
+ /// 1.01E-2 4 1 1.01E-2
+ void toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision = 0,
+ unsigned FormatMaxPadding = 3) const;
+
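+ // Example (illustrative sketch): rendering with the precision and padding
+ // from the first table row above.
+ //
+ //   SmallVector<char, 16> S;
+ //   APFloat(1.01E+4).toString(S, /*FormatPrecision=*/5, /*FormatMaxPadding=*/2);
+ //   // S now holds "10100".
+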
+ /// If this value has an exact multiplicative inverse, store it in inv and
+ /// return true.
+ bool getExactInverse(APFloat *inv) const;
+
+ /// \brief Enumeration of \c ilogb error results.
+ enum IlogbErrorKinds {
+ IEK_Zero = INT_MIN+1,
+ IEK_NaN = INT_MIN,
+ IEK_Inf = INT_MAX
+ };
+
+ /// \brief Returns the exponent of the internal representation of the APFloat.
+ ///
+ /// Because the radix of APFloat is 2, this is equivalent to floor(log2(x)).
+ /// For special APFloat values, this returns special error codes:
+ ///
+ /// NaN -> \c IEK_NaN
+ /// 0 -> \c IEK_Zero
+ /// Inf -> \c IEK_Inf
+ ///
+ friend int ilogb(const APFloat &Arg) {
+ if (Arg.isNaN())
+ return IEK_NaN;
+ if (Arg.isZero())
+ return IEK_Zero;
+ if (Arg.isInfinity())
+ return IEK_Inf;
+
+ return Arg.exponent;
+ }
+
+ /// \brief Returns: X * 2^Exp for integral exponents.
+ friend APFloat scalbn(APFloat X, int Exp);
+
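+ // Example (illustrative sketch): ilogb()/scalbn() behave like their libm
+ // counterparts for finite values.
+ //
+ //   APFloat X(48.0);
+ //   int E = ilogb(X);                     // 5, since 48 = 1.5 * 2^5
+ //   APFloat Y = scalbn(APFloat(1.5), E);  // 48.0
+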
+private:
+
+ /// \name Simple Queries
+ /// @{
+
+ integerPart *significandParts();
+ const integerPart *significandParts() const;
+ unsigned int partCount() const;
+
+ /// @}
+
+ /// \name Significand operations.
+ /// @{
+
+ integerPart addSignificand(const APFloat &);
+ integerPart subtractSignificand(const APFloat &, integerPart);
+ lostFraction addOrSubtractSignificand(const APFloat &, bool subtract);
+ lostFraction multiplySignificand(const APFloat &, const APFloat *);
+ lostFraction divideSignificand(const APFloat &);
+ void incrementSignificand();
+ void initialize(const fltSemantics *);
+ void shiftSignificandLeft(unsigned int);
+ lostFraction shiftSignificandRight(unsigned int);
+ unsigned int significandLSB() const;
+ unsigned int significandMSB() const;
+ void zeroSignificand();
+ /// Return true if the significand excluding the integral bit is all ones.
+ bool isSignificandAllOnes() const;
+ /// Return true if the significand excluding the integral bit is all zeros.
+ bool isSignificandAllZeros() const;
+
+ /// @}
+
+ /// \name Arithmetic on special values.
+ /// @{
+
+ opStatus addOrSubtractSpecials(const APFloat &, bool subtract);
+ opStatus divideSpecials(const APFloat &);
+ opStatus multiplySpecials(const APFloat &);
+ opStatus modSpecials(const APFloat &);
+
+ /// @}
+
+ /// \name Special value setters.
+ /// @{
+
+ void makeLargest(bool Neg = false);
+ void makeSmallest(bool Neg = false);
+ void makeNaN(bool SNaN = false, bool Neg = false,
+ const APInt *fill = nullptr);
+ static APFloat makeNaN(const fltSemantics &Sem, bool SNaN, bool Negative,
+ const APInt *fill);
+ void makeInf(bool Neg = false);
+ void makeZero(bool Neg = false);
+
+ /// @}
+
+ /// \name Miscellany
+ /// @{
+
+ bool convertFromStringSpecials(StringRef str);
+ opStatus normalize(roundingMode, lostFraction);
+ opStatus addOrSubtract(const APFloat &, roundingMode, bool subtract);
+ cmpResult compareAbsoluteValue(const APFloat &) const;
+ opStatus handleOverflow(roundingMode);
+ bool roundAwayFromZero(roundingMode, lostFraction, unsigned int) const;
+ opStatus convertToSignExtendedInteger(integerPart *, unsigned int, bool,
+ roundingMode, bool *) const;
+ opStatus convertFromUnsignedParts(const integerPart *, unsigned int,
+ roundingMode);
+ opStatus convertFromHexadecimalString(StringRef, roundingMode);
+ opStatus convertFromDecimalString(StringRef, roundingMode);
+ char *convertNormalToHexString(char *, unsigned int, bool,
+ roundingMode) const;
+ opStatus roundSignificandWithExponent(const integerPart *, unsigned int, int,
+ roundingMode);
+
+ /// @}
+
+ APInt convertHalfAPFloatToAPInt() const;
+ APInt convertFloatAPFloatToAPInt() const;
+ APInt convertDoubleAPFloatToAPInt() const;
+ APInt convertQuadrupleAPFloatToAPInt() const;
+ APInt convertF80LongDoubleAPFloatToAPInt() const;
+ APInt convertPPCDoubleDoubleAPFloatToAPInt() const;
+ void initFromAPInt(const fltSemantics *Sem, const APInt &api);
+ void initFromHalfAPInt(const APInt &api);
+ void initFromFloatAPInt(const APInt &api);
+ void initFromDoubleAPInt(const APInt &api);
+ void initFromQuadrupleAPInt(const APInt &api);
+ void initFromF80LongDoubleAPInt(const APInt &api);
+ void initFromPPCDoubleDoubleAPInt(const APInt &api);
+
+ void assign(const APFloat &);
+ void copySignificand(const APFloat &);
+ void freeSignificand();
+
+ /// The semantics that this value obeys.
+ const fltSemantics *semantics;
+
+ /// A binary fraction with an explicit integer bit.
+ ///
+ /// The significand must be at least one bit wider than the target precision.
+ union Significand {
+ integerPart part;
+ integerPart *parts;
+ } significand;
+
+ /// The signed unbiased exponent of the value.
+ ExponentType exponent;
+
+ /// What kind of floating point number this is.
+ ///
+ /// Only 2 bits are required, but VisualStudio incorrectly sign extends it.
+ /// Using the extra bit keeps it from failing under VisualStudio.
+ fltCategory category : 3;
+
+ /// Sign bit of the number.
+ unsigned int sign : 1;
+};
+
+/// See friend declarations above.
+///
+/// These additional declarations are required in order to compile LLVM with IBM
+/// xlC compiler.
+hash_code hash_value(const APFloat &Arg);
+APFloat scalbn(APFloat X, int Exp);
+
+/// \brief Returns the absolute value of the argument.
+inline APFloat abs(APFloat X) {
+ X.clearSign();
+ return X;
+}
+
+/// Implements IEEE minNum semantics. Returns the smaller of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat minnum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return B;
+ if (B.isNaN())
+ return A;
+ return (B.compare(A) == APFloat::cmpLessThan) ? B : A;
+}
+
+/// Implements IEEE maxNum semantics. Returns the larger of the 2 arguments if
+/// both are not NaN. If either argument is a NaN, returns the other argument.
+LLVM_READONLY
+inline APFloat maxnum(const APFloat &A, const APFloat &B) {
+ if (A.isNaN())
+ return B;
+ if (B.isNaN())
+ return A;
+ return (A.compare(B) == APFloat::cmpLessThan) ? B : A;
+}
+
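+// Example (illustrative sketch): a NaN operand selects the other argument.
+//
+//   APFloat N = APFloat::getQNaN(APFloat::IEEEdouble);
+//   APFloat M = minnum(N, APFloat(2.0));   // 2.0, not NaN
+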
+} // namespace llvm
+
+#endif // LLVM_ADT_APFLOAT_H
diff --git a/contrib/llvm/include/llvm/ADT/APInt.h b/contrib/llvm/include/llvm/ADT/APInt.h
new file mode 100644
index 0000000..e2a0cb5
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/APInt.h
@@ -0,0 +1,1915 @@
+//===-- llvm/ADT/APInt.h - For Arbitrary Precision Integer -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file implements a class to represent arbitrary precision
+/// integral constant values and operations on them.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APINT_H
+#define LLVM_ADT_APINT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <string>
+
+namespace llvm {
+class FoldingSetNodeID;
+class StringRef;
+class hash_code;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+
+// An unsigned host type used as a single part of a multi-part
+// bignum.
+typedef uint64_t integerPart;
+
+const unsigned int host_char_bit = 8;
+const unsigned int integerPartWidth =
+ host_char_bit * static_cast<unsigned int>(sizeof(integerPart));
+
+//===----------------------------------------------------------------------===//
+// APInt Class
+//===----------------------------------------------------------------------===//
+
+/// \brief Class for arbitrary precision integers.
+///
+/// APInt is a functional replacement for common-case unsigned integer types
+/// like "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width
+/// integer sizes and large integer value types such as 3-bit, 15-bit, or more
+/// than 64-bit precision. APInt provides a variety of arithmetic operators
+/// than 64-bits of precision. APInt provides a variety of arithmetic operators
+/// and methods to manipulate integer values of any bit-width. It supports both
+/// the typical integer arithmetic and comparison operations as well as bitwise
+/// manipulation.
+///
+/// The class has several invariants worth noting:
+/// * All bit, byte, and word positions are zero-based.
+/// * Once the bit width is set, it doesn't change except by the Truncate,
+/// SignExtend, or ZeroExtend operations.
+/// * All binary operators must be on APInt instances of the same bit width.
+/// Attempting to use these operators on instances with different bit
+/// widths will yield an assertion.
+/// * The value is stored canonically as an unsigned value. For operations
+/// where it makes a difference, there are both signed and unsigned variants
+/// of the operation. For example, sdiv and udiv. However, because the bit
+/// widths must be the same, operations such as Mul and Add produce the same
+/// results regardless of whether the values are interpreted as signed or
+/// not.
+/// * In general, the class tries to follow the style of computation that LLVM
+/// uses in its IR. This simplifies its use for LLVM.
+///
+class APInt {
+ unsigned BitWidth; ///< The number of bits in this APInt.
+
+ /// This union is used to store the integer value. When the
+ /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal.
+ union {
+ uint64_t VAL; ///< Used to store the <= 64 bits integer value.
+ uint64_t *pVal; ///< Used to store the >64 bits integer value.
+ };
+
+ /// This enum is used to hold the constants we need for APInt.
+ enum {
+ /// Bits in a word
+ APINT_BITS_PER_WORD =
+ static_cast<unsigned int>(sizeof(uint64_t)) * CHAR_BIT,
+ /// Byte size of a word
+ APINT_WORD_SIZE = static_cast<unsigned int>(sizeof(uint64_t))
+ };
+
+ friend struct DenseMapAPIntKeyInfo;
+
+ /// \brief Fast internal constructor
+ ///
+ /// This constructor is used only internally for speed of construction of
+ /// temporaries. It is unsafe for general use so it is not public.
+ APInt(uint64_t *val, unsigned bits) : BitWidth(bits), pVal(val) {}
+
+ /// \brief Determine if this APInt just has one word to store value.
+ ///
+ /// \returns true if the number of bits <= 64, false otherwise.
+ bool isSingleWord() const { return BitWidth <= APINT_BITS_PER_WORD; }
+
+ /// \brief Determine which word a bit is in.
+ ///
+ /// \returns the word position for the specified bit position.
+ static unsigned whichWord(unsigned bitPosition) {
+ return bitPosition / APINT_BITS_PER_WORD;
+ }
+
+ /// \brief Determine which bit in a word a bit is in.
+ ///
+ /// \returns the bit position in a word for the specified bit position
+ /// in the APInt.
+ static unsigned whichBit(unsigned bitPosition) {
+ return bitPosition % APINT_BITS_PER_WORD;
+ }
+
+ /// \brief Get a single bit mask.
+ ///
+ /// \returns a uint64_t with only bit at "whichBit(bitPosition)" set
+ /// This method generates and returns a uint64_t (word) mask for a single
+ /// bit at a specific bit position. This is used to mask the bit in the
+ /// corresponding word.
+ static uint64_t maskBit(unsigned bitPosition) {
+ return 1ULL << whichBit(bitPosition);
+ }
+
+ /// \brief Clear unused high order bits
+ ///
+ /// This method is used internally to clear the top "N" bits in the high order
+ /// word that are not used by the APInt. This is needed after the most
+ /// significant word is assigned a value to ensure that those bits are
+ /// zeroed out.
+ APInt &clearUnusedBits() {
+ // Compute how many bits are used in the final word
+ unsigned wordBits = BitWidth % APINT_BITS_PER_WORD;
+ if (wordBits == 0)
+ // If all bits are used, we want to leave the value alone. This also
+ // avoids the undefined behavior of >> when the shift is the same size as
+ // the word size (64).
+ return *this;
+
+ // Mask out the high bits.
+ uint64_t mask = ~uint64_t(0ULL) >> (APINT_BITS_PER_WORD - wordBits);
+ if (isSingleWord())
+ VAL &= mask;
+ else
+ pVal[getNumWords() - 1] &= mask;
+ return *this;
+ }
+
+ /// \brief Get the word corresponding to a bit position
+ /// \returns the corresponding word for the specified bit position.
+ uint64_t getWord(unsigned bitPosition) const {
+ return isSingleWord() ? VAL : pVal[whichWord(bitPosition)];
+ }
+
+ /// \brief Convert a char array into an APInt
+ ///
+ /// \param radix 2, 8, 10, 16, or 36
+ /// Converts a string into a number. The string must be non-empty
+ /// and well-formed as a number of the given base. The bit-width
+ /// must be sufficient to hold the result.
+ ///
+ /// This is used by the constructors that take string arguments.
+ ///
+ /// StringRef::getAsInteger is superficially similar but (1) does
+ /// not assume that the string is well-formed and (2) grows the
+ /// result to hold the input.
+ void fromString(unsigned numBits, StringRef str, uint8_t radix);
+
+ /// \brief An internal division function for dividing APInts.
+ ///
+ /// This is used by the toString method to divide by the radix. It simply
+ /// provides a more convenient form of divide for internal use since KnuthDiv
+ /// has specific constraints on its inputs. If those constraints are not met
+ /// then it provides a simpler form of divide.
+ static void divide(const APInt LHS, unsigned lhsWords, const APInt &RHS,
+ unsigned rhsWords, APInt *Quotient, APInt *Remainder);
+
+ /// out-of-line slow case for inline constructor
+ void initSlowCase(unsigned numBits, uint64_t val, bool isSigned);
+
+ /// shared code between two array constructors
+ void initFromArray(ArrayRef<uint64_t> array);
+
+ /// out-of-line slow case for inline copy constructor
+ void initSlowCase(const APInt &that);
+
+ /// out-of-line slow case for shl
+ APInt shlSlowCase(unsigned shiftAmt) const;
+
+ /// out-of-line slow case for operator&
+ APInt AndSlowCase(const APInt &RHS) const;
+
+ /// out-of-line slow case for operator|
+ APInt OrSlowCase(const APInt &RHS) const;
+
+ /// out-of-line slow case for operator^
+ APInt XorSlowCase(const APInt &RHS) const;
+
+ /// out-of-line slow case for operator=
+ APInt &AssignSlowCase(const APInt &RHS);
+
+ /// out-of-line slow case for operator==
+ bool EqualSlowCase(const APInt &RHS) const;
+
+ /// out-of-line slow case for operator==
+ bool EqualSlowCase(uint64_t Val) const;
+
+ /// out-of-line slow case for countLeadingZeros
+ unsigned countLeadingZerosSlowCase() const;
+
+ /// out-of-line slow case for countTrailingOnes
+ unsigned countTrailingOnesSlowCase() const;
+
+ /// out-of-line slow case for countPopulation
+ unsigned countPopulationSlowCase() const;
+
+public:
+ /// \name Constructors
+ /// @{
+
+ /// \brief Create a new APInt of numBits width, initialized as val.
+ ///
+ /// If isSigned is true then val is treated as if it were a signed value
+ /// (i.e. as an int64_t) and the appropriate sign extension to the bit width
+ /// will be done. Otherwise, no sign extension occurs (high order bits beyond
+ /// the range of val are zero filled).
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param val the initial value of the APInt
+ /// \param isSigned how to treat signedness of val
+ APInt(unsigned numBits, uint64_t val, bool isSigned = false)
+ : BitWidth(numBits), VAL(0) {
+ assert(BitWidth && "bitwidth too small");
+ if (isSingleWord())
+ VAL = val;
+ else
+ initSlowCase(numBits, val, isSigned);
+ clearUnusedBits();
+ }
+
+ /// \brief Construct an APInt of numBits width, initialized as bigVal[].
+ ///
+ /// Note that bigVal.size() can be smaller or larger than the corresponding
+ /// bit width but any extraneous bits will be dropped.
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param bigVal a sequence of words to form the initial value of the APInt
+ APInt(unsigned numBits, ArrayRef<uint64_t> bigVal);
+
+ /// Equivalent to APInt(numBits, ArrayRef<uint64_t>(bigVal, numWords)), but
+ /// deprecated because this constructor is prone to ambiguity with the
+ /// APInt(unsigned, uint64_t, bool) constructor.
+ ///
+ /// If this overload is ever deleted, care should be taken to prevent calls
+ /// from being incorrectly captured by the APInt(unsigned, uint64_t, bool)
+ /// constructor.
+ APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]);
+
+ /// \brief Construct an APInt from a string representation.
+ ///
+ /// This constructor interprets the string \p str in the given radix. The
+ /// interpretation stops when the first character that is not suitable for the
+ /// radix is encountered, or the end of the string. Acceptable radix values
+ /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the
+ /// string to require more bits than numBits.
+ ///
+ /// \param numBits the bit width of the constructed APInt
+ /// \param str the string to be interpreted
+ /// \param radix the radix to use for the conversion
+ APInt(unsigned numBits, StringRef str, uint8_t radix);
+
+ /// Simply makes *this a copy of that.
+ /// @brief Copy Constructor.
+ APInt(const APInt &that) : BitWidth(that.BitWidth), VAL(0) {
+ if (isSingleWord())
+ VAL = that.VAL;
+ else
+ initSlowCase(that);
+ }
+
+ /// \brief Move Constructor.
+ APInt(APInt &&that) : BitWidth(that.BitWidth), VAL(that.VAL) {
+ that.BitWidth = 0;
+ }
+
+ /// \brief Destructor.
+ ~APInt() {
+ if (needsCleanup())
+ delete[] pVal;
+ }
+
+ /// \brief Default constructor that creates an uninteresting APInt
+ /// representing a 1-bit zero value.
+ ///
+ /// This is useful for object deserialization (pair this with the static
+ /// method Read).
+ explicit APInt() : BitWidth(1), VAL(0) {}
+
+ /// \brief Returns whether this instance allocated memory.
+ bool needsCleanup() const { return !isSingleWord(); }
+
+ /// Used to insert APInt objects, or objects that contain APInt objects, into
+ /// FoldingSets.
+ void Profile(FoldingSetNodeID &id) const;
+
+ /// @}
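+
+ // Example (illustrative sketch, not part of this header): constructing
+ // APInts of various widths with the constructors above.
+ //
+ //   APInt A(128, 0);                      // 128-bit zero
+ //   APInt B(16, -1, /*isSigned=*/true);   // 16 bits, all ones
+ //   APInt C(37, "1000000000", 10);        // 10^9 in a 37-bit APInt
+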
+ /// \name Value Tests
+ /// @{
+
+ /// \brief Determine sign of this APInt.
+ ///
+ /// This tests the high bit of this APInt to determine if it is set.
+ ///
+ /// \returns true if this APInt is negative, false otherwise
+ bool isNegative() const { return (*this)[BitWidth - 1]; }
+
+ /// \brief Determine if this APInt value is non-negative (>= 0).
+ ///
+ /// This tests the high bit of the APInt to determine if it is unset.
+ bool isNonNegative() const { return !isNegative(); }
+
+ /// \brief Determine if this APInt Value is positive.
+ ///
+ /// This tests if the value of this APInt is positive (> 0). Note
+ /// that 0 is not a positive value.
+ ///
+ /// \returns true if this APInt is positive.
+ bool isStrictlyPositive() const { return isNonNegative() && !!*this; }
+
+ /// \brief Determine if all bits are set
+ ///
+ /// This checks to see if all bits of the APInt are set.
+ bool isAllOnesValue() const {
+ if (isSingleWord())
+ return VAL == ~integerPart(0) >> (APINT_BITS_PER_WORD - BitWidth);
+ return countPopulationSlowCase() == BitWidth;
+ }
+
+ /// \brief Determine if this is the largest unsigned value.
+ ///
+ /// This checks to see if the value of this APInt is the maximum unsigned
+ /// value for the APInt's bit width.
+ bool isMaxValue() const { return isAllOnesValue(); }
+
+ /// \brief Determine if this is the largest signed value.
+ ///
+ /// This checks to see if the value of this APInt is the maximum signed
+ /// value for the APInt's bit width.
+ bool isMaxSignedValue() const {
+ return !isNegative() && countPopulation() == BitWidth - 1;
+ }
+
+ /// \brief Determine if this is the smallest unsigned value.
+ ///
+ /// This checks to see if the value of this APInt is the minimum unsigned
+ /// value for the APInt's bit width.
+ bool isMinValue() const { return !*this; }
+
+ /// \brief Determine if this is the smallest signed value.
+ ///
+ /// This checks to see if the value of this APInt is the minimum signed
+ /// value for the APInt's bit width.
+ bool isMinSignedValue() const {
+ return isNegative() && isPowerOf2();
+ }
+
+ /// \brief Check if this APInt has an N-bit unsigned integer value.
+ bool isIntN(unsigned N) const {
+ assert(N && "N == 0 ???");
+ return getActiveBits() <= N;
+ }
+
+ /// \brief Check if this APInt has an N-bit signed integer value.
+ bool isSignedIntN(unsigned N) const {
+ assert(N && "N == 0 ???");
+ return getMinSignedBits() <= N;
+ }
+
+ /// \brief Check if this APInt's value is a power of two greater than zero.
+ ///
+ /// \returns true if the argument APInt value is a power of two > 0.
+ bool isPowerOf2() const {
+ if (isSingleWord())
+ return isPowerOf2_64(VAL);
+ return countPopulationSlowCase() == 1;
+ }
+
+ /// \brief Check if the APInt's value is returned by getSignBit.
+ ///
+ /// \returns true if this is the value returned by getSignBit.
+ bool isSignBit() const { return isMinSignedValue(); }
+
+ /// \brief Convert APInt to a boolean value.
+ ///
+ /// This converts the APInt to a boolean value as a test against zero.
+ bool getBoolValue() const { return !!*this; }
+
+ /// If this value is smaller than the specified limit, return it, otherwise
+ /// return the limit value. This causes the value to saturate to the limit.
+ uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const {
+ return (getActiveBits() > 64 || getZExtValue() > Limit) ? Limit
+ : getZExtValue();
+ }
+
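+ // Example (illustrative sketch): getLimitedValue() lets wide values be
+ // consumed as a host uint64_t by saturating at the limit.
+ //
+ //   APInt W(128, 0);
+ //   W.setBit(100);                          // W == 2^100
+ //   uint64_t V = W.getLimitedValue(1000);   // 1000: saturated to the limit
+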
+ /// \brief Check if the APInt consists of a repeated bit pattern.
+ ///
+ /// e.g. 0x01010101 satisfies isSplat(8).
+ /// \param SplatSizeInBits The size of the pattern in bits. Must divide bit
+ /// width without remainder.
+ bool isSplat(unsigned SplatSizeInBits) const;
+
+ /// @}
+ /// \name Value Generators
+ /// @{
+
+ /// \brief Gets maximum unsigned value of APInt for a specific bit width.
+ static APInt getMaxValue(unsigned numBits) {
+ return getAllOnesValue(numBits);
+ }
+
+ /// \brief Gets maximum signed value of APInt for a specific bit width.
+ static APInt getSignedMaxValue(unsigned numBits) {
+ APInt API = getAllOnesValue(numBits);
+ API.clearBit(numBits - 1);
+ return API;
+ }
+
+ /// \brief Gets minimum unsigned value of APInt for a specific bit width.
+ static APInt getMinValue(unsigned numBits) { return APInt(numBits, 0); }
+
+ /// \brief Gets minimum signed value of APInt for a specific bit width.
+ static APInt getSignedMinValue(unsigned numBits) {
+ APInt API(numBits, 0);
+ API.setBit(numBits - 1);
+ return API;
+ }
+
+ /// \brief Get the SignBit for a specific bit width.
+ ///
+ /// This is just a wrapper function of getSignedMinValue(), and it helps code
+ /// readability when we want to get a SignBit.
+ static APInt getSignBit(unsigned BitWidth) {
+ return getSignedMinValue(BitWidth);
+ }
+
+ /// \brief Get the all-ones value.
+ ///
+ /// \returns the all-ones value for an APInt of the specified bit-width.
+ static APInt getAllOnesValue(unsigned numBits) {
+ return APInt(numBits, UINT64_MAX, true);
+ }
+
+ /// \brief Get the '0' value.
+ ///
+ /// \returns the '0' value for an APInt of the specified bit-width.
+ static APInt getNullValue(unsigned numBits) { return APInt(numBits, 0); }
+
+ /// \brief Compute an APInt containing numBits highbits from this APInt.
+ ///
+ /// Get an APInt with the same BitWidth as this APInt, just zero mask
+ /// the low bits and right shift to the least significant bit.
+ ///
+ /// \returns the high "numBits" bits of this APInt.
+ APInt getHiBits(unsigned numBits) const;
+
+ /// \brief Compute an APInt containing numBits lowbits from this APInt.
+ ///
+ /// Get an APInt with the same BitWidth as this APInt, just zero mask
+ /// the high bits.
+ ///
+ /// \returns the low "numBits" bits of this APInt.
+ APInt getLoBits(unsigned numBits) const;
+
+ /// \brief Return an APInt with exactly one bit set in the result.
+ static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+ APInt Res(numBits, 0);
+ Res.setBit(BitNo);
+ return Res;
+ }
+
+ /// \brief Get a value with a block of bits set.
+ ///
+ /// Constructs an APInt value that has a contiguous range of bits set. The
+ /// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
+ /// bits will be zero. For example, with parameters (32, 0, 16) you would get
+ /// 0x0000FFFF. If hiBit is less than loBit then the set bits "wrap". For
+ /// example, with parameters (32, 28, 4), you would get 0xF000000F.
+ ///
+ /// \param numBits the intended bit width of the result
+ /// \param loBit the index of the lowest bit set.
+ /// \param hiBit the index of the highest bit set.
+ ///
+ /// \returns An APInt value with the requested bits set.
+ static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit) {
+ assert(hiBit <= numBits && "hiBit out of range");
+ assert(loBit < numBits && "loBit out of range");
+ if (hiBit < loBit)
+ return getLowBitsSet(numBits, hiBit) |
+ getHighBitsSet(numBits, numBits - loBit);
+ return getLowBitsSet(numBits, hiBit - loBit).shl(loBit);
+ }
+
+ /// \brief Get a value with high bits set
+ ///
+ /// Constructs an APInt value that has the top hiBitsSet bits set.
+ ///
+ /// \param numBits the bitwidth of the result
+ /// \param hiBitsSet the number of high-order bits set in the result.
+ static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet) {
+ assert(hiBitsSet <= numBits && "Too many bits to set!");
+ // Handle a degenerate case, to avoid shifting by word size
+ if (hiBitsSet == 0)
+ return APInt(numBits, 0);
+ unsigned shiftAmt = numBits - hiBitsSet;
+ // For small values, return quickly
+ if (numBits <= APINT_BITS_PER_WORD)
+ return APInt(numBits, ~0ULL << shiftAmt);
+ return getAllOnesValue(numBits).shl(shiftAmt);
+ }
+
+ /// \brief Get a value with low bits set
+ ///
+ /// Constructs an APInt value that has the bottom loBitsSet bits set.
+ ///
+ /// \param numBits the bitwidth of the result
+ /// \param loBitsSet the number of low-order bits set in the result.
+ static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet) {
+ assert(loBitsSet <= numBits && "Too many bits to set!");
+ // Handle a degenerate case, to avoid shifting by word size
+ if (loBitsSet == 0)
+ return APInt(numBits, 0);
+ if (loBitsSet == APINT_BITS_PER_WORD)
+ return APInt(numBits, UINT64_MAX);
+ // For small values, return quickly.
+ if (loBitsSet <= APINT_BITS_PER_WORD)
+ return APInt(numBits, UINT64_MAX >> (APINT_BITS_PER_WORD - loBitsSet));
+ return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
+ }
+
+ /// \brief Return a value containing V broadcasted over NewLen bits.
+ static APInt getSplat(unsigned NewLen, const APInt &V) {
+ assert(NewLen >= V.getBitWidth() && "Can't splat to smaller bit width!");
+
+ APInt Val = V.zextOrSelf(NewLen);
+ for (unsigned I = V.getBitWidth(); I < NewLen; I <<= 1)
+ Val |= Val << I;
+
+ return Val;
+ }
+
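+ // Example (illustrative sketch): broadcasting a byte pattern.
+ //
+ //   APInt Pat = APInt::getSplat(32, APInt(8, 0x01));  // 0x01010101
+ //   assert(Pat.isSplat(8));
+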
+ /// \brief Determine if two APInts have the same value, after zero-extending
+ /// one of them (if needed!) to ensure that the bit-widths match.
+ static bool isSameValue(const APInt &I1, const APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+ }
+
+ /// \brief Overload to compute a hash_code for an APInt value.
+ friend hash_code hash_value(const APInt &Arg);
+
+ /// This function returns a pointer to the internal storage of the APInt.
+ /// This is useful for writing out the APInt in binary form without any
+ /// conversions.
+ const uint64_t *getRawData() const {
+ if (isSingleWord())
+ return &VAL;
+ return &pVal[0];
+ }
+
+ /// @}
+ /// \name Unary Operators
+ /// @{
+
+ /// \brief Postfix increment operator.
+ ///
+ /// \returns a new APInt value representing *this incremented by one
+ const APInt operator++(int) {
+ APInt API(*this);
+ ++(*this);
+ return API;
+ }
+
+ /// \brief Prefix increment operator.
+ ///
+ /// \returns *this incremented by one
+ APInt &operator++();
+
+ /// \brief Postfix decrement operator.
+ ///
+ /// \returns a new APInt representing *this decremented by one.
+ const APInt operator--(int) {
+ APInt API(*this);
+ --(*this);
+ return API;
+ }
+
+ /// \brief Prefix decrement operator.
+ ///
+ /// \returns *this decremented by one.
+ APInt &operator--();
+
+ /// \brief Unary bitwise complement operator.
+ ///
+ /// Performs a bitwise complement operation on this APInt.
+ ///
+ /// \returns an APInt that is the bitwise complement of *this
+ APInt operator~() const {
+ APInt Result(*this);
+ Result.flipAllBits();
+ return Result;
+ }
+
+ /// \brief Unary negation operator
+ ///
+ /// Negates *this using two's complement logic.
+ ///
+ /// \returns An APInt value representing the negation of *this.
+ APInt operator-() const { return APInt(BitWidth, 0) - (*this); }
+
+ /// \brief Logical negation operator.
+ ///
+ /// Performs logical negation operation on this APInt.
+ ///
+ /// \returns true if *this is zero, false otherwise.
+ bool operator!() const {
+ if (isSingleWord())
+ return !VAL;
+
+ for (unsigned i = 0; i != getNumWords(); ++i)
+ if (pVal[i])
+ return false;
+ return true;
+ }
+
+ /// @}
+ /// \name Assignment Operators
+ /// @{
+
+ /// \brief Copy assignment operator.
+ ///
+ /// \returns *this after assignment of RHS.
+ APInt &operator=(const APInt &RHS) {
+ // If the bitwidths are the same, we can avoid mucking with memory
+ if (isSingleWord() && RHS.isSingleWord()) {
+ VAL = RHS.VAL;
+ BitWidth = RHS.BitWidth;
+ return clearUnusedBits();
+ }
+
+ return AssignSlowCase(RHS);
+ }
+
+ /// @brief Move assignment operator.
+ APInt &operator=(APInt &&that) {
+ if (!isSingleWord()) {
+ // The MSVC STL shipped in 2013 requires that self move assignment be a
+ // no-op. Otherwise algorithms like stable_sort will produce answers
+ // where half of the output is left in a moved-from state.
+ if (this == &that)
+ return *this;
+ delete[] pVal;
+ }
+
+ // Use memcpy so that type based alias analysis sees both VAL and pVal
+ // as modified.
+ memcpy(&VAL, &that.VAL, sizeof(uint64_t));
+
+ // If 'this == &that', avoid zeroing our own bitwidth by storing to 'that'
+ // first.
+ unsigned ThatBitWidth = that.BitWidth;
+ that.BitWidth = 0;
+ BitWidth = ThatBitWidth;
+
+ return *this;
+ }
+
+ /// \brief Assignment operator.
+ ///
+ /// The RHS value is assigned to *this. If the significant bits in RHS exceed
+ /// the bit width, the excess bits are truncated. If the bit width is larger
+ /// than 64, the value is zero filled in the unspecified high order bits.
+ ///
+ /// \returns *this after assignment of RHS value.
+ APInt &operator=(uint64_t RHS);
+
+ /// \brief Bitwise AND assignment operator.
+ ///
+ /// Performs a bitwise AND operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after ANDing with RHS.
+ APInt &operator&=(const APInt &RHS);
+
+ /// \brief Bitwise OR assignment operator.
+ ///
+ /// Performs a bitwise OR operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after ORing with RHS.
+ APInt &operator|=(const APInt &RHS);
+
+ /// \brief Bitwise OR assignment operator.
+ ///
+ /// Performs a bitwise OR operation on this APInt and RHS. RHS is
+ /// logically zero-extended or truncated to match the bit-width of
+ /// the LHS.
+ APInt &operator|=(uint64_t RHS) {
+ if (isSingleWord()) {
+ VAL |= RHS;
+ clearUnusedBits();
+ } else {
+ pVal[0] |= RHS;
+ }
+ return *this;
+ }
+
+ /// \brief Bitwise XOR assignment operator.
+ ///
+ /// Performs a bitwise XOR operation on this APInt and RHS. The result is
+ /// assigned to *this.
+ ///
+ /// \returns *this after XORing with RHS.
+ APInt &operator^=(const APInt &RHS);
+
+ /// \brief Multiplication assignment operator.
+ ///
+ /// Multiplies this APInt by RHS and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator*=(const APInt &RHS);
+
+ /// \brief Addition assignment operator.
+ ///
+ /// Adds RHS to *this and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator+=(const APInt &RHS);
+
+ /// \brief Subtraction assignment operator.
+ ///
+ /// Subtracts RHS from *this and assigns the result to *this.
+ ///
+ /// \returns *this
+ APInt &operator-=(const APInt &RHS);
+
+ /// \brief Left-shift assignment function.
+ ///
+ /// Shifts *this left by shiftAmt and assigns the result to *this.
+ ///
+ /// \returns *this after shifting left by shiftAmt
+ APInt &operator<<=(unsigned shiftAmt) {
+ *this = shl(shiftAmt);
+ return *this;
+ }
+
+ /// @}
+ /// \name Binary Operators
+ /// @{
+
+ /// \brief Bitwise AND operator.
+ ///
+ /// Performs a bitwise AND operation on *this and RHS.
+ ///
+ /// \returns An APInt value representing the bitwise AND of *this and RHS.
+ APInt operator&(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return APInt(getBitWidth(), VAL & RHS.VAL);
+ return AndSlowCase(RHS);
+ }
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APInt &RHS) const {
+ return this->operator&(RHS);
+ }
+
+ /// \brief Bitwise OR operator.
+ ///
+ /// Performs a bitwise OR operation on *this and RHS.
+ ///
+ /// \returns An APInt value representing the bitwise OR of *this and RHS.
+ APInt operator|(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return APInt(getBitWidth(), VAL | RHS.VAL);
+ return OrSlowCase(RHS);
+ }
+
+ /// \brief Bitwise OR function.
+ ///
+ /// Performs a bitwise OR on *this and RHS. This is implemented by simply
+ /// calling operator|.
+ ///
+ /// \returns An APInt value representing the bitwise OR of *this and RHS.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APInt &RHS) const {
+ return this->operator|(RHS);
+ }
+
+ /// \brief Bitwise XOR operator.
+ ///
+ /// Performs a bitwise XOR operation on *this and RHS.
+ ///
+ /// \returns An APInt value representing the bitwise XOR of *this and RHS.
+ APInt operator^(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Bit widths must be the same");
+ if (isSingleWord())
+ return APInt(BitWidth, VAL ^ RHS.VAL);
+ return XorSlowCase(RHS);
+ }
+
+ /// \brief Bitwise XOR function.
+ ///
+ /// Performs a bitwise XOR operation on *this and RHS. This is implemented
+ /// through the usage of operator^.
+ ///
+ /// \returns An APInt value representing the bitwise XOR of *this and RHS.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APInt &RHS) const {
+ return this->operator^(RHS);
+ }
+
+ /// \brief Multiplication operator.
+ ///
+ /// Multiplies this APInt by RHS and returns the result.
+ APInt operator*(const APInt &RHS) const;
+
+ /// \brief Addition operator.
+ ///
+ /// Adds RHS to this APInt and returns the result.
+ APInt operator+(const APInt &RHS) const;
+ APInt operator+(uint64_t RHS) const { return (*this) + APInt(BitWidth, RHS); }
+
+ /// \brief Subtraction operator.
+ ///
+ /// Subtracts RHS from this APInt and returns the result.
+ APInt operator-(const APInt &RHS) const;
+ APInt operator-(uint64_t RHS) const { return (*this) - APInt(BitWidth, RHS); }
+
+ /// \brief Left logical shift operator.
+ ///
+ /// Shifts this APInt left by \p Bits and returns the result.
+ APInt operator<<(unsigned Bits) const { return shl(Bits); }
+
+ /// \brief Left logical shift operator.
+ ///
+ /// Shifts this APInt left by \p Bits and returns the result.
+ APInt operator<<(const APInt &Bits) const { return shl(Bits); }
+
+ /// \brief Arithmetic right-shift function.
+ ///
+ /// Arithmetic right-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(unsigned shiftAmt) const;
+
+ /// \brief Logical right-shift function.
+ ///
+ /// Logical right-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(unsigned shiftAmt) const;
+
+ /// \brief Left-shift function.
+ ///
+ /// Left-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(unsigned shiftAmt) const {
+ assert(shiftAmt <= BitWidth && "Invalid shift amount");
+ if (isSingleWord()) {
+ if (shiftAmt >= BitWidth)
+ return APInt(BitWidth, 0); // avoid undefined shift results
+ return APInt(BitWidth, VAL << shiftAmt);
+ }
+ return shlSlowCase(shiftAmt);
+ }
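+
+  // Illustrative sketch: unlike a raw integer shift, shifting by the full
+  // bit width is well defined here and yields zero (values arbitrary):
+  //
+  //   APInt V(8, 0xFF);
+  //   V.shl(4);   // 0xF0
+  //   V.shl(8);   // 0x00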
+
+ /// \brief Rotate left by rotateAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(unsigned rotateAmt) const;
+
+ /// \brief Rotate right by rotateAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(unsigned rotateAmt) const;
+
+ /// \brief Arithmetic right-shift function.
+ ///
+ /// Arithmetic right-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT ashr(const APInt &shiftAmt) const;
+
+ /// \brief Logical right-shift function.
+ ///
+ /// Logical right-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT lshr(const APInt &shiftAmt) const;
+
+ /// \brief Left-shift function.
+ ///
+ /// Left-shift this APInt by shiftAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT shl(const APInt &shiftAmt) const;
+
+ /// \brief Rotate left by rotateAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotl(const APInt &rotateAmt) const;
+
+ /// \brief Rotate right by rotateAmt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT rotr(const APInt &rotateAmt) const;
+
+ /// \brief Unsigned division operation.
+ ///
+ /// Perform an unsigned divide operation on this APInt by RHS. Both this and
+ /// RHS are treated as unsigned quantities for purposes of this division.
+ ///
+ /// \returns a new APInt value containing the division result
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT udiv(const APInt &RHS) const;
+
+ /// \brief Signed division function for APInt.
+ ///
+ /// Signed divide this APInt by APInt RHS.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT sdiv(const APInt &RHS) const;
+
+ /// \brief Unsigned remainder operation.
+ ///
+ /// Perform an unsigned remainder operation on this APInt with RHS being the
+ /// divisor. Both this and RHS are treated as unsigned quantities for purposes
+ /// of this operation. Note that this is a true remainder operation and not a
+ /// modulo operation because the sign follows the sign of the dividend which
+ /// is *this.
+ ///
+ /// \returns a new APInt value containing the remainder result
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT urem(const APInt &RHS) const;
+
+ /// \brief Function for signed remainder operation.
+ ///
+ /// Signed remainder operation on APInt.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT srem(const APInt &RHS) const;
+
+ /// \brief Dual division/remainder interface.
+ ///
+ /// Sometimes it is convenient to divide two APInt values and obtain both the
+ /// quotient and remainder. This function does both operations in the same
+ /// computation making it a little more efficient. The pair of input arguments
+ /// may overlap with the pair of output arguments. It is safe to call
+ /// udivrem(X, Y, X, Y), for example.
+ static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+ APInt &Remainder);
+
+ static void sdivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient,
+ APInt &Remainder);
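+
+  // Illustrative sketch of the dual interface (values arbitrary):
+  //
+  //   APInt A(32, 17), B(32, 5), Q(32, 0), R(32, 0);
+  //   APInt::udivrem(A, B, Q, R);   // Q == 3, R == 2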
+
+ // Operations that return overflow indicators.
+ APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+ APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt umul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sshl_ov(const APInt &Amt, bool &Overflow) const;
+ APInt ushl_ov(const APInt &Amt, bool &Overflow) const;
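+
+  // Illustrative sketch: each *_ov variant returns the wrapped result and
+  // reports wraparound through the Overflow out-parameter (values arbitrary):
+  //
+  //   bool Overflow;
+  //   APInt Max = APInt::getSignedMaxValue(8);          // 127
+  //   APInt Sum = Max.sadd_ov(APInt(8, 1), Overflow);   // Sum == -128
+  //   // Overflow == true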
+
+ /// \brief Array-indexing support.
+ ///
+ /// \returns the bit value at bitPosition
+ bool operator[](unsigned bitPosition) const {
+ assert(bitPosition < getBitWidth() && "Bit position out of bounds!");
+ return (maskBit(bitPosition) &
+ (isSingleWord() ? VAL : pVal[whichWord(bitPosition)])) !=
+ 0;
+ }
+
+ /// @}
+ /// \name Comparison Operators
+ /// @{
+
+ /// \brief Equality operator.
+ ///
+ /// Compares this APInt with RHS for the validity of the equality
+ /// relationship.
+ bool operator==(const APInt &RHS) const {
+ assert(BitWidth == RHS.BitWidth && "Comparison requires equal bit widths");
+ if (isSingleWord())
+ return VAL == RHS.VAL;
+ return EqualSlowCase(RHS);
+ }
+
+ /// \brief Equality operator.
+ ///
+ /// Compares this APInt with a uint64_t for the validity of the equality
+ /// relationship.
+ ///
+ /// \returns true if *this == Val
+ bool operator==(uint64_t Val) const {
+ if (isSingleWord())
+ return VAL == Val;
+ return EqualSlowCase(Val);
+ }
+
+ /// \brief Equality comparison.
+ ///
+ /// Compares this APInt with RHS for the validity of the equality
+ /// relationship.
+ ///
+  /// \returns true if *this == RHS
+ bool eq(const APInt &RHS) const { return (*this) == RHS; }
+
+ /// \brief Inequality operator.
+ ///
+ /// Compares this APInt with RHS for the validity of the inequality
+ /// relationship.
+ ///
+  /// \returns true if *this != RHS
+ bool operator!=(const APInt &RHS) const { return !((*this) == RHS); }
+
+ /// \brief Inequality operator.
+ ///
+ /// Compares this APInt with a uint64_t for the validity of the inequality
+ /// relationship.
+ ///
+ /// \returns true if *this != Val
+ bool operator!=(uint64_t Val) const { return !((*this) == Val); }
+
+ /// \brief Inequality comparison
+ ///
+ /// Compares this APInt with RHS for the validity of the inequality
+ /// relationship.
+ ///
+  /// \returns true if *this != RHS
+ bool ne(const APInt &RHS) const { return !((*this) == RHS); }
+
+ /// \brief Unsigned less than comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when both are considered unsigned.
+ bool ult(const APInt &RHS) const;
+
+ /// \brief Unsigned less than comparison
+ ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when considered unsigned.
+ bool ult(uint64_t RHS) const {
+ return getActiveBits() > 64 ? false : getZExtValue() < RHS;
+ }
+
+ /// \brief Signed less than comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when both are considered signed.
+ bool slt(const APInt &RHS) const;
+
+ /// \brief Signed less than comparison
+ ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the less-than relationship.
+ ///
+ /// \returns true if *this < RHS when considered signed.
+ bool slt(int64_t RHS) const {
+ return getMinSignedBits() > 64 ? isNegative() : getSExtValue() < RHS;
+ }
+
+ /// \brief Unsigned less or equal comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when both are considered unsigned.
+ bool ule(const APInt &RHS) const { return ult(RHS) || eq(RHS); }
+
+ /// \brief Unsigned less or equal comparison
+ ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when considered unsigned.
+ bool ule(uint64_t RHS) const { return !ugt(RHS); }
+
+ /// \brief Signed less or equal comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when both are considered signed.
+ bool sle(const APInt &RHS) const { return slt(RHS) || eq(RHS); }
+
+ /// \brief Signed less or equal comparison
+ ///
+  /// Regards *this as a signed quantity and compares it with RHS for the
+ /// validity of the less-or-equal relationship.
+ ///
+ /// \returns true if *this <= RHS when considered signed.
+ bool sle(uint64_t RHS) const { return !sgt(RHS); }
+
+  /// \brief Unsigned greater than comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when both are considered unsigned.
+ bool ugt(const APInt &RHS) const { return !ult(RHS) && !eq(RHS); }
+
+ /// \brief Unsigned greater than comparison
+ ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when considered unsigned.
+ bool ugt(uint64_t RHS) const {
+ return getActiveBits() > 64 ? true : getZExtValue() > RHS;
+ }
+
+  /// \brief Signed greater than comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for the
+ /// validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when both are considered signed.
+ bool sgt(const APInt &RHS) const { return !slt(RHS) && !eq(RHS); }
+
+ /// \brief Signed greater than comparison
+ ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-than relationship.
+ ///
+ /// \returns true if *this > RHS when considered signed.
+ bool sgt(int64_t RHS) const {
+ return getMinSignedBits() > 64 ? !isNegative() : getSExtValue() > RHS;
+ }
+
+ /// \brief Unsigned greater or equal comparison
+ ///
+ /// Regards both *this and RHS as unsigned quantities and compares them for
+ /// validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when both are considered unsigned.
+ bool uge(const APInt &RHS) const { return !ult(RHS); }
+
+ /// \brief Unsigned greater or equal comparison
+ ///
+  /// Regards *this as an unsigned quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when considered unsigned.
+ bool uge(uint64_t RHS) const { return !ult(RHS); }
+
+  /// \brief Signed greater or equal comparison
+ ///
+ /// Regards both *this and RHS as signed quantities and compares them for
+ /// validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when both are considered signed.
+ bool sge(const APInt &RHS) const { return !slt(RHS); }
+
+ /// \brief Signed greater or equal comparison
+ ///
+  /// Regards *this as a signed quantity and compares it with RHS for
+ /// the validity of the greater-or-equal relationship.
+ ///
+ /// \returns true if *this >= RHS when considered signed.
+ bool sge(int64_t RHS) const { return !slt(RHS); }
+
+ /// This operation tests if there are any pairs of corresponding bits
+ /// between this APInt and RHS that are both set.
+ bool intersects(const APInt &RHS) const { return (*this & RHS) != 0; }
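+
+  // Illustrative sketch: the same bit pattern orders differently under the
+  // signed and unsigned predicate families (values arbitrary):
+  //
+  //   APInt X(8, 0x80), Y(8, 0x01);
+  //   X.ugt(Y);         // true, 0x80 is 128 when unsigned
+  //   X.slt(Y);         // true, 0x80 is -128 when signed
+  //   X.intersects(Y);  // false, no common set bits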
+
+ /// @}
+ /// \name Resizing Operators
+ /// @{
+
+ /// \brief Truncate to new width.
+ ///
+ /// Truncate the APInt to a specified width. It is an error to specify a width
+ /// that is greater than or equal to the current width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(unsigned width) const;
+
+ /// \brief Sign extend to a new width.
+ ///
+ /// This operation sign extends the APInt to a new width. If the high order
+ /// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
+ /// It is an error to specify a width that is less than or equal to the
+ /// current width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT sext(unsigned width) const;
+
+ /// \brief Zero extend to a new width.
+ ///
+ /// This operation zero extends the APInt to a new width. The high order bits
+ /// are filled with 0 bits. It is an error to specify a width that is less
+ /// than or equal to the current width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT zext(unsigned width) const;
+
+ /// \brief Sign extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is sign
+ /// extended, truncated, or left alone to make it that width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrTrunc(unsigned width) const;
+
+ /// \brief Zero extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is zero
+ /// extended, truncated, or left alone to make it that width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrTrunc(unsigned width) const;
+
+ /// \brief Sign extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is sign
+ /// extended, or left alone to make it that width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT sextOrSelf(unsigned width) const;
+
+ /// \brief Zero extend or truncate to width
+ ///
+ /// Make this APInt have the bit width given by \p width. The value is zero
+ /// extended, or left alone to make it that width.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrSelf(unsigned width) const;
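+
+  // Illustrative sketch of the resizing family (values arbitrary):
+  //
+  //   APInt N(8, 0x80);
+  //   N.zext(16);    // 0x0080, high bits zero-filled
+  //   N.sext(16);    // 0xFF80, sign bit replicated
+  //   N.trunc(4);    // 0x0, low four bits only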
+
+ /// @}
+ /// \name Bit Manipulation Operators
+ /// @{
+
+ /// \brief Set every bit to 1.
+ void setAllBits() {
+ if (isSingleWord())
+ VAL = UINT64_MAX;
+ else {
+ // Set all the bits in all the words.
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] = UINT64_MAX;
+ }
+ // Clear the unused ones
+ clearUnusedBits();
+ }
+
+ /// \brief Set a given bit to 1.
+ ///
+  /// Set the bit at the position given by "bitPosition" to 1.
+ void setBit(unsigned bitPosition);
+
+ /// \brief Set every bit to 0.
+ void clearAllBits() {
+ if (isSingleWord())
+ VAL = 0;
+ else
+ memset(pVal, 0, getNumWords() * APINT_WORD_SIZE);
+ }
+
+ /// \brief Set a given bit to 0.
+ ///
+  /// Set the bit at the position given by "bitPosition" to 0.
+ void clearBit(unsigned bitPosition);
+
+ /// \brief Toggle every bit to its opposite value.
+ void flipAllBits() {
+ if (isSingleWord())
+ VAL ^= UINT64_MAX;
+ else {
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] ^= UINT64_MAX;
+ }
+ clearUnusedBits();
+ }
+
+ /// \brief Toggles a given bit to its opposite value.
+ ///
+  /// Toggle the bit at the position given by "bitPosition" to its opposite
+  /// value.
+ void flipBit(unsigned bitPosition);
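+
+  // Illustrative sketch (values arbitrary):
+  //
+  //   APInt M(8, 0);
+  //   M.setBit(3);     // M == 0b00001000
+  //   M.flipBit(0);    // M == 0b00001001
+  //   M.clearBit(3);   // M == 0b00000001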
+
+ /// @}
+ /// \name Value Characterization Functions
+ /// @{
+
+ /// \brief Return the number of bits in the APInt.
+ unsigned getBitWidth() const { return BitWidth; }
+
+ /// \brief Get the number of words.
+ ///
+  /// Here one word's bit width equals that of uint64_t.
+ ///
+ /// \returns the number of words to hold the integer value of this APInt.
+ unsigned getNumWords() const { return getNumWords(BitWidth); }
+
+ /// \brief Get the number of words.
+ ///
+  /// *NOTE* Here one word's bit width equals that of uint64_t.
+ ///
+ /// \returns the number of words to hold the integer value with a given bit
+ /// width.
+ static unsigned getNumWords(unsigned BitWidth) {
+ return ((uint64_t)BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD;
+ }
+
+ /// \brief Compute the number of active bits in the value
+ ///
+ /// This function returns the number of active bits which is defined as the
+ /// bit width minus the number of leading zeros. This is used in several
+ /// computations to see how "wide" the value is.
+ unsigned getActiveBits() const { return BitWidth - countLeadingZeros(); }
+
+ /// \brief Compute the number of active words in the value of this APInt.
+ ///
+ /// This is used in conjunction with getActiveData to extract the raw value of
+ /// the APInt.
+ unsigned getActiveWords() const {
+ unsigned numActiveBits = getActiveBits();
+ return numActiveBits ? whichWord(numActiveBits - 1) + 1 : 1;
+ }
+
+ /// \brief Get the minimum bit size for this signed APInt
+ ///
+ /// Computes the minimum bit width for this APInt while considering it to be a
+ /// signed (and probably negative) value. If the value is not negative, this
+ /// function returns the same value as getActiveBits()+1. Otherwise, it
+ /// returns the smallest bit width that will retain the negative value. For
+ /// example, -1 can be written as 0b1 or 0xFFFFFFFFFF. 0b1 is shorter and so
+ /// for -1, this function will always return 1.
+ unsigned getMinSignedBits() const {
+ if (isNegative())
+ return BitWidth - countLeadingOnes() + 1;
+ return getActiveBits() + 1;
+ }
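+
+  // Illustrative sketch (values arbitrary):
+  //
+  //   APInt(16, 5).getActiveBits();                         // 3, 5 == 0b101
+  //   APInt(16, 5).getMinSignedBits();                      // 4, sign bit
+  //   APInt(16, -1, /*isSigned=*/true).getMinSignedBits();  // 1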
+
+ /// \brief Get zero extended value
+ ///
+ /// This method attempts to return the value of this APInt as a zero extended
+ /// uint64_t. The bitwidth must be <= 64 or the value must fit within a
+ /// uint64_t. Otherwise an assertion will result.
+ uint64_t getZExtValue() const {
+ if (isSingleWord())
+ return VAL;
+ assert(getActiveBits() <= 64 && "Too many bits for uint64_t");
+ return pVal[0];
+ }
+
+ /// \brief Get sign extended value
+ ///
+ /// This method attempts to return the value of this APInt as a sign extended
+ /// int64_t. The bit width must be <= 64 or the value must fit within an
+ /// int64_t. Otherwise an assertion will result.
+ int64_t getSExtValue() const {
+ if (isSingleWord())
+ return int64_t(VAL << (APINT_BITS_PER_WORD - BitWidth)) >>
+ (APINT_BITS_PER_WORD - BitWidth);
+ assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+ return int64_t(pVal[0]);
+ }
+
+ /// \brief Get bits required for string value.
+ ///
+ /// This method determines how many bits are required to hold the APInt
+ /// equivalent of the string given by \p str.
+ static unsigned getBitsNeeded(StringRef str, uint8_t radix);
+
+ /// \brief The APInt version of the countLeadingZeros functions in
+ /// MathExtras.h.
+ ///
+ /// It counts the number of zeros from the most significant bit to the first
+ /// one bit.
+ ///
+ /// \returns BitWidth if the value is zero, otherwise returns the number of
+  /// zeros from the most significant bit to the first one bit.
+ unsigned countLeadingZeros() const {
+ if (isSingleWord()) {
+ unsigned unusedBits = APINT_BITS_PER_WORD - BitWidth;
+ return llvm::countLeadingZeros(VAL) - unusedBits;
+ }
+ return countLeadingZerosSlowCase();
+ }
+
+ /// \brief Count the number of leading one bits.
+ ///
+ /// This function is an APInt version of the countLeadingOnes
+ /// functions in MathExtras.h. It counts the number of ones from the most
+ /// significant bit to the first zero bit.
+ ///
+  /// \returns 0 if the high order bit is not set, otherwise returns the
+  /// number of 1 bits from the most significant bit to the first zero bit.
+ unsigned countLeadingOnes() const;
+
+ /// Computes the number of leading bits of this APInt that are equal to its
+ /// sign bit.
+ unsigned getNumSignBits() const {
+ return isNegative() ? countLeadingOnes() : countLeadingZeros();
+ }
+
+ /// \brief Count the number of trailing zero bits.
+ ///
+ /// This function is an APInt version of the countTrailingZeros
+ /// functions in MathExtras.h. It counts the number of zeros from the least
+ /// significant bit to the first set bit.
+ ///
+ /// \returns BitWidth if the value is zero, otherwise returns the number of
+ /// zeros from the least significant bit to the first one bit.
+ unsigned countTrailingZeros() const;
+
+ /// \brief Count the number of trailing one bits.
+ ///
+ /// This function is an APInt version of the countTrailingOnes
+ /// functions in MathExtras.h. It counts the number of ones from the least
+ /// significant bit to the first zero bit.
+ ///
+ /// \returns BitWidth if the value is all ones, otherwise returns the number
+ /// of ones from the least significant bit to the first zero bit.
+ unsigned countTrailingOnes() const {
+ if (isSingleWord())
+ return llvm::countTrailingOnes(VAL);
+ return countTrailingOnesSlowCase();
+ }
+
+ /// \brief Count the number of bits set.
+ ///
+ /// This function is an APInt version of the countPopulation functions
+ /// in MathExtras.h. It counts the number of 1 bits in the APInt value.
+ ///
+ /// \returns 0 if the value is zero, otherwise returns the number of set bits.
+ unsigned countPopulation() const {
+ if (isSingleWord())
+ return llvm::countPopulation(VAL);
+ return countPopulationSlowCase();
+ }
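+
+  // Illustrative sketch of the counting family (values arbitrary):
+  //
+  //   APInt C(8, 0x2C);          // 0b00101100
+  //   C.countLeadingZeros();     // 2
+  //   C.countTrailingZeros();    // 2
+  //   C.countPopulation();       // 3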
+
+ /// @}
+ /// \name Conversion Functions
+ /// @{
+ void print(raw_ostream &OS, bool isSigned) const;
+
+  /// Converts an APInt to a string and appends it to Str. Str is commonly a
+ /// SmallString.
+ void toString(SmallVectorImpl<char> &Str, unsigned Radix, bool Signed,
+ bool formatAsCLiteral = false) const;
+
+ /// Considers the APInt to be unsigned and converts it into a string in the
+  /// radix given. The radix can be 2, 8, 10, 16, or 36.
+ void toStringUnsigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ toString(Str, Radix, false, false);
+ }
+
+ /// Considers the APInt to be signed and converts it into a string in the
+ /// radix given. The radix can be 2, 8, 10, 16, or 36.
+ void toStringSigned(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ toString(Str, Radix, true, false);
+ }
+
+ /// \brief Return the APInt as a std::string.
+ ///
+ /// Note that this is an inefficient method. It is better to pass in a
+ /// SmallVector/SmallString to the methods above to avoid thrashing the heap
+ /// for the string.
+ std::string toString(unsigned Radix, bool Signed) const;
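+
+  // Illustrative sketch (assumes llvm/ADT/SmallString.h for SmallString):
+  //
+  //   SmallString<16> S;
+  //   APInt(8, 255).toString(S, /*Radix=*/16, /*Signed=*/false);
+  //   // S == "FF"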
+
+ /// \returns a byte-swapped representation of this APInt Value.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT byteSwap() const;
+
+ /// \brief Converts this APInt to a double value.
+ double roundToDouble(bool isSigned) const;
+
+ /// \brief Converts this unsigned APInt to a double value.
+ double roundToDouble() const { return roundToDouble(false); }
+
+ /// \brief Converts this signed APInt to a double value.
+ double signedRoundToDouble() const { return roundToDouble(true); }
+
+ /// \brief Converts APInt bits to a double
+ ///
+ /// The conversion does not do a translation from integer to double, it just
+ /// re-interprets the bits as a double. Note that it is valid to do this on
+ /// any bit width. Exactly 64 bits will be translated.
+ double bitsToDouble() const {
+ union {
+ uint64_t I;
+ double D;
+ } T;
+ T.I = (isSingleWord() ? VAL : pVal[0]);
+ return T.D;
+ }
+
+  /// \brief Converts APInt bits to a float
+ ///
+ /// The conversion does not do a translation from integer to float, it just
+ /// re-interprets the bits as a float. Note that it is valid to do this on
+ /// any bit width. Exactly 32 bits will be translated.
+ float bitsToFloat() const {
+ union {
+ unsigned I;
+ float F;
+ } T;
+ T.I = unsigned((isSingleWord() ? VAL : pVal[0]));
+ return T.F;
+ }
+
+ /// \brief Converts a double to APInt bits.
+ ///
+ /// The conversion does not do a translation from double to integer, it just
+ /// re-interprets the bits of the double.
+ static APInt LLVM_ATTRIBUTE_UNUSED_RESULT doubleToBits(double V) {
+ union {
+ uint64_t I;
+ double D;
+ } T;
+ T.D = V;
+ return APInt(sizeof T * CHAR_BIT, T.I);
+ }
+
+ /// \brief Converts a float to APInt bits.
+ ///
+ /// The conversion does not do a translation from float to integer, it just
+ /// re-interprets the bits of the float.
+ static APInt LLVM_ATTRIBUTE_UNUSED_RESULT floatToBits(float V) {
+ union {
+ unsigned I;
+ float F;
+ } T;
+ T.F = V;
+ return APInt(sizeof T * CHAR_BIT, T.I);
+ }
+
+ /// @}
+ /// \name Mathematics Operations
+ /// @{
+
+ /// \returns the floor log base 2 of this APInt.
+ unsigned logBase2() const { return BitWidth - 1 - countLeadingZeros(); }
+
+ /// \returns the ceil log base 2 of this APInt.
+ unsigned ceilLogBase2() const {
+ return BitWidth - (*this - 1).countLeadingZeros();
+ }
+
+ /// \returns the nearest log base 2 of this APInt. Ties round up.
+ ///
+ /// NOTE: When we have a BitWidth of 1, we define:
+ ///
+ /// log2(0) = UINT32_MAX
+ /// log2(1) = 0
+ ///
+ /// to get around any mathematical concerns resulting from
+  /// referencing 2 in a space where 2 does not exist.
+ unsigned nearestLogBase2() const {
+ // Special case when we have a bitwidth of 1. If VAL is 1, then we
+ // get 0. If VAL is 0, we get UINT64_MAX which gets truncated to
+ // UINT32_MAX.
+ if (BitWidth == 1)
+ return VAL - 1;
+
+ // Handle the zero case.
+ if (!getBoolValue())
+ return UINT32_MAX;
+
+ // The non-zero case is handled by computing:
+ //
+ // nearestLogBase2(x) = logBase2(x) + x[logBase2(x)-1].
+ //
+ // where x[i] is referring to the value of the ith bit of x.
+ unsigned lg = logBase2();
+ return lg + unsigned((*this)[lg - 1]);
+ }
+
+  /// \returns the log base 2 of this APInt if it's an exact power of two, -1
+  /// otherwise.
+ int32_t exactLogBase2() const {
+ if (!isPowerOf2())
+ return -1;
+ return logBase2();
+ }
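+
+  // Illustrative sketch of the log-base-2 family (values arbitrary):
+  //
+  //   APInt(32, 16).logBase2();        // 4
+  //   APInt(32, 17).logBase2();        // 4 (floor)
+  //   APInt(32, 17).ceilLogBase2();    // 5
+  //   APInt(32, 17).exactLogBase2();   // -1, 17 is not a power of two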
+
+ /// \brief Compute the square root
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT sqrt() const;
+
+  /// \brief Get the absolute value.
+  ///
+  /// If *this is < 0 then return -(*this), otherwise *this.
+ APInt LLVM_ATTRIBUTE_UNUSED_RESULT abs() const {
+ if (isNegative())
+ return -(*this);
+ return *this;
+ }
+
+ /// \returns the multiplicative inverse for a given modulo.
+ APInt multiplicativeInverse(const APInt &modulo) const;
+
+ /// @}
+ /// \name Support for division by constant
+ /// @{
+
+ /// Calculate the magic number for signed division by a constant.
+ struct ms;
+ ms magic() const;
+
+ /// Calculate the magic number for unsigned division by a constant.
+ struct mu;
+ mu magicu(unsigned LeadingZeros = 0) const;
+
+ /// @}
+ /// \name Building-block Operations for APInt and APFloat
+ /// @{
+
+ // These building block operations operate on a representation of arbitrary
+ // precision, two's-complement, bignum integer values. They should be
+ // sufficient to implement APInt and APFloat bignum requirements. Inputs are
+ // generally a pointer to the base of an array of integer parts, representing
+ // an unsigned bignum, and a count of how many parts there are.
+
+ /// Sets the least significant part of a bignum to the input value, and zeroes
+ /// out higher parts.
+ static void tcSet(integerPart *, integerPart, unsigned int);
+
+ /// Assign one bignum to another.
+ static void tcAssign(integerPart *, const integerPart *, unsigned int);
+
+ /// Returns true if a bignum is zero, false otherwise.
+ static bool tcIsZero(const integerPart *, unsigned int);
+
+ /// Extract the given bit of a bignum; returns 0 or 1. Zero-based.
+ static int tcExtractBit(const integerPart *, unsigned int bit);
+
+ /// Copy the bit vector of width srcBITS from SRC, starting at bit srcLSB, to
+ /// DST, of dstCOUNT parts, such that the bit srcLSB becomes the least
+ /// significant bit of DST. All high bits above srcBITS in DST are
+ /// zero-filled.
+ static void tcExtract(integerPart *, unsigned int dstCount,
+ const integerPart *, unsigned int srcBits,
+ unsigned int srcLSB);
+
+ /// Set the given bit of a bignum. Zero-based.
+ static void tcSetBit(integerPart *, unsigned int bit);
+
+ /// Clear the given bit of a bignum. Zero-based.
+ static void tcClearBit(integerPart *, unsigned int bit);
+
+ /// Returns the bit number of the least or most significant set bit of a
+  /// number. If the input number has no bits set, -1U is returned.
+ static unsigned int tcLSB(const integerPart *, unsigned int);
+ static unsigned int tcMSB(const integerPart *parts, unsigned int n);
+
+ /// Negate a bignum in-place.
+ static void tcNegate(integerPart *, unsigned int);
+
+ /// DST += RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+ static integerPart tcAdd(integerPart *, const integerPart *,
+ integerPart carry, unsigned);
+
+ /// DST -= RHS + CARRY where CARRY is zero or one. Returns the carry flag.
+ static integerPart tcSubtract(integerPart *, const integerPart *,
+ integerPart carry, unsigned);
+
+ /// DST += SRC * MULTIPLIER + PART if add is true
+ /// DST = SRC * MULTIPLIER + PART if add is false
+ ///
+ /// Requires 0 <= DSTPARTS <= SRCPARTS + 1. If DST overlaps SRC they must
+ /// start at the same point, i.e. DST == SRC.
+ ///
+  /// If DSTPARTS == SRCPARTS + 1 no overflow occurs and zero is returned.
+ /// Otherwise DST is filled with the least significant DSTPARTS parts of the
+ /// result, and if all of the omitted higher parts were zero return zero,
+ /// otherwise overflow occurred and return one.
+ static int tcMultiplyPart(integerPart *dst, const integerPart *src,
+ integerPart multiplier, integerPart carry,
+ unsigned int srcParts, unsigned int dstParts,
+ bool add);
+
+ /// DST = LHS * RHS, where DST has the same width as the operands and is
+ /// filled with the least significant parts of the result. Returns one if
+ /// overflow occurred, otherwise zero. DST must be disjoint from both
+ /// operands.
+ static int tcMultiply(integerPart *, const integerPart *, const integerPart *,
+ unsigned);
+
+ /// DST = LHS * RHS, where DST has width the sum of the widths of the
+ /// operands. No overflow occurs. DST must be disjoint from both
+ /// operands. Returns the number of parts required to hold the result.
+ static unsigned int tcFullMultiply(integerPart *, const integerPart *,
+ const integerPart *, unsigned, unsigned);
+
+ /// If RHS is zero LHS and REMAINDER are left unchanged, return one.
+ /// Otherwise set LHS to LHS / RHS with the fractional part discarded, set
+ /// REMAINDER to the remainder, return zero. i.e.
+ ///
+ /// OLD_LHS = RHS * LHS + REMAINDER
+ ///
+ /// SCRATCH is a bignum of the same size as the operands and result for use by
+ /// the routine; its contents need not be initialized and are destroyed. LHS,
+ /// REMAINDER and SCRATCH must be distinct.
+ static int tcDivide(integerPart *lhs, const integerPart *rhs,
+ integerPart *remainder, integerPart *scratch,
+ unsigned int parts);
+
+ /// Shift a bignum left COUNT bits. Shifted in bits are zero. There are no
+ /// restrictions on COUNT.
+ static void tcShiftLeft(integerPart *, unsigned int parts,
+ unsigned int count);
+
+ /// Shift a bignum right COUNT bits. Shifted in bits are zero. There are no
+ /// restrictions on COUNT.
+ static void tcShiftRight(integerPart *, unsigned int parts,
+ unsigned int count);
+
+  /// The obvious AND, OR, XOR, and complement operations.
+ static void tcAnd(integerPart *, const integerPart *, unsigned int);
+ static void tcOr(integerPart *, const integerPart *, unsigned int);
+ static void tcXor(integerPart *, const integerPart *, unsigned int);
+ static void tcComplement(integerPart *, unsigned int);
+
+ /// Comparison (unsigned) of two bignums.
+ static int tcCompare(const integerPart *, const integerPart *, unsigned int);
+
+ /// Increment a bignum in-place. Return the carry flag.
+ static integerPart tcIncrement(integerPart *, unsigned int);
+
+ /// Decrement a bignum in-place. Return the borrow flag.
+ static integerPart tcDecrement(integerPart *, unsigned int);
+
+ /// Set the least significant BITS and clear the rest.
+ static void tcSetLeastSignificantBits(integerPart *, unsigned int,
+ unsigned int bits);
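+
+  // Illustrative sketch of the building-block layer, assuming a 64-bit
+  // integerPart and a two-part bignum (values arbitrary):
+  //
+  //   integerPart P[2], Q[2];
+  //   APInt::tcSet(P, 5, 2);                 // P == 5
+  //   APInt::tcSet(Q, ~(integerPart)0, 2);   // Q == 2^64 - 1
+  //   APInt::tcAdd(P, Q, 0, 2);              // P == 2^64 + 4, returns carry 0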
+
+ /// \brief debug method
+ void dump() const;
+
+ /// @}
+};
+
+/// Magic data for optimising signed division by a constant.
+struct APInt::ms {
+ APInt m; ///< magic number
+ unsigned s; ///< shift amount
+};
+
+/// Magic data for optimising unsigned division by a constant.
+struct APInt::mu {
+ APInt m; ///< magic number
+ bool a; ///< add indicator
+ unsigned s; ///< shift amount
+};
+
+inline bool operator==(uint64_t V1, const APInt &V2) { return V2 == V1; }
+
+inline bool operator!=(uint64_t V1, const APInt &V2) { return V2 != V1; }
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APInt &I) {
+ I.print(OS, true);
+ return OS;
+}
+
+namespace APIntOps {
+
+/// \brief Determine the smaller of two APInts considered to be signed.
+inline APInt smin(const APInt &A, const APInt &B) { return A.slt(B) ? A : B; }
+
+/// \brief Determine the larger of two APInts considered to be signed.
+inline APInt smax(const APInt &A, const APInt &B) { return A.sgt(B) ? A : B; }
+
+/// \brief Determine the smaller of two APInts considered to be unsigned.
+inline APInt umin(const APInt &A, const APInt &B) { return A.ult(B) ? A : B; }
+
+/// \brief Determine the larger of two APInts considered to be unsigned.
+inline APInt umax(const APInt &A, const APInt &B) { return A.ugt(B) ? A : B; }
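+
+// Illustrative sketch: the signed and unsigned selectors can disagree on the
+// same operands (values arbitrary):
+//
+//   APInt X(8, 0x80), Y(8, 0x01);
+//   APIntOps::smin(X, Y);   // X, since 0x80 is -128 when signed
+//   APIntOps::umax(X, Y);   // X, since 0x80 is 128 when unsigned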
+
+/// \brief Check if the specified APInt has an N-bit unsigned integer value.
+inline bool isIntN(unsigned N, const APInt &APIVal) { return APIVal.isIntN(N); }
+
+/// \brief Check if the specified APInt has an N-bit signed integer value.
+inline bool isSignedIntN(unsigned N, const APInt &APIVal) {
+ return APIVal.isSignedIntN(N);
+}
+
+/// \returns true if the argument APInt value is a sequence of ones starting at
+/// the least significant bit with the remainder zero.
+inline bool isMask(unsigned numBits, const APInt &APIVal) {
+ return numBits <= APIVal.getBitWidth() &&
+ APIVal == APInt::getLowBitsSet(APIVal.getBitWidth(), numBits);
+}
+
+/// \brief Return true if the argument APInt value contains a sequence of ones
+/// with the remainder zero.
+inline bool isShiftedMask(unsigned numBits, const APInt &APIVal) {
+ return isMask(numBits, (APIVal - APInt(numBits, 1)) | APIVal);
+}
+
+/// \brief Returns a byte-swapped representation of the specified APInt Value.
+inline APInt byteSwap(const APInt &APIVal) { return APIVal.byteSwap(); }
+
+/// \brief Returns the floor log base 2 of the specified APInt value.
+inline unsigned logBase2(const APInt &APIVal) { return APIVal.logBase2(); }
+
+/// \brief Compute GCD of two APInt values.
+///
+/// This function returns the greatest common divisor of the two APInt values
+/// using Euclid's algorithm.
+///
+/// \returns the greatest common divisor of Val1 and Val2
+APInt GreatestCommonDivisor(const APInt &Val1, const APInt &Val2);
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as an unsigned value for conversion purposes.
+inline double RoundAPIntToDouble(const APInt &APIVal) {
+ return APIVal.roundToDouble();
+}
+
+/// \brief Converts the given APInt to a double value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline double RoundSignedAPIntToDouble(const APInt &APIVal) {
+ return APIVal.signedRoundToDouble();
+}
+
+/// \brief Converts the given APInt to a float value.
+inline float RoundAPIntToFloat(const APInt &APIVal) {
+ return float(RoundAPIntToDouble(APIVal));
+}
+
+/// \brief Converts the given APInt to a float value.
+///
+/// Treats the APInt as a signed value for conversion purposes.
+inline float RoundSignedAPIntToFloat(const APInt &APIVal) {
+ return float(APIVal.signedRoundToDouble());
+}
+
+/// \brief Converts the given double value into an APInt.
+///
+/// This function converts a double value to an APInt value.
+APInt RoundDoubleToAPInt(double Double, unsigned width);
+
+/// \brief Converts a float value into an APInt.
+///
+/// Converts a float value into an APInt value.
+inline APInt RoundFloatToAPInt(float Float, unsigned width) {
+ return RoundDoubleToAPInt(double(Float), width);
+}
+
+/// \brief Arithmetic right-shift function.
+///
+/// Arithmetic right-shift the APInt by shiftAmt.
+inline APInt ashr(const APInt &LHS, unsigned shiftAmt) {
+ return LHS.ashr(shiftAmt);
+}
+
+/// \brief Logical right-shift function.
+///
+/// Logical right-shift the APInt by shiftAmt.
+inline APInt lshr(const APInt &LHS, unsigned shiftAmt) {
+ return LHS.lshr(shiftAmt);
+}
+
+/// \brief Left-shift function.
+///
+/// Left-shift the APInt by shiftAmt.
+inline APInt shl(const APInt &LHS, unsigned shiftAmt) {
+ return LHS.shl(shiftAmt);
+}
+
+/// \brief Signed division function for APInt.
+///
+/// Signed divide APInt LHS by APInt RHS.
+inline APInt sdiv(const APInt &LHS, const APInt &RHS) { return LHS.sdiv(RHS); }
+
+/// \brief Unsigned division function for APInt.
+///
+/// Unsigned divide APInt LHS by APInt RHS.
+inline APInt udiv(const APInt &LHS, const APInt &RHS) { return LHS.udiv(RHS); }
+
+/// \brief Function for signed remainder operation.
+///
+/// Signed remainder operation on APInt.
+inline APInt srem(const APInt &LHS, const APInt &RHS) { return LHS.srem(RHS); }
+
+/// \brief Function for unsigned remainder operation.
+///
+/// Unsigned remainder operation on APInt.
+inline APInt urem(const APInt &LHS, const APInt &RHS) { return LHS.urem(RHS); }
+
+/// \brief Function for multiplication operation.
+///
+/// Performs multiplication on APInt values.
+inline APInt mul(const APInt &LHS, const APInt &RHS) { return LHS * RHS; }
+
+/// \brief Function for addition operation.
+///
+/// Performs addition on APInt values.
+inline APInt add(const APInt &LHS, const APInt &RHS) { return LHS + RHS; }
+
+/// \brief Function for subtraction operation.
+///
+/// Performs subtraction on APInt values.
+inline APInt sub(const APInt &LHS, const APInt &RHS) { return LHS - RHS; }
+
+/// \brief Bitwise AND function for APInt.
+///
+/// Performs bitwise AND operation on APInt LHS and
+/// APInt RHS.
+inline APInt And(const APInt &LHS, const APInt &RHS) { return LHS & RHS; }
+
+/// \brief Bitwise OR function for APInt.
+///
+/// Performs bitwise OR operation on APInt LHS and APInt RHS.
+inline APInt Or(const APInt &LHS, const APInt &RHS) { return LHS | RHS; }
+
+/// \brief Bitwise XOR function for APInt.
+///
+/// Performs bitwise XOR operation on APInt.
+inline APInt Xor(const APInt &LHS, const APInt &RHS) { return LHS ^ RHS; }
+
+/// \brief Bitwise complement function.
+///
+/// Performs a bitwise complement operation on APInt.
+inline APInt Not(const APInt &APIVal) { return ~APIVal; }
+
+} // End of APIntOps namespace
+
+// See friend declaration above. This additional declaration is required in
+// order to compile LLVM with IBM xlC compiler.
+hash_code hash_value(const APInt &Arg);
+} // End of llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/APSInt.h b/contrib/llvm/include/llvm/ADT/APSInt.h
new file mode 100644
index 0000000..a6552d0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/APSInt.h
@@ -0,0 +1,342 @@
+//===-- llvm/ADT/APSInt.h - Arbitrary Precision Signed Int -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APSInt class, which is a simple class that
+// represents an arbitrary sized integer that knows its signedness.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_APSINT_H
+#define LLVM_ADT_APSINT_H
+
+#include "llvm/ADT/APInt.h"
+
+namespace llvm {
+
+class APSInt : public APInt {
+ bool IsUnsigned;
+
+public:
+  /// Default constructor that creates an uninitialized APSInt.
+ explicit APSInt() : IsUnsigned(false) {}
+
+ /// APSInt ctor - Create an APSInt with the specified width, default to
+ /// unsigned.
+ explicit APSInt(uint32_t BitWidth, bool isUnsigned = true)
+ : APInt(BitWidth, 0), IsUnsigned(isUnsigned) {}
+
+ explicit APSInt(APInt I, bool isUnsigned = true)
+ : APInt(std::move(I)), IsUnsigned(isUnsigned) {}
+
+ /// Construct an APSInt from a string representation.
+ ///
+  /// This constructor interprets the string \p Str using radix 10.
+ /// The interpretation stops at the end of the string. The bit width of the
+ /// constructed APSInt is determined automatically.
+ ///
+ /// \param Str the string to be interpreted.
+ explicit APSInt(StringRef Str);
+
+ APSInt &operator=(APInt RHS) {
+ // Retain our current sign.
+ APInt::operator=(std::move(RHS));
+ return *this;
+ }
+
+ APSInt &operator=(uint64_t RHS) {
+ // Retain our current sign.
+ APInt::operator=(RHS);
+ return *this;
+ }
+
+ // Query sign information.
+ bool isSigned() const { return !IsUnsigned; }
+ bool isUnsigned() const { return IsUnsigned; }
+ void setIsUnsigned(bool Val) { IsUnsigned = Val; }
+ void setIsSigned(bool Val) { IsUnsigned = !Val; }
+
+ /// toString - Append this APSInt to the specified SmallString.
+ void toString(SmallVectorImpl<char> &Str, unsigned Radix = 10) const {
+ APInt::toString(Str, Radix, isSigned());
+ }
+  /// toString - Converts an APSInt to a std::string. This is an inefficient
+ /// method; you should prefer passing in a SmallString instead.
+ std::string toString(unsigned Radix) const {
+ return APInt::toString(Radix, isSigned());
+ }
+ using APInt::toString;
+
+ /// \brief Get the correctly-extended \c int64_t value.
+ int64_t getExtValue() const {
+ assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
+ return isSigned() ? getSExtValue() : getZExtValue();
+ }
+
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT trunc(uint32_t width) const {
+ return APSInt(APInt::trunc(width), IsUnsigned);
+ }
+
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extend(uint32_t width) const {
+ if (IsUnsigned)
+ return APSInt(zext(width), IsUnsigned);
+ else
+ return APSInt(sext(width), IsUnsigned);
+ }
+
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT extOrTrunc(uint32_t width) const {
+ if (IsUnsigned)
+ return APSInt(zextOrTrunc(width), IsUnsigned);
+ else
+ return APSInt(sextOrTrunc(width), IsUnsigned);
+ }
+
+ const APSInt &operator%=(const APSInt &RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ if (IsUnsigned)
+ *this = urem(RHS);
+ else
+ *this = srem(RHS);
+ return *this;
+ }
+ const APSInt &operator/=(const APSInt &RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ if (IsUnsigned)
+ *this = udiv(RHS);
+ else
+ *this = sdiv(RHS);
+ return *this;
+ }
+ APSInt operator%(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? APSInt(urem(RHS), true) : APSInt(srem(RHS), false);
+ }
+ APSInt operator/(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? APSInt(udiv(RHS), true) : APSInt(sdiv(RHS), false);
+ }
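+
+  // Illustrative sketch: identical bits divide differently depending on the
+  // carried signedness flag (values arbitrary):
+  //
+  //   APSInt U(APInt(8, 0xFE), /*isUnsigned=*/true);
+  //   APSInt S(APInt(8, 0xFE), /*isUnsigned=*/false);
+  //   U / APSInt(APInt(8, 2), true);    // 127, i.e. 254 / 2
+  //   S / APSInt(APInt(8, 2), false);   // -1, i.e. -2 / 2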
+
+ APSInt operator>>(unsigned Amt) const {
+ return IsUnsigned ? APSInt(lshr(Amt), true) : APSInt(ashr(Amt), false);
+ }
+ APSInt& operator>>=(unsigned Amt) {
+ *this = *this >> Amt;
+ return *this;
+ }
+
+ inline bool operator<(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ult(RHS) : slt(RHS);
+ }
+ inline bool operator>(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ugt(RHS) : sgt(RHS);
+ }
+ inline bool operator<=(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? ule(RHS) : sle(RHS);
+ }
+ inline bool operator>=(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return IsUnsigned ? uge(RHS) : sge(RHS);
+ }
+ inline bool operator==(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return eq(RHS);
+ }
+ inline bool operator!=(const APSInt& RHS) const {
+ return !((*this) == RHS);
+ }
+
+ bool operator==(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) == 0;
+ }
+ bool operator!=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) != 0;
+ }
+ bool operator<=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) <= 0;
+ }
+ bool operator>=(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) >= 0;
+ }
+ bool operator<(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) < 0;
+ }
+ bool operator>(int64_t RHS) const {
+ return compareValues(*this, get(RHS)) > 0;
+ }
+
+ // The remaining operators just wrap the logic of APInt, but retain the
+ // signedness information.
+
+ APSInt operator<<(unsigned Bits) const {
+ return APSInt(static_cast<const APInt&>(*this) << Bits, IsUnsigned);
+ }
+ APSInt& operator<<=(unsigned Amt) {
+ *this = *this << Amt;
+ return *this;
+ }
+
+ APSInt& operator++() {
+ ++(static_cast<APInt&>(*this));
+ return *this;
+ }
+ APSInt& operator--() {
+ --(static_cast<APInt&>(*this));
+ return *this;
+ }
+ APSInt operator++(int) {
+ return APSInt(++static_cast<APInt&>(*this), IsUnsigned);
+ }
+ APSInt operator--(int) {
+ return APSInt(--static_cast<APInt&>(*this), IsUnsigned);
+ }
+ APSInt operator-() const {
+ return APSInt(-static_cast<const APInt&>(*this), IsUnsigned);
+ }
+ APSInt& operator+=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) += RHS;
+ return *this;
+ }
+ APSInt& operator-=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) -= RHS;
+ return *this;
+ }
+ APSInt& operator*=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) *= RHS;
+ return *this;
+ }
+ APSInt& operator&=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) &= RHS;
+ return *this;
+ }
+ APSInt& operator|=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) |= RHS;
+ return *this;
+ }
+ APSInt& operator^=(const APSInt& RHS) {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ static_cast<APInt&>(*this) ^= RHS;
+ return *this;
+ }
+
+ APSInt operator&(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) & RHS, IsUnsigned);
+ }
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT And(const APSInt& RHS) const {
+ return this->operator&(RHS);
+ }
+
+ APSInt operator|(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) | RHS, IsUnsigned);
+ }
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Or(const APSInt& RHS) const {
+ return this->operator|(RHS);
+ }
+
+ APSInt operator^(const APSInt &RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) ^ RHS, IsUnsigned);
+ }
+ APSInt LLVM_ATTRIBUTE_UNUSED_RESULT Xor(const APSInt& RHS) const {
+ return this->operator^(RHS);
+ }
+
+ APSInt operator*(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) * RHS, IsUnsigned);
+ }
+ APSInt operator+(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) + RHS, IsUnsigned);
+ }
+ APSInt operator-(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return APSInt(static_cast<const APInt&>(*this) - RHS, IsUnsigned);
+ }
+ APSInt operator~() const {
+ return APSInt(~static_cast<const APInt&>(*this), IsUnsigned);
+ }
+
+ /// getMaxValue - Return the APSInt representing the maximum integer value
+ /// with the given bit width and signedness.
+ static APSInt getMaxValue(uint32_t numBits, bool Unsigned) {
+ return APSInt(Unsigned ? APInt::getMaxValue(numBits)
+ : APInt::getSignedMaxValue(numBits), Unsigned);
+ }
+
+ /// getMinValue - Return the APSInt representing the minimum integer value
+ /// with the given bit width and signedness.
+ static APSInt getMinValue(uint32_t numBits, bool Unsigned) {
+ return APSInt(Unsigned ? APInt::getMinValue(numBits)
+ : APInt::getSignedMinValue(numBits), Unsigned);
+ }
+
+ /// \brief Determine if two APSInts have the same value, zero- or
+ /// sign-extending as needed.
+ static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+ return !compareValues(I1, I2);
+ }
+
+ /// \brief Compare underlying values of two numbers.
+ static int compareValues(const APSInt &I1, const APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1 == I2 ? 0 : I1 > I2 ? 1 : -1;
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return compareValues(I1, I2.extend(I1.getBitWidth()));
+ else if (I2.getBitWidth() > I1.getBitWidth())
+ return compareValues(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Check for negative values and do an
+ // unsigned compare if both are positive.
+ if (I1.isSigned()) {
+ assert(!I2.isSigned() && "Expected signed mismatch");
+ if (I1.isNegative())
+ return -1;
+ } else {
+ assert(I2.isSigned() && "Expected signed mismatch");
+ if (I2.isNegative())
+ return 1;
+ }
+
+ return I1.eq(I2) ? 0 : I1.ugt(I2) ? 1 : -1;
+ }
+
+ static APSInt get(int64_t X) { return APSInt(APInt(64, X), false); }
+ static APSInt getUnsigned(uint64_t X) { return APSInt(APInt(64, X), true); }
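+
+  // Illustrative sketch: values compare numerically even across width and
+  // signedness mismatches (values arbitrary):
+  //
+  //   APSInt::isSameValue(APSInt::get(7), APSInt::getUnsigned(7));    // true
+  //   APSInt::compareValues(APSInt::get(-1), APSInt::getUnsigned(1)); // -1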
+
+ /// Profile - Used to insert APSInt objects, or objects that contain APSInt
+ /// objects, into FoldingSets.
+ void Profile(FoldingSetNodeID& ID) const;
+};
+
+inline bool operator==(int64_t V1, const APSInt &V2) { return V2 == V1; }
+inline bool operator!=(int64_t V1, const APSInt &V2) { return V2 != V1; }
+inline bool operator<=(int64_t V1, const APSInt &V2) { return V2 >= V1; }
+inline bool operator>=(int64_t V1, const APSInt &V2) { return V2 <= V1; }
+inline bool operator<(int64_t V1, const APSInt &V2) { return V2 > V1; }
+inline bool operator>(int64_t V1, const APSInt &V2) { return V2 < V1; }
+
+inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
+ I.print(OS, I.isSigned());
+ return OS;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/ArrayRef.h b/contrib/llvm/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000..517ba39
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,384 @@
+//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace llvm {
+
+ /// ArrayRef - Represent a constant reference to an array (0 or more elements
+ /// consecutively in memory), i.e. a start pointer and a length. It allows
+ /// various APIs to take consecutive elements easily and conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the ArrayRef. For this reason, it is not in general
+ /// safe to store an ArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class ArrayRef {
+ public:
+ typedef const T *iterator;
+ typedef const T *const_iterator;
+ typedef size_t size_type;
+
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ private:
+ /// The start of the array, in an external buffer.
+ const T *Data;
+
+ /// The number of elements.
+ size_type Length;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty ArrayRef.
+ /*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
+
+ /// Construct an empty ArrayRef from None.
+ /*implicit*/ ArrayRef(NoneType) : Data(nullptr), Length(0) {}
+
+ /// Construct an ArrayRef from a single element.
+ /*implicit*/ ArrayRef(const T &OneElt)
+ : Data(&OneElt), Length(1) {}
+
+ /// Construct an ArrayRef from a pointer and length.
+ /*implicit*/ ArrayRef(const T *data, size_t length)
+ : Data(data), Length(length) {}
+
+ /// Construct an ArrayRef from a range.
+ ArrayRef(const T *begin, const T *end)
+ : Data(begin), Length(end - begin) {}
+
+ /// Construct an ArrayRef from a SmallVector. This is templated in order to
+ /// avoid instantiating SmallVectorTemplateCommon<T> whenever we
+ /// copy-construct an ArrayRef.
+ template<typename U>
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T, U> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {
+ }
+
+ /// Construct an ArrayRef from a std::vector.
+ template<typename A>
+ /*implicit*/ ArrayRef(const std::vector<T, A> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// Construct an ArrayRef from a C array.
+ template <size_t N>
+ /*implicit*/ LLVM_CONSTEXPR ArrayRef(const T (&Arr)[N])
+ : Data(Arr), Length(N) {}
+
+ /// Construct an ArrayRef from a std::initializer_list.
+ /*implicit*/ ArrayRef(const std::initializer_list<T> &Vec)
+ : Data(Vec.begin() == Vec.end() ? (T*)nullptr : Vec.begin()),
+ Length(Vec.size()) {}
+
+ /// Construct an ArrayRef<const T*> from ArrayRef<T*>. This uses SFINAE to
+ /// ensure that only ArrayRefs of pointers can be converted.
+ template <typename U>
+ ArrayRef(const ArrayRef<U *> &A,
+ typename std::enable_if<
+ std::is_convertible<U *const *, T const *>::value>::type* = 0)
+ : Data(A.data()), Length(A.size()) {}
+
+ /// Construct an ArrayRef<const T*> from a SmallVector<T*>. This is
+ /// templated in order to avoid instantiating SmallVectorTemplateCommon<T>
+ /// whenever we copy-construct an ArrayRef.
+ template<typename U, typename DummyT>
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<U*, DummyT> &Vec,
+ typename std::enable_if<
+ std::is_convertible<U *const *,
+ T const *>::value>::type* = 0)
+ : Data(Vec.data()), Length(Vec.size()) {
+ }
+
+ /// Construct an ArrayRef<const T*> from std::vector<T*>. This uses SFINAE
+ /// to ensure that only vectors of pointers can be converted.
+ template<typename U, typename A>
+ ArrayRef(const std::vector<U *, A> &Vec,
+ typename std::enable_if<
+ std::is_convertible<U *const *, T const *>::value>::type* = 0)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// @}
+ /// @name Simple Operations
+ /// @{
+
+ iterator begin() const { return Data; }
+ iterator end() const { return Data + Length; }
+
+ reverse_iterator rbegin() const { return reverse_iterator(end()); }
+ reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+ /// empty - Check if the array is empty.
+ bool empty() const { return Length == 0; }
+
+ const T *data() const { return Data; }
+
+ /// size - Get the array size.
+ size_t size() const { return Length; }
+
+ /// front - Get the first element.
+ const T &front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last element.
+ const T &back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+    /// copy - Allocate copy in Allocator and return ArrayRef<T> to it.
+ template <typename Allocator> ArrayRef<T> copy(Allocator &A) {
+ T *Buff = A.template Allocate<T>(Length);
+ std::uninitialized_copy(begin(), end(), Buff);
+ return ArrayRef<T>(Buff, Length);
+ }
+
+ /// equals - Check for element-wise equality.
+ bool equals(ArrayRef RHS) const {
+ if (Length != RHS.Length)
+ return false;
+ return std::equal(begin(), end(), RHS.begin());
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(unsigned N) const {
+ assert(N <= size() && "Invalid specifier");
+ return ArrayRef<T>(data()+N, size()-N);
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ ArrayRef<T> slice(unsigned N, unsigned M) const {
+ assert(N+M <= size() && "Invalid specifier");
+ return ArrayRef<T>(data()+N, M);
+ }
+
+    /// \brief Drop the last \p N elements of the array.
+ ArrayRef<T> drop_back(unsigned N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return slice(0, size() - N);
+ }
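+
+    // Illustrative sketch (values arbitrary):
+    //
+    //   int Buf[] = {1, 2, 3, 4, 5};
+    //   ArrayRef<int> A(Buf);   // views all five elements
+    //   A.slice(2);             // {3, 4, 5}
+    //   A.slice(1, 3);          // {2, 3, 4}
+    //   A.drop_back();          // {1, 2, 3, 4}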
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+ const T &operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// @}
+ /// @name Expensive Operations
+ /// @{
+ std::vector<T> vec() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ /// @name Conversion operators
+ /// @{
+ operator std::vector<T>() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ };
+
+ /// MutableArrayRef - Represent a mutable reference to an array (0 or more
+ /// elements consecutively in memory), i.e. a start pointer and a length. It
+ /// allows various APIs to take and modify consecutive elements easily and
+ /// conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the MutableArrayRef. For this reason, it is not in
+ /// general safe to store a MutableArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class MutableArrayRef : public ArrayRef<T> {
+ public:
+ typedef T *iterator;
+
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ /// Construct an empty MutableArrayRef.
+ /*implicit*/ MutableArrayRef() : ArrayRef<T>() {}
+
+ /// Construct an empty MutableArrayRef from None.
+ /*implicit*/ MutableArrayRef(NoneType) : ArrayRef<T>() {}
+
+ /// Construct a MutableArrayRef from a single element.
+ /*implicit*/ MutableArrayRef(T &OneElt) : ArrayRef<T>(OneElt) {}
+
+ /// Construct a MutableArrayRef from a pointer and length.
+ /*implicit*/ MutableArrayRef(T *data, size_t length)
+ : ArrayRef<T>(data, length) {}
+
+ /// Construct a MutableArrayRef from a range.
+ MutableArrayRef(T *begin, T *end) : ArrayRef<T>(begin, end) {}
+
+ /// Construct a MutableArrayRef from a SmallVector.
+ /*implicit*/ MutableArrayRef(SmallVectorImpl<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+ /// Construct a MutableArrayRef from a std::vector.
+ /*implicit*/ MutableArrayRef(std::vector<T> &Vec)
+ : ArrayRef<T>(Vec) {}
+
+ /// Construct a MutableArrayRef from a C array.
+ template <size_t N>
+ /*implicit*/ LLVM_CONSTEXPR MutableArrayRef(T (&Arr)[N])
+ : ArrayRef<T>(Arr) {}
+
+ T *data() const { return const_cast<T*>(ArrayRef<T>::data()); }
+
+ iterator begin() const { return data(); }
+ iterator end() const { return data() + this->size(); }
+
+ reverse_iterator rbegin() const { return reverse_iterator(end()); }
+ reverse_iterator rend() const { return reverse_iterator(begin()); }
+
+ /// front - Get the first element.
+ T &front() const {
+ assert(!this->empty());
+ return data()[0];
+ }
+
+ /// back - Get the last element.
+ T &back() const {
+ assert(!this->empty());
+ return data()[this->size()-1];
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ MutableArrayRef<T> slice(unsigned N) const {
+ assert(N <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(data()+N, this->size()-N);
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ MutableArrayRef<T> slice(unsigned N, unsigned M) const {
+ assert(N+M <= this->size() && "Invalid specifier");
+ return MutableArrayRef<T>(data()+N, M);
+ }
+
+ MutableArrayRef<T> drop_back(unsigned N) const {
+ assert(this->size() >= N && "Dropping more elements than exist");
+ return slice(0, this->size() - N);
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+ T &operator[](size_t Index) const {
+ assert(Index < this->size() && "Invalid index!");
+ return data()[Index];
+ }
+ };
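+
+ // A short sketch of mutation through a MutableArrayRef (buffer and names
+ // are illustrative):
+ //   int Buf[] = {1, 2, 3};
+ //   MutableArrayRef<int> M(Buf);
+ //   M[0] = 42;              // writes through to Buf[0]
+ //   M.slice(1).front() = 7; // writes through to Buf[1]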
+
+ /// @name ArrayRef Convenience constructors
+ /// @{
+
+ /// Construct an ArrayRef from a single element.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T &OneElt) {
+ return OneElt;
+ }
+
+ /// Construct an ArrayRef from a pointer and length.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T *data, size_t length) {
+ return ArrayRef<T>(data, length);
+ }
+
+ /// Construct an ArrayRef from a range.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const T *begin, const T *end) {
+ return ArrayRef<T>(begin, end);
+ }
+
+ /// Construct an ArrayRef from a SmallVector.
+ template <typename T>
+ ArrayRef<T> makeArrayRef(const SmallVectorImpl<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a SmallVector.
+ template <typename T, unsigned N>
+ ArrayRef<T> makeArrayRef(const SmallVector<T, N> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a std::vector.
+ template<typename T>
+ ArrayRef<T> makeArrayRef(const std::vector<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from an ArrayRef (no-op) (const).
+ template <typename T> ArrayRef<T> makeArrayRef(const ArrayRef<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from an ArrayRef (no-op).
+ template <typename T> ArrayRef<T> &makeArrayRef(ArrayRef<T> &Vec) {
+ return Vec;
+ }
+
+ /// Construct an ArrayRef from a C array.
+ template<typename T, size_t N>
+ ArrayRef<T> makeArrayRef(const T (&Arr)[N]) {
+ return ArrayRef<T>(Arr);
+ }
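+
+ // Hypothetical call sites, showing how these overloads deduce the element
+ // type:
+ //   int Buf[] = {1, 2, 3};
+ //   auto A = makeArrayRef(Buf);    // ArrayRef<int> over the whole array
+ //   auto B = makeArrayRef(Buf, 2); // ArrayRef<int> over the first two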
+
+ /// @}
+ /// @name ArrayRef Comparison Operators
+ /// @{
+
+ template<typename T>
+ inline bool operator==(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ return LHS.equals(RHS);
+ }
+
+ template<typename T>
+ inline bool operator!=(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ return !(LHS == RHS);
+ }
+
+ /// @}
+
+ // ArrayRefs can be treated like POD types.
+ template <typename T> struct isPodLike;
+ template <typename T> struct isPodLike<ArrayRef<T> > {
+ static const bool value = true;
+ };
+
+ template <typename T> hash_code hash_value(ArrayRef<T> S) {
+ return hash_combine_range(S.begin(), S.end());
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/BitVector.h b/contrib/llvm/include/llvm/ADT/BitVector.h
new file mode 100644
index 0000000..ad00d51
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/BitVector.h
@@ -0,0 +1,589 @@
+//===- llvm/ADT/BitVector.h - Bit vectors -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BITVECTOR_H
+#define LLVM_ADT_BITVECTOR_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdlib>
+
+namespace llvm {
+
+class BitVector {
+ typedef unsigned long BitWord;
+
+ enum { BITWORD_SIZE = (unsigned)sizeof(BitWord) * CHAR_BIT };
+
+ static_assert(BITWORD_SIZE == 64 || BITWORD_SIZE == 32,
+ "Unsupported word size");
+
+ BitWord *Bits; // Actual bits.
+ unsigned Size; // Size of bitvector in bits.
+ unsigned Capacity; // Number of BitWords allocated in the Bits array.
+
+public:
+ typedef unsigned size_type;
+ // Encapsulation of a single bit.
+ class reference {
+ friend class BitVector;
+
+ BitWord *WordRef;
+ unsigned BitPos;
+
+ reference(); // Undefined
+
+ public:
+ reference(BitVector &b, unsigned Idx) {
+ WordRef = &b.Bits[Idx / BITWORD_SIZE];
+ BitPos = Idx % BITWORD_SIZE;
+ }
+
+ reference(const reference&) = default;
+
+ reference &operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
+ reference& operator=(bool t) {
+ if (t)
+ *WordRef |= BitWord(1) << BitPos;
+ else
+ *WordRef &= ~(BitWord(1) << BitPos);
+ return *this;
+ }
+
+ operator bool() const {
+ return ((*WordRef) & (BitWord(1) << BitPos)) ? true : false;
+ }
+ };
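+
+ // This proxy is what makes writable indexing work; a sketch with
+ // illustrative names:
+ //   BitVector BV(8);
+ //   BV[3] = true;   // operator[] yields a reference proxy that writes back
+ //   bool B = BV[3]; // the proxy converts to bool for reads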
+
+
+ /// BitVector default ctor - Creates an empty bitvector.
+ BitVector() : Size(0), Capacity(0) {
+ Bits = nullptr;
+ }
+
+ /// BitVector ctor - Creates a bitvector of the specified number of bits.
+ /// All bits are initialized to the specified value.
+ explicit BitVector(unsigned s, bool t = false) : Size(s) {
+ Capacity = NumBitWords(s);
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ init_words(Bits, Capacity, t);
+ if (t)
+ clear_unused_bits();
+ }
+
+ /// BitVector copy ctor.
+ BitVector(const BitVector &RHS) : Size(RHS.size()) {
+ if (Size == 0) {
+ Bits = nullptr;
+ Capacity = 0;
+ return;
+ }
+
+ Capacity = NumBitWords(RHS.size());
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
+ }
+
+ BitVector(BitVector &&RHS)
+ : Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
+ RHS.Bits = nullptr;
+ }
+
+ ~BitVector() {
+ std::free(Bits);
+ }
+
+ /// empty - Tests whether there are no bits in this bitvector.
+ bool empty() const { return Size == 0; }
+
+ /// size - Returns the number of bits in this bitvector.
+ size_type size() const { return Size; }
+
+ /// count - Returns the number of bits which are set.
+ size_type count() const {
+ unsigned NumBits = 0;
+ for (unsigned i = 0; i < NumBitWords(size()); ++i)
+ NumBits += countPopulation(Bits[i]);
+ return NumBits;
+ }
+
+ /// any - Returns true if any bit is set.
+ bool any() const {
+ for (unsigned i = 0; i < NumBitWords(size()); ++i)
+ if (Bits[i] != 0)
+ return true;
+ return false;
+ }
+
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ for (unsigned i = 0; i < Size / BITWORD_SIZE; ++i)
+ if (Bits[i] != ~0UL)
+ return false;
+
+ // If bits remain, check that they are ones. The unused bits are always zero.
+ if (unsigned Remainder = Size % BITWORD_SIZE)
+ return Bits[Size / BITWORD_SIZE] == (1UL << Remainder) - 1;
+
+ return true;
+ }
+
+ /// none - Returns true if none of the bits are set.
+ bool none() const {
+ return !any();
+ }
+
+ /// find_first - Returns the index of the first set bit, -1 if none
+ /// of the bits are set.
+ int find_first() const {
+ for (unsigned i = 0; i < NumBitWords(size()); ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ return -1;
+ }
+
+ /// find_next - Returns the index of the next set bit following the
+ /// "Prev" bit. Returns -1 if the next set bit is not found.
+ int find_next(unsigned Prev) const {
+ ++Prev;
+ if (Prev >= Size)
+ return -1;
+
+ unsigned WordPos = Prev / BITWORD_SIZE;
+ unsigned BitPos = Prev % BITWORD_SIZE;
+ BitWord Copy = Bits[WordPos];
+ // Mask off previous bits.
+ Copy &= ~0UL << BitPos;
+
+ if (Copy != 0)
+ return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+ // Check subsequent words.
+ for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ return -1;
+ }
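+
+ // The usual idiom for walking all set bits with the two methods above
+ // (BV is an illustrative name):
+ //   for (int i = BV.find_first(); i != -1; i = BV.find_next(i))
+ //     ...; // visit set bit i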
+
+ /// clear - Clear all bits.
+ void clear() {
+ Size = 0;
+ }
+
+ /// resize - Grow or shrink the bitvector.
+ void resize(unsigned N, bool t = false) {
+ if (N > Capacity * BITWORD_SIZE) {
+ unsigned OldCapacity = Capacity;
+ grow(N);
+ init_words(&Bits[OldCapacity], (Capacity-OldCapacity), t);
+ }
+
+ // Set any old unused bits that are now included in the BitVector. This
+ // may set bits that are not included in the new vector, but we will clear
+ // them back out below.
+ if (N > Size)
+ set_unused_bits(t);
+
+ // Update the size, and clear out any bits that are now unused.
+ unsigned OldSize = Size;
+ Size = N;
+ if (t || N < OldSize)
+ clear_unused_bits();
+ }
+
+ void reserve(unsigned N) {
+ if (N > Capacity * BITWORD_SIZE)
+ grow(N);
+ }
+
+ // Set, reset, flip
+ BitVector &set() {
+ init_words(Bits, Capacity, true);
+ clear_unused_bits();
+ return *this;
+ }
+
+ BitVector &set(unsigned Idx) {
+ assert(Bits && "Bits never allocated");
+ Bits[Idx / BITWORD_SIZE] |= BitWord(1) << (Idx % BITWORD_SIZE);
+ return *this;
+ }
+
+ /// set - Efficiently set a range of bits in [I, E)
+ BitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = 1UL << (E % BITWORD_SIZE);
+ BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] |= Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] |= PrefixMask;
+ I = RoundUpToAlignment(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = ~0UL;
+
+ BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+ if (I < E)
+ Bits[I / BITWORD_SIZE] |= PostfixMask;
+
+ return *this;
+ }
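+
+ // A worked example of the decomposition above, assuming 64-bit BitWords:
+ // set(30, 70) ORs a prefix mask covering bits 30-63 into word 0, finds no
+ // full middle words to fill, and ORs a postfix mask covering bits 0-5 of
+ // word 1, i.e. overall bits 64-69.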
+
+ BitVector &reset() {
+ init_words(Bits, Capacity, false);
+ return *this;
+ }
+
+ BitVector &reset(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] &= ~(BitWord(1) << (Idx % BITWORD_SIZE));
+ return *this;
+ }
+
+ /// reset - Efficiently reset a range of bits in [I, E)
+ BitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+
+ if (I == E) return *this;
+
+ if (I / BITWORD_SIZE == E / BITWORD_SIZE) {
+ BitWord EMask = 1UL << (E % BITWORD_SIZE);
+ BitWord IMask = 1UL << (I % BITWORD_SIZE);
+ BitWord Mask = EMask - IMask;
+ Bits[I / BITWORD_SIZE] &= ~Mask;
+ return *this;
+ }
+
+ BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE);
+ Bits[I / BITWORD_SIZE] &= ~PrefixMask;
+ I = RoundUpToAlignment(I, BITWORD_SIZE);
+
+ for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE)
+ Bits[I / BITWORD_SIZE] = 0UL;
+
+ BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1;
+ if (I < E)
+ Bits[I / BITWORD_SIZE] &= ~PostfixMask;
+
+ return *this;
+ }
+
+ BitVector &flip() {
+ for (unsigned i = 0; i < NumBitWords(size()); ++i)
+ Bits[i] = ~Bits[i];
+ clear_unused_bits();
+ return *this;
+ }
+
+ BitVector &flip(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] ^= BitWord(1) << (Idx % BITWORD_SIZE);
+ return *this;
+ }
+
+ // Indexing.
+ reference operator[](unsigned Idx) {
+ assert (Idx < Size && "Out-of-bounds Bit access.");
+ return reference(*this, Idx);
+ }
+
+ bool operator[](unsigned Idx) const {
+ assert (Idx < Size && "Out-of-bounds Bit access.");
+ BitWord Mask = BitWord(1) << (Idx % BITWORD_SIZE);
+ return (Bits[Idx / BITWORD_SIZE] & Mask) != 0;
+ }
+
+ bool test(unsigned Idx) const {
+ return (*this)[Idx];
+ }
+
+ /// Test if any common bits are set.
+ bool anyCommon(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+ if (Bits[i] & RHS.Bits[i])
+ return true;
+ return false;
+ }
+
+ // Comparison operators.
+ bool operator==(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ if (Bits[i] != RHS.Bits[i])
+ return false;
+
+ // Verify that any extra words are all zeros.
+ if (i != ThisWords) {
+ for (; i != ThisWords; ++i)
+ if (Bits[i])
+ return false;
+ } else if (i != RHSWords) {
+ for (; i != RHSWords; ++i)
+ if (RHS.Bits[i])
+ return false;
+ }
+ return true;
+ }
+
+ bool operator!=(const BitVector &RHS) const {
+ return !(*this == RHS);
+ }
+
+ /// Intersection, union, disjoint union.
+ BitVector &operator&=(const BitVector &RHS) {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ Bits[i] &= RHS.Bits[i];
+
+ // Any bits that are just in this bitvector become zero, because they aren't
+ // in the RHS bit vector. Any words only in RHS are ignored because they
+ // are already zero in the LHS.
+ for (; i != ThisWords; ++i)
+ Bits[i] = 0;
+
+ return *this;
+ }
+
+ /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+ BitVector &reset(const BitVector &RHS) {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ Bits[i] &= ~RHS.Bits[i];
+ return *this;
+ }
+
+ /// test - Check whether (This - RHS) is non-zero, i.e. whether this vector
+ /// has any bits set that are not set in RHS. Conceptually the same as
+ /// reset(RHS) followed by any().
+ bool test(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ unsigned i;
+ for (i = 0; i != std::min(ThisWords, RHSWords); ++i)
+ if ((Bits[i] & ~RHS.Bits[i]) != 0)
+ return true;
+
+ for (; i != ThisWords ; ++i)
+ if (Bits[i] != 0)
+ return true;
+
+ return false;
+ }
+
+ BitVector &operator|=(const BitVector &RHS) {
+ if (size() < RHS.size())
+ resize(RHS.size());
+ for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+ Bits[i] |= RHS.Bits[i];
+ return *this;
+ }
+
+ BitVector &operator^=(const BitVector &RHS) {
+ if (size() < RHS.size())
+ resize(RHS.size());
+ for (size_t i = 0, e = NumBitWords(RHS.size()); i != e; ++i)
+ Bits[i] ^= RHS.Bits[i];
+ return *this;
+ }
+
+ // Assignment operator.
+ const BitVector &operator=(const BitVector &RHS) {
+ if (this == &RHS) return *this;
+
+ Size = RHS.size();
+ unsigned RHSWords = NumBitWords(Size);
+ if (Size <= Capacity * BITWORD_SIZE) {
+ if (Size)
+ std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
+ clear_unused_bits();
+ return *this;
+ }
+
+ // Grow the bitvector to have enough elements.
+ Capacity = RHSWords;
+ assert(Capacity > 0 && "negative capacity?");
+ BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
+
+ // Destroy the old bits.
+ std::free(Bits);
+ Bits = NewBits;
+
+ return *this;
+ }
+
+ const BitVector &operator=(BitVector &&RHS) {
+ if (this == &RHS) return *this;
+
+ std::free(Bits);
+ Bits = RHS.Bits;
+ Size = RHS.Size;
+ Capacity = RHS.Capacity;
+
+ RHS.Bits = nullptr;
+
+ return *this;
+ }
+
+ void swap(BitVector &RHS) {
+ std::swap(Bits, RHS.Bits);
+ std::swap(Size, RHS.Size);
+ std::swap(Capacity, RHS.Capacity);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Portable bit mask operations.
+ //===--------------------------------------------------------------------===//
+ //
+ // These methods all operate on arrays of uint32_t, each holding 32 bits. The
+ // fixed word size makes it easier to work with literal bit vector constants
+ // in portable code.
+ //
+ // The LSB in each word is the lowest numbered bit. The size of a portable
+ // bit mask is always a whole multiple of 32 bits. If no bit mask size is
+ // given, the bit mask is assumed to cover the entire BitVector.
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, false>(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, false>(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<true, true>(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ applyMask<false, true>(Mask, MaskWords);
+ }
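+
+ // Illustrative use of the portable mask operations (the mask value is an
+ // example only):
+ //   BitVector BV(32);
+ //   static const uint32_t Mask[] = { 0x0000FFFF };
+ //   BV.setBitsInMask(Mask); // sets bits [0, 16); never resizes BV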
+
+private:
+ unsigned NumBitWords(unsigned S) const {
+ return (S + BITWORD_SIZE-1) / BITWORD_SIZE;
+ }
+
+ // Set the unused bits in the high words.
+ void set_unused_bits(bool t = true) {
+ // Set high words first.
+ unsigned UsedWords = NumBitWords(Size);
+ if (Capacity > UsedWords)
+ init_words(&Bits[UsedWords], (Capacity-UsedWords), t);
+
+ // Then set any stray high bits of the last used word.
+ unsigned ExtraBits = Size % BITWORD_SIZE;
+ if (ExtraBits) {
+ BitWord ExtraBitMask = ~0UL << ExtraBits;
+ if (t)
+ Bits[UsedWords-1] |= ExtraBitMask;
+ else
+ Bits[UsedWords-1] &= ~ExtraBitMask;
+ }
+ }
+
+ // Clear the unused bits in the high words.
+ void clear_unused_bits() {
+ set_unused_bits(false);
+ }
+
+ void grow(unsigned NewSize) {
+ Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
+ assert(Capacity > 0 && "realloc-ing zero space");
+ Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
+
+ clear_unused_bits();
+ }
+
+ void init_words(BitWord *B, unsigned NumWords, bool t) {
+ memset(B, 0 - (int)t, NumWords*sizeof(BitWord));
+ }
+
+ template<bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ static_assert(BITWORD_SIZE % 32 == 0, "Unsupported BitWord size.");
+ MaskWords = std::min(MaskWords, (size() + 31) / 32);
+ const unsigned Scale = BITWORD_SIZE / 32;
+ unsigned i;
+ for (i = 0; MaskWords >= Scale; ++i, MaskWords -= Scale) {
+ BitWord BW = Bits[i];
+ // This inner loop should unroll completely when BITWORD_SIZE > 32.
+ for (unsigned b = 0; b != BITWORD_SIZE; b += 32) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) BW |= BitWord(M) << b;
+ else BW &= ~(BitWord(M) << b);
+ }
+ Bits[i] = BW;
+ }
+ for (unsigned b = 0; MaskWords; b += 32, --MaskWords) {
+ uint32_t M = *Mask++;
+ if (InvertMask) M = ~M;
+ if (AddBits) Bits[i] |= BitWord(M) << b;
+ else Bits[i] &= ~(BitWord(M) << b);
+ }
+ if (AddBits)
+ clear_unused_bits();
+ }
+
+public:
+ /// Return the size (in bytes) of the bit vector.
+ size_t getMemorySize() const { return Capacity * sizeof(BitWord); }
+};
+
+static inline size_t capacity_in_bytes(const BitVector &X) {
+ return X.getMemorySize();
+}
+
+} // End llvm namespace
+
+namespace std {
+ /// Implement std::swap in terms of BitVector swap.
+ inline void
+ swap(llvm::BitVector &LHS, llvm::BitVector &RHS) {
+ LHS.swap(RHS);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
new file mode 100644
index 0000000..3dd862c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -0,0 +1,77 @@
+//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DAGDELTAALGORITHM_H
+#define LLVM_ADT_DAGDELTAALGORITHM_H
+
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+/// DAGDeltaAlgorithm - Implements a "delta debugging" algorithm for minimizing
+/// directed acyclic graphs using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element not required by the dependencies on the other
+/// elements would falsify the predicate.
+///
+/// The DAG should be used to represent dependencies in the changes which are
+/// likely to hold across the predicate function. That is, for a particular
+/// changeset S and predicate P:
+///
+/// P(S) => P(S union pred(S))
+///
+/// The minimization algorithm uses this dependency information to attempt to
+/// eagerly prune large subsets of changes. As with \see DeltaAlgorithm, the DAG
+/// is not required to satisfy this property, but the algorithm will run
+/// substantially fewer tests with appropriate dependencies. \see DeltaAlgorithm
+/// for more information on the properties which the predicate function itself
+/// should satisfy.
+class DAGDeltaAlgorithm {
+ virtual void anchor();
+public:
+ typedef unsigned change_ty;
+ typedef std::pair<change_ty, change_ty> edge_ty;
+
+ // FIXME: Use a decent data structure.
+ typedef std::set<change_ty> changeset_ty;
+ typedef std::vector<changeset_ty> changesetlist_ty;
+
+public:
+ virtual ~DAGDeltaAlgorithm() {}
+
+ /// Run - Minimize the DAG formed by the \p Changes vertices and the
+ /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of
+ /// changes and returning the smallest set which still satisfies the test
+ /// predicate and the input \p Dependencies.
+ ///
+ /// \param Changes The list of changes.
+ ///
+ /// \param Dependencies The list of dependencies amongst changes. For each
+ /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The
+/// minimization algorithm guarantees that for each tested change set S,
+ /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic
+ /// dependencies.
+ changeset_ty Run(const changeset_ty &Changes,
+ const std::vector<edge_ty> &Dependencies);
+
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets,
+ const changeset_ty &Required) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+};
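+
+// A minimal client sketch; the predicate name is hypothetical:
+//   struct MyDAGDelta : DAGDeltaAlgorithm {
+//     bool ExecuteOneTest(const changeset_ty &S) override {
+//       return StillExhibitsBug(S); // user-supplied test
+//     }
+//   };
+// MyDAGDelta().Run(Changes, Dependencies) then yields a minimized change
+// set that respects the dependency edges.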
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h b/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h
new file mode 100644
index 0000000..a26f37d
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DeltaAlgorithm.h
@@ -0,0 +1,93 @@
+//===--- DeltaAlgorithm.h - A Set Minimization Algorithm -------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DELTAALGORITHM_H
+#define LLVM_ADT_DELTAALGORITHM_H
+
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+/// DeltaAlgorithm - Implements the delta debugging algorithm (A. Zeller '99)
+/// for minimizing arbitrary sets using a predicate function.
+///
+/// The result of the algorithm is a subset of the input change set which is
+/// guaranteed to satisfy the predicate, assuming that the input set did. For
+/// well formed predicates, the result set is guaranteed to be such that
+/// removing any single element would falsify the predicate.
+///
+/// For best results the predicate function *should* (but need not) satisfy
+/// certain properties, in particular:
+/// (1) The predicate should return false on an empty set and true on the full
+/// set.
+/// (2) If the predicate returns true for a set of changes, it should return
+/// true for all supersets of that set.
+///
+/// It is not an error to provide a predicate that does not satisfy these
+/// requirements, and the algorithm will generally produce reasonable
+/// results. However, it may run substantially more tests than with a good
+/// predicate.
+class DeltaAlgorithm {
+public:
+ typedef unsigned change_ty;
+ // FIXME: Use a decent data structure.
+ typedef std::set<change_ty> changeset_ty;
+ typedef std::vector<changeset_ty> changesetlist_ty;
+
+private:
+ /// Cache of failed test results. Successful test results are never cached
+ /// since we always reduce following a success.
+ std::set<changeset_ty> FailedTestsCache;
+
+ /// GetTestResult - Get the test result for the \p Changes from the
+ /// cache, executing the test if necessary.
+ ///
+ /// \param Changes - The change set to test.
+ /// \return - The test result.
+ bool GetTestResult(const changeset_ty &Changes);
+
+ /// Split - Partition a set of changes \p S into one or two subsets.
+ void Split(const changeset_ty &S, changesetlist_ty &Res);
+
+ /// Delta - Minimize a set of \p Changes which has been partitioned into
+ /// smaller sets, by attempting to remove individual subsets.
+ changeset_ty Delta(const changeset_ty &Changes,
+ const changesetlist_ty &Sets);
+
+ /// Search - Search for a subset (or subsets) in \p Sets which can be
+ /// removed from \p Changes while still satisfying the predicate.
+ ///
+ /// \param Res - On success, a subset of Changes which satisfies the
+ /// predicate.
+ /// \return - True on success.
+ bool Search(const changeset_ty &Changes, const changesetlist_ty &Sets,
+ changeset_ty &Res);
+
+protected:
+ /// UpdatedSearchState - Callback used when the search state changes.
+ virtual void UpdatedSearchState(const changeset_ty &Changes,
+ const changesetlist_ty &Sets) {}
+
+ /// ExecuteOneTest - Execute a single test predicate on the change set \p S.
+ virtual bool ExecuteOneTest(const changeset_ty &S) = 0;
+
+ DeltaAlgorithm& operator=(const DeltaAlgorithm&) = default;
+
+public:
+ virtual ~DeltaAlgorithm();
+
+ /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on
+ /// subsets of changes and returning the smallest set which still satisfies
+ /// the test predicate.
+ changeset_ty Run(const changeset_ty &Changes);
+};
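+
+// A client sketch (all names hypothetical): a crash reducer whose "still
+// crashes" predicate typically satisfies properties (1) and (2) above.
+//   struct CrashMinimizer : DeltaAlgorithm {
+//     bool ExecuteOneTest(const changeset_ty &S) override {
+//       return InputStillCrashes(S); // user-supplied test
+//     }
+//   };
+// CrashMinimizer().Run(AllChanges) returns a locally minimal failing set.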
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DenseMap.h b/contrib/llvm/include/llvm/ADT/DenseMap.h
new file mode 100644
index 0000000..6ee1960
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DenseMap.h
@@ -0,0 +1,1074 @@
+//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAP_H
+#define LLVM_ADT_DENSEMAP_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/EpochTracker.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+namespace detail {
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair : public std::pair<KeyT, ValueT> {
+ KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
+ const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
+ ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
+ const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
+};
+}
+
+template <
+ typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
+class DenseMapIterator;
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+class DenseMapBase : public DebugEpochBase {
+public:
+ typedef unsigned size_type;
+ typedef KeyT key_type;
+ typedef ValueT mapped_type;
+ typedef BucketT value_type;
+
+ typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
+ typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
+ const_iterator;
+ inline iterator begin() {
+ // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
+ return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
+ }
+ inline iterator end() {
+ return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+ }
+ inline const_iterator begin() const {
+ return empty() ? end()
+ : const_iterator(getBuckets(), getBucketsEnd(), *this);
+ }
+ inline const_iterator end() const {
+ return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
+ }
+
+ bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
+ return getNumEntries() == 0;
+ }
+ unsigned size() const { return getNumEntries(); }
+
+ /// Grow the DenseMap so that it has at least Size buckets. Does not shrink.
+ void resize(size_type Size) {
+ incrementEpoch();
+ if (Size > getNumBuckets())
+ grow(Size);
+ }
+
+ void clear() {
+ incrementEpoch();
+ if (getNumEntries() == 0 && getNumTombstones() == 0) return;
+
+ // If the capacity of the array is huge, and the # elements used is small,
+ // shrink the array.
+ if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
+ shrink_and_clear();
+ return;
+ }
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ unsigned NumEntries = getNumEntries();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
+ }
+ }
+ assert(NumEntries == 0 && "Node count imbalance!");
+ setNumEntries(0);
+ setNumTombstones(0);
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const KeyT &Val) const {
+ const BucketT *TheBucket;
+ return LookupBucketFor(Val, TheBucket) ? 1 : 0;
+ }
+
+ iterator find(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return iterator(TheBucket, getBucketsEnd(), *this, true);
+ return end();
+ }
+ const_iterator find(const KeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return const_iterator(TheBucket, getBucketsEnd(), *this, true);
+ return end();
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template<class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return iterator(TheBucket, getBucketsEnd(), *this, true);
+ return end();
+ }
+ template<class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return const_iterator(TheBucket, getBucketsEnd(), *this, true);
+ return end();
+ }
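+
+ // For instance (types hypothetical): if the key is an owning string type
+ // whose DenseMapInfo also defines getHashValue and isEqual overloads for
+ // a borrowed string view, find_as(View) can probe the map without
+ // materializing an owning key for the query.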
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Val) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return TheBucket->getSecond();
+ return ValueT();
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(KV.first, TheBucket))
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
+ true);
+ }
+
+ // Inserts a key/value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(KV.first, TheBucket))
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
+ false); // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(std::move(KV.first),
+ std::move(KV.second),
+ TheBucket);
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
+ true);
+ }
+
+ /// insert - Range insertion of pairs.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+
+ bool erase(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (!LookupBucketFor(Val, TheBucket))
+ return false; // not in map.
+
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ return true;
+ }
+ void erase(iterator I) {
+ BucketT *TheBucket = &*I;
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ }
+
+ value_type& FindAndConstruct(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(Key, ValueT(), TheBucket);
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ return FindAndConstruct(Key).second;
+ }
+
+ value_type& FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
+ }
+
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(std::move(Key)).second;
+ }
+
+ /// isPointerIntoBucketsArray - Return true if the specified pointer points
+ /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
+ /// value in the DenseMap).
+ bool isPointerIntoBucketsArray(const void *Ptr) const {
+ return Ptr >= getBuckets() && Ptr < getBucketsEnd();
+ }
+
+ /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+ /// array. In conjunction with the previous method, this can be used to
+ /// determine whether an insertion caused the DenseMap to reallocate.
+ const void *getPointerIntoBucketsArray() const { return getBuckets(); }
+
+protected:
+ DenseMapBase() = default;
+
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
+ }
+ }
+
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ "# initial buckets must be a power of two!");
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ (void)FoundVal; // silence warning.
+ assert(!FoundVal && "Key already in new map?");
+ DestBucket->getFirst() = std::move(B->getFirst());
+ ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
+ incrementNumEntries();
+
+ // Free the value.
+ B->getSecond().~ValueT();
+ }
+ B->getFirst().~KeyT();
+ }
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+ assert(&other != this);
+ assert(getNumBuckets() == other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
+ memcpy(getBuckets(), other.getBuckets(),
+ getNumBuckets() * sizeof(BucketT));
+ else
+ for (size_t i = 0; i < getNumBuckets(); ++i) {
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
+ }
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ static const KeyT getEmptyKey() {
+ return KeyInfoT::getEmptyKey();
+ }
+ static const KeyT getTombstoneKey() {
+ return KeyInfoT::getTombstoneKey();
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+ void incrementNumEntries() {
+ setNumEntries(getNumEntries() + 1);
+ }
+ void decrementNumEntries() {
+ setNumEntries(getNumEntries() - 1);
+ }
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+ void incrementNumTombstones() {
+ setNumTombstones(getNumTombstones() + 1);
+ }
+ void decrementNumTombstones() {
+ setNumTombstones(getNumTombstones() - 1);
+ }
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+ BucketT *getBuckets() {
+ return static_cast<DerivedT *>(this)->getBuckets();
+ }
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+ BucketT *getBucketsEnd() {
+ return getBuckets() + getNumBuckets();
+ }
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) {
+ static_cast<DerivedT *>(this)->grow(AtLeast);
+ }
+
+ void shrink_and_clear() {
+ static_cast<DerivedT *>(this)->shrink_and_clear();
+ }
+
+
+ BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
+ BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->getFirst() = Key;
+ ::new (&TheBucket->getSecond()) ValueT(Value);
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
+ BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->getFirst() = Key;
+ ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->getFirst() = std::move(Key);
+ ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
+ incrementEpoch();
+
+ // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+ // the buckets are empty (meaning that many are filled with tombstones),
+ // grow the table.
+ //
+ // The latter case is tricky. For example, if we had one empty bucket with
+ // tons of tombstones, failing lookups (e.g. for insertion) would have to
+ // probe almost the entire table until they found the empty bucket. If the
+ // table were completely filled with tombstones, no lookup would ever
+ // succeed, causing infinite loops in lookup.
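+ //
+ // As a concrete example, with 64 buckets the first branch grows the table
+ // when the 48th entry is inserted (48*4 >= 64*3, i.e. 3/4 load), and the
+ // second grows it once 8 or fewer buckets (64/8) would remain truly empty.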
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+ this->grow(NumBuckets * 2);
+ LookupBucketFor(Key, TheBucket);
+ NumBuckets = getNumBuckets();
+ } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
+ NumBuckets/8)) {
+ this->grow(NumBuckets);
+ LookupBucketFor(Key, TheBucket);
+ }
+ assert(TheBucket);
+
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have a self-consistent entry count.
+ incrementNumEntries();
+
+ // If we are writing over a tombstone, remember this.
+ const KeyT EmptyKey = getEmptyKey();
+ if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+ decrementNumTombstones();
+
+ return TheBucket;
+ }
+
+ /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+ /// FoundBucket. If the bucket contains the key and a value, this returns
+ /// true; otherwise it returns a bucket with an empty marker or tombstone
+ /// and returns false.
+ template<typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
+
+ if (NumBuckets == 0) {
+ FoundBucket = nullptr;
+ return false;
+ }
+
+ // FoundTombstone - Keep track of whether we find a tombstone while probing.
+ const BucketT *FoundTombstone = nullptr;
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
+ !KeyInfoT::isEqual(Val, TombstoneKey) &&
+ "Empty/Tombstone value shouldn't be inserted into map!");
+
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+ unsigned ProbeAmt = 1;
+ while (1) {
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
+ // Found Val's bucket? If so, return it.
+ if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+ FoundBucket = ThisBucket;
+ return true;
+ }
+
+ // If we found an empty bucket, the key doesn't exist in the map. Return
+ // the bucket so the caller can insert into it if desired.
+ if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+ // If we've already seen a tombstone while probing, fill it in instead
+ // of the empty bucket we eventually probed to.
+ FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+ return false;
+ }
+
+ // If this is a tombstone, remember it. If Val ends up not in the map, we
+ // prefer to return it rather than something that would require more probing.
+ if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+ !FoundTombstone)
+ FoundTombstone = ThisBucket; // Remember the first tombstone found.
+
+ // Otherwise, it's a hash collision or a tombstone, continue quadratic
+ // probing.
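+ // Since ProbeAmt increases by one each step, the offsets from the home
+ // bucket follow the triangular numbers 1, 3, 6, 10, ...; with a
+ // power-of-two bucket count this sequence eventually visits every bucket.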
+ BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets-1);
+ }
+ }
+
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)
+ ->LookupBucketFor(Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
+
+public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+ /// are not included.
+ size_t getMemorySize() const {
+ return getNumBuckets() * sizeof(BucketT);
+ }
+};
+
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+ KeyT, ValueT, KeyInfoT, BucketT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ BucketT *Buckets;
+ unsigned NumEntries;
+ unsigned NumTombstones;
+ unsigned NumBuckets;
+
+public:
+ explicit DenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ DenseMap(const DenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ DenseMap(DenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ template<typename InputIt>
+ DenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ operator delete(Buckets);
+ }
+
+ void swap(DenseMap& RHS) {
+ this->incrementEpoch();
+ RHS.incrementEpoch();
+ std::swap(Buckets, RHS.Buckets);
+ std::swap(NumEntries, RHS.NumEntries);
+ std::swap(NumTombstones, RHS.NumTombstones);
+ std::swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap& operator=(const DenseMap& other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ DenseMap& operator=(DenseMap &&other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const DenseMap& other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void init(unsigned InitBuckets) {
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void grow(unsigned AtLeast) {
+ unsigned OldNumBuckets = NumBuckets;
+ BucketT *OldBuckets = Buckets;
+
+ allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
+
+ // Free the old table.
+ operator delete(OldBuckets);
+ }
+
+ void shrink_and_clear() {
+ unsigned OldNumEntries = NumEntries;
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldNumEntries)
+ NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+ if (NewNumBuckets == NumBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ operator delete(Buckets);
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ BucketT *getBuckets() const {
+ return Buckets;
+ }
+
+ unsigned getNumBuckets() const {
+ return NumBuckets;
+ }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = nullptr;
+ return false;
+ }
+
+ Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+ return true;
+ }
+};
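+
+// Basic usage sketch (names and values illustrative). Keys equal to
+// DenseMapInfo's empty or tombstone keys must never be inserted:
+//   DenseMap<unsigned, unsigned> M;
+//   M[1] = 10;                // default-constructs the value, then assigns
+//   if (M.count(1)) { /* ... */ }
+//   unsigned V = M.lookup(2); // 0 (a value-initialized unsigned) if absent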
+
+template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class SmallDenseMap
+ : public DenseMapBase<
+ SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
+ ValueT, KeyInfoT, BucketT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ unsigned Small : 1;
+ unsigned NumEntries : 31;
+ unsigned NumTombstones;
+
+ struct LargeRep {
+ BucketT *Buckets;
+ unsigned NumBuckets;
+ };
+
+ /// A "union" of an inline bucket array and the struct representing
+ /// a large bucket. This union will be discriminated by the 'Small' bit.
+ AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
+
+public:
+ explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ SmallDenseMap(const SmallDenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ SmallDenseMap(SmallDenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ template<typename InputIt>
+ SmallDenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~SmallDenseMap() {
+ this->destroyAll();
+ deallocateBuckets();
+ }
+
+ void swap(SmallDenseMap& RHS) {
+ unsigned TmpNumEntries = RHS.NumEntries;
+ RHS.NumEntries = NumEntries;
+ NumEntries = TmpNumEntries;
+ std::swap(NumTombstones, RHS.NumTombstones);
+
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ if (Small && RHS.Small) {
+ // If we're swapping inline bucket arrays, we have to cope with some of
+ // the tricky bits of DenseMap's storage system: the buckets are not
+ // fully initialized. Thus we swap every key, but we may have
+ // a one-directional move of the value.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *LHSB = &getInlineBuckets()[i],
+ *RHSB = &RHS.getInlineBuckets()[i];
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
+ if (hasLHSValue && hasRHSValue) {
+ // Swap together if we can...
+ std::swap(*LHSB, *RHSB);
+ continue;
+ }
+ // Swap separately and handle any asymmetry.
+ std::swap(LHSB->getFirst(), RHSB->getFirst());
+ if (hasLHSValue) {
+ ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
+ LHSB->getSecond().~ValueT();
+ } else if (hasRHSValue) {
+ ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
+ RHSB->getSecond().~ValueT();
+ }
+ }
+ return;
+ }
+ if (!Small && !RHS.Small) {
+ std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+ std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+ return;
+ }
+
+ SmallDenseMap &SmallSide = Small ? *this : RHS;
+ SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+ // First stash the large side's rep and move the small side across.
+ LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
+ LargeSide.getLargeRep()->~LargeRep();
+ LargeSide.Small = true;
+ // This is similar to the standard move-from-old-buckets, but the bucket
+ // count hasn't actually rotated in this case. So we have to carefully
+ // move construct the keys and values into their new locations, but there
+ // is no need to re-hash things.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+ *OldB = &SmallSide.getInlineBuckets()[i];
+ ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
+ OldB->getFirst().~KeyT();
+ if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
+ ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
+ OldB->getSecond().~ValueT();
+ }
+ }
+
+ // The hard part of moving the small buckets across is done, just move
+ // the TmpRep into its new home.
+ SmallSide.Small = false;
+ new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
+ }
+
+ SmallDenseMap& operator=(const SmallDenseMap& other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ SmallDenseMap& operator=(SmallDenseMap &&other) {
+ this->destroyAll();
+ deallocateBuckets();
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const SmallDenseMap& other) {
+ this->destroyAll();
+ deallocateBuckets();
+ Small = true;
+ if (other.getNumBuckets() > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
+ }
+ this->BaseT::copyFrom(other);
+ }
+
+ void init(unsigned InitBuckets) {
+ Small = true;
+ if (InitBuckets > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+ }
+ this->BaseT::initEmpty();
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast >= InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
+
+ if (Small) {
+ if (AtLeast < InlineBuckets)
+ return; // Nothing to do.
+
+ // First move the inline buckets into a temporary storage.
+ AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstone entries into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
+ ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
+ ++TmpEnd;
+ P->getSecond().~ValueT();
+ }
+ P->getFirst().~KeyT();
+ }
+
+ // Now make this map use the large rep, and move all the entries back
+ // into it.
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ return;
+ }
+
+ LargeRep OldRep = std::move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ if (AtLeast <= InlineBuckets) {
+ Small = true;
+ } else {
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+
+ this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
+ // Free the old table.
+ operator delete(OldRep.Buckets);
+ }
+
+ void shrink_and_clear() {
+ unsigned OldSize = this->size();
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldSize) {
+ NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+ if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+ NewNumBuckets = 64;
+ }
+ if ((Small && NewNumBuckets <= InlineBuckets) ||
+ (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ deallocateBuckets();
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ const BucketT *getInlineBuckets() const {
+ assert(Small);
+ // Note that this cast does not violate aliasing rules as we assert that
+ // the memory's dynamic type is the small, inline bucket buffer, and the
+ // 'storage.buffer' static type is 'char *'.
+ return reinterpret_cast<const BucketT *>(storage.buffer);
+ }
+ BucketT *getInlineBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+ }
+ const LargeRep *getLargeRep() const {
+ assert(!Small);
+ // Note, same rule about aliasing as with getInlineBuckets.
+ return reinterpret_cast<const LargeRep *>(storage.buffer);
+ }
+ LargeRep *getLargeRep() {
+ return const_cast<LargeRep *>(
+ const_cast<const SmallDenseMap *>(this)->getLargeRep());
+ }
+
+ const BucketT *getBuckets() const {
+ return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+ }
+ BucketT *getBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getBuckets());
+ }
+ unsigned getNumBuckets() const {
+ return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+ }
+
+ void deallocateBuckets() {
+ if (Small)
+ return;
+
+ operator delete(getLargeRep()->Buckets);
+ getLargeRep()->~LargeRep();
+ }
+
+ LargeRep allocateBuckets(unsigned Num) {
+ assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ LargeRep Rep = {
+ static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+ };
+ return Rep;
+ }
+};
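// A hedged, standalone usage sketch for SmallDenseMap (hypothetical client
// code, not part of this header).  Entries begin in the inline bucket array;
// once the load-factor threshold is crossed, grow() moves them into a
// heap-allocated LargeRep, and shrink_and_clear() can later drop that
// allocation again.
#include "llvm/ADT/DenseMap.h"

void smallDenseMapSketch() {
  llvm::SmallDenseMap<unsigned, const char *, 4> Names; // 4 inline buckets
  Names[1] = "one";                  // served from the inline storage
  for (unsigned I = 2; I < 32; ++I)
    Names[I] = "more";               // growth spills to the heap rep
  Names.shrink_and_clear();          // frees the large allocation
}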
+
+template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
+ bool IsConst>
+class DenseMapIterator : DebugEpochBase::HandleBase {
+ typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+ friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
+
+public:
+ typedef ptrdiff_t difference_type;
+ typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
+ value_type;
+ typedef value_type *pointer;
+ typedef value_type &reference;
+ typedef std::forward_iterator_tag iterator_category;
+private:
+ pointer Ptr, End;
+public:
+ DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
+
+ DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
+ bool NoAdvance = false)
+ : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
+ assert(isHandleInSync() && "invalid construction!");
+ if (!NoAdvance) AdvancePastEmptyBuckets();
+ }
+
+ // Converting ctor from non-const iterators to const iterators. SFINAE'd out
+ // for const iterator destinations so it doesn't end up as a user defined copy
+ // constructor.
+ template <bool IsConstSrc,
+ typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
+ DenseMapIterator(
+ const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
+ : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
+
+ reference operator*() const {
+ assert(isHandleInSync() && "invalid iterator access!");
+ return *Ptr;
+ }
+ pointer operator->() const {
+ assert(isHandleInSync() && "invalid iterator access!");
+ return Ptr;
+ }
+
+ bool operator==(const ConstIterator &RHS) const {
+ assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+ assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+ assert(getEpochAddress() == RHS.getEpochAddress() &&
+ "comparing incomparable iterators!");
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const ConstIterator &RHS) const {
+ assert((!Ptr || isHandleInSync()) && "handle not in sync!");
+ assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
+ assert(getEpochAddress() == RHS.getEpochAddress() &&
+ "comparing incomparable iterators!");
+ return Ptr != RHS.Ptr;
+ }
+
+ inline DenseMapIterator& operator++() { // Preincrement
+ assert(isHandleInSync() && "invalid iterator access!");
+ ++Ptr;
+ AdvancePastEmptyBuckets();
+ return *this;
+ }
+ DenseMapIterator operator++(int) { // Postincrement
+ assert(isHandleInSync() && "invalid iterator access!");
+ DenseMapIterator tmp = *this; ++*this; return tmp;
+ }
+
+private:
+ void AdvancePastEmptyBuckets() {
+ const KeyT Empty = KeyInfoT::getEmptyKey();
+ const KeyT Tombstone = KeyInfoT::getTombstoneKey();
+
+ while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
+ KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
+ ++Ptr;
+ }
+};
+
+template<typename KeyT, typename ValueT, typename KeyInfoT>
+static inline size_t
+capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
+ return X.getMemorySize();
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DenseMapInfo.h b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
new file mode 100644
index 0000000..a844ebc
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DenseMapInfo.h
@@ -0,0 +1,221 @@
+//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DenseMapInfo traits for DenseMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSEMAPINFO_H
+#define LLVM_ADT_DENSEMAPINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/type_traits.h"
+
+namespace llvm {
+
+template<typename T>
+struct DenseMapInfo {
+ //static inline T getEmptyKey();
+ //static inline T getTombstoneKey();
+ //static unsigned getHashValue(const T &Val);
+ //static bool isEqual(const T &LHS, const T &RHS);
+};
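// The commented-out members above are the entire contract a specialization
// must provide.  A hedged, standalone sketch for a hypothetical user type
// Coord (illustrative only, not part of LLVM).  The empty and tombstone keys
// must be two distinct values that never appear as real keys in the map.
struct Coord { int X, Y; };

namespace llvm {
template <> struct DenseMapInfo<Coord> {
  static inline Coord getEmptyKey() { return Coord{0x7fffffff, 0}; }
  static inline Coord getTombstoneKey() { return Coord{-0x7fffffff - 1, 0}; }
  static unsigned getHashValue(const Coord &Val) {
    return (unsigned)(Val.X * 37 + Val.Y); // any reasonable mix works
  }
  static bool isEqual(const Coord &LHS, const Coord &RHS) {
    return LHS.X == RHS.X && LHS.Y == RHS.Y;
  }
};
} // end namespace llvm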
+
+// Provide DenseMapInfo for all pointers.
+template<typename T>
+struct DenseMapInfo<T*> {
+ static inline T* getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+ return reinterpret_cast<T*>(Val);
+ }
+ static inline T* getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
+ return reinterpret_cast<T*>(Val);
+ }
+ static unsigned getHashValue(const T *PtrVal) {
+ return (unsigned((uintptr_t)PtrVal) >> 4) ^
+ (unsigned((uintptr_t)PtrVal) >> 9);
+ }
+ static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
+};
+
+// Provide DenseMapInfo for chars.
+template<> struct DenseMapInfo<char> {
+ static inline char getEmptyKey() { return ~0; }
+ static inline char getTombstoneKey() { return ~0 - 1; }
+ static unsigned getHashValue(const char& Val) { return Val * 37U; }
+ static bool isEqual(const char &LHS, const char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template<> struct DenseMapInfo<unsigned> {
+ static inline unsigned getEmptyKey() { return ~0U; }
+ static inline unsigned getTombstoneKey() { return ~0U - 1; }
+ static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+ static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template<> struct DenseMapInfo<unsigned long> {
+ static inline unsigned long getEmptyKey() { return ~0UL; }
+ static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+ static unsigned getHashValue(const unsigned long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+ static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template<> struct DenseMapInfo<unsigned long long> {
+ static inline unsigned long long getEmptyKey() { return ~0ULL; }
+ static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+ static unsigned getHashValue(const unsigned long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+ static bool isEqual(const unsigned long long& LHS,
+ const unsigned long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for ints.
+template<> struct DenseMapInfo<int> {
+ static inline int getEmptyKey() { return 0x7fffffff; }
+ static inline int getTombstoneKey() { return -0x7fffffff - 1; }
+ static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+ static bool isEqual(const int& LHS, const int& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+ static inline long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+ }
+ static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+ static unsigned getHashValue(const long& Val) {
+ return (unsigned)(Val * 37UL);
+ }
+ static bool isEqual(const long& LHS, const long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for long longs.
+template<> struct DenseMapInfo<long long> {
+ static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+ static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+ static unsigned getHashValue(const long long& Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+ static bool isEqual(const long long& LHS,
+ const long long& RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template<typename T, typename U>
+struct DenseMapInfo<std::pair<T, U> > {
+ typedef std::pair<T, U> Pair;
+ typedef DenseMapInfo<T> FirstInfo;
+ typedef DenseMapInfo<U> SecondInfo;
+
+ static inline Pair getEmptyKey() {
+ return std::make_pair(FirstInfo::getEmptyKey(),
+ SecondInfo::getEmptyKey());
+ }
+ static inline Pair getTombstoneKey() {
+ return std::make_pair(FirstInfo::getTombstoneKey(),
+ SecondInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const Pair& PairVal) {
+ uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
+ | (uint64_t)SecondInfo::getHashValue(PairVal.second);
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+ }
+ static bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return FirstInfo::isEqual(LHS.first, RHS.first) &&
+ SecondInfo::isEqual(LHS.second, RHS.second);
+ }
+};
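// Consequently, any pair of already-mappable types is itself usable as a
// DenseMap key; a hypothetical standalone example (requires DenseMap.h):
//
//   llvm::DenseMap<std::pair<unsigned, unsigned>, int> EdgeWeight;
//   EdgeWeight[std::make_pair(1u, 2u)] = 42; // both members are hashed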
+
+// Provide DenseMapInfo for StringRefs.
+template <> struct DenseMapInfo<StringRef> {
+ static inline StringRef getEmptyKey() {
+ return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
+ 0);
+ }
+ static inline StringRef getTombstoneKey() {
+ return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
+ 0);
+ }
+ static unsigned getHashValue(StringRef Val) {
+ assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+ assert(Val.data() != getTombstoneKey().data() &&
+ "Cannot hash the tombstone key!");
+ return (unsigned)(hash_value(Val));
+ }
+ static bool isEqual(StringRef LHS, StringRef RHS) {
+ if (RHS.data() == getEmptyKey().data())
+ return LHS.data() == getEmptyKey().data();
+ if (RHS.data() == getTombstoneKey().data())
+ return LHS.data() == getTombstoneKey().data();
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for ArrayRefs.
+template <typename T> struct DenseMapInfo<ArrayRef<T>> {
+ static inline ArrayRef<T> getEmptyKey() {
+ return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
+ size_t(0));
+ }
+ static inline ArrayRef<T> getTombstoneKey() {
+ return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
+ size_t(0));
+ }
+ static unsigned getHashValue(ArrayRef<T> Val) {
+ assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
+ assert(Val.data() != getTombstoneKey().data() &&
+ "Cannot hash the tombstone key!");
+ return (unsigned)(hash_value(Val));
+ }
+ static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
+ if (RHS.data() == getEmptyKey().data())
+ return LHS.data() == getEmptyKey().data();
+ if (RHS.data() == getTombstoneKey().data())
+ return LHS.data() == getTombstoneKey().data();
+ return LHS == RHS;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DenseSet.h b/contrib/llvm/include/llvm/ADT/DenseSet.h
new file mode 100644
index 0000000..ef09dce
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DenseSet.h
@@ -0,0 +1,165 @@
+//===- llvm/ADT/DenseSet.h - Dense probed hash table ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DenseSet class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DENSESET_H
+#define LLVM_ADT_DENSESET_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+namespace detail {
+struct DenseSetEmpty {};
+
+// Use the empty base class trick so we can create a DenseMap where the buckets
+// contain only a single item.
+template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
+ KeyT key;
+
+public:
+ KeyT &getFirst() { return key; }
+ const KeyT &getFirst() const { return key; }
+ DenseSetEmpty &getSecond() { return *this; }
+ const DenseSetEmpty &getSecond() const { return *this; }
+};
+}
+
+/// DenseSet - This implements a dense probed hash-table based set.
+template<typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT> >
+class DenseSet {
+ typedef DenseMap<ValueT, detail::DenseSetEmpty, ValueInfoT,
+ detail::DenseSetPair<ValueT>> MapTy;
+ static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
+ "DenseMap buckets unexpectedly large!");
+ MapTy TheMap;
+
+public:
+ typedef ValueT key_type;
+ typedef ValueT value_type;
+ typedef unsigned size_type;
+
+ explicit DenseSet(unsigned NumInitBuckets = 0) : TheMap(NumInitBuckets) {}
+
+ bool empty() const { return TheMap.empty(); }
+ size_type size() const { return TheMap.size(); }
+ size_t getMemorySize() const { return TheMap.getMemorySize(); }
+
+  /// Grow the DenseSet so that it has at least Size buckets.  Will not shrink
+  /// the set.
+ void resize(size_t Size) { TheMap.resize(Size); }
+
+ void clear() {
+ TheMap.clear();
+ }
+
+ /// Return 1 if the specified key is in the set, 0 otherwise.
+ size_type count(const ValueT &V) const {
+ return TheMap.count(V);
+ }
+
+ bool erase(const ValueT &V) {
+ return TheMap.erase(V);
+ }
+
+ void swap(DenseSet& RHS) {
+ TheMap.swap(RHS.TheMap);
+ }
+
+ // Iterators.
+
+ class Iterator {
+ typename MapTy::iterator I;
+ friend class DenseSet;
+
+ public:
+ typedef typename MapTy::iterator::difference_type difference_type;
+ typedef ValueT value_type;
+ typedef value_type *pointer;
+ typedef value_type &reference;
+ typedef std::forward_iterator_tag iterator_category;
+
+ Iterator(const typename MapTy::iterator &i) : I(i) {}
+
+ ValueT &operator*() { return I->getFirst(); }
+ ValueT *operator->() { return &I->getFirst(); }
+
+ Iterator& operator++() { ++I; return *this; }
+ bool operator==(const Iterator& X) const { return I == X.I; }
+ bool operator!=(const Iterator& X) const { return I != X.I; }
+ };
+
+ class ConstIterator {
+ typename MapTy::const_iterator I;
+ friend class DenseSet;
+
+ public:
+ typedef typename MapTy::const_iterator::difference_type difference_type;
+ typedef ValueT value_type;
+ typedef value_type *pointer;
+ typedef value_type &reference;
+ typedef std::forward_iterator_tag iterator_category;
+
+ ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
+
+ const ValueT &operator*() { return I->getFirst(); }
+ const ValueT *operator->() { return &I->getFirst(); }
+
+ ConstIterator& operator++() { ++I; return *this; }
+ bool operator==(const ConstIterator& X) const { return I == X.I; }
+ bool operator!=(const ConstIterator& X) const { return I != X.I; }
+ };
+
+ typedef Iterator iterator;
+ typedef ConstIterator const_iterator;
+
+ iterator begin() { return Iterator(TheMap.begin()); }
+ iterator end() { return Iterator(TheMap.end()); }
+
+ const_iterator begin() const { return ConstIterator(TheMap.begin()); }
+ const_iterator end() const { return ConstIterator(TheMap.end()); }
+
+ iterator find(const ValueT &V) { return Iterator(TheMap.find(V)); }
+
+ /// Alternative version of find() which allows a different, and possibly less
+ /// expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key type
+ /// used.
+ template <class LookupKeyT>
+ iterator find_as(const LookupKeyT &Val) {
+ return Iterator(TheMap.find_as(Val));
+ }
+ template <class LookupKeyT>
+ const_iterator find_as(const LookupKeyT &Val) const {
+ return ConstIterator(TheMap.find_as(Val));
+ }
+
+ void erase(Iterator I) { return TheMap.erase(I.I); }
+ void erase(ConstIterator CI) { return TheMap.erase(CI.I); }
+
+ std::pair<iterator, bool> insert(const ValueT &V) {
+ detail::DenseSetEmpty Empty;
+ return TheMap.insert(std::make_pair(V, Empty));
+ }
+
+ // Range insertion of values.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+};
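// A short, hedged usage sketch (hypothetical client code, not part of this
// header).  DenseSet inherits DenseMap's semantics: values must have a
// DenseMapInfo specialization, and iterators are invalidated by insertion.
#include "llvm/ADT/DenseSet.h"

bool denseSetSketch() {
  llvm::DenseSet<unsigned> Seen;
  Seen.insert(3);
  Seen.insert(7);
  Seen.insert(3);             // duplicate; insert() reports failure
  return Seen.count(7) == 1;  // true; size() is 2
}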
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h b/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h
new file mode 100644
index 0000000..c9317b8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/DepthFirstIterator.h
@@ -0,0 +1,291 @@
+//===- llvm/ADT/DepthFirstIterator.h - Depth First iterator -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic depth-
+// first graph iterator.  This file exposes the following functions/types:
+//
+// df_begin/df_end/df_iterator
+// * Normal depth-first iteration - visit a node and then all of its children.
+//
+// idf_begin/idf_end/idf_iterator
+// * Depth-first iteration on the 'inverse' graph.
+//
+// df_ext_begin/df_ext_end/df_ext_iterator
+// * Normal depth-first iteration - visit a node and then all of its children.
+// This iterator stores the 'visited' set in an external set, which allows
+// it to be more efficient, and allows external clients to use the set for
+// other purposes.
+//
+// idf_ext_begin/idf_ext_end/idf_ext_iterator
+// * Depth-first iteration on the 'inverse' graph.
+// This iterator stores the 'visited' set in an external set, which allows
+// it to be more efficient, and allows external clients to use the set for
+// other purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_DEPTHFIRSTITERATOR_H
+#define LLVM_ADT_DEPTHFIRSTITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+// df_iterator_storage - A private class which is used to figure out where to
+// store the visited set.
+template<class SetType, bool External> // Non-external set
+class df_iterator_storage {
+public:
+ SetType Visited;
+};
+
+template<class SetType>
+class df_iterator_storage<SetType, true> {
+public:
+ df_iterator_storage(SetType &VSet) : Visited(VSet) {}
+ df_iterator_storage(const df_iterator_storage &S) : Visited(S.Visited) {}
+ SetType &Visited;
+};
+
+// Generic Depth First Iterator
+template<class GraphT,
+class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
+ bool ExtStorage = false, class GT = GraphTraits<GraphT> >
+class df_iterator : public std::iterator<std::forward_iterator_tag,
+ typename GT::NodeType, ptrdiff_t>,
+ public df_iterator_storage<SetType, ExtStorage> {
+ typedef std::iterator<std::forward_iterator_tag,
+ typename GT::NodeType, ptrdiff_t> super;
+
+ typedef typename GT::NodeType NodeType;
+ typedef typename GT::ChildIteratorType ChildItTy;
+ typedef PointerIntPair<NodeType*, 1> PointerIntTy;
+
+  // VisitStack - Used to maintain the ordering.  Top = current block.
+  // The first element is the node pointer, the second is the 'next child'
+  // iterator.  If the int in PointerIntTy is 0, the 'next child' iterator
+  // has not been initialized yet and is invalid.
+ std::vector<std::pair<PointerIntTy, ChildItTy>> VisitStack;
+
+private:
+ inline df_iterator(NodeType *Node) {
+ this->Visited.insert(Node);
+ VisitStack.push_back(
+ std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
+ }
+ inline df_iterator() {
+ // End is when stack is empty
+ }
+ inline df_iterator(NodeType *Node, SetType &S)
+ : df_iterator_storage<SetType, ExtStorage>(S) {
+ if (!S.count(Node)) {
+ VisitStack.push_back(
+ std::make_pair(PointerIntTy(Node, 0), GT::child_begin(Node)));
+ this->Visited.insert(Node);
+ }
+ }
+ inline df_iterator(SetType &S)
+ : df_iterator_storage<SetType, ExtStorage>(S) {
+ // End is when stack is empty
+ }
+
+ inline void toNext() {
+ do {
+ std::pair<PointerIntTy, ChildItTy> &Top = VisitStack.back();
+ NodeType *Node = Top.first.getPointer();
+ ChildItTy &It = Top.second;
+ if (!Top.first.getInt()) {
+ // now retrieve the real begin of the children before we dive in
+ It = GT::child_begin(Node);
+ Top.first.setInt(1);
+ }
+
+ while (It != GT::child_end(Node)) {
+ NodeType *Next = *It++;
+ // Has our next sibling been visited?
+ if (Next && this->Visited.insert(Next).second) {
+ // No, do it now.
+ VisitStack.push_back(
+ std::make_pair(PointerIntTy(Next, 0), GT::child_begin(Next)));
+ return;
+ }
+ }
+
+ // Oops, ran out of successors... go up a level on the stack.
+ VisitStack.pop_back();
+ } while (!VisitStack.empty());
+ }
+
+public:
+ typedef typename super::pointer pointer;
+
+ // Provide static begin and end methods as our public "constructors"
+ static df_iterator begin(const GraphT &G) {
+ return df_iterator(GT::getEntryNode(G));
+ }
+ static df_iterator end(const GraphT &G) { return df_iterator(); }
+
+ // Static begin and end methods as our public ctors for external iterators
+ static df_iterator begin(const GraphT &G, SetType &S) {
+ return df_iterator(GT::getEntryNode(G), S);
+ }
+ static df_iterator end(const GraphT &G, SetType &S) { return df_iterator(S); }
+
+ bool operator==(const df_iterator &x) const {
+ return VisitStack == x.VisitStack;
+ }
+ bool operator!=(const df_iterator &x) const { return !(*this == x); }
+
+ pointer operator*() const { return VisitStack.back().first.getPointer(); }
+
+ // This is a nonstandard operator-> that dereferences the pointer an extra
+ // time... so that you can actually call methods ON the Node, because
+  // the contained type is a pointer.  This allows, for example,
+  // BBIt->getTerminator().
+ //
+ NodeType *operator->() const { return **this; }
+
+ df_iterator &operator++() { // Preincrement
+ toNext();
+ return *this;
+ }
+
+  /// \brief Skips all children of the current node and traverses to the next
+  /// node
+ ///
+ /// Note: This function takes care of incrementing the iterator. If you
+ /// always increment and call this function, you risk walking off the end.
+ df_iterator &skipChildren() {
+ VisitStack.pop_back();
+ if (!VisitStack.empty())
+ toNext();
+ return *this;
+ }
+
+ df_iterator operator++(int) { // Postincrement
+ df_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // nodeVisited - return true if this iterator has already visited the
+ // specified node. This is public, and will probably be used to iterate over
+  // nodes that a depth first iteration did not find: i.e., unreachable nodes.
+ //
+ bool nodeVisited(NodeType *Node) const {
+ return this->Visited.count(Node) != 0;
+ }
+
+ /// getPathLength - Return the length of the path from the entry node to the
+ /// current node, counting both nodes.
+ unsigned getPathLength() const { return VisitStack.size(); }
+
+ /// getPath - Return the n'th node in the path from the entry node to the
+ /// current node.
+ NodeType *getPath(unsigned n) const {
+ return VisitStack[n].first.getPointer();
+ }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+df_iterator<T> df_begin(const T& G) {
+ return df_iterator<T>::begin(G);
+}
+
+template <class T>
+df_iterator<T> df_end(const T& G) {
+ return df_iterator<T>::end(G);
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<df_iterator<T>> depth_first(const T& G) {
+ return make_range(df_begin(G), df_end(G));
+}
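// A hedged, standalone sketch of wiring a toy graph into depth_first()
// (DemoNode and DemoGraph are hypothetical, not LLVM types).  All that is
// required is a GraphTraits specialization exposing an entry node and
// per-node child iterators.
#include "llvm/ADT/DepthFirstIterator.h"
#include <vector>

struct DemoNode { std::vector<DemoNode *> Succs; };
struct DemoGraph { DemoNode *Entry; };

namespace llvm {
template <> struct GraphTraits<DemoGraph> {
  typedef DemoNode NodeType;
  typedef std::vector<DemoNode *>::iterator ChildIteratorType;
  static NodeType *getEntryNode(const DemoGraph &G) { return G.Entry; }
  static ChildIteratorType child_begin(NodeType *N) { return N->Succs.begin(); }
  static ChildIteratorType child_end(NodeType *N) { return N->Succs.end(); }
};
} // end namespace llvm

unsigned countReachable(const DemoGraph &G) {
  unsigned N = 0;
  for (DemoNode *Node : llvm::depth_first(G)) {
    (void)Node; // each reachable node is visited exactly once, pre-order
    ++N;
  }
  return N;
}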
+
+// Provide global definitions of external depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
+struct df_ext_iterator : public df_iterator<T, SetTy, true> {
+ df_ext_iterator(const df_iterator<T, SetTy, true> &V)
+ : df_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_begin(const T& G, SetTy &S) {
+ return df_ext_iterator<T, SetTy>::begin(G, S);
+}
+
+template <class T, class SetTy>
+df_ext_iterator<T, SetTy> df_ext_end(const T& G, SetTy &S) {
+ return df_ext_iterator<T, SetTy>::end(G, S);
+}
+
+template <class T, class SetTy>
+iterator_range<df_ext_iterator<T, SetTy>> depth_first_ext(const T& G,
+ SetTy &S) {
+ return make_range(df_ext_begin(G, S), df_ext_end(G, S));
+}
+
+// Provide global definitions of inverse depth first iterators...
+template <class T,
+ class SetTy = llvm::SmallPtrSet<typename GraphTraits<T>::NodeType*, 8>,
+ bool External = false>
+struct idf_iterator : public df_iterator<Inverse<T>, SetTy, External> {
+ idf_iterator(const df_iterator<Inverse<T>, SetTy, External> &V)
+ : df_iterator<Inverse<T>, SetTy, External>(V) {}
+};
+
+template <class T>
+idf_iterator<T> idf_begin(const T& G) {
+ return idf_iterator<T>::begin(Inverse<T>(G));
+}
+
+template <class T>
+idf_iterator<T> idf_end(const T& G){
+ return idf_iterator<T>::end(Inverse<T>(G));
+}
+
+// Provide an accessor method to use them in range-based patterns.
+template <class T>
+iterator_range<idf_iterator<T>> inverse_depth_first(const T& G) {
+ return make_range(idf_begin(G), idf_end(G));
+}
+
+// Provide global definitions of external inverse depth first iterators...
+template <class T, class SetTy = std::set<typename GraphTraits<T>::NodeType*> >
+struct idf_ext_iterator : public idf_iterator<T, SetTy, true> {
+ idf_ext_iterator(const idf_iterator<T, SetTy, true> &V)
+ : idf_iterator<T, SetTy, true>(V) {}
+ idf_ext_iterator(const df_iterator<Inverse<T>, SetTy, true> &V)
+ : idf_iterator<T, SetTy, true>(V) {}
+};
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_begin(const T& G, SetTy &S) {
+ return idf_ext_iterator<T, SetTy>::begin(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+idf_ext_iterator<T, SetTy> idf_ext_end(const T& G, SetTy &S) {
+ return idf_ext_iterator<T, SetTy>::end(Inverse<T>(G), S);
+}
+
+template <class T, class SetTy>
+iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
+ SetTy &S) {
+ return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/EpochTracker.h b/contrib/llvm/include/llvm/ADT/EpochTracker.h
new file mode 100644
index 0000000..582d581
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/EpochTracker.h
@@ -0,0 +1,99 @@
+//===- llvm/ADT/EpochTracker.h - ADT epoch tracking --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DebugEpochBase and DebugEpochBase::HandleBase classes.
+// These can be used to write iterators that are fail-fast when LLVM is built
+// with asserts enabled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EPOCH_TRACKER_H
+#define LLVM_ADT_EPOCH_TRACKER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#include <cstdint>
+
+namespace llvm {
+
+#ifndef LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+class DebugEpochBase {
+public:
+ void incrementEpoch() {}
+
+ class HandleBase {
+ public:
+ HandleBase() = default;
+ explicit HandleBase(const DebugEpochBase *) {}
+ bool isHandleInSync() const { return true; }
+ const void *getEpochAddress() const { return nullptr; }
+ };
+};
+
+#else
+
+/// \brief A base class for data structure classes wishing to make iterators
+/// ("handles") pointing into themselves fail-fast. When building without
+/// asserts, this class is empty and does nothing.
+///
+/// DebugEpochBase does not by itself track handles pointing into itself. The
+/// expectation is that routines touching the handles will poll on
+/// isHandleInSync at appropriate points to assert that the handle they're using
+/// is still valid.
+///
+class DebugEpochBase {
+ uint64_t Epoch;
+
+public:
+ DebugEpochBase() : Epoch(0) {}
+
+ /// \brief Calling incrementEpoch invalidates all handles pointing into the
+ /// calling instance.
+ void incrementEpoch() { ++Epoch; }
+
+ /// \brief The destructor calls incrementEpoch to make use-after-free bugs
+ /// more likely to crash deterministically.
+ ~DebugEpochBase() { incrementEpoch(); }
+
+ /// \brief A base class for iterator classes ("handles") that wish to poll for
+ /// iterator invalidating modifications in the underlying data structure.
+ /// When LLVM is built without asserts, this class is empty and does nothing.
+ ///
+ /// HandleBase does not track the parent data structure by itself. It expects
+ /// the routines modifying the data structure to call incrementEpoch when they
+ /// make an iterator-invalidating modification.
+ ///
+ class HandleBase {
+ const uint64_t *EpochAddress;
+ uint64_t EpochAtCreation;
+
+ public:
+ HandleBase() : EpochAddress(nullptr), EpochAtCreation(UINT64_MAX) {}
+
+ explicit HandleBase(const DebugEpochBase *Parent)
+ : EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
+
+ /// \brief Returns true if the DebugEpochBase this Handle is linked to has
+ /// not called incrementEpoch on itself since the creation of this
+ /// HandleBase instance.
+ bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
+
+ /// \brief Returns a pointer to the epoch word stored in the data structure
+ /// this handle points into. Can be used to check if two iterators point
+ /// into the same data structure.
+ const void *getEpochAddress() const { return EpochAddress; }
+ };
+};
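// A hedged, standalone sketch of the intended protocol (ToyList is
// hypothetical; DenseMap is the real in-tree user).  The container bumps its
// epoch on every invalidating mutation; handles assert they are still in sync.
#include "llvm/ADT/EpochTracker.h"
#include <cassert>
#include <vector>

class ToyList : public llvm::DebugEpochBase {
  std::vector<int> Data;

public:
  class iterator : public llvm::DebugEpochBase::HandleBase {
    const int *Ptr;

  public:
    iterator(const ToyList &L, const int *P) : HandleBase(&L), Ptr(P) {}
    int operator*() const {
      // Fires (under LLVM_ENABLE_ABI_BREAKING_CHECKS) if push_back ran after
      // this iterator was created.
      assert(isHandleInSync() && "iterator invalidated by a mutation!");
      return *Ptr;
    }
  };

  void push_back(int V) {
    incrementEpoch(); // invalidates all outstanding handles
    Data.push_back(V);
  }
  iterator begin() const { return iterator(*this, Data.data()); }
};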
+
+#endif // LLVM_ENABLE_ABI_BREAKING_CHECKS
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
new file mode 100644
index 0000000..d6a26f8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/EquivalenceClasses.h
@@ -0,0 +1,283 @@
+//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementation of equivalence classes through the use of Tarjan's
+// efficient union-find algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
+#define LLVM_ADT_EQUIVALENCECLASSES_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstddef>
+#include <set>
+
+namespace llvm {
+
+/// EquivalenceClasses - This represents a collection of equivalence classes and
+/// supports three efficient operations: insert an element into a class of its
+/// own, union two classes, and find the class for a given element. In
+/// addition to these modification methods, it is possible to iterate over all
+/// of the equivalence classes and all of the elements in a class.
+///
+/// This implementation is space-efficient: it stores only one copy of the
+/// element being indexed per entry in the set, and allows any arbitrary type
+/// to be indexed (as long as it can be ordered with operator<).
+///
+/// Here is a simple example using integers:
+///
+/// \code
+/// EquivalenceClasses<int> EC;
+/// EC.unionSets(1, 2); // insert 1, 2 into the same set
+/// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets
+/// EC.unionSets(5, 1); // merge the set for 1 with 5's set.
+///
+/// for (EquivalenceClasses<int>::iterator I = EC.begin(), E = EC.end();
+/// I != E; ++I) { // Iterate over all of the equivalence sets.
+/// if (!I->isLeader()) continue; // Ignore non-leader sets.
+/// for (EquivalenceClasses<int>::member_iterator MI = EC.member_begin(I);
+/// MI != EC.member_end(); ++MI) // Loop over members in this set.
+/// cerr << *MI << " "; // Print member.
+/// cerr << "\n"; // Finish set.
+/// }
+/// \endcode
+///
+/// This example prints:
+/// 4
+/// 5 1 2
+///
+template <class ElemTy>
+class EquivalenceClasses {
+ /// ECValue - The EquivalenceClasses data structure is just a set of these.
+ /// Each of these represents a relation for a value. First it stores the
+ /// value itself, which provides the ordering that the set queries. Next, it
+ /// provides a "next pointer", which is used to enumerate all of the elements
+  /// in the unioned set.  Finally, it defines either an "end of list pointer"
+  /// or a "leader pointer" depending on whether the value itself is a leader.
+  /// A "leader pointer" points to the node that is the leader for this
+  /// element, if the node is not a leader.  An "end of list pointer" points
+  /// to the last node in the list of members of this set.  Whether or not a
+  /// node is a leader is determined by a bit stolen from one of the pointers.
+ class ECValue {
+ friend class EquivalenceClasses;
+ mutable const ECValue *Leader, *Next;
+ ElemTy Data;
+ // ECValue ctor - Start out with EndOfList pointing to this node, Next is
+ // Null, isLeader = true.
+ ECValue(const ElemTy &Elt)
+ : Leader(this), Next((ECValue*)(intptr_t)1), Data(Elt) {}
+
+ const ECValue *getLeader() const {
+ if (isLeader()) return this;
+ if (Leader->isLeader()) return Leader;
+ // Path compression.
+ return Leader = Leader->getLeader();
+ }
+ const ECValue *getEndOfList() const {
+ assert(isLeader() && "Cannot get the end of a list for a non-leader!");
+ return Leader;
+ }
+
+ void setNext(const ECValue *NewNext) const {
+ assert(getNext() == nullptr && "Already has a next pointer!");
+ Next = (const ECValue*)((intptr_t)NewNext | (intptr_t)isLeader());
+ }
+ public:
+ ECValue(const ECValue &RHS) : Leader(this), Next((ECValue*)(intptr_t)1),
+ Data(RHS.Data) {
+ // Only support copying of singleton nodes.
+ assert(RHS.isLeader() && RHS.getNext() == nullptr && "Not a singleton!");
+ }
+
+ bool operator<(const ECValue &UFN) const { return Data < UFN.Data; }
+
+ bool isLeader() const { return (intptr_t)Next & 1; }
+ const ElemTy &getData() const { return Data; }
+
+ const ECValue *getNext() const {
+ return (ECValue*)((intptr_t)Next & ~(intptr_t)1);
+ }
+
+ template<typename T>
+ bool operator<(const T &Val) const { return Data < Val; }
+ };
+
+ /// TheMapping - This implicitly provides a mapping from ElemTy values to the
+ /// ECValues, it just keeps the key as part of the value.
+ std::set<ECValue> TheMapping;
+
+public:
+ EquivalenceClasses() {}
+ EquivalenceClasses(const EquivalenceClasses &RHS) {
+ operator=(RHS);
+ }
+
+ const EquivalenceClasses &operator=(const EquivalenceClasses &RHS) {
+ TheMapping.clear();
+ for (iterator I = RHS.begin(), E = RHS.end(); I != E; ++I)
+ if (I->isLeader()) {
+ member_iterator MI = RHS.member_begin(I);
+ member_iterator LeaderIt = member_begin(insert(*MI));
+ for (++MI; MI != member_end(); ++MI)
+ unionSets(LeaderIt, member_begin(insert(*MI)));
+ }
+ return *this;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Inspection methods
+ //
+
+ /// iterator* - Provides a way to iterate over all values in the set.
+ typedef typename std::set<ECValue>::const_iterator iterator;
+ iterator begin() const { return TheMapping.begin(); }
+ iterator end() const { return TheMapping.end(); }
+
+ bool empty() const { return TheMapping.empty(); }
+
+ /// member_* Iterate over the members of an equivalence class.
+ ///
+ class member_iterator;
+ member_iterator member_begin(iterator I) const {
+ // Only leaders provide anything to iterate over.
+ return member_iterator(I->isLeader() ? &*I : nullptr);
+ }
+ member_iterator member_end() const {
+ return member_iterator(nullptr);
+ }
+
+ /// findValue - Return an iterator to the specified value. If it does not
+ /// exist, end() is returned.
+ iterator findValue(const ElemTy &V) const {
+ return TheMapping.find(V);
+ }
+
+ /// getLeaderValue - Return the leader for the specified value that is in the
+ /// set. It is an error to call this method for a value that is not yet in
+ /// the set. For that, call getOrInsertLeaderValue(V).
+ const ElemTy &getLeaderValue(const ElemTy &V) const {
+ member_iterator MI = findLeader(V);
+ assert(MI != member_end() && "Value is not in the set!");
+ return *MI;
+ }
+
+ /// getOrInsertLeaderValue - Return the leader for the specified value that is
+ /// in the set. If the member is not in the set, it is inserted, then
+ /// returned.
+ const ElemTy &getOrInsertLeaderValue(const ElemTy &V) {
+ member_iterator MI = findLeader(insert(V));
+ assert(MI != member_end() && "Value is not in the set!");
+ return *MI;
+ }
+
+ /// getNumClasses - Return the number of equivalence classes in this set.
+ /// Note that this is a linear time operation.
+ unsigned getNumClasses() const {
+ unsigned NC = 0;
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (I->isLeader()) ++NC;
+ return NC;
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Mutation methods
+
+ /// insert - Insert a new value into the union/find set, ignoring the request
+ /// if the value already exists.
+ iterator insert(const ElemTy &Data) {
+ return TheMapping.insert(ECValue(Data)).first;
+ }
+
+ /// findLeader - Given a value in the set, return a member iterator for the
+ /// equivalence class it is in. This does the path-compression part that
+ /// makes union-find "union findy". This returns an end iterator if the value
+ /// is not in the equivalence class.
+ ///
+ member_iterator findLeader(iterator I) const {
+ if (I == TheMapping.end()) return member_end();
+ return member_iterator(I->getLeader());
+ }
+ member_iterator findLeader(const ElemTy &V) const {
+ return findLeader(TheMapping.find(V));
+ }
+
+
+  /// unionSets - Merge the two equivalence sets for the specified values,
+ /// them if they do not already exist in the equivalence set.
+ member_iterator unionSets(const ElemTy &V1, const ElemTy &V2) {
+ iterator V1I = insert(V1), V2I = insert(V2);
+ return unionSets(findLeader(V1I), findLeader(V2I));
+ }
+ member_iterator unionSets(member_iterator L1, member_iterator L2) {
+ assert(L1 != member_end() && L2 != member_end() && "Illegal inputs!");
+ if (L1 == L2) return L1; // Unifying the same two sets, noop.
+
+ // Otherwise, this is a real union operation. Set the end of the L1 list to
+ // point to the L2 leader node.
+ const ECValue &L1LV = *L1.Node, &L2LV = *L2.Node;
+ L1LV.getEndOfList()->setNext(&L2LV);
+
+ // Update L1LV's end of list pointer.
+ L1LV.Leader = L2LV.getEndOfList();
+
+ // Clear L2's leader flag:
+ L2LV.Next = L2LV.getNext();
+
+ // L2's leader is now L1.
+ L2LV.Leader = &L1LV;
+ return L1;
+ }
+
+ class member_iterator : public std::iterator<std::forward_iterator_tag,
+ const ElemTy, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag,
+ const ElemTy, ptrdiff_t> super;
+ const ECValue *Node;
+ friend class EquivalenceClasses;
+ public:
+ typedef size_t size_type;
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+ explicit member_iterator() {}
+ explicit member_iterator(const ECValue *N) : Node(N) {}
+
+ reference operator*() const {
+ assert(Node != nullptr && "Dereferencing end()!");
+ return Node->getData();
+ }
+ pointer operator->() const { return &operator*(); }
+
+ member_iterator &operator++() {
+ assert(Node != nullptr && "++'d off the end of the list!");
+ Node = Node->getNext();
+ return *this;
+ }
+
+ member_iterator operator++(int) { // postincrement operators.
+ member_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ bool operator==(const member_iterator &RHS) const {
+ return Node == RHS.Node;
+ }
+ bool operator!=(const member_iterator &RHS) const {
+ return Node != RHS.Node;
+ }
+ };
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/FoldingSet.h b/contrib/llvm/include/llvm/ADT/FoldingSet.h
new file mode 100644
index 0000000..c920539
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/FoldingSet.h
@@ -0,0 +1,750 @@
+//===-- llvm/ADT/FoldingSet.h - Uniquing Hash Set ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a hash set that can be used to remove duplication of nodes
+// in a graph. This code was originally created by Chris Lattner for use with
+// SelectionDAGCSEMap, but was isolated so it can be used across the LLVM
+// codebase.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_FOLDINGSET_H
+#define LLVM_ADT_FOLDINGSET_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+/// This folding set is used for two purposes:
+/// 1. Given information about a node we want to create, look up the unique
+/// instance of the node in the set. If the node already exists, return
+///    it; otherwise, return the bucket it should be inserted into.
+/// 2. Given a node that has already been created, remove it from the set.
+///
+/// This class is implemented as a single-link chained hash table, where the
+/// "buckets" are actually the nodes themselves (the next pointer is in the
+/// node). The last node points back to the bucket to simplify node removal.
+///
+/// Any node that is to be included in the folding set must be a subclass of
+/// FoldingSetNode. The node class must also define a Profile method used to
+/// establish the unique bits of data for the node. The Profile method is
+/// passed a FoldingSetNodeID object which is used to gather the bits. Just
+/// call one of the Add* functions defined in the FoldingSetNodeID class.
+/// NOTE: The folding set does not own the nodes, and it is the
+/// responsibility of the user to dispose of the nodes.
+///
+/// Eg.
+/// class MyNode : public FoldingSetNode {
+/// private:
+/// std::string Name;
+/// unsigned Value;
+/// public:
+/// MyNode(const char *N, unsigned V) : Name(N), Value(V) {}
+/// ...
+/// void Profile(FoldingSetNodeID &ID) const {
+/// ID.AddString(Name);
+/// ID.AddInteger(Value);
+/// }
+/// ...
+/// };
+///
+/// To define the folding set itself use the FoldingSet template;
+///
+/// Eg.
+/// FoldingSet<MyNode> MyFoldingSet;
+///
+/// Four public methods are available to manipulate the folding set;
+///
+/// 1) If you have an existing node that you want to add to the set, but are
+///    unsure whether the node might already exist, then call;
+///
+/// MyNode *M = MyFoldingSet.GetOrInsertNode(N);
+///
+///    If the result is equal to the input then the node has been inserted.
+///    Otherwise, the result is the node existing in the folding set, and the
+///    input can be discarded (use the result instead).
+///
+/// 2) If you are ready to construct a node but want to check if it already
+/// exists, then call FindNodeOrInsertPos with a FoldingSetNodeID of the bits to
+/// check;
+///
+/// FoldingSetNodeID ID;
+/// ID.AddString(Name);
+/// ID.AddInteger(Value);
+/// void *InsertPoint;
+///
+/// MyNode *M = MyFoldingSet.FindNodeOrInsertPos(ID, InsertPoint);
+///
+/// If found, then M will be non-NULL; else InsertPoint will point to where it
+/// should be inserted using InsertNode.
+///
+/// 3) If you get a NULL result from FindNodeOrInsertPos then you can insert a
+///    new node with InsertNode;
+///
+/// InsertNode(N, InsertPoint);
+///
+/// 4) Finally, if you want to remove a node from the folding set call;
+///
+/// bool WasRemoved = RemoveNode(N);
+///
+/// The result indicates whether the node existed in the folding set.
+
+class FoldingSetNodeID;
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - Implements the folding set functionality. The main
+/// structure is an array of buckets. Each bucket is indexed by the hash of
+/// the nodes it contains. The bucket itself points to the nodes contained
+/// in the bucket via a singly linked list. The last node in the list points
+/// back to the bucket to facilitate node removal.
+///
+class FoldingSetImpl {
+ virtual void anchor(); // Out of line virtual method.
+
+protected:
+ /// Buckets - Array of bucket chains.
+ ///
+ void **Buckets;
+
+ /// NumBuckets - Length of the Buckets array. Always a power of 2.
+ ///
+ unsigned NumBuckets;
+
+ /// NumNodes - Number of nodes in the folding set. Growth occurs when NumNodes
+ /// is greater than twice the number of buckets.
+ unsigned NumNodes;
+
+ explicit FoldingSetImpl(unsigned Log2InitSize = 6);
+ FoldingSetImpl(FoldingSetImpl &&Arg);
+ FoldingSetImpl &operator=(FoldingSetImpl &&RHS);
+ ~FoldingSetImpl();
+
+public:
+ //===--------------------------------------------------------------------===//
+ /// Node - This class is used to maintain the singly linked bucket list in
+ /// a folding set.
+ ///
+ class Node {
+ private:
+ // NextInFoldingSetBucket - next link in the bucket list.
+ void *NextInFoldingSetBucket;
+
+ public:
+ Node() : NextInFoldingSetBucket(nullptr) {}
+
+ // Accessors
+ void *getNextInBucket() const { return NextInFoldingSetBucket; }
+ void SetNextInBucket(void *N) { NextInFoldingSetBucket = N; }
+ };
+
+ /// clear - Remove all nodes from the folding set.
+ void clear();
+
+ /// RemoveNode - Remove a node from the folding set, returning true if one
+ /// was removed or false if the node was not in the folding set.
+ bool RemoveNode(Node *N);
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and return
+ /// it instead.
+ Node *GetOrInsertNode(Node *N);
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(Node *N, void *InsertPos);
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(Node *N) {
+ Node *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return NumNodes; }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return NumNodes == 0; }
+
+private:
+ /// GrowHashTable - Double the size of the hash table and rehash everything.
+ ///
+ void GrowHashTable();
+
+protected:
+ /// GetNodeProfile - Instantiations of the FoldingSet template implement
+ /// this function to gather data bits for the given node.
+ virtual void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const = 0;
+ /// NodeEquals - Instantiations of the FoldingSet template implement
+ /// this function to compare the given node with the given ID.
+ virtual bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) const=0;
+ /// ComputeNodeHash - Instantiations of the FoldingSet template implement
+ /// this function to compute a hash value for the given node.
+ virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
+};
+
+//===----------------------------------------------------------------------===//
+
+template<typename T> struct FoldingSetTrait;
+
+/// DefaultFoldingSetTrait - This class provides default implementations
+/// for FoldingSetTrait implementations.
+///
+template<typename T> struct DefaultFoldingSetTrait {
+ static void Profile(const T &X, FoldingSetNodeID &ID) {
+ X.Profile(ID);
+ }
+ static void Profile(T &X, FoldingSetNodeID &ID) {
+ X.Profile(ID);
+ }
+
+ // Equals - Test if the profile for X would match ID, using TempID
+ // to compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular comparison. Implementations
+ // can override this to provide more efficient implementations.
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID);
+
+ // ComputeHash - Compute a hash value for X, using TempID to
+ // compute a temporary ID if necessary. The default implementation
+ // just calls Profile and does a regular hash computation.
+ // Implementations can override this to provide more efficient
+ // implementations.
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID);
+};
+
+/// FoldingSetTrait - This trait class is used to define behavior of how
+/// to "profile" (in the FoldingSet parlance) an object of a given type.
+/// The default behavior is to invoke a 'Profile' method on an object, but
+/// through template specialization the behavior can be tailored for specific
+/// types. Combined with the FoldingSetNodeWrapper class, one can add objects
+/// to FoldingSets that were not originally designed to have that behavior.
+template<typename T> struct FoldingSetTrait
+ : public DefaultFoldingSetTrait<T> {};
+
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait;
+
+/// DefaultContextualFoldingSetTrait - Like DefaultFoldingSetTrait, but
+/// for ContextualFoldingSets.
+template<typename T, typename Ctx>
+struct DefaultContextualFoldingSetTrait {
+ static void Profile(T &X, FoldingSetNodeID &ID, Ctx Context) {
+ X.Profile(ID, Context);
+ }
+ static inline bool Equals(T &X, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID, Ctx Context);
+ static inline unsigned ComputeHash(T &X, FoldingSetNodeID &TempID,
+ Ctx Context);
+};
+
+/// ContextualFoldingSetTrait - Like FoldingSetTrait, but for
+/// ContextualFoldingSets.
+template<typename T, typename Ctx> struct ContextualFoldingSetTrait
+ : public DefaultContextualFoldingSetTrait<T, Ctx> {};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be useful for storing node id data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+ const unsigned *Data;
+ size_t Size;
+
+public:
+ FoldingSetNodeIDRef() : Data(nullptr), Size(0) {}
+ FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
+
+ /// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
+ /// used to lookup the node in the FoldingSetImpl.
+ unsigned ComputeHash() const;
+
+ bool operator==(FoldingSetNodeIDRef) const;
+
+ bool operator!=(FoldingSetNodeIDRef RHS) const { return !(*this == RHS); }
+
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(FoldingSetNodeIDRef) const;
+
+ const unsigned *getData() const { return Data; }
+ size_t getSize() const { return Size; }
+};
+
+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeID - This class is used to gather all the unique data bits of
+/// a node. When all the bits are gathered this class is used to produce a
+/// hash value for the node.
+///
+class FoldingSetNodeID {
+ /// Bits - Vector of all the data bits that make the node unique.
+ /// Use a SmallVector to avoid a heap allocation in the common case.
+ SmallVector<unsigned, 32> Bits;
+
+public:
+ FoldingSetNodeID() {}
+
+ FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+ : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}
+
+ /// Add* - Add various data types to Bit data.
+ ///
+ void AddPointer(const void *Ptr);
+ void AddInteger(signed I);
+ void AddInteger(unsigned I);
+ void AddInteger(long I);
+ void AddInteger(unsigned long I);
+ void AddInteger(long long I);
+ void AddInteger(unsigned long long I);
+ void AddBoolean(bool B) { AddInteger(B ? 1U : 0U); }
+ void AddString(StringRef String);
+ void AddNodeID(const FoldingSetNodeID &ID);
+
+ template <typename T>
+ inline void Add(const T &x) { FoldingSetTrait<T>::Profile(x, *this); }
+
+ /// clear - Clear the accumulated profile, allowing this FoldingSetNodeID
+ /// object to be used to compute a new profile.
+ inline void clear() { Bits.clear(); }
+
+ /// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
+ /// to lookup the node in the FoldingSetImpl.
+ unsigned ComputeHash() const;
+
+ /// operator== - Used to compare two nodes to each other.
+ ///
+ bool operator==(const FoldingSetNodeID &RHS) const;
+ bool operator==(const FoldingSetNodeIDRef RHS) const;
+
+ bool operator!=(const FoldingSetNodeID &RHS) const { return !(*this == RHS); }
+ bool operator!=(const FoldingSetNodeIDRef RHS) const { return !(*this ==RHS);}
+
+ /// Used to compare the "ordering" of two nodes as defined by the
+ /// profiled bits and their ordering defined by memcmp().
+ bool operator<(const FoldingSetNodeID &RHS) const;
+ bool operator<(const FoldingSetNodeIDRef RHS) const;
+
+ /// Intern - Copy this node's data to a memory region allocated from the
+ /// given allocator and return a FoldingSetNodeIDRef describing the
+ /// interned data.
+ FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
+};
+
+// Convenience type to hide the implementation of the folding set.
+typedef FoldingSetImpl::Node FoldingSetNode;
+template<class T> class FoldingSetIterator;
+template<class T> class FoldingSetBucketIterator;
+
+// Definitions of FoldingSetTrait and ContextualFoldingSetTrait functions, which
+// require the definition of FoldingSetNodeID.
+template<typename T>
+inline bool
+DefaultFoldingSetTrait<T>::Equals(T &X, const FoldingSetNodeID &ID,
+ unsigned /*IDHash*/,
+ FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID == ID;
+}
+template<typename T>
+inline unsigned
+DefaultFoldingSetTrait<T>::ComputeHash(T &X, FoldingSetNodeID &TempID) {
+ FoldingSetTrait<T>::Profile(X, TempID);
+ return TempID.ComputeHash();
+}
+template<typename T, typename Ctx>
+inline bool
+DefaultContextualFoldingSetTrait<T, Ctx>::Equals(T &X,
+ const FoldingSetNodeID &ID,
+ unsigned /*IDHash*/,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID == ID;
+}
+template<typename T, typename Ctx>
+inline unsigned
+DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
+ FoldingSetNodeID &TempID,
+ Ctx Context) {
+ ContextualFoldingSetTrait<T, Ctx>::Profile(X, TempID, Context);
+ return TempID.ComputeHash();
+}
+
+//===----------------------------------------------------------------------===//
+/// FoldingSet - This template class is used to instantiate a specialized
+/// implementation of the folding set to the node class T. T must be a
+/// subclass of FoldingSetNode and implement a Profile function.
+///
+/// Note that this set type is movable and move-assignable. However, its
+/// moved-from state is not a valid state for anything other than
+/// move-assigning and destroying. This is primarily to enable movable APIs
+/// that incorporate these objects.
+template <class T> class FoldingSet final : public FoldingSetImpl {
+private:
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
+ T *TN = static_cast<T *>(N);
+ FoldingSetTrait<T>::Profile(*TN, ID);
+ }
+ /// NodeEquals - Instantiations may optionally provide a way to compare a
+ /// node with a specified ID.
+ bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) const override {
+ T *TN = static_cast<T *>(N);
+ return FoldingSetTrait<T>::Equals(*TN, ID, IDHash, TempID);
+ }
+ /// ComputeNodeHash - Instantiations may optionally provide a way to compute a
+ /// hash value directly from a node.
+ unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
+ T *TN = static_cast<T *>(N);
+ return FoldingSetTrait<T>::ComputeHash(*TN, TempID);
+ }
+
+public:
+ explicit FoldingSet(unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize) {}
+
+ FoldingSet(FoldingSet &&Arg) : FoldingSetImpl(std::move(Arg)) {}
+ FoldingSet &operator=(FoldingSet &&RHS) {
+ (void)FoldingSetImpl::operator=(std::move(RHS));
+ return *this;
+ }
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// ContextualFoldingSet - This template class is a further refinement
+/// of FoldingSet which provides a context argument when calling
+/// Profile on its nodes. Currently, that argument is fixed at
+/// initialization time.
+///
+/// T must be a subclass of FoldingSetNode and implement a Profile
+/// function with signature
+/// void Profile(llvm::FoldingSetNodeID &, Ctx);
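+///
+/// For example (a sketch; CtxNode and MyContext are hypothetical):
+/// \code
+///   class CtxNode : public FoldingSetNode {
+///   public:
+///     void Profile(FoldingSetNodeID &ID, MyContext *C) const;
+///   };
+///
+///   MyContext Ctx;
+///   ContextualFoldingSet<CtxNode, MyContext *> Set(&Ctx);
+/// \endcode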
+template <class T, class Ctx>
+class ContextualFoldingSet final : public FoldingSetImpl {
+ // Unfortunately, this can't derive from FoldingSet<T> because the
+ // construction vtable for FoldingSet<T> requires
+ // FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
+ // requires a single-argument T::Profile().
+
+private:
+ Ctx Context;
+
+  /// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
+ /// way to convert nodes into a unique specifier.
+ void GetNodeProfile(FoldingSetImpl::Node *N,
+ FoldingSetNodeID &ID) const override {
+ T *TN = static_cast<T *>(N);
+ ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
+ }
+ bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) const override {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
+ Context);
+ }
+ unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
+ FoldingSetNodeID &TempID) const override {
+ T *TN = static_cast<T *>(N);
+ return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
+ }
+
+public:
+ explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
+ : FoldingSetImpl(Log2InitSize), Context(Context)
+ {}
+
+ Ctx getContext() const { return Context; }
+
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N'
+ /// and return it instead.
+ T *GetOrInsertNode(Node *N) {
+ return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it
+ /// exists, return it. If not, return the insertion token that will
+ /// make insertion faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
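+///
+/// For example (a sketch, reusing the hypothetical MyNode above):
+/// \code
+///   FoldingSetVector<MyNode> V;
+///   V.InsertNode(new MyNode(1));
+///   V.InsertNode(new MyNode(2));
+///   for (MyNode &N : V) { /* visits the node for 1, then the node for 2 */ }
+/// \endcode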
+template <class T, class VectorT = SmallVector<T*, 8> >
+class FoldingSetVector {
+ FoldingSet<T> Set;
+ VectorT Vector;
+
+public:
+ explicit FoldingSetVector(unsigned Log2InitSize = 6)
+ : Set(Log2InitSize) {
+ }
+
+ typedef pointee_iterator<typename VectorT::iterator> iterator;
+ iterator begin() { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+
+ typedef pointee_iterator<typename VectorT::const_iterator> const_iterator;
+ const_iterator begin() const { return Vector.begin(); }
+ const_iterator end() const { return Vector.end(); }
+
+ /// clear - Remove all nodes from the folding set.
+ void clear() { Set.clear(); Vector.clear(); }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return Set.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ T *Result = Set.GetOrInsertNode(N);
+ if (Result == N) Vector.push_back(N);
+ return Result;
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ Set.InsertNode(N, InsertPos);
+ Vector.push_back(N);
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ Set.InsertNode(N);
+ Vector.push_back(N);
+ }
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return Set.size(); }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return Set.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetIteratorImpl - This is the common iterator support shared by all
+/// folding sets, which knows how to walk the folding set hash table.
+class FoldingSetIteratorImpl {
+protected:
+ FoldingSetNode *NodePtr;
+ FoldingSetIteratorImpl(void **Bucket);
+ void advance();
+
+public:
+ bool operator==(const FoldingSetIteratorImpl &RHS) const {
+ return NodePtr == RHS.NodePtr;
+ }
+ bool operator!=(const FoldingSetIteratorImpl &RHS) const {
+ return NodePtr != RHS.NodePtr;
+ }
+};
+
+template <class T> class FoldingSetIterator : public FoldingSetIteratorImpl {
+public:
+ explicit FoldingSetIterator(void **Bucket) : FoldingSetIteratorImpl(Bucket) {}
+
+ T &operator*() const {
+ return *static_cast<T*>(NodePtr);
+ }
+
+ T *operator->() const {
+ return static_cast<T*>(NodePtr);
+ }
+
+ inline FoldingSetIterator &operator++() { // Preincrement
+ advance();
+ return *this;
+ }
+ FoldingSetIterator operator++(int) { // Postincrement
+ FoldingSetIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetBucketIteratorImpl - This is the common bucket iterator support
+/// shared by all folding sets, which knows how to walk a particular bucket
+/// of a folding set hash table.
+class FoldingSetBucketIteratorImpl {
+protected:
+ void *Ptr;
+
+ explicit FoldingSetBucketIteratorImpl(void **Bucket);
+
+ FoldingSetBucketIteratorImpl(void **Bucket, bool)
+ : Ptr(Bucket) {}
+
+ void advance() {
+ void *Probe = static_cast<FoldingSetNode*>(Ptr)->getNextInBucket();
+ uintptr_t x = reinterpret_cast<uintptr_t>(Probe) & ~0x1;
+ Ptr = reinterpret_cast<void*>(x);
+ }
+
+public:
+ bool operator==(const FoldingSetBucketIteratorImpl &RHS) const {
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const FoldingSetBucketIteratorImpl &RHS) const {
+ return Ptr != RHS.Ptr;
+ }
+};
+
+template <class T>
+class FoldingSetBucketIterator : public FoldingSetBucketIteratorImpl {
+public:
+ explicit FoldingSetBucketIterator(void **Bucket) :
+ FoldingSetBucketIteratorImpl(Bucket) {}
+
+ FoldingSetBucketIterator(void **Bucket, bool) :
+ FoldingSetBucketIteratorImpl(Bucket, true) {}
+
+ T &operator*() const { return *static_cast<T*>(Ptr); }
+ T *operator->() const { return static_cast<T*>(Ptr); }
+
+ inline FoldingSetBucketIterator &operator++() { // Preincrement
+ advance();
+ return *this;
+ }
+ FoldingSetBucketIterator operator++(int) { // Postincrement
+ FoldingSetBucketIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetNodeWrapper - This template class is used to "wrap" arbitrary
+/// types in an enclosing object so that they can be inserted into FoldingSets.
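+/// Pointer types work out of the box via the FoldingSetTrait<T*>
+/// specialization below; for example (illustrative):
+/// \code
+///   FoldingSet<FoldingSetNodeWrapper<MyObject *>> Set; // MyObject is
+///                                                      // hypothetical
+/// \endcode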
+template <typename T>
+class FoldingSetNodeWrapper : public FoldingSetNode {
+ T data;
+
+public:
+ template <typename... Ts>
+ explicit FoldingSetNodeWrapper(Ts &&... Args)
+ : data(std::forward<Ts>(Args)...) {}
+
+ void Profile(FoldingSetNodeID &ID) { FoldingSetTrait<T>::Profile(data, ID); }
+
+ T &getValue() { return data; }
+ const T &getValue() const { return data; }
+
+ operator T&() { return data; }
+ operator const T&() const { return data; }
+};
+
+//===----------------------------------------------------------------------===//
+/// FastFoldingSetNode - This is a subclass of FoldingSetNode which stores
+/// a FoldingSetNodeID value rather than requiring the node to recompute it
+/// each time it is needed. This trades space for speed (which can be
+/// significant if the ID is long), and it also permits nodes to drop
+/// information that would otherwise only be required for recomputing an ID.
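+/// A subclass simply forwards a precomputed ID to the protected constructor
+/// (a sketch):
+/// \code
+///   class MyFastNode : public FastFoldingSetNode {
+///   public:
+///     explicit MyFastNode(const FoldingSetNodeID &ID)
+///         : FastFoldingSetNode(ID) {}
+///   };
+/// \endcode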
+class FastFoldingSetNode : public FoldingSetNode {
+ FoldingSetNodeID FastID;
+
+protected:
+ explicit FastFoldingSetNode(const FoldingSetNodeID &ID) : FastID(ID) {}
+
+public:
+ void Profile(FoldingSetNodeID &ID) const { ID.AddNodeID(FastID); }
+};
+
+//===----------------------------------------------------------------------===//
+// Partial specializations of FoldingSetTrait.
+
+template<typename T> struct FoldingSetTrait<T*> {
+ static inline void Profile(T *X, FoldingSetNodeID &ID) {
+ ID.AddPointer(X);
+ }
+};
+template <typename T1, typename T2>
+struct FoldingSetTrait<std::pair<T1, T2>> {
+ static inline void Profile(const std::pair<T1, T2> &P,
+ llvm::FoldingSetNodeID &ID) {
+ ID.Add(P.first);
+ ID.Add(P.second);
+ }
+};
+} // End of namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/GraphTraits.h b/contrib/llvm/include/llvm/ADT/GraphTraits.h
new file mode 100644
index 0000000..823caef
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/GraphTraits.h
@@ -0,0 +1,106 @@
+//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the little GraphTraits<X> template class that should be
+// specialized by classes that want to be iterable by generic graph iterators.
+//
+// This file also defines the marker class Inverse that is used to iterate over
+// graphs in a graph-defined, inverse ordering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_GRAPHTRAITS_H
+#define LLVM_ADT_GRAPHTRAITS_H
+
+namespace llvm {
+
+// GraphTraits - This class should be specialized by different graph types...
+// which is why the default version is empty.
+//
+template<class GraphType>
+struct GraphTraits {
+ // Elements to provide:
+
+ // typedef NodeType - Type of Node in the graph
+ // typedef ChildIteratorType - Type used to iterate over children in graph
+
+ // static NodeType *getEntryNode(const GraphType &)
+ // Return the entry node of the graph
+
+ // static ChildIteratorType child_begin(NodeType *)
+ // static ChildIteratorType child_end (NodeType *)
+ // Return iterators that point to the beginning and ending of the child
+ // node list for the specified node.
+ //
+
+
+ // typedef ...iterator nodes_iterator;
+ // static nodes_iterator nodes_begin(GraphType *G)
+ // static nodes_iterator nodes_end (GraphType *G)
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+
+ // static unsigned size (GraphType *G)
+ // Return total number of nodes in the graph
+ //
+
+
+ // If anyone tries to use this class without having an appropriate
+  // specialization, cause a compile error. If you get this error, it's
+  // because you need to include the appropriate specialization of
+  // GraphTraits<> for your
+ // graph, or you need to define it for a new graph type. Either that or
+ // your argument to XXX_begin(...) is unknown or needs to have the proper .h
+ // file #include'd.
+ //
+ typedef typename GraphType::UnknownGraphTypeError NodeType;
+};
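+
+// For illustration only, a specialization for a hypothetical adjacency-list
+// graph type (MyGraph and MyNode are not part of this header) might look like:
+//
+//   template <> struct GraphTraits<MyGraph *> {
+//     typedef MyNode NodeType;
+//     typedef MyNode::ChildIterator ChildIteratorType;
+//
+//     static NodeType *getEntryNode(MyGraph *G) { return G->getEntry(); }
+//     static ChildIteratorType child_begin(NodeType *N) {
+//       return N->child_begin();
+//     }
+//     static ChildIteratorType child_end(NodeType *N) {
+//       return N->child_end();
+//     }
+//   };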
+
+
+// Inverse - This class is used as a little marker class to tell the graph
+// iterator to iterate over the graph in a graph defined "Inverse" ordering.
+// Not all graphs define an inverse ordering, and for those that do, exactly
+// what it is depends on the graph. Here's an example of usage with the
+// df_iterator:
+//
+// idf_iterator<Method*> I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+// Which is equivalent to:
+// df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M);
+// for (; I != E; ++I) { ... }
+//
+template <class GraphType>
+struct Inverse {
+ const GraphType &Graph;
+
+ inline Inverse(const GraphType &G) : Graph(G) {}
+};
+
+// Provide a partial specialization of GraphTraits so that the inverse of an
+// inverse falls back to the original graph.
+template<class T>
+struct GraphTraits<Inverse<Inverse<T> > > {
+ typedef typename GraphTraits<T>::NodeType NodeType;
+ typedef typename GraphTraits<T>::ChildIteratorType ChildIteratorType;
+
+ static NodeType *getEntryNode(Inverse<Inverse<T> > *G) {
+ return GraphTraits<T>::getEntryNode(G->Graph.Graph);
+ }
+
+ static ChildIteratorType child_begin(NodeType* N) {
+ return GraphTraits<T>::child_begin(N);
+ }
+
+ static ChildIteratorType child_end(NodeType* N) {
+ return GraphTraits<T>::child_end(N);
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/Hashing.h b/contrib/llvm/include/llvm/ADT/Hashing.h
new file mode 100644
index 0000000..de56f91
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Hashing.h
@@ -0,0 +1,661 @@
+//===-- llvm/ADT/Hashing.h - Utilities for hashing --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the newly proposed standard C++ interfaces for hashing
+// arbitrary data and building hash functions for user-defined types. This
+// interface was originally proposed in N3333[1] and is currently under review
+// for inclusion in a future TR and/or standard.
+//
+// The primary interfaces provided comprise one type and three functions:
+//
+// -- 'hash_code' class is an opaque type representing the hash code for some
+// data. It is the intended product of hashing, and can be used to implement
+// hash tables, checksumming, and other common uses of hashes. It is not an
+// integer type (although it can be converted to one) because it is risky
+// to assume much about the internals of a hash_code. In particular, each
+// execution of the program has a high probability of producing a different
+// hash_code for a given input. Thus hash_code values are not stable across
+// runs and should not be saved or persisted; they should only be used
+// within a single execution to build hashing data structures.
+//
+// -- 'hash_value' is a function designed to be overloaded for each
+// user-defined type which wishes to be used within a hashing context. It
+// should be overloaded within the user-defined type's namespace and found
+// via ADL. Overloads for primitive types are provided by this library.
+//
+// -- 'hash_combine' and 'hash_combine_range' are functions designed to aid
+// programmers in easily and intuitively combining a set of data into
+// a single hash_code for their object. They should only logically be used
+// within the implementation of a 'hash_value' routine or similar context.
+//
+// Note that 'hash_combine_range' contains very special logic for hashing
+// a contiguous array of integers or pointers. This logic is *extremely* fast:
+// on a modern Intel "Gainestown" Xeon (Nehalem uarch) @2.2 GHz, it was
+// benchmarked at over 6.5 GiB/s for large keys, and at under 20 cycles/hash
+// for keys under 32 bytes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_HASHING_H
+#define LLVM_ADT_HASHING_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <string>
+#include <utility>
+
+namespace llvm {
+
+/// \brief An opaque object representing a hash code.
+///
+/// This object represents the result of hashing some entity. It is intended to
+/// be used to implement hashtables or other hashing-based data structures.
+/// While it wraps and exposes a numeric value, this value should not be
+/// trusted to be stable or predictable across processes or executions.
+///
+/// In order to obtain the hash_code for an object 'x':
+/// \code
+/// using llvm::hash_value;
+/// llvm::hash_code code = hash_value(x);
+/// \endcode
+class hash_code {
+ size_t value;
+
+public:
+ /// \brief Default construct a hash_code.
+ /// Note that this leaves the value uninitialized.
+ hash_code() = default;
+
+ /// \brief Form a hash code directly from a numerical value.
+ hash_code(size_t value) : value(value) {}
+
+ /// \brief Convert the hash code to its numerical value for use.
+ /*explicit*/ operator size_t() const { return value; }
+
+ friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value == rhs.value;
+ }
+ friend bool operator!=(const hash_code &lhs, const hash_code &rhs) {
+ return lhs.value != rhs.value;
+ }
+
+ /// \brief Allow a hash_code to be directly run through hash_value.
+ friend size_t hash_value(const hash_code &code) { return code.value; }
+};
+
+/// \brief Compute a hash_code for any integer value.
+///
+/// Note that this function is intended to compute the same hash_code for
+/// a particular value without regard to the pre-promotion type. This is in
+/// contrast to hash_combine which may produce different hash_codes for
+/// differing argument types even if they would implicitly promote to a common
+/// type without changing the value.
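+///
+/// For example (illustrative):
+/// \code
+///   assert(hash_value(short(7)) == hash_value(long(7))); // same value,
+///                                                        // same hash
+/// \endcode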
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value);
+
+/// \brief Compute a hash_code for a pointer's address.
+///
+/// N.B.: This hashes the *address*. Not the value and not the type.
+template <typename T> hash_code hash_value(const T *ptr);
+
+/// \brief Compute a hash_code for a pair of objects.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg);
+
+/// \brief Compute a hash_code for a standard string.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg);
+
+
+/// \brief Override the execution seed with a fixed value.
+///
+/// This hashing library uses a per-execution seed designed to change on each
+/// run with high probability in order to ensure that the hash codes are not
+/// attackable and to ensure that output which is intended to be stable does
+/// not rely on the particulars of the hash codes produced.
+///
+/// That said, there are use cases where it is important to be able to
+/// reproduce *exactly* a specific behavior. To that end, we provide a function
+/// which will forcibly set the seed to a fixed value. This must be done at the
+/// start of the program, before any hashes are computed. Also, it cannot be
+/// undone. This makes it thread-hostile and very hard to use anywhere other
+/// than immediately at the start of a simple program designed for
+/// reproducible behavior.
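+///
+/// A typical use for reproducible runs (a sketch):
+/// \code
+///   int main(int argc, char **argv) {
+///     llvm::set_fixed_execution_hash_seed(0); // before any hashing occurs
+///     // ... rest of the program ...
+///   }
+/// \endcode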
+void set_fixed_execution_hash_seed(size_t fixed_value);
+
+
+// All of the implementation details of actually computing the various hash
+// code values are held within this namespace. These routines are included in
+// the header file mainly to allow inlining and constant propagation.
+namespace hashing {
+namespace detail {
+
+inline uint64_t fetch64(const char *p) {
+ uint64_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::IsBigEndianHost)
+ sys::swapByteOrder(result);
+ return result;
+}
+
+inline uint32_t fetch32(const char *p) {
+ uint32_t result;
+ memcpy(&result, p, sizeof(result));
+ if (sys::IsBigEndianHost)
+ sys::swapByteOrder(result);
+ return result;
+}
+
+/// Some primes between 2^63 and 2^64 for various uses.
+static const uint64_t k0 = 0xc3a5c85c97cb3127ULL;
+static const uint64_t k1 = 0xb492b66fbe98f273ULL;
+static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
+static const uint64_t k3 = 0xc949d7c7509e6557ULL;
+
+/// \brief Bitwise right rotate.
+/// Normally this will compile to a single instruction, especially if the
+/// shift is a manifest constant.
+inline uint64_t rotate(uint64_t val, size_t shift) {
+ // Avoid shifting by 64: doing so yields an undefined result.
+ return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
+}
+
+inline uint64_t shift_mix(uint64_t val) {
+ return val ^ (val >> 47);
+}
+
+inline uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
+ // Murmur-inspired hashing.
+ const uint64_t kMul = 0x9ddfea08eb382d69ULL;
+ uint64_t a = (low ^ high) * kMul;
+ a ^= (a >> 47);
+ uint64_t b = (high ^ a) * kMul;
+ b ^= (b >> 47);
+ b *= kMul;
+ return b;
+}
+
+inline uint64_t hash_1to3_bytes(const char *s, size_t len, uint64_t seed) {
+ uint8_t a = s[0];
+ uint8_t b = s[len >> 1];
+ uint8_t c = s[len - 1];
+ uint32_t y = static_cast<uint32_t>(a) + (static_cast<uint32_t>(b) << 8);
+ uint32_t z = len + (static_cast<uint32_t>(c) << 2);
+ return shift_mix(y * k2 ^ z * k3 ^ seed) * k2;
+}
+
+inline uint64_t hash_4to8_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch32(s);
+ return hash_16_bytes(len + (a << 3), seed ^ fetch32(s + len - 4));
+}
+
+inline uint64_t hash_9to16_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s);
+ uint64_t b = fetch64(s + len - 8);
+ return hash_16_bytes(seed ^ a, rotate(b + len, len)) ^ b;
+}
+
+inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t a = fetch64(s) * k1;
+ uint64_t b = fetch64(s + 8);
+ uint64_t c = fetch64(s + len - 8) * k2;
+ uint64_t d = fetch64(s + len - 16) * k0;
+ return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
+ a + rotate(b ^ k3, 20) - c + len + seed);
+}
+
+inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
+ uint64_t z = fetch64(s + 24);
+ uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
+ uint64_t b = rotate(a + z, 52);
+ uint64_t c = rotate(a, 37);
+ a += fetch64(s + 8);
+ c += rotate(a, 7);
+ a += fetch64(s + 16);
+ uint64_t vf = a + z;
+ uint64_t vs = b + rotate(a, 31) + c;
+ a = fetch64(s + 16) + fetch64(s + len - 32);
+ z = fetch64(s + len - 8);
+ b = rotate(a + z, 52);
+ c = rotate(a, 37);
+ a += fetch64(s + len - 24);
+ c += rotate(a, 7);
+ a += fetch64(s + len - 16);
+ uint64_t wf = a + z;
+ uint64_t ws = b + rotate(a, 31) + c;
+ uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
+ return shift_mix((seed ^ (r * k0)) + vs) * k2;
+}
+
+inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
+ if (length >= 4 && length <= 8)
+ return hash_4to8_bytes(s, length, seed);
+ if (length > 8 && length <= 16)
+ return hash_9to16_bytes(s, length, seed);
+ if (length > 16 && length <= 32)
+ return hash_17to32_bytes(s, length, seed);
+ if (length > 32)
+ return hash_33to64_bytes(s, length, seed);
+ if (length != 0)
+ return hash_1to3_bytes(s, length, seed);
+
+ return k2 ^ seed;
+}
+
+/// \brief The intermediate state used during hashing.
+/// Currently, the algorithm for computing hash codes is based on CityHash and
+/// keeps 56 bytes of arbitrary state.
+struct hash_state {
+ uint64_t h0, h1, h2, h3, h4, h5, h6;
+
+ /// \brief Create a new hash_state structure and initialize it based on the
+ /// seed and the first 64-byte chunk.
+ /// This effectively performs the initial mix.
+ static hash_state create(const char *s, uint64_t seed) {
+ hash_state state = {
+ 0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
+ seed * k1, shift_mix(seed), 0 };
+ state.h6 = hash_16_bytes(state.h4, state.h5);
+ state.mix(s);
+ return state;
+ }
+
+ /// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
+ /// and 'b', including whatever is already in 'a' and 'b'.
+ static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
+ a += fetch64(s);
+ uint64_t c = fetch64(s + 24);
+ b = rotate(b + a + c, 21);
+ uint64_t d = a;
+ a += fetch64(s + 8) + fetch64(s + 16);
+ b += rotate(a, 44) + d;
+ a += c;
+ }
+
+ /// \brief Mix in a 64-byte buffer of data.
+ /// We mix all 64 bytes even when the chunk length is smaller, but we
+ /// record the actual length.
+ void mix(const char *s) {
+ h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+ h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+ h0 ^= h6;
+ h1 += h3 + fetch64(s + 40);
+ h2 = rotate(h2 + h5, 33) * k1;
+ h3 = h4 * k1;
+ h4 = h0 + h5;
+ mix_32_bytes(s, h3, h4);
+ h5 = h2 + h6;
+ h6 = h1 + fetch64(s + 16);
+ mix_32_bytes(s + 32, h5, h6);
+ std::swap(h2, h0);
+ }
+
+ /// \brief Compute the final 64-bit hash code value based on the current
+ /// state and the length of bytes hashed.
+ uint64_t finalize(size_t length) {
+ return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
+ hash_16_bytes(h4, h6) + shift_mix(length) * k1 + h0);
+ }
+};
+
+
+/// \brief A global, fixed seed-override variable.
+///
+/// This variable can be set using the \see llvm::set_fixed_execution_hash_seed
+/// function. See that function for details. Do not, under any circumstances,
+/// set or read this variable.
+extern size_t fixed_seed_override;
+
+inline size_t get_execution_seed() {
+ // FIXME: This needs to be a per-execution seed. This is just a placeholder
+ // implementation. Switching to a per-execution seed is likely to flush out
+ // instability bugs and so will happen as its own commit.
+ //
+ // However, if there is a fixed seed override set the first time this is
+ // called, return that instead of the per-execution seed.
+ const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
+ static size_t seed = fixed_seed_override ? fixed_seed_override
+ : (size_t)seed_prime;
+ return seed;
+}
+
+
+/// \brief Trait to indicate whether a type's bits can be hashed directly.
+///
+/// A type trait which is true if we want to combine values for hashing by
+/// reading the underlying data. It is false if values of this type must
+/// first be passed to hash_value, and the resulting hash_codes combined.
+//
+// FIXME: We want to replace is_integral_or_enum and is_pointer here with
+// a predicate which asserts that comparing the underlying storage of two
+// values of the type for equality is equivalent to comparing the two values
+// for equality. For all the platforms we care about, this holds for integers
+// and pointers, but there are platforms where it doesn't and we would like to
+// support user-defined types which happen to satisfy this property.
+template <typename T> struct is_hashable_data
+ : std::integral_constant<bool, ((is_integral_or_enum<T>::value ||
+ std::is_pointer<T>::value) &&
+ 64 % sizeof(T) == 0)> {};
+
+// Special case std::pair to detect when both types are viable and when there
+// is no alignment-derived padding in the pair. This is a bit of a lie because
+// std::pair isn't truly POD, but it's close enough in all reasonable
+// implementations for our use case of hashing the underlying data.
+template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
+ : std::integral_constant<bool, (is_hashable_data<T>::value &&
+ is_hashable_data<U>::value &&
+ (sizeof(T) + sizeof(U)) ==
+ sizeof(std::pair<T, U>))> {};
+
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when the type itself can be used.
+template <typename T>
+typename std::enable_if<is_hashable_data<T>::value, T>::type
+get_hashable_data(const T &value) {
+ return value;
+}
+/// \brief Helper to get the hashable data representation for a type.
+/// This variant is enabled when we must first call hash_value and use the
+/// result as our data.
+template <typename T>
+typename std::enable_if<!is_hashable_data<T>::value, size_t>::type
+get_hashable_data(const T &value) {
+ using ::llvm::hash_value;
+ return hash_value(value);
+}
+
+/// \brief Helper to store data from a value into a buffer and advance the
+/// pointer into that buffer.
+///
+/// This routine first checks whether there is enough space in the provided
+/// buffer, and if not immediately returns false. If there is space, it
+/// copies the underlying bytes of value into the buffer, advances the
+/// buffer_ptr past the copied bytes, and returns true.
+template <typename T>
+bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
+ size_t offset = 0) {
+ size_t store_size = sizeof(value) - offset;
+ if (buffer_ptr + store_size > buffer_end)
+ return false;
+ const char *value_data = reinterpret_cast<const char *>(&value);
+ memcpy(buffer_ptr, value_data + offset, store_size);
+ buffer_ptr += store_size;
+ return true;
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is
+/// integral. Rather than computing a hash_code for each object and then
+/// combining them, this (as an optimization) directly combines the integers.
+template <typename InputIteratorT>
+hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
+ const size_t seed = get_execution_seed();
+ char buffer[64], *buffer_ptr = buffer;
+ char *const buffer_end = std::end(buffer);
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+ if (first == last)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+ assert(buffer_ptr == buffer_end);
+
+  hash_state state = hash_state::create(buffer, seed);
+ size_t length = 64;
+ while (first != last) {
+ // Fill up the buffer. We don't clear it, which re-mixes the last round
+ // when only a partial 64-byte chunk is left.
+ buffer_ptr = buffer;
+ while (first != last && store_and_advance(buffer_ptr, buffer_end,
+ get_hashable_data(*first)))
+ ++first;
+
+ // Rotate the buffer if we did a partial fill in order to simulate doing
+ // a mix of the last 64-bytes. That is how the algorithm works when we
+ // have a contiguous byte sequence, and we want to emulate that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+  }
+
+ return state.finalize(length);
+}
+
+/// \brief Implement the combining of integral values into a hash_code.
+///
+/// This overload is selected when the value type of the iterator is integral
+/// and when the input iterator is actually a pointer. Rather than computing
+/// a hash_code for each object and then combining them, this (as an
+/// optimization) directly combines the integers. Also, because the integers
+/// are stored in contiguous memory, this routine avoids copying each value
+/// and directly reads from the underlying memory.
+template <typename ValueT>
+typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
+hash_combine_range_impl(ValueT *first, ValueT *last) {
+ const size_t seed = get_execution_seed();
+ const char *s_begin = reinterpret_cast<const char *>(first);
+ const char *s_end = reinterpret_cast<const char *>(last);
+ const size_t length = std::distance(s_begin, s_end);
+ if (length <= 64)
+ return hash_short(s_begin, length, seed);
+
+ const char *s_aligned_end = s_begin + (length & ~63);
+  hash_state state = hash_state::create(s_begin, seed);
+ s_begin += 64;
+ while (s_begin != s_aligned_end) {
+ state.mix(s_begin);
+ s_begin += 64;
+ }
+ if (length & 63)
+ state.mix(s_end - 64);
+
+ return state.finalize(length);
+}
+
+} // namespace detail
+} // namespace hashing
+
+
+/// \brief Compute a hash_code for a sequence of values.
+///
+/// This hashes a sequence of values. It produces the same hash_code as
+/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
+/// and is significantly faster given pointers and types which can be hashed as
+/// a sequence of bytes.
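+///
+/// For example (illustrative):
+/// \code
+///   std::vector<int> V = {1, 2, 3};
+///   hash_code H = hash_combine_range(V.begin(), V.end());
+/// \endcode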
+template <typename InputIteratorT>
+hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
+ return ::llvm::hashing::detail::hash_combine_range_impl(first, last);
+}
+
+
+// Implementation details for hash_combine.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper class to manage the recursive combining of hash_combine
+/// arguments.
+///
+/// This class exists to manage the state and various calls involved in the
+/// recursive combining of arguments used in hash_combine. It is particularly
+/// useful at minimizing the code in the recursive calls to ease the pain
+/// caused by a lack of variadic functions.
+struct hash_combine_recursive_helper {
+ char buffer[64];
+ hash_state state;
+ const size_t seed;
+
+public:
+ /// \brief Construct a recursive hash combining helper.
+ ///
+ /// This sets up the state for a recursive hash combine, including getting
+ /// the seed and buffer setup.
+ hash_combine_recursive_helper()
+ : seed(get_execution_seed()) {}
+
+ /// \brief Combine one chunk of data into the current in-flight hash.
+ ///
+ /// This merges one chunk of data into the hash. First it tries to buffer
+ /// the data. If the buffer is full, it hashes the buffer into its
+ /// hash_state, empties it, and then merges the new chunk in. This also
+ /// handles cases where the data straddles the end of the buffer.
+ template <typename T>
+  char *combine_data(size_t &length, char *buffer_ptr, char *buffer_end,
+                     T data) {
+ if (!store_and_advance(buffer_ptr, buffer_end, data)) {
+ // Check for skew which prevents the buffer from being packed, and do
+ // a partial store into the buffer to fill it. This is only a concern
+ // with the variadic combine because that formation can have varying
+ // argument types.
+ size_t partial_store_size = buffer_end - buffer_ptr;
+ memcpy(buffer_ptr, &data, partial_store_size);
+
+ // If the store fails, our buffer is full and ready to hash. We have to
+ // either initialize the hash state (on the first full buffer) or mix
+ // this buffer into the existing hash state. Length tracks the *hashed*
+ // length, not the buffered length.
+ if (length == 0) {
+        state = hash_state::create(buffer, seed);
+ length = 64;
+ } else {
+ // Mix this chunk into the current state and bump length up by 64.
+ state.mix(buffer);
+ length += 64;
+ }
+ // Reset the buffer_ptr to the head of the buffer for the next chunk of
+ // data.
+ buffer_ptr = buffer;
+
+ // Try again to store into the buffer -- this cannot fail as we only
+ // store types smaller than the buffer.
+ if (!store_and_advance(buffer_ptr, buffer_end, data,
+ partial_store_size))
+ abort();
+ }
+ return buffer_ptr;
+ }
+
+ /// \brief Recursive, variadic combining method.
+ ///
+ /// This function recurses through each argument, combining that argument
+ /// into a single hash.
+ template <typename T, typename ...Ts>
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end,
+ const T &arg, const Ts &...args) {
+    buffer_ptr =
+        combine_data(length, buffer_ptr, buffer_end, get_hashable_data(arg));
+
+ // Recurse to the next argument.
+ return combine(length, buffer_ptr, buffer_end, args...);
+ }
+
+ /// \brief Base case for recursive, variadic combining.
+ ///
+ /// The base case when combining arguments recursively is reached when all
+ /// arguments have been handled. It flushes the remaining buffer and
+ /// constructs a hash_code.
+ hash_code combine(size_t length, char *buffer_ptr, char *buffer_end) {
+ // Check whether the entire set of values fit in the buffer. If so, we'll
+ // use the optimized short hashing routine and skip state entirely.
+ if (length == 0)
+ return hash_short(buffer, buffer_ptr - buffer, seed);
+
+ // Mix the final buffer, rotating it if we did a partial fill in order to
+ // simulate doing a mix of the last 64-bytes. That is how the algorithm
+ // works when we have a contiguous byte sequence, and we want to emulate
+ // that here.
+ std::rotate(buffer, buffer_ptr, buffer_end);
+
+ // Mix this chunk into the current state.
+ state.mix(buffer);
+ length += buffer_ptr - buffer;
+
+ return state.finalize(length);
+ }
+};
+
+} // namespace detail
+} // namespace hashing
+
+/// \brief Combine values into a single hash_code.
+///
+/// This routine accepts a varying number of arguments of any type. It will
+/// attempt to combine them into a single hash_code. For user-defined types it
+/// attempts to call a \see hash_value overload (via ADL) for the type. For
+/// integer and pointer types it directly combines their data into the
+/// resulting hash_code.
+///
+/// The result is suitable for returning from a user's hash_value
+/// *implementation* for their user-defined type. Consumers of a type should
+/// *not* call this routine; they should instead call 'hash_value'.
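+///
+/// For example, a hash_value overload for a hypothetical user type:
+/// \code
+///   hash_code hash_value(const MyStruct &S) {
+///     return hash_combine(S.Kind, S.Name, S.Parent);
+///   }
+/// \endcode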
+template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
+ // Recursively hash each argument using a helper class.
+ ::llvm::hashing::detail::hash_combine_recursive_helper helper;
+ return helper.combine(0, helper.buffer, helper.buffer + 64, args...);
+}
+
+// Implementation details for implementations of hash_value overloads provided
+// here.
+namespace hashing {
+namespace detail {
+
+/// \brief Helper to hash the value of a single integer.
+///
+/// Overloads for smaller integer types are not provided to ensure consistent
+/// behavior in the presence of integral promotions. Essentially,
+/// "hash_value('4')" and "hash_value('0' + 4)" should be the same.
+inline hash_code hash_integer_value(uint64_t value) {
+ // Similar to hash_4to8_bytes but using a seed instead of length.
+ const uint64_t seed = get_execution_seed();
+ const char *s = reinterpret_cast<const char *>(&value);
+ const uint64_t a = fetch32(s);
+ return hash_16_bytes(seed + (a << 3), fetch32(s + 4));
+}
+
+} // namespace detail
+} // namespace hashing
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
+hash_value(T value) {
+ return ::llvm::hashing::detail::hash_integer_value(value);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T> hash_code hash_value(const T *ptr) {
+ return ::llvm::hashing::detail::hash_integer_value(
+ reinterpret_cast<uintptr_t>(ptr));
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T, typename U>
+hash_code hash_value(const std::pair<T, U> &arg) {
+ return hash_combine(arg.first, arg.second);
+}
+
+// Declared and documented above, but defined here so that any of the hashing
+// infrastructure is available.
+template <typename T>
+hash_code hash_value(const std::basic_string<T> &arg) {
+ return hash_combine_range(arg.begin(), arg.end());
+}
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableList.h b/contrib/llvm/include/llvm/ADT/ImmutableList.h
new file mode 100644
index 0000000..a1d26bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ImmutableList.h
@@ -0,0 +1,229 @@
+//==--- ImmutableList.h - Immutable (functional) list interface --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableList class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLELIST_H
+#define LLVM_ADT_IMMUTABLELIST_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+template <typename T> class ImmutableListFactory;
+
+template <typename T>
+class ImmutableListImpl : public FoldingSetNode {
+ T Head;
+ const ImmutableListImpl* Tail;
+
+ ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
+ : Head(head), Tail(tail) {}
+
+ friend class ImmutableListFactory<T>;
+
+ void operator=(const ImmutableListImpl&) = delete;
+ ImmutableListImpl(const ImmutableListImpl&) = delete;
+
+public:
+ const T& getHead() const { return Head; }
+ const ImmutableListImpl* getTail() const { return Tail; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const T& H,
+ const ImmutableListImpl* L){
+ ID.AddPointer(L);
+ ID.Add(H);
+ }
+
+ void Profile(FoldingSetNodeID& ID) {
+ Profile(ID, Head, Tail);
+ }
+};
+
+/// ImmutableList - This class represents an immutable (functional) list.
+/// It is implemented as a smart pointer (wraps ImmutableListImpl), so it
+/// is intended to always be copied by value as if it were a pointer.
+/// This interface matches ImmutableSet and ImmutableMap. ImmutableList
+/// objects should almost never be created directly, and instead should
+/// be created by ImmutableListFactory objects that manage the lifetime
+/// of a group of lists. When the factory object is reclaimed, all lists
+/// created by that factory are released as well.
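+///
+/// A brief usage sketch:
+/// \code
+///   ImmutableList<int>::Factory F;
+///   ImmutableList<int> L = F.getEmptyList();
+///   L = F.add(3, L); // yields [3]
+///   L = F.add(2, L); // yields [2, 3]
+/// \endcode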
+template <typename T>
+class ImmutableList {
+public:
+ typedef T value_type;
+ typedef ImmutableListFactory<T> Factory;
+
+private:
+ const ImmutableListImpl<T>* X;
+
+public:
+ // This constructor should normally only be called by ImmutableListFactory<T>.
+ // There may be cases, however, when one needs to extract the internal pointer
+ // and reconstruct a list object from that pointer.
+ ImmutableList(const ImmutableListImpl<T>* x = nullptr) : X(x) {}
+
+ const ImmutableListImpl<T>* getInternalPointer() const {
+ return X;
+ }
+
+ class iterator {
+ const ImmutableListImpl<T>* L;
+ public:
+ iterator() : L(nullptr) {}
+ iterator(ImmutableList l) : L(l.getInternalPointer()) {}
+
+ iterator& operator++() { L = L->getTail(); return *this; }
+ bool operator==(const iterator& I) const { return L == I.L; }
+ bool operator!=(const iterator& I) const { return L != I.L; }
+ const value_type& operator*() const { return L->getHead(); }
+ ImmutableList getList() const { return L; }
+ };
+
+ /// begin - Returns an iterator referring to the head of the list, or
+ /// an iterator denoting the end of the list if the list is empty.
+ iterator begin() const { return iterator(X); }
+
+ /// end - Returns an iterator denoting the end of the list. This iterator
+ /// does not refer to a valid list element.
+ iterator end() const { return iterator(); }
+
+ /// isEmpty - Returns true if the list is empty.
+ bool isEmpty() const { return !X; }
+
+ bool contains(const T& V) const {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (*I == V)
+ return true;
+ }
+ return false;
+ }
+
+ /// isEqual - Returns true if two lists are equal. Because all lists created
+ /// from the same ImmutableListFactory are uniqued, this has O(1) complexity
+  /// because the contents of the list do not need to be compared. Note
+ /// that you should only compare two lists created from the same
+ /// ImmutableListFactory.
+ bool isEqual(const ImmutableList& L) const { return X == L.X; }
+
+ bool operator==(const ImmutableList& L) const { return isEqual(L); }
+
+ /// getHead - Returns the head of the list.
+ const T& getHead() {
+    assert(!isEmpty() && "Cannot get the head of an empty list.");
+ return X->getHead();
+ }
+
+ /// getTail - Returns the tail of the list, which is another (possibly empty)
+ /// ImmutableList.
+ ImmutableList getTail() {
+ return X ? X->getTail() : nullptr;
+ }
+
+ void Profile(FoldingSetNodeID& ID) const {
+ ID.AddPointer(X);
+ }
+};
+
+template <typename T>
+class ImmutableListFactory {
+ typedef ImmutableListImpl<T> ListTy;
+ typedef FoldingSet<ListTy> CacheTy;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+
+ bool ownsAllocator() const {
+    return !(Allocator & 0x1);
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+public:
+ ImmutableListFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImmutableListFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImmutableListFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
+ // Profile the new list to see if it already exists in our cache.
+ FoldingSetNodeID ID;
+ void* InsertPos;
+
+ const ListTy* TailImpl = Tail.getInternalPointer();
+ ListTy::Profile(ID, Head, TailImpl);
+ ListTy* L = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!L) {
+ // The list does not exist in our cache. Create it.
+ BumpPtrAllocator& A = getAllocator();
+ L = (ListTy*) A.Allocate<ListTy>();
+ new (L) ListTy(Head, TailImpl);
+
+ // Insert the new list into the cache.
+ Cache.InsertNode(L, InsertPos);
+ }
+
+ return L;
+ }
+
+ ImmutableList<T> add(const T& D, ImmutableList<T> L) {
+ return concat(D, L);
+ }
+
+ ImmutableList<T> getEmptyList() const {
+ return ImmutableList<T>(nullptr);
+ }
+
+ ImmutableList<T> create(const T& X) {
+    return concat(X, getEmptyList());
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Partially-specialized Traits.
+//===----------------------------------------------------------------------===//
+
+template<typename T> struct DenseMapInfo;
+template<typename T> struct DenseMapInfo<ImmutableList<T> > {
+ static inline ImmutableList<T> getEmptyKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-1);
+ }
+ static inline ImmutableList<T> getTombstoneKey() {
+ return reinterpret_cast<ImmutableListImpl<T>*>(-2);
+ }
+ static unsigned getHashValue(ImmutableList<T> X) {
+ uintptr_t PtrVal = reinterpret_cast<uintptr_t>(X.getInternalPointer());
+    return (unsigned(PtrVal) >> 4) ^
+           (unsigned(PtrVal) >> 9);
+ }
+ static bool isEqual(ImmutableList<T> X1, ImmutableList<T> X2) {
+ return X1 == X2;
+ }
+};
+
+template <typename T> struct isPodLike;
+template <typename T>
+struct isPodLike<ImmutableList<T> > { static const bool value = true; };
+
+} // end llvm namespace
+
+#endif // LLVM_ADT_IMMUTABLELIST_H
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableMap.h b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
new file mode 100644
index 0000000..7480cd7
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ImmutableMap.h
@@ -0,0 +1,408 @@
+//===--- ImmutableMap.h - Immutable (functional) map interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImmutableMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLEMAP_H
+#define LLVM_ADT_IMMUTABLEMAP_H
+
+#include "llvm/ADT/ImmutableSet.h"
+
+namespace llvm {
+
+/// ImutKeyValueInfo - Traits class used by ImmutableMap. While both the first
+/// and second elements in a pair are used to generate profile information,
+/// only the first element (the key) is used by isEqual and isLess.
+template <typename T, typename S>
+struct ImutKeyValueInfo {
+ typedef const std::pair<T,S> value_type;
+ typedef const value_type& value_type_ref;
+ typedef const T key_type;
+ typedef const T& key_type_ref;
+ typedef const S data_type;
+ typedef const S& data_type_ref;
+
+ static inline key_type_ref KeyOfValue(value_type_ref V) {
+ return V.first;
+ }
+
+ static inline data_type_ref DataOfValue(value_type_ref V) {
+ return V.second;
+ }
+
+ static inline bool isEqual(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isEqual(L,R);
+ }
+ static inline bool isLess(key_type_ref L, key_type_ref R) {
+ return ImutContainerInfo<T>::isLess(L,R);
+ }
+
+ static inline bool isDataEqual(data_type_ref L, data_type_ref R) {
+ return ImutContainerInfo<S>::isEqual(L,R);
+ }
+
+ static inline void Profile(FoldingSetNodeID& ID, value_type_ref V) {
+ ImutContainerInfo<T>::Profile(ID, V.first);
+ ImutContainerInfo<S>::Profile(ID, V.second);
+ }
+};
+
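+/// ImmutableMap - An immutable (functional) map implemented on top of
+/// ImutAVLTree. Like ImmutableList, maps should be created via a Factory
+/// object, which also manages the lifetime of the underlying trees. A brief
+/// usage sketch:
+/// \code
+///   ImmutableMap<int, int>::Factory F;
+///   ImmutableMap<int, int> M = F.getEmptyMap();
+///   M = F.add(M, 1, 10);
+///   if (const int *D = M.lookup(1)) { /* *D == 10 */ }
+/// \endcode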
+template <typename KeyT, typename ValT,
+ typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
+class ImmutableMap {
+public:
+ typedef typename ValInfo::value_type value_type;
+ typedef typename ValInfo::value_type_ref value_type_ref;
+ typedef typename ValInfo::key_type key_type;
+ typedef typename ValInfo::key_type_ref key_type_ref;
+ typedef typename ValInfo::data_type data_type;
+ typedef typename ValInfo::data_type_ref data_type_ref;
+ typedef ImutAVLTree<ValInfo> TreeTy;
+
+protected:
+ TreeTy* Root;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
+ if (Root) { Root->retain(); }
+ }
+
+ ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+
+ ImmutableMap &operator=(const ImmutableMap &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+
+ ~ImmutableMap() {
+ if (Root) { Root->release(); }
+ }
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true) : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator &Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
+
+ ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
+ TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root,K);
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
+ }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+
+ private:
+ Factory(const Factory& RHS) = delete;
+ void operator=(const Factory& RHS) = delete;
+ };
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ bool operator==(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMap &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() const {
+ if (Root) { Root->retain(); }
+ return Root;
+ }
+
+ TreeTy *getRootWithoutRetain() const { return Root; }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ bool isEmpty() const { return !Root; }
+
+ //===--------------------------------------------------===//
+ // Foreach - A limited form of map iteration.
+ //===--------------------------------------------------===//
+
+private:
+ template <typename Callback>
+ struct CBWrapper {
+ Callback C;
+ void operator()(value_type_ref V) { C(V.first,V.second); }
+ };
+
+ template <typename Callback>
+ struct CBWrapperRef {
+ Callback &C;
+ CBWrapperRef(Callback& c) : C(c) {}
+
+ void operator()(value_type_ref V) { C(V.first,V.second); }
+ };
+
+public:
+ template <typename Callback>
+ void foreach(Callback& C) {
+ if (Root) {
+ CBWrapperRef<Callback> CB(C);
+ Root->foreach(CB);
+ }
+ }
+
+ template <typename Callback>
+ void foreach() {
+ if (Root) {
+ CBWrapper<Callback> CB;
+ Root->foreach(CB);
+ }
+ }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const { if (Root) Root->verify(); }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMap> {
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+ friend class ImmutableMap;
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root); }
+ iterator end() const { return iterator(); }
+
+ data_type* lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMap for
+ /// which key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID& ID, const ImmutableMap& M) {
+ ID.AddPointer(M.Root);
+ }
+
+ inline void Profile(FoldingSetNodeID& ID) const {
+ return Profile(ID,*this);
+ }
+};
+
+// NOTE: This will possibly become the new implementation of ImmutableMap some day.
+template <typename KeyT, typename ValT,
+          typename ValInfo = ImutKeyValueInfo<KeyT,ValT> >
+class ImmutableMapRef {
+public:
+ typedef typename ValInfo::value_type value_type;
+ typedef typename ValInfo::value_type_ref value_type_ref;
+ typedef typename ValInfo::key_type key_type;
+ typedef typename ValInfo::key_type_ref key_type_ref;
+ typedef typename ValInfo::data_type data_type;
+ typedef typename ValInfo::data_type_ref data_type_ref;
+ typedef ImutAVLTree<ValInfo> TreeTy;
+ typedef typename TreeTy::Factory FactoryTy;
+
+protected:
+ TreeTy *Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a map from a pointer to a tree root. In general one
+ /// should use a Factory object to create maps instead of directly
+  /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableMapRef(const TreeTy *R, FactoryTy *F)
+ : Root(const_cast<TreeTy *>(R)), Factory(F) {
+ if (Root) {
+ Root->retain();
+ }
+ }
+
+ explicit ImmutableMapRef(const ImmutableMap<KeyT, ValT> &X,
+ typename ImmutableMap<KeyT, ValT>::Factory &F)
+ : Root(X.getRootWithoutRetain()),
+ Factory(F.getTreeFactory()) {
+ if (Root) { Root->retain(); }
+ }
+
+ ImmutableMapRef(const ImmutableMapRef &X) : Root(X.Root), Factory(X.Factory) {
+ if (Root) {
+ Root->retain();
+ }
+ }
+
+ ImmutableMapRef &operator=(const ImmutableMapRef &X) {
+ if (Root != X.Root) {
+ if (X.Root)
+ X.Root->retain();
+
+ if (Root)
+ Root->release();
+
+ Root = X.Root;
+ Factory = X.Factory;
+ }
+ return *this;
+ }
+
+ ~ImmutableMapRef() {
+ if (Root)
+ Root->release();
+ }
+
+ static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
+ return ImmutableMapRef(nullptr, F);
+ }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
+
+ ImmutableMapRef add(key_type_ref K, data_type_ref D) const {
+ TreeTy *NewT = Factory->add(Root, std::pair<key_type, data_type>(K, D));
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ ImmutableMapRef remove(key_type_ref K) const {
+ TreeTy *NewT = Factory->remove(Root, K);
+ return ImmutableMapRef(NewT, Factory);
+ }
+
+ bool contains(key_type_ref K) const {
+ return Root ? Root->contains(K) : false;
+ }
+
+ ImmutableMap<KeyT, ValT> asImmutableMap() const {
+ return ImmutableMap<KeyT, ValT>(Factory->getCanonicalTree(Root));
+ }
+
+ bool operator==(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableMapRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ }
+
+ bool isEmpty() const { return !Root; }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void verify() const {
+ if (Root)
+ Root->verify();
+ }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ class iterator : public ImutAVLValueIterator<ImmutableMapRef> {
+ iterator() = default;
+ explicit iterator(TreeTy *Tree) : iterator::ImutAVLValueIterator(Tree) {}
+ friend class ImmutableMapRef;
+
+ public:
+ key_type_ref getKey() const { return (*this)->first; }
+ data_type_ref getData() const { return (*this)->second; }
+ };
+
+ iterator begin() const { return iterator(Root); }
+ iterator end() const { return iterator(); }
+
+ data_type *lookup(key_type_ref K) const {
+ if (Root) {
+ TreeTy* T = Root->find(K);
+ if (T) return &T->getValue().second;
+ }
+
+ return nullptr;
+ }
+
+ /// getMaxElement - Returns the <key,value> pair in the ImmutableMapRef whose
+ /// key is the highest in the ordering of keys in the map. This
+ /// method returns NULL if the map is empty.
+ value_type* getMaxElement() const {
+ return Root ? &(Root->getMaxElement()->getValue()) : nullptr;
+ }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static inline void Profile(FoldingSetNodeID &ID, const ImmutableMapRef &M) {
+ ID.AddPointer(M.Root);
+ }
+
+ inline void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+};
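+
+// Example (editorial sketch): because ImmutableMapRef carries its factory,
+// updates read as value operations. The key/value types are illustrative.
+//
+//   ImmutableMapRef<int, int>::FactoryTy F;
+//   auto M = ImmutableMapRef<int, int>::getEmptyMap(&F);
+//   auto M2 = M.add(1, 10).add(2, 20); // M itself is unchanged
+//   const int *V = M2.lookup(2);       // *V == 20
+//   ImmutableMap<int, int> Canon = M2.asImmutableMap();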
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_IMMUTABLEMAP_H
diff --git a/contrib/llvm/include/llvm/ADT/ImmutableSet.h b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
new file mode 100644
index 0000000..87026f0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ImmutableSet.h
@@ -0,0 +1,1218 @@
+//===--- ImmutableSet.h - Immutable (functional) set interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ImutAVLTree and ImmutableSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_IMMUTABLESET_H
+#define LLVM_ADT_IMMUTABLESET_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <functional>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Definition.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo> class ImutAVLFactory;
+template <typename ImutInfo> class ImutIntervalAVLFactory;
+template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
+template <typename ImutInfo> class ImutAVLTreeGenericIterator;
+
+template <typename ImutInfo >
+class ImutAVLTree {
+public:
+ typedef typename ImutInfo::key_type_ref key_type_ref;
+ typedef typename ImutInfo::value_type value_type;
+ typedef typename ImutInfo::value_type_ref value_type_ref;
+
+ typedef ImutAVLFactory<ImutInfo> Factory;
+ friend class ImutAVLFactory<ImutInfo>;
+ friend class ImutIntervalAVLFactory<ImutInfo>;
+
+ friend class ImutAVLTreeGenericIterator<ImutInfo>;
+
+ typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator;
+
+ //===----------------------------------------------------===//
+ // Public Interface.
+ //===----------------------------------------------------===//
+
+ /// Return a pointer to the left subtree. This value
+ /// is NULL if there is no left subtree.
+ ImutAVLTree *getLeft() const { return left; }
+
+ /// Return a pointer to the right subtree. This value is
+ /// NULL if there is no right subtree.
+ ImutAVLTree *getRight() const { return right; }
+
+ /// getHeight - Returns the height of the tree. A tree with no subtrees
+ /// has a height of 1.
+ unsigned getHeight() const { return height; }
+
+ /// getValue - Returns the data value associated with the tree node.
+ const value_type& getValue() const { return value; }
+
+ /// find - Finds the subtree associated with the specified key value.
+ /// This method returns NULL if no matching subtree is found.
+ ImutAVLTree* find(key_type_ref K) {
+ ImutAVLTree *T = this;
+ while (T) {
+ key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
+ if (ImutInfo::isEqual(K,CurrentKey))
+ return T;
+ else if (ImutInfo::isLess(K,CurrentKey))
+ T = T->getLeft();
+ else
+ T = T->getRight();
+ }
+ return nullptr;
+ }
+
+ /// getMaxElement - Find the subtree associated with the highest ranked
+ /// key value.
+ ImutAVLTree* getMaxElement() {
+ ImutAVLTree *T = this;
+ ImutAVLTree *Right = T->getRight();
+ while (Right) { T = Right; Right = T->getRight(); }
+ return T;
+ }
+
+ /// size - Returns the number of nodes in the tree, which includes
+ /// both leaves and non-leaf nodes.
+ unsigned size() const {
+ unsigned n = 1;
+ if (const ImutAVLTree* L = getLeft())
+ n += L->size();
+ if (const ImutAVLTree* R = getRight())
+ n += R->size();
+ return n;
+ }
+
+ /// begin - Returns an iterator that iterates over the nodes of the tree
+ /// in an inorder traversal. The returned iterator thus refers to the
+ /// tree node with the minimum data element.
+ iterator begin() const { return iterator(this); }
+
+ /// end - Returns an iterator for the tree that denotes the end of an
+ /// inorder traversal.
+ iterator end() const { return iterator(); }
+
+ bool isElementEqual(value_type_ref V) const {
+ // Compare the keys.
+ if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(V)))
+ return false;
+
+ // Also compare the data values.
+ if (!ImutInfo::isDataEqual(ImutInfo::DataOfValue(getValue()),
+ ImutInfo::DataOfValue(V)))
+ return false;
+
+ return true;
+ }
+
+ bool isElementEqual(const ImutAVLTree* RHS) const {
+ return isElementEqual(RHS->getValue());
+ }
+
+ /// isEqual - Compares two trees for structural equality and returns true
+ /// if they are equal. The worst-case performance of this operation is
+ /// linear in the sizes of the trees.
+ bool isEqual(const ImutAVLTree& RHS) const {
+ if (&RHS == this)
+ return true;
+
+ iterator LItr = begin(), LEnd = end();
+ iterator RItr = RHS.begin(), REnd = RHS.end();
+
+ while (LItr != LEnd && RItr != REnd) {
+ if (&*LItr == &*RItr) {
+ LItr.skipSubTree();
+ RItr.skipSubTree();
+ continue;
+ }
+
+ if (!LItr->isElementEqual(&*RItr))
+ return false;
+
+ ++LItr;
+ ++RItr;
+ }
+
+ return LItr == LEnd && RItr == REnd;
+ }
+
+ /// isNotEqual - Compares two trees for structural inequality. Performance
+ /// is the same as isEqual.
+ bool isNotEqual(const ImutAVLTree& RHS) const { return !isEqual(RHS); }
+
+ /// contains - Returns true if this tree contains a subtree (node) that
+ /// has a data element that matches the specified key. Complexity
+ /// is logarithmic in the size of the tree.
+ bool contains(key_type_ref K) { return (bool) find(K); }
+
+ /// foreach - A member template that invokes operator() on a functor
+ /// object (specified by Callback) for every node/subtree in the tree.
+ /// Nodes are visited using an inorder traversal.
+ template <typename Callback>
+ void foreach(Callback& C) {
+ if (ImutAVLTree* L = getLeft())
+ L->foreach(C);
+
+ C(value);
+
+ if (ImutAVLTree* R = getRight())
+ R->foreach(C);
+ }
+
+ /// validateTree - A utility method that checks that the balancing and
+ /// ordering invariants of the tree are satisfied. It is a recursive
+ /// method that returns the height of the tree, which is then consumed
+ /// by the enclosing validateTree call. External callers should ignore the
+ /// return value. An invalid tree will cause an assertion to fire in
+ /// a debug build.
+ unsigned validateTree() const {
+ unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+ unsigned HR = getRight() ? getRight()->validateTree() : 0;
+ (void) HL;
+ (void) HR;
+
+ assert(getHeight() == ( HL > HR ? HL : HR ) + 1
+ && "Height calculation wrong");
+
+ assert((HL > HR ? HL-HR : HR-HL) <= 2
+ && "Balancing invariant violated");
+
+ assert((!getLeft() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+ ImutInfo::KeyOfValue(getValue()))) &&
+ "Value in left child is not less than current value");
+
+ assert((!getRight() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+ "Current value is not less than value of right child");
+
+ return getHeight();
+ }
+
+ //===----------------------------------------------------===//
+ // Internal values.
+ //===----------------------------------------------------===//
+
+private:
+ Factory *factory;
+ ImutAVLTree *left;
+ ImutAVLTree *right;
+ ImutAVLTree *prev;
+ ImutAVLTree *next;
+
+ unsigned height : 28;
+ unsigned IsMutable : 1;
+ unsigned IsDigestCached : 1;
+ unsigned IsCanonicalized : 1;
+
+ value_type value;
+ uint32_t digest;
+ uint32_t refCount;
+
+ //===----------------------------------------------------===//
+ // Internal methods (node manipulation; used by Factory).
+ //===----------------------------------------------------===//
+
+private:
+ /// ImutAVLTree - Internal constructor that is only called by
+ /// ImutAVLFactory.
+ ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+ unsigned height)
+ : factory(f), left(l), right(r), prev(nullptr), next(nullptr),
+ height(height), IsMutable(true), IsDigestCached(false),
+ IsCanonicalized(false), value(v), digest(0), refCount(0)
+ {
+ if (left) left->retain();
+ if (right) right->retain();
+ }
+
+ /// isMutable - Returns true if the left and right subtree references
+ /// (as well as height) can be changed. If this method returns false,
+ /// the tree is truly immutable. Trees returned from an ImutAVLFactory
+ /// object should always have this method return true. Further, if this
+ /// method returns false for an instance of ImutAVLTree, all subtrees
+ /// will also have this method return false. The converse is not true.
+ bool isMutable() const { return IsMutable; }
+
+ /// hasCachedDigest - Returns true if the digest for this tree is cached.
+ /// This can only be true if the tree is immutable.
+ bool hasCachedDigest() const { return IsDigestCached; }
+
+ //===----------------------------------------------------===//
+ // Mutating operations. A tree root can be manipulated as
+ // long as its reference has not "escaped" from internal
+ // methods of a factory object (see below). When a tree
+ // pointer is externally viewable by client code, the
+ // internal "mutable bit" is cleared to mark the tree
+ // immutable. Note that a tree that still has its mutable
+ // bit set may have children (subtrees) that are themselves
+ // immutable.
+ //===----------------------------------------------------===//
+
+ /// markImmutable - Clears the mutable flag for a tree. After this happens,
+ /// it is an error to call setLeft(), setRight(), and setHeight().
+ void markImmutable() {
+ assert(isMutable() && "Mutable flag already removed.");
+ IsMutable = false;
+ }
+
+ /// markedCachedDigest - Marks the digest for this tree as cached.
+ void markedCachedDigest() {
+ assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
+ IsDigestCached = true;
+ }
+
+ /// setHeight - Changes the height of the tree. Used internally by
+ /// ImutAVLFactory.
+ void setHeight(unsigned h) {
+ assert(isMutable() && "Only a mutable tree can have its height changed.");
+ height = h;
+ }
+
+ static uint32_t computeDigest(ImutAVLTree *L, ImutAVLTree *R,
+ value_type_ref V) {
+ uint32_t digest = 0;
+
+ if (L)
+ digest += L->computeDigest();
+
+ // Compute digest of stored data.
+ FoldingSetNodeID ID;
+ ImutInfo::Profile(ID,V);
+ digest += ID.ComputeHash();
+
+ if (R)
+ digest += R->computeDigest();
+
+ return digest;
+ }
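+
+ // Editorial note: the digest is a sum of element hashes over the whole
+ // tree, so trees with equal contents produce equal digests regardless of
+ // shape; the factory's canonicalization relies on this.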
+
+ uint32_t computeDigest() {
+ // Check whether the digest has already been computed and cached.
+ if (hasCachedDigest())
+ return digest;
+
+ uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+ digest = X;
+ markedCachedDigest();
+ return X;
+ }
+
+ //===----------------------------------------------------===//
+ // Reference count operations.
+ //===----------------------------------------------------===//
+
+public:
+ void retain() { ++refCount; }
+ void release() {
+ assert(refCount > 0);
+ if (--refCount == 0)
+ destroy();
+ }
+ void destroy() {
+ if (left)
+ left->release();
+ if (right)
+ right->release();
+ if (IsCanonicalized) {
+ if (next)
+ next->prev = prev;
+
+ if (prev)
+ prev->next = next;
+ else
+ factory->Cache[factory->maskCacheIndex(computeDigest())] = next;
+ }
+
+ // We need to clear the mutability bit in case we are
+ // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+ IsMutable = false;
+ factory->freeNodes.push_back(this);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Factory class.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo >
+class ImutAVLFactory {
+ friend class ImutAVLTree<ImutInfo>;
+ typedef ImutAVLTree<ImutInfo> TreeTy;
+ typedef typename TreeTy::value_type_ref value_type_ref;
+ typedef typename TreeTy::key_type_ref key_type_ref;
+
+ typedef DenseMap<unsigned, TreeTy*> CacheTy;
+
+ CacheTy Cache;
+ uintptr_t Allocator;
+ std::vector<TreeTy*> createdNodes;
+ std::vector<TreeTy*> freeNodes;
+
+ bool ownsAllocator() const {
+ return !(Allocator & 0x1);
+ }
+
+ BumpPtrAllocator& getAllocator() const {
+ return *reinterpret_cast<BumpPtrAllocator*>(Allocator & ~0x1);
+ }
+
+ //===--------------------------------------------------===//
+ // Public interface.
+ //===--------------------------------------------------===//
+
+public:
+ ImutAVLFactory()
+ : Allocator(reinterpret_cast<uintptr_t>(new BumpPtrAllocator())) {}
+
+ ImutAVLFactory(BumpPtrAllocator& Alloc)
+ : Allocator(reinterpret_cast<uintptr_t>(&Alloc) | 0x1) {}
+
+ ~ImutAVLFactory() {
+ if (ownsAllocator()) delete &getAllocator();
+ }
+
+ TreeTy* add(TreeTy* T, value_type_ref V) {
+ T = add_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* remove(TreeTy* T, key_type_ref V) {
+ T = remove_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
+ return T;
+ }
+
+ TreeTy* getEmptyTree() const { return nullptr; }
+
+protected:
+
+ //===--------------------------------------------------===//
+ // A bunch of quick helper functions used for reasoning
+ // about the properties of trees and their children.
+ // These have succinct names so that the balancing code
+ // is as terse (and readable) as possible.
+ //===--------------------------------------------------===//
+
+ bool isEmpty(TreeTy* T) const { return !T; }
+ unsigned getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+ TreeTy* getLeft(TreeTy* T) const { return T->getLeft(); }
+ TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
+ value_type_ref getValue(TreeTy* T) const { return T->value; }
+
+ // Make sure the index is not the Tombstone or Empty key of the DenseMap.
+ static unsigned maskCacheIndex(unsigned I) { return (I & ~0x02); }
+
+ unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+ return (hl > hr ? hl : hr) + 1;
+ }
+
+ static bool compareTreeWithSection(TreeTy* T,
+ typename TreeTy::iterator& TI,
+ typename TreeTy::iterator& TE) {
+ typename TreeTy::iterator I = T->begin(), E = T->end();
+ for ( ; I!=E ; ++I, ++TI) {
+ if (TI == TE || !I->isElementEqual(&*TI))
+ return false;
+ }
+ return true;
+ }
+
+ //===--------------------------------------------------===//
+ // "createNode" is used to generate new tree roots that link
+ // to other trees. The function may also simply move links
+ // in an existing root if that root is still marked mutable.
+ // This is necessary because otherwise our balancing code
+ // would leak memory as it would create nodes that are
+ // then discarded later before the finished tree is
+ // returned to the caller.
+ //===--------------------------------------------------===//
+
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ BumpPtrAllocator& A = getAllocator();
+ TreeTy* T;
+ if (!freeNodes.empty()) {
+ T = freeNodes.back();
+ freeNodes.pop_back();
+ assert(T != L);
+ assert(T != R);
+ } else {
+ T = (TreeTy*) A.Allocate<TreeTy>();
+ }
+ new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+ createdNodes.push_back(T);
+ return T;
+ }
+
+ TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+ return createNode(newLeft, getValue(oldTree), newRight);
+ }
+
+ void recoverNodes() {
+ for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+ TreeTy *N = createdNodes[i];
+ if (N->isMutable() && N->refCount == 0)
+ N->destroy();
+ }
+ createdNodes.clear();
+ }
+
+ /// balanceTree - Used by add_internal and remove_internal to
+ /// balance a newly created tree.
+ TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
+
+ if (hl > hr + 2) {
+ assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
+
+ TreeTy *LL = getLeft(L);
+ TreeTy *LR = getRight(L);
+
+ if (getHeight(LL) >= getHeight(LR))
+ return createNode(LL, L, createNode(LR,V,R));
+
+ assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
+
+ TreeTy *LRL = getLeft(LR);
+ TreeTy *LRR = getRight(LR);
+
+ return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
+ }
+
+ if (hr > hl + 2) {
+ assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
+
+ TreeTy *RL = getLeft(R);
+ TreeTy *RR = getRight(R);
+
+ if (getHeight(RR) >= getHeight(RL))
+ return createNode(createNode(L,V,RL), R, RR);
+
+ assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
+
+ TreeTy *RLL = getLeft(RL);
+ TreeTy *RLR = getRight(RL);
+
+ return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
+ }
+
+ return createNode(L,V,R);
+ }
+
+ /// add_internal - Creates a new tree that includes the specified
+ /// data and the data from the original tree. If the original tree
+ /// already contained the data item, the original tree is returned.
+ TreeTy* add_internal(value_type_ref V, TreeTy* T) {
+ if (isEmpty(T))
+ return createNode(T, V, T);
+ assert(!T->isMutable());
+
+ key_type_ref K = ImutInfo::KeyOfValue(V);
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent))
+ return createNode(getLeft(T), V, getRight(T));
+ else if (ImutInfo::isLess(K,KCurrent))
+ return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
+ else
+ return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
+ }
+
+ /// remove_internal - Creates a new tree that includes all the data
+ /// from the original tree except the specified data. If the
+ /// specified data did not exist in the original tree, the original
+ /// tree is returned.
+ TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
+ if (isEmpty(T))
+ return T;
+
+ assert(!T->isMutable());
+
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
+
+ if (ImutInfo::isEqual(K,KCurrent)) {
+ return combineTrees(getLeft(T), getRight(T));
+ } else if (ImutInfo::isLess(K,KCurrent)) {
+ return balanceTree(remove_internal(K, getLeft(T)),
+ getValue(T), getRight(T));
+ } else {
+ return balanceTree(getLeft(T), getValue(T),
+ remove_internal(K, getRight(T)));
+ }
+ }
+
+ TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+ if (isEmpty(L))
+ return R;
+ if (isEmpty(R))
+ return L;
+ TreeTy* OldNode;
+ TreeTy* newRight = removeMinBinding(R,OldNode);
+ return balanceTree(L, getValue(OldNode), newRight);
+ }
+
+ TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
+ assert(!isEmpty(T));
+ if (isEmpty(getLeft(T))) {
+ Noderemoved = T;
+ return getRight(T);
+ }
+ return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+ getValue(T), getRight(T));
+ }
+
+ /// markImmutable - Clears the mutable bits of a root and all of its
+ /// descendants.
+ void markImmutable(TreeTy* T) {
+ if (!T || !T->isMutable())
+ return;
+ T->markImmutable();
+ markImmutable(getLeft(T));
+ markImmutable(getRight(T));
+ }
+
+public:
+ TreeTy *getCanonicalTree(TreeTy *TNew) {
+ if (!TNew)
+ return nullptr;
+
+ if (TNew->IsCanonicalized)
+ return TNew;
+
+ // Search the hashtable for another tree with the same digest, and
+ // if we find a collision, compare those trees by their contents.
+ unsigned digest = TNew->computeDigest();
+ TreeTy *&entry = Cache[maskCacheIndex(digest)];
+ do {
+ if (!entry)
+ break;
+ for (TreeTy *T = entry ; T != nullptr; T = T->next) {
+ // Compare the Contents('T') with Contents('TNew')
+ typename TreeTy::iterator TI = T->begin(), TE = T->end();
+ if (!compareTreeWithSection(TNew, TI, TE))
+ continue;
+ if (TI != TE)
+ continue; // T has more contents than TNew.
+ // Trees did match! Return 'T'.
+ if (TNew->refCount == 0)
+ TNew->destroy();
+ return T;
+ }
+ entry->prev = TNew;
+ TNew->next = entry;
+ } while (false);
+
+ entry = TNew;
+ TNew->IsCanonicalized = true;
+ return TNew;
+ }
+};
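+
+// Example (editorial sketch): getCanonicalTree() makes structural equality
+// checkable by node identity. ImutContainerInfo is defined further below in
+// this header; the element type and values are illustrative.
+//
+//   ImutAVLFactory<ImutContainerInfo<int>> F;
+//   auto *A = F.getCanonicalTree(F.add(F.getEmptyTree(), 42));
+//   auto *B = F.getCanonicalTree(F.add(F.getEmptyTree(), 42));
+//   assert(A == B && "equal contents canonicalize to the same node");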
+
+//===----------------------------------------------------------------------===//
+// Immutable AVL-Tree Iterators.
+//===----------------------------------------------------------------------===//
+
+template <typename ImutInfo>
+class ImutAVLTreeGenericIterator
+ : public std::iterator<std::bidirectional_iterator_tag,
+ ImutAVLTree<ImutInfo>> {
+ SmallVector<uintptr_t,20> stack;
+public:
+ enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
+ Flags=0x3 };
+
+ typedef ImutAVLTree<ImutInfo> TreeTy;
+
+ ImutAVLTreeGenericIterator() {}
+ ImutAVLTreeGenericIterator(const TreeTy *Root) {
+ if (Root) stack.push_back(reinterpret_cast<uintptr_t>(Root));
+ }
+
+ TreeTy &operator*() const {
+ assert(!stack.empty());
+ return *reinterpret_cast<TreeTy *>(stack.back() & ~Flags);
+ }
+ TreeTy *operator->() const { return &**this; }
+
+ uintptr_t getVisitState() const {
+ assert(!stack.empty());
+ return stack.back() & Flags;
+ }
+
+ bool atEnd() const { return stack.empty(); }
+
+ bool atBeginning() const {
+ return stack.size() == 1 && getVisitState() == VisitedNone;
+ }
+
+ void skipToParent() {
+ assert(!stack.empty());
+ stack.pop_back();
+ if (stack.empty())
+ return;
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ stack.back() |= VisitedRight;
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ }
+
+ bool operator==(const ImutAVLTreeGenericIterator &x) const {
+ return stack == x.stack;
+ }
+
+ bool operator!=(const ImutAVLTreeGenericIterator &x) const {
+ return !(*this == x);
+ }
+
+ ImutAVLTreeGenericIterator &operator++() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L));
+ else
+ stack.back() |= VisitedLeft;
+ break;
+ case VisitedLeft:
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R));
+ else
+ stack.back() |= VisitedRight;
+ break;
+ case VisitedRight:
+ skipToParent();
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+
+ ImutAVLTreeGenericIterator &operator--() {
+ assert(!stack.empty());
+ TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
+ assert(Current);
+ switch (getVisitState()) {
+ case VisitedNone:
+ stack.pop_back();
+ break;
+ case VisitedLeft:
+ stack.back() &= ~Flags; // Set state to "VisitedNone."
+ if (TreeTy* L = Current->getLeft())
+ stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
+ break;
+ case VisitedRight:
+ stack.back() &= ~Flags;
+ stack.back() |= VisitedLeft;
+ if (TreeTy* R = Current->getRight())
+ stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
+ break;
+ default:
+ llvm_unreachable("Unreachable.");
+ }
+ return *this;
+ }
+};
+
+template <typename ImutInfo>
+class ImutAVLTreeInOrderIterator
+ : public std::iterator<std::bidirectional_iterator_tag,
+ ImutAVLTree<ImutInfo>> {
+ typedef ImutAVLTreeGenericIterator<ImutInfo> InternalIteratorTy;
+ InternalIteratorTy InternalItr;
+
+public:
+ typedef ImutAVLTree<ImutInfo> TreeTy;
+
+ ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
+ if (Root)
+ ++*this; // Advance to first element.
+ }
+
+ ImutAVLTreeInOrderIterator() : InternalItr() {}
+
+ bool operator==(const ImutAVLTreeInOrderIterator &x) const {
+ return InternalItr == x.InternalItr;
+ }
+
+ bool operator!=(const ImutAVLTreeInOrderIterator &x) const {
+ return !(*this == x);
+ }
+
+ TreeTy &operator*() const { return *InternalItr; }
+ TreeTy *operator->() const { return &*InternalItr; }
+
+ ImutAVLTreeInOrderIterator &operator++() {
+ do ++InternalItr;
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ ImutAVLTreeInOrderIterator &operator--() {
+ do --InternalItr;
+ while (!InternalItr.atBeginning() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
+
+ return *this;
+ }
+
+ void skipSubTree() {
+ InternalItr.skipToParent();
+
+ while (!InternalItr.atEnd() &&
+ InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
+ ++InternalItr;
+ }
+};
+
+/// Generic iterator that wraps a T::TreeTy::iterator and exposes
+/// iterator::getValue() on dereference.
+template <typename T>
+struct ImutAVLValueIterator
+ : iterator_adaptor_base<
+ ImutAVLValueIterator<T>, typename T::TreeTy::iterator,
+ typename std::iterator_traits<
+ typename T::TreeTy::iterator>::iterator_category,
+ const typename T::value_type> {
+ ImutAVLValueIterator() = default;
+ explicit ImutAVLValueIterator(typename T::TreeTy *Tree)
+ : ImutAVLValueIterator::iterator_adaptor_base(Tree) {}
+
+ typename ImutAVLValueIterator::reference operator*() const {
+ return this->I->getValue();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes for Profile information.
+//===----------------------------------------------------------------------===//
+
+/// Generic profile template. The default behavior is to invoke the
+/// profile method of an object. Specializations for primitive integers
+/// and generic handling of pointers is done below.
+template <typename T>
+struct ImutProfileInfo {
+ typedef const T value_type;
+ typedef const T& value_type_ref;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ FoldingSetTrait<T>::Profile(X,ID);
+ }
+};
+
+/// Profile traits for integers.
+template <typename T>
+struct ImutProfileInteger {
+ typedef const T value_type;
+ typedef const T& value_type_ref;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddInteger(X);
+ }
+};
+
+#define PROFILE_INTEGER_INFO(X)\
+template<> struct ImutProfileInfo<X> : ImutProfileInteger<X> {};
+
+PROFILE_INTEGER_INFO(char)
+PROFILE_INTEGER_INFO(unsigned char)
+PROFILE_INTEGER_INFO(short)
+PROFILE_INTEGER_INFO(unsigned short)
+PROFILE_INTEGER_INFO(unsigned)
+PROFILE_INTEGER_INFO(signed)
+PROFILE_INTEGER_INFO(long)
+PROFILE_INTEGER_INFO(unsigned long)
+PROFILE_INTEGER_INFO(long long)
+PROFILE_INTEGER_INFO(unsigned long long)
+
+#undef PROFILE_INTEGER_INFO
+
+/// Profile traits for booleans.
+template <>
+struct ImutProfileInfo<bool> {
+ typedef const bool value_type;
+ typedef const bool& value_type_ref;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddBoolean(X);
+ }
+};
+
+
+/// Generic profile trait for pointer types. We treat pointers as
+/// references to unique objects.
+template <typename T>
+struct ImutProfileInfo<T*> {
+ typedef const T* value_type;
+ typedef value_type value_type_ref;
+
+ static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
+ ID.AddPointer(X);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Trait classes that contain element comparison operators and type
+// definitions used by ImutAVLTree, ImmutableSet, and ImmutableMap. These
+// inherit from the profile traits (ImutProfileInfo) to include operations
+// for element profiling.
+//===----------------------------------------------------------------------===//
+
+
+/// ImutContainerInfo - Generic definition of comparison operations for
+/// elements of immutable containers that defaults to using
+/// std::equal_to<> and std::less<> to perform comparison of elements.
+template <typename T>
+struct ImutContainerInfo : public ImutProfileInfo<T> {
+ typedef typename ImutProfileInfo<T>::value_type value_type;
+ typedef typename ImutProfileInfo<T>::value_type_ref value_type_ref;
+ typedef value_type key_type;
+ typedef value_type_ref key_type_ref;
+ typedef bool data_type;
+ typedef bool data_type_ref;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) {
+ return std::equal_to<key_type>()(LHS,RHS);
+ }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) {
+ return std::less<key_type>()(LHS,RHS);
+ }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+/// ImutContainerInfo - Specialization for pointer values to treat pointers
+/// as references to unique objects. Pointers are thus compared by
+/// their addresses.
+template <typename T>
+struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
+ typedef typename ImutProfileInfo<T*>::value_type value_type;
+ typedef typename ImutProfileInfo<T*>::value_type_ref value_type_ref;
+ typedef value_type key_type;
+ typedef value_type_ref key_type_ref;
+ typedef bool data_type;
+ typedef bool data_type_ref;
+
+ static key_type_ref KeyOfValue(value_type_ref D) { return D; }
+ static data_type_ref DataOfValue(value_type_ref) { return true; }
+
+ static bool isEqual(key_type_ref LHS, key_type_ref RHS) { return LHS == RHS; }
+
+ static bool isLess(key_type_ref LHS, key_type_ref RHS) { return LHS < RHS; }
+
+ static bool isDataEqual(data_type_ref, data_type_ref) { return true; }
+};
+
+//===----------------------------------------------------------------------===//
+// Immutable Set
+//===----------------------------------------------------------------------===//
+
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
+class ImmutableSet {
+public:
+ typedef typename ValInfo::value_type value_type;
+ typedef typename ValInfo::value_type_ref value_type_ref;
+ typedef ImutAVLTree<ValInfo> TreeTy;
+
+private:
+ TreeTy *Root;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+ /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableSet(TreeTy* R) : Root(R) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet &operator=(const ImmutableSet &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+ ~ImmutableSet() {
+ if (Root) { Root->release(); }
+ }
+
+ class Factory {
+ typename TreeTy::Factory F;
+ const bool Canonicalize;
+
+ public:
+ Factory(bool canonicalize = true)
+ : Canonicalize(canonicalize) {}
+
+ Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
+ : F(Alloc), Canonicalize(canonicalize) {}
+
+ /// getEmptySet - Returns an immutable set that contains no elements.
+ ImmutableSet getEmptySet() {
+ return ImmutableSet(F.getEmptyTree());
+ }
+
+ /// add - Creates a new immutable set that contains all of the values
+ /// of the original set with the addition of the specified value. If
+ /// the original set already included the value, then the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.add(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ /// remove - Creates a new immutable set that contains all of the values
+ /// of the original set with the exception of the specified value. If
+ /// the original set did not contain the value, the original set is
+ /// returned and no memory is allocated. The time and space complexity
+ /// of this operation is logarithmic in the size of the original set.
+ /// The memory allocated to represent the set is released when the
+ /// factory object that created the set is destroyed.
+ ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.remove(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
+ }
+
+ BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
+
+ typename TreeTy::Factory *getTreeFactory() const {
+ return const_cast<typename TreeTy::Factory *>(&F);
+ }
+
+ private:
+ Factory(const Factory& RHS) = delete;
+ void operator=(const Factory& RHS) = delete;
+ };
+
+ friend class Factory;
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ bool operator==(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSet &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ }
+
+ TreeTy *getRoot() {
+ if (Root) { Root->retain(); }
+ return Root;
+ }
+
+ TreeTy *getRootWithoutRetain() const {
+ return Root;
+ }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ template <typename Callback>
+ void foreach(Callback& C) { if (Root) Root->foreach(C); }
+
+ template <typename Callback>
+ void foreach() { if (Root) { Callback C; Root->foreach(C); } }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ typedef ImutAVLValueIterator<ImmutableSet> iterator;
+
+ iterator begin() const { return iterator(Root); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSet &S) {
+ ID.AddPointer(S.Root);
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
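+
+// Example (editorial sketch): typical ImmutableSet usage through its Factory.
+// The element type is illustrative.
+//
+//   ImmutableSet<int>::Factory F;        // Canonicalize defaults to true
+//   ImmutableSet<int> S = F.getEmptySet();
+//   ImmutableSet<int> S2 = F.add(S, 7);  // S itself is unchanged
+//   bool Has = S2.contains(7);           // true
+//   // With canonicalization on, S2 == F.add(S, 7) shares one root node, so
+//   // isEqual() takes its identity fast path.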
+
+// NOTE: This may some day replace the current ImmutableSet.
+template <typename ValT, typename ValInfo = ImutContainerInfo<ValT> >
+class ImmutableSetRef {
+public:
+ typedef typename ValInfo::value_type value_type;
+ typedef typename ValInfo::value_type_ref value_type_ref;
+ typedef ImutAVLTree<ValInfo> TreeTy;
+ typedef typename TreeTy::Factory FactoryTy;
+
+private:
+ TreeTy *Root;
+ FactoryTy *Factory;
+
+public:
+ /// Constructs a set from a pointer to a tree root. In general one
+ /// should use a Factory object to create sets instead of directly
+ /// invoking the constructor, but there are cases where making this
+ /// constructor public is useful.
+ explicit ImmutableSetRef(TreeTy* R, FactoryTy *F)
+ : Root(R),
+ Factory(F) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSetRef(const ImmutableSetRef &X)
+ : Root(X.Root),
+ Factory(X.Factory) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSetRef &operator=(const ImmutableSetRef &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ Factory = X.Factory;
+ }
+ return *this;
+ }
+ ~ImmutableSetRef() {
+ if (Root) { Root->release(); }
+ }
+
+ static ImmutableSetRef getEmptySet(FactoryTy *F) {
+ return ImmutableSetRef(nullptr, F);
+ }
+
+ ImmutableSetRef add(value_type_ref V) {
+ return ImmutableSetRef(Factory->add(Root, V), Factory);
+ }
+
+ ImmutableSetRef remove(value_type_ref V) {
+ return ImmutableSetRef(Factory->remove(Root, V), Factory);
+ }
+
+ /// Returns true if the set contains the specified value.
+ bool contains(value_type_ref V) const {
+ return Root ? Root->contains(V) : false;
+ }
+
+ ImmutableSet<ValT> asImmutableSet(bool canonicalize = true) const {
+ return ImmutableSet<ValT>(canonicalize ?
+ Factory->getCanonicalTree(Root) : Root);
+ }
+
+ TreeTy *getRootWithoutRetain() const {
+ return Root;
+ }
+
+ bool operator==(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
+ }
+
+ bool operator!=(const ImmutableSetRef &RHS) const {
+ return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
+ }
+
+ /// isEmpty - Return true if the set contains no elements.
+ bool isEmpty() const { return !Root; }
+
+ /// isSingleton - Return true if the set contains exactly one element.
+ /// This method runs in constant time.
+ bool isSingleton() const { return getHeight() == 1; }
+
+ //===--------------------------------------------------===//
+ // Iterators.
+ //===--------------------------------------------------===//
+
+ typedef ImutAVLValueIterator<ImmutableSetRef> iterator;
+
+ iterator begin() const { return iterator(Root); }
+ iterator end() const { return iterator(); }
+
+ //===--------------------------------------------------===//
+ // Utility methods.
+ //===--------------------------------------------------===//
+
+ unsigned getHeight() const { return Root ? Root->getHeight() : 0; }
+
+ static void Profile(FoldingSetNodeID &ID, const ImmutableSetRef &S) {
+ ID.AddPointer(S.Root);
+ }
+
+ void Profile(FoldingSetNodeID &ID) const { return Profile(ID, *this); }
+
+ //===--------------------------------------------------===//
+ // For testing.
+ //===--------------------------------------------------===//
+
+ void validateTree() const { if (Root) Root->validateTree(); }
+};
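+
+// Example (editorial sketch): ImmutableSetRef skips canonicalization on each
+// update and canonicalizes once at the end. Types are illustrative.
+//
+//   ImmutableSetRef<int>::FactoryTy F;
+//   auto S = ImmutableSetRef<int>::getEmptySet(&F);
+//   S = S.add(1).add(2);                     // no per-step canonicalization
+//   ImmutableSet<int> Canon = S.asImmutableSet();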
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/IndexedMap.h b/contrib/llvm/include/llvm/ADT/IndexedMap.h
new file mode 100644
index 0000000..5ba85c0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IndexedMap.h
@@ -0,0 +1,85 @@
+//===- llvm/ADT/IndexedMap.h - An index map implementation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an indexed map. The index map template takes two
+// types. The first is the mapped type and the second is a functor
+// that maps its argument to a size_t. On instantiation a "null" value
+// can be provided to be used as a "does not exist" indicator in the
+// map. A member function grow() is provided that given the value of
+// the maximally indexed key (the argument of the functor) makes sure
+// the map has enough space for it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INDEXEDMAP_H
+#define LLVM_ADT_INDEXEDMAP_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <functional>
+
+namespace llvm {
+
+template <typename T, typename ToIndexT = llvm::identity<unsigned> >
+ class IndexedMap {
+ typedef typename ToIndexT::argument_type IndexT;
+ // Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
+ // can grow very large and SmallVector grows more efficiently as long as T
+ // is trivially copyable.
+ typedef SmallVector<T, 0> StorageT;
+ StorageT storage_;
+ T nullVal_;
+ ToIndexT toIndex_;
+
+ public:
+ IndexedMap() : nullVal_(T()) { }
+
+ explicit IndexedMap(const T& val) : nullVal_(val) { }
+
+ typename StorageT::reference operator[](IndexT n) {
+ assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+ return storage_[toIndex_(n)];
+ }
+
+ typename StorageT::const_reference operator[](IndexT n) const {
+ assert(toIndex_(n) < storage_.size() && "index out of bounds!");
+ return storage_[toIndex_(n)];
+ }
+
+ void reserve(typename StorageT::size_type s) {
+ storage_.reserve(s);
+ }
+
+ void resize(typename StorageT::size_type s) {
+ storage_.resize(s, nullVal_);
+ }
+
+ void clear() {
+ storage_.clear();
+ }
+
+ void grow(IndexT n) {
+ unsigned NewSize = toIndex_(n) + 1;
+ if (NewSize > storage_.size())
+ resize(NewSize);
+ }
+
+ bool inBounds(IndexT n) const {
+ return toIndex_(n) < storage_.size();
+ }
+
+ typename StorageT::size_type size() const {
+ return storage_.size();
+ }
+ };
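+
+// Example (editorial sketch): grow() sizes the map from the largest key seen.
+// The mapped type and values are illustrative.
+//
+//   IndexedMap<unsigned> Counts(0);  // 0 acts as the "does not exist" value
+//   Counts.grow(15);                 // indices 0..15 are now in bounds
+//   Counts[15] = 3;
+//   bool Ok = Counts.inBounds(16);   // false until grow(16) or a resize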
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/IntEqClasses.h b/contrib/llvm/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000..8e75c48
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,88 @@
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+ /// EC - When uncompressed, map each integer to a smaller member of its
+ /// equivalence class. The class leader is the smallest member and maps to
+ /// itself.
+ ///
+ /// When compressed, EC[i] is the equivalence class of i.
+ SmallVector<unsigned, 8> EC;
+
+ /// NumClasses - The number of equivalence classes when compressed, or 0 when
+ /// uncompressed.
+ unsigned NumClasses;
+
+public:
+ /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+ IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+ /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+ /// equivalence classes.
+ /// This requires an uncompressed map.
+ void grow(unsigned N);
+
+ /// clear - Clear all classes so that grow() will assign a unique class to
+ /// every integer.
+ void clear() {
+ EC.clear();
+ NumClasses = 0;
+ }
+
+ /// join - Join the equivalence classes of a and b. After joining classes,
+ /// findLeader(a) == findLeader(b).
+ /// This requires an uncompressed map.
+ void join(unsigned a, unsigned b);
+
+ /// findLeader - Compute the leader of a's equivalence class. This is the
+ /// smallest member of the class.
+ /// This requires an uncompressed map.
+ unsigned findLeader(unsigned a) const;
+
+ /// compress - Compress equivalence classes by numbering them 0 .. M-1.
+ /// This makes the equivalence class map immutable.
+ void compress();
+
+ /// getNumClasses - Return the number of equivalence classes after compress()
+ /// was called.
+ unsigned getNumClasses() const { return NumClasses; }
+
+ /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+ /// This requires a compressed map.
+ unsigned operator[](unsigned a) const {
+ assert(NumClasses && "operator[] called before compress()");
+ return EC[a];
+ }
+
+ /// uncompress - Change back to the uncompressed representation that allows
+ /// editing.
+ void uncompress();
+};
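+
+// Example (editorial sketch): join classes while uncompressed, then compress
+// to obtain dense class numbers.
+//
+//   IntEqClasses EC(4);             // classes: {0} {1} {2} {3}
+//   EC.join(0, 3);                  // classes: {0,3} {1} {2}
+//   unsigned L = EC.findLeader(3);  // L == 0, the smallest member
+//   EC.compress();                  // renumber 0 .. getNumClasses()-1
+//   bool Same = (EC[0] == EC[3]);   // true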
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/IntervalMap.h b/contrib/llvm/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000..f8843b2
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2156 @@
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a coalescing interval map for small objects.
+//
+// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+// same value are represented in a compressed form.
+//
+// Iterators provide ordered access to the compressed intervals rather than the
+// individual keys, and insert and erase operations use key intervals as well.
+//
+// Like SmallVector, IntervalMap will store the first N intervals in the map
+// object itself without any allocations. When space is exhausted it switches to
+// a B+-tree representation with very small overhead for small key and value
+// objects.
+//
+// A Traits class specifies how keys are compared. It also allows IntervalMap to
+// work with both closed and half-open intervals.
+//
+// Keys and values are not stored next to each other in a std::pair, so we don't
+// provide such a value_type. Dereferencing iterators only returns the mapped
+// value. The interval bounds are accessible through the start() and stop()
+// iterator methods.
+//
+// IntervalMap is optimized for small key and value objects, 4 or 8 bytes each
+// is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+// typedef KeyT key_type;
+// typedef ValT mapped_type;
+// typedef RecyclingAllocator<...> Allocator;
+// class iterator;
+// class const_iterator;
+//
+// explicit IntervalMap(Allocator&);
+// ~IntervalMap();
+//
+// bool empty() const;
+// KeyT start() const;
+// KeyT stop() const;
+// ValT lookup(KeyT x, ValT NotFound = ValT()) const;
+//
+// const_iterator begin() const;
+// const_iterator end() const;
+// iterator begin();
+// iterator end();
+// const_iterator find(KeyT x) const;
+// iterator find(KeyT x);
+//
+// void insert(KeyT a, KeyT b, ValT y);
+// void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator :
+// public std::iterator<std::bidirectional_iterator_tag, ValT> {
+// public:
+// bool operator==(const const_iterator &) const;
+// bool operator!=(const const_iterator &) const;
+// bool valid() const;
+//
+// const KeyT &start() const;
+// const KeyT &stop() const;
+// const ValT &value() const;
+// const ValT &operator*() const;
+// const ValT *operator->() const;
+//
+// const_iterator &operator++();
+// const_iterator &operator++(int);
+// const_iterator &operator--();
+// const_iterator &operator--(int);
+// void goToBegin();
+// void goToEnd();
+// void find(KeyT x);
+// void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+// void insert(KeyT a, KeyT b, ValT y);
+// void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <iterator>
+
+namespace llvm {
+
+
+//===----------------------------------------------------------------------===//
+//--- Key traits ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+
+ /// startLess - Return true if x is not in [a;b].
+ /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b].
+ /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+ static inline bool stopLess(const T &b, const T &x) {
+ return b < x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+ /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a+1 == b;
+ }
+
+};
+
+template <typename T>
+struct IntervalMapHalfOpenInfo {
+
+ /// startLess - Return true if x is not in [a;b).
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b).
+ static inline bool stopLess(const T &b, const T &x) {
+ return b <= x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a) and [b;y) can coalesce.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a == b;
+ }
+
+};
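+
+// Example (editorial sketch, based on the synopsis above and assuming the
+// trailing template parameters are defaulted): under the closed-interval
+// traits, adjacent intervals mapping to the same value coalesce.
+//
+//   IntervalMap<int, char>::Allocator A;
+//   IntervalMap<int, char> M(A);
+//   M.insert(1, 3, 'x');
+//   M.insert(4, 6, 'x');  // adjacent(3,4) holds, so the map now stores [1;6]
+//   // With IntervalMapHalfOpenInfo, [1;3) and [3;6) would coalesce instead.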
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+// Forward declarations.
+template <typename, typename, unsigned, typename> class LeafNode;
+template <typename, typename, unsigned, typename> class BranchNode;
+
+typedef std::pair<unsigned,unsigned> IdxPair;
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeBase ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+//   T1  T2   N  Waste  Used by
+//    4   4  24      0  Branch<4> (32-bit pointers)
+//    8   4  16      0  Leaf<4,4>, Branch<4>
+//    8   8  12      0  Leaf<4,8>, Branch<8>
+//   16   4   9     12  Leaf<8,4>
+//   16   8   8      0  Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+ enum { Capacity = N };
+
+ T1 first[N];
+ T2 second[N];
+
+ /// copy - Copy elements from another node.
+ /// @param Other Node elements are copied from.
+ /// @param i Beginning of the source range in other.
+ /// @param j Beginning of the destination range in this.
+ /// @param Count Number of elements to copy.
+ template <unsigned M>
+ void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+ unsigned j, unsigned Count) {
+ assert(i + Count <= M && "Invalid source range");
+ assert(j + Count <= N && "Invalid dest range");
+ for (unsigned e = i + Count; i != e; ++i, ++j) {
+ first[j] = Other.first[i];
+ second[j] = Other.second[i];
+ }
+ }
+
+ /// moveLeft - Move elements to the left.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveLeft(unsigned i, unsigned j, unsigned Count) {
+ assert(j <= i && "Use moveRight to shift elements right");
+ copy(*this, i, j, Count);
+ }
+
+ /// moveRight - Move elements to the right.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveRight(unsigned i, unsigned j, unsigned Count) {
+ assert(i <= j && "Use moveLeft to shift elements left");
+ assert(j + Count <= N && "Invalid range");
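+ // Copy from the back so overlapping source and destination ranges are safe.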
+ while (Count--) {
+ first[j + Count] = first[i + Count];
+ second[j + Count] = second[i + Count];
+ }
+ }
+
+ /// erase - Erase elements [i;j).
+ /// @param i Beginning of the range to erase.
+ /// @param j End of the range. (Exclusive).
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned j, unsigned Size) {
+ moveLeft(j, i, Size - j);
+ }
+
+ /// erase - Erase element at i.
+ /// @param i Index of element to erase.
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned Size) {
+ erase(i, i+1, Size);
+ }
+
+ /// shift - Shift elements [i;size) 1 position to the right.
+ /// @param i Beginning of the range to move.
+ /// @param Size Number of elements in node.
+ void shift(unsigned i, unsigned Size) {
+ moveRight(i, i + 1, Size - i);
+ }
+
+ /// transferToLeftSib - Transfer elements to a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.copy(*this, 0, SSize, Count);
+ erase(0, Count, Size);
+ }
+
+ /// transferToRightSib - Transfer elements to a right sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Right sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.moveRight(0, Count, SSize);
+ Sib.copy(*this, Size-Count, 0, Count);
+ }
+
+ /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+ /// elements to or from a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Add The number of elements to add to this node, possibly < 0.
+ /// @return Number of elements added to this node, possibly negative.
+ int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+ if (Add > 0) {
+ // We want to grow, copy from sib.
+ unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+ Sib.transferToRightSib(SSize, *this, Size, Count);
+ return Count;
+ } else {
+ // We want to shrink, copy to sib.
+ unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+ transferToLeftSib(Size, Sib, SSize, Count);
+ return -Count;
+ }
+ }
+};
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+ unsigned CurSize[], const unsigned NewSize[]) {
+ if (Nodes == 0)
+ return;
+
+ // Move elements right.
+ for (int n = Nodes - 1; n; --n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (int m = n - 1; m != -1; --m) {
+ int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+ NewSize[n] - CurSize[n]);
+ CurSize[m] -= d;
+ CurSize[n] += d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+ // Move elements left.
+ for (unsigned n = 0; n != Nodes - 1; ++n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (unsigned m = n + 1; m != Nodes; ++m) {
+ int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+ CurSize[n] - NewSize[n]);
+ CurSize[m] += d;
+ CurSize[n] -= d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ for (unsigned n = 0; n != Nodes; n++)
+ assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+/// Elements == sum(CurSize), and
+/// Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+/// sum(NewSize) == Elements, and
+/// NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+/// sum(NewSize[0..idx-1]) <= Position
+/// sum(NewSize[0..idx]) >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow Reserve space for a new element at Position.
+/// @return (node, offset) for Position.
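+///
+/// Example (illustrative): distribute(3, 10, 4, CurSize, NewSize, 5, true)
+/// must leave sum(NewSize) == 10 with every entry <= 4; one legal result is
+/// NewSize = {4, 3, 3}, for which Position 5 lands at offset 1 of node 1 and
+/// the returned IdxPair is (1, 1).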
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+ const unsigned *CurSize, unsigned NewSize[],
+ unsigned Position, bool Grow);
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeSizer ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
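+//
+// Worked example (illustrative): with 4-byte keys and values, one leaf entry
+// costs 2*4 + 4 = 12 bytes, so DesiredLeafSize = 192/12 = 16. sizeof(LeafBase)
+// is then exactly 192 bytes, and with 8-byte pointers BranchSize is also
+// 192/(4+8) = 16, matching the "8 4 16" row of the table above.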
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+ // Cache line size. Most architectures have 32 or 64 byte cache lines.
+ // We use 64 bytes here because it provides good branching factors.
+ Log2CacheLine = 6,
+ CacheLineBytes = 1 << Log2CacheLine,
+ DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+ enum {
+ // Compute the leaf node branching factor that makes a node fit in three
+ // cache lines. The branching factor must be at least 3, or some B+-tree
+ // balancing algorithms won't work.
+ // LeafSize can't be larger than CacheLineBytes. This is required by the
+ // PointerIntPair used by NodeRef.
+ DesiredLeafSize = DesiredNodeBytes /
+ static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+ MinLeafSize = 3,
+ LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+ };
+
+ typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;
+
+ enum {
+ // Now that we have the leaf branching factor, compute the actual allocation
+ // unit size by rounding up to a whole number of cache lines.
+ AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+ // Determine the branching factor for branch nodes.
+ BranchSize = AllocBytes /
+ static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+ };
+
+ /// Allocator - The recycling allocator used for both branch and leaf nodes.
+ /// This typedef is very likely to be identical for all IntervalMaps with
+ /// reasonably sized entries, so the same allocator can be shared among
+ /// different kinds of maps.
+ typedef RecyclingAllocator<BumpPtrAllocator, char,
+ AllocBytes, CacheLineBytes> Allocator;
+
+};
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeRef ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allow tree balancing algorithms to run without faulting cache lines for nodes
+// that may not need to be modified.
+//
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
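+//
+// Illustrative use (NodeT stands for any node type with Capacity >= 3; not
+// part of the interface):
+//
+//   NodeT *P = ...;           // cache line aligned, so low 6 bits are 0
+//   NodeRef NR(P, 3);         // stores size - 1 = 2 in those low bits
+//   unsigned n = NR.size();   // n == 3, without touching P's cache line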
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+ struct CacheAlignedPointerTraits {
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ enum { NumLowBitsAvailable = Log2CacheLine };
+ };
+ PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+ /// NodeRef - Create a null ref.
+ NodeRef() {}
+
+ /// operator bool - Detect a null ref.
+ explicit operator bool() const { return pip.getOpaqueValue(); }
+
+ /// NodeRef - Create a reference to the node p with n elements.
+ template <typename NodeT>
+ NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+ assert(n <= NodeT::Capacity && "Size too big for node");
+ }
+
+ /// size - Return the number of elements in the referenced node.
+ unsigned size() const { return pip.getInt() + 1; }
+
+ /// setSize - Update the node size.
+ void setSize(unsigned n) { pip.setInt(n - 1); }
+
+ /// subtree - Access the i'th subtree reference in a branch node.
+ /// This depends on branch nodes storing the NodeRef array as their first
+ /// member.
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+ }
+
+ /// get - Dereference as a NodeT reference.
+ template <typename NodeT>
+ NodeT &get() const {
+ return *reinterpret_cast<NodeT*>(pip.getPointer());
+ }
+
+ bool operator==(const NodeRef &RHS) const {
+ if (pip == RHS.pip)
+ return true;
+ assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+ return false;
+ }
+
+ bool operator!=(const NodeRef &RHS) const {
+ return !operator==(RHS);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::LeafNode ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i)) - Non-empty, sane intervals.
+//
+// - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+// - Fully coalesced.
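+//
+// Example (illustrative, unsigned keys with the default traits): a leaf may
+// hold [1;5] -> A, [7;9] -> B, [10;14] -> A, but never [1;5] -> A next to
+// [6;9] -> A, which must be coalesced into the single interval [1;9] -> A.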
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+ const KeyT &start(unsigned i) const { return this->first[i].first; }
+ const KeyT &stop(unsigned i) const { return this->first[i].second; }
+ const ValT &value(unsigned i) const { return this->second[i]; }
+
+ KeyT &start(unsigned i) { return this->first[i].first; }
+ KeyT &stop(unsigned i) { return this->first[i].second; }
+ ValT &value(unsigned i) { return this->second[i]; }
+
+ /// findFrom - Find the first interval after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), or size.
+ /// This is the first interval that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find an interval that is known to exist. This is the same as
+ /// findFrom except it is assumed that x is at least within range of the last
+ /// interval.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), never size.
+ /// This is the first interval that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Lookup mapped value for a safe key.
+ /// It is assumed that x is within range of the last entry.
+ /// @param x Key to search for.
+ /// @param NotFound Value to return if x is not in any interval.
+ /// @return The mapped value at x or NotFound.
+ ValT safeLookup(KeyT x, ValT NotFound) const {
+ unsigned i = safeFind(0, x);
+ return Traits::startLess(x, start(i)) ? NotFound : value(i);
+ }
+
+ unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos Starting index = findFrom(0, size, a)
+/// @param Size Number of elements in node.
+/// @param a Interval start.
+/// @param b Interval stop.
+/// @param y Value to be mapped.
+/// @return The new number of elements in the node, or Capacity+1 on overflow.
+/// The insert position is returned through Pos.
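+///
+/// Example (illustrative, default unsigned traits): inserting [6;10] -> A
+/// into a node holding [1;5] -> A and [11;14] -> A coalesces in both
+/// directions, leaving the single entry [1;14] -> A and returning 1.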
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+ unsigned i = Pos;
+ assert(i <= Size && Size <= N && "Invalid index");
+ assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+ // Verify the findFrom invariant.
+ assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+ assert((i == Size || !Traits::stopLess(stop(i), a)));
+ assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+ // Coalesce with previous interval.
+ if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+ Pos = i - 1;
+ // Also coalesce with next interval?
+ if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+ stop(i - 1) = stop(i);
+ this->erase(i, Size);
+ return Size - 1;
+ }
+ stop(i - 1) = b;
+ return Size;
+ }
+
+ // Detect overflow.
+ if (i == N)
+ return N + 1;
+
+ // Add new interval at end.
+ if (i == Size) {
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+ }
+
+ // Try to coalesce with following interval.
+ if (value(i) == y && Traits::adjacent(b, start(i))) {
+ start(i) = a;
+ return Size;
+ }
+
+ // We must insert before i. Detect overflow.
+ if (Size == N)
+ return N + 1;
+
+ // Insert before i.
+ this->shift(i, Size);
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+}
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::BranchNode ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
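+//
+// Example (illustrative): a branch over three subtrees whose rightmost stop
+// keys are 10, 25, and 40 stores the NodeRefs in first[] and {10, 25, 40} in
+// second[]; safeLookup(17) then returns the second subtree.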
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+ const KeyT &stop(unsigned i) const { return this->second[i]; }
+ const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+ KeyT &stop(unsigned i) { return this->second[i]; }
+ NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+ /// findFrom - Find the first subtree after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), or size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index to findFrom is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find a subtree that is known to exist. This is the same as
+ /// findFrom except it is assumed that x is in range.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), never size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Get the subtree containing x, assuming that x is in range.
+ /// @param x Key to search for.
+ /// @return Subtree containing x.
+ NodeRef safeLookup(KeyT x) const {
+ return subtree(safeFind(0, x));
+ }
+
+ /// insert - Insert a new (subtree, stop) pair.
+ /// @param i Insert position, following entries will be shifted.
+ /// @param Size Number of elements in node.
+ /// @param Node Subtree to insert.
+ /// @param Stop Last key in subtree.
+ void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+ assert(Size < N && "branch node overflow");
+ assert(i <= Size && "Bad insert position");
+ this->shift(i, Size);
+ subtree(i) = Node;
+ stop(i) = Stop;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::Path ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
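+//
+// Example (illustrative): in a tree of height 2, a full path has three
+// entries: path[0] addresses the root branch, path[1] a branch node, and
+// path[2] the current leaf; each entry's offset selects the next step down.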
+//
+//===----------------------------------------------------------------------===//
+
+class Path {
+ /// Entry - Each step in the path is a node pointer and an offset into that
+ /// node.
+ struct Entry {
+ void *node;
+ unsigned size;
+ unsigned offset;
+
+ Entry(void *Node, unsigned Size, unsigned Offset)
+ : node(Node), size(Size), offset(Offset) {}
+
+ Entry(NodeRef Node, unsigned Offset)
+ : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(node)[i];
+ }
+ };
+
+ /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+ SmallVector<Entry, 4> path;
+
+public:
+ // Node accessors.
+ template <typename NodeT> NodeT &node(unsigned Level) const {
+ return *reinterpret_cast<NodeT*>(path[Level].node);
+ }
+ unsigned size(unsigned Level) const { return path[Level].size; }
+ unsigned offset(unsigned Level) const { return path[Level].offset; }
+ unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+ // Leaf accessors.
+ template <typename NodeT> NodeT &leaf() const {
+ return *reinterpret_cast<NodeT*>(path.back().node);
+ }
+ unsigned leafSize() const { return path.back().size; }
+ unsigned leafOffset() const { return path.back().offset; }
+ unsigned &leafOffset() { return path.back().offset; }
+
+ /// valid - Return true if path is at a valid node, not at end().
+ bool valid() const {
+ return !path.empty() && path.front().offset < path.front().size;
+ }
+
+ /// height - Return the height of the tree corresponding to this path.
+ /// This matches map->height in a full path.
+ unsigned height() const { return path.size() - 1; }
+
+ /// subtree - Get the subtree referenced from Level. When the path is
+ /// consistent, node(Level + 1) == subtree(Level).
+ /// @param Level 0..height-1. The leaves have no subtrees.
+ NodeRef &subtree(unsigned Level) const {
+ return path[Level].subtree(path[Level].offset);
+ }
+
+ /// reset - Reset cached information about node(Level) from subtree(Level - 1).
+ /// @param Level 1..height. The node to update after parent node changed.
+ void reset(unsigned Level) {
+ path[Level] = Entry(subtree(Level - 1), offset(Level));
+ }
+
+ /// push - Add entry to path.
+ /// @param Node Node to add, should be subtree(path.size()-1).
+ /// @param Offset Offset into Node.
+ void push(NodeRef Node, unsigned Offset) {
+ path.push_back(Entry(Node, Offset));
+ }
+
+ /// pop - Remove the last path entry.
+ void pop() {
+ path.pop_back();
+ }
+
+ /// setSize - Set the size of a node both in the path and in the tree.
+ /// @param Level 0..height. Note that setting the root size won't change
+ /// map->rootSize.
+ /// @param Size New node size.
+ void setSize(unsigned Level, unsigned Size) {
+ path[Level].size = Size;
+ if (Level)
+ subtree(Level - 1).setSize(Size);
+ }
+
+ /// setRoot - Clear the path and set a new root node.
+ /// @param Node New root node.
+ /// @param Size New root size.
+ /// @param Offset Offset into root node.
+ void setRoot(void *Node, unsigned Size, unsigned Offset) {
+ path.clear();
+ path.push_back(Entry(Node, Size, Offset));
+ }
+
+ /// replaceRoot - Replace the current root node with two new entries after the
+ /// tree height has increased.
+ /// @param Root The new root node.
+ /// @param Size Number of entries in the new root.
+ /// @param Offsets Offsets into the root and first branch nodes.
+ void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+ /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Left sibling, or NodeRef().
+ NodeRef getLeftSibling(unsigned Level) const;
+
+ /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+ /// unaltered.
+ /// @param Level Move node(Level).
+ void moveLeft(unsigned Level);
+
+ /// fillLeft - Grow path to Height by taking leftmost branches.
+ /// @param Height The target height.
+ void fillLeft(unsigned Height) {
+ while (height() < Height)
+ push(subtree(height()), 0);
+ }
+
+ /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Right sibling, or NodeRef().
+ NodeRef getRightSibling(unsigned Level) const;
+
+ /// moveRight - Move path to the right sibling at Level. Leave nodes below
+ /// Level unaltered.
+ /// @param Level Move node(Level).
+ void moveRight(unsigned Level);
+
+ /// atBegin - Return true if path is at begin().
+ bool atBegin() const {
+ for (unsigned i = 0, e = path.size(); i != e; ++i)
+ if (path[i].offset != 0)
+ return false;
+ return true;
+ }
+
+ /// atLastEntry - Return true if the path is at the last entry of the node at
+ /// Level.
+ /// @param Level Node to examine.
+ bool atLastEntry(unsigned Level) const {
+ return path[Level].offset == path[Level].size - 1;
+ }
+
+ /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+ /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+ /// ensures that node(Level) is real by moving back to the last node at Level,
+ /// and setting offset(Level) to size(Level) if required.
+ /// @param Level The level where an insertion is about to take place.
+ void legalizeForInsert(unsigned Level) {
+ if (valid())
+ return;
+ moveLeft(Level);
+ ++path[Level].offset;
+ }
+};
+
+} // namespace IntervalMapImpl
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap ----//
+//===----------------------------------------------------------------------===//
+
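+// Minimal usage sketch (illustrative; assumes unsigned keys with the default
+// IntervalMapInfo traits):
+//
+//   IntervalMap<unsigned, char>::Allocator Alloc;
+//   IntervalMap<unsigned, char> Map(Alloc);
+//   Map.insert(10, 20, 'a');      // map the closed interval [10;20] to 'a'
+//   char V = Map.lookup(15);      // V == 'a'; unmapped keys yield ValT()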
+template <typename KeyT, typename ValT,
+ unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+ typename Traits = IntervalMapInfo<KeyT> >
+class IntervalMap {
+ typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
+ Branch;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ // The RootLeaf capacity is given as a template parameter. We must compute the
+ // corresponding RootBranch capacity.
+ enum {
+ DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+ (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+ RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+ };
+
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
+ RootBranch;
+
+ // When branched, we store a global start key as well as the branch node.
+ struct RootBranchData {
+ KeyT start;
+ RootBranch node;
+ };
+
+public:
+ typedef typename Sizer::Allocator Allocator;
+ typedef KeyT KeyType;
+ typedef ValT ValueType;
+ typedef Traits KeyTraits;
+
+private:
+ // The root data is either a RootLeaf or a RootBranchData instance.
+ AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
+
+ // Tree height.
+ // 0: Leaves in root.
+ // 1: Root points to leaf.
+ // 2: root->branch->leaf ...
+ unsigned height;
+
+ // Number of entries in the root node.
+ unsigned rootSize;
+
+ // Allocator used for creating external nodes.
+ Allocator &allocator;
+
+ /// dataAs - Represent data as a node type without breaking aliasing rules.
+ template <typename T>
+ T &dataAs() const {
+ union {
+ const char *d;
+ T *t;
+ } u;
+ u.d = data.buffer;
+ return *u.t;
+ }
+
+ const RootLeaf &rootLeaf() const {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootLeaf &rootLeaf() {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootBranchData &rootBranchData() const {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ RootBranchData &rootBranchData() {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ const RootBranch &rootBranch() const { return rootBranchData().node; }
+ RootBranch &rootBranch() { return rootBranchData().node; }
+ KeyT rootBranchStart() const { return rootBranchData().start; }
+ KeyT &rootBranchStart() { return rootBranchData().start; }
+
+ template <typename NodeT> NodeT *newNode() {
+ return new(allocator.template Allocate<NodeT>()) NodeT();
+ }
+
+ template <typename NodeT> void deleteNode(NodeT *P) {
+ P->~NodeT();
+ allocator.Deallocate(P);
+ }
+
+ IdxPair branchRoot(unsigned Position);
+ IdxPair splitRoot(unsigned Position);
+
+ void switchRootToBranch() {
+ rootLeaf().~RootLeaf();
+ height = 1;
+ new (&rootBranchData()) RootBranchData();
+ }
+
+ void switchRootToLeaf() {
+ rootBranchData().~RootBranchData();
+ height = 0;
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ bool branched() const { return height > 0; }
+
+ ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+ void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+ unsigned Level));
+ void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+ explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+ assert((uintptr_t(data.buffer) & (alignOf<RootLeaf>() - 1)) == 0 &&
+ "Insufficient alignment");
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ ~IntervalMap() {
+ clear();
+ rootLeaf().~RootLeaf();
+ }
+
+ /// empty - Return true when no intervals are mapped.
+ bool empty() const {
+ return rootSize == 0;
+ }
+
+ /// start - Return the smallest mapped key in a non-empty map.
+ KeyT start() const {
+ assert(!empty() && "Empty IntervalMap has no start");
+ return !branched() ? rootLeaf().start(0) : rootBranchStart();
+ }
+
+ /// stop - Return the largest mapped key in a non-empty map.
+ KeyT stop() const {
+ assert(!empty() && "Empty IntervalMap has no stop");
+ return !branched() ? rootLeaf().stop(rootSize - 1) :
+ rootBranch().stop(rootSize - 1);
+ }
+
+ /// lookup - Return the mapped value at x or NotFound.
+ ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+ if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+ return NotFound;
+ return branched() ? treeSafeLookup(x, NotFound) :
+ rootLeaf().safeLookup(x, NotFound);
+ }
+
+ /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+ /// It is assumed that no key in the interval is mapped to another value, but
+ /// overlapping intervals already mapped to y will be coalesced.
+ void insert(KeyT a, KeyT b, ValT y) {
+ if (branched() || rootSize == RootLeaf::Capacity)
+ return find(a).insert(a, b, y);
+
+ // Easy insert into root leaf.
+ unsigned p = rootLeaf().findFrom(0, rootSize, a);
+ rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+ }
+
+ /// clear - Remove all entries.
+ void clear();
+
+ class const_iterator;
+ class iterator;
+ friend class const_iterator;
+ friend class iterator;
+
+ const_iterator begin() const {
+ const_iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ iterator begin() {
+ iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ const_iterator end() const {
+ const_iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ iterator end() {
+ iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ /// find - Return an iterator pointing to the first interval ending at or
+ /// after x, or end().
+ const_iterator find(KeyT x) const {
+ const_iterator I(*this);
+ I.find(x);
+ return I;
+ }
+
+ iterator find(KeyT x) {
+ iterator I(*this);
+ I.find(x);
+ return I;
+ }
+};
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+ assert(branched() && "treeLookup assumes a branched root");
+
+ IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+ for (unsigned h = height-1; h; --h)
+ NR = NR.get<Branch>().safeLookup(x);
+ return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external leaf nodes to hold RootLeaf+1?
+ const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+ // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned pos = 0;
+ NodeRef node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Leaf *L = newNode<Leaf>();
+ L->copy(rootLeaf(), pos, 0, size[n]);
+ node[n] = NodeRef(L, size[n]);
+ pos += size[n];
+ }
+
+ // Destroy the old leaf node, construct branch node instead.
+ switchRootToBranch();
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+ rootBranch().subtree(n) = node[n];
+ }
+ rootBranchStart() = node[0].template get<Leaf>().start(0);
+ rootSize = Nodes;
+ return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external branch nodes to hold RootBranch+1?
+ const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned Size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+ // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ Size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Branch::Capacity, nullptr, Size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned Pos = 0;
+ NodeRef Node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Branch *B = newNode<Branch>();
+ B->copy(rootBranch(), Pos, 0, Size[n]);
+ Node[n] = NodeRef(B, Size[n]);
+ Pos += Size[n];
+ }
+
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+ rootBranch().subtree(n) = Node[n];
+ }
+ rootSize = Nodes;
+ ++height;
+ return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+ if (!branched())
+ return;
+ SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+ // Collect level 0 nodes from the root.
+ for (unsigned i = 0; i != rootSize; ++i)
+ Refs.push_back(rootBranch().subtree(i));
+
+ // Visit all branch nodes.
+ for (unsigned h = height - 1; h; --h) {
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+ for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+ NextRefs.push_back(Refs[i].subtree(j));
+ (this->*f)(Refs[i], h);
+ }
+ Refs.clear();
+ Refs.swap(NextRefs);
+ }
+
+ // Visit all leaf nodes.
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+ (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+ if (Level)
+ deleteNode(&Node.get<Branch>());
+ else
+ deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+ if (branched()) {
+ visitNodes(&IntervalMap::deleteNode);
+ switchRootToLeaf();
+ }
+ rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::const_iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
+ public std::iterator<std::bidirectional_iterator_tag, ValT> {
+protected:
+ friend class IntervalMap;
+
+ // The map referred to.
+ IntervalMap *map;
+
+ // We store a full path from the root to the current position.
+ // The path may be partially filled, but never between iterator calls.
+ IntervalMapImpl::Path path;
+
+ explicit const_iterator(const IntervalMap &map) :
+ map(const_cast<IntervalMap*>(&map)) {}
+
+ bool branched() const {
+ assert(map && "Invalid iterator");
+ return map->branched();
+ }
+
+ void setRoot(unsigned Offset) {
+ if (branched())
+ path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+ else
+ path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+ }
+
+ void pathFillFind(KeyT x);
+ void treeFind(KeyT x);
+ void treeAdvanceTo(KeyT x);
+
+ /// unsafeStart - Writable access to start() for iterator.
+ KeyT &unsafeStart() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+ path.leaf<RootLeaf>().start(path.leafOffset());
+ }
+
+ /// unsafeStop - Writable access to stop() for iterator.
+ KeyT &unsafeStop() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+ path.leaf<RootLeaf>().stop(path.leafOffset());
+ }
+
+ /// unsafeValue - Writable access to value() for iterator.
+ ValT &unsafeValue() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+ path.leaf<RootLeaf>().value(path.leafOffset());
+ }
+
+public:
+ /// const_iterator - Create an iterator that isn't pointing anywhere.
+ const_iterator() : map(nullptr) {}
+
+ /// setMap - Change the map iterated over. This call must be followed by a
+ /// call to goToBegin(), goToEnd(), or find().
+ void setMap(const IntervalMap &m) { map = const_cast<IntervalMap*>(&m); }
+
+ /// valid - Return true if the current position is valid, false for end().
+ bool valid() const { return path.valid(); }
+
+ /// atBegin - Return true if the current position is the first map entry.
+ bool atBegin() const { return path.atBegin(); }
+
+ /// start - Return the beginning of the current interval.
+ const KeyT &start() const { return unsafeStart(); }
+
+ /// stop - Return the end of the current interval.
+ const KeyT &stop() const { return unsafeStop(); }
+
+ /// value - Return the mapped value at the current interval.
+ const ValT &value() const { return unsafeValue(); }
+
+ const ValT &operator*() const { return value(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ assert(map == RHS.map && "Cannot compare iterators from different maps");
+ if (!valid())
+ return !RHS.valid();
+ if (path.leafOffset() != RHS.path.leafOffset())
+ return false;
+ return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// goToBegin - Move to the first interval in map.
+ void goToBegin() {
+ setRoot(0);
+ if (branched())
+ path.fillLeft(map->height);
+ }
+
+ /// goToEnd - Move beyond the last interval in map.
+ void goToEnd() {
+ setRoot(map->rootSize);
+ }
+
+ /// preincrement - move to the next interval.
+ const_iterator &operator++() {
+ assert(valid() && "Cannot increment end()");
+ if (++path.leafOffset() == path.leafSize() && branched())
+ path.moveRight(map->height);
+ return *this;
+ }
+
+ /// postincrement - Don't do that!
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// predecrement - move to the previous interval.
+ const_iterator &operator--() {
+ if (path.leafOffset() && (valid() || !branched()))
+ --path.leafOffset();
+ else
+ path.moveLeft(map->height);
+ return *this;
+ }
+
+ /// postdecrement - Don't do that!
+ const_iterator operator--(int) {
+ const_iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+ /// find - Move to the first interval with stop >= x, or end().
+ /// This is a full search from the root, the current position is ignored.
+ void find(KeyT x) {
+ if (branched())
+ treeFind(x);
+ else
+ setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+ }
+
+ /// advanceTo - Move to the first interval with stop >= x, or end().
+ /// The search is started from the current position, and no earlier positions
+ /// can be found. This is much faster than find() for small moves.
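+ /// Example (illustrative): after find(10) has positioned the iterator,
+ /// advanceTo(100) continues from the current leaf and only climbs toward
+ /// the root as far as needed, instead of searching from the root again.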
+ void advanceTo(KeyT x) {
+ if (!valid())
+ return;
+ if (branched())
+ treeAdvanceTo(x);
+ else
+ path.leafOffset() =
+ map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+ }
+
+};
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+ IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+ for (unsigned i = map->height - path.height() - 1; i; --i) {
+ unsigned p = NR.get<Branch>().safeFind(0, x);
+ path.push(NR, p);
+ NR = NR.subtree(p);
+ }
+ path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+ setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+ // Can we stay on the same leaf node?
+ if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+ path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+ return;
+ }
+
+ // Drop the current leaf.
+ path.pop();
+
+ // Search towards the root for a usable subtree.
+ if (path.height()) {
+ for (unsigned l = path.height() - 1; l; --l) {
+ if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+ // The branch node at l+1 is usable
+ path.offset(l + 1) =
+ path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+ return pathFillFind(x);
+ }
+ path.pop();
+ }
+ // Is the level-1 Branch usable?
+ if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+ path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+ return pathFillFind(x);
+ }
+ }
+
+ // We reached the root.
+ setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+ friend class IntervalMap;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+ void setNodeStop(unsigned Level, KeyT Stop);
+ bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+ template <typename NodeT> bool overflow(unsigned Level);
+ void treeInsert(KeyT a, KeyT b, ValT y);
+ void eraseNode(unsigned Level);
+ void treeErase(bool UpdateRoot = true);
+ bool canCoalesceLeft(KeyT Start, ValT x);
+ bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+ /// iterator - Create null iterator.
+ iterator() {}
+
+ /// setStart - Move the start of the current interval.
+ /// This may cause coalescing with the previous interval.
+ /// @param a New start key, must not overlap the previous interval.
+ void setStart(KeyT a);
+
+ /// setStop - Move the end of the current interval.
+ /// This may cause coalescing with the following interval.
+ /// @param b New stop key, must not overlap the following interval.
+ void setStop(KeyT b);
+
+ /// setValue - Change the mapped value of the current interval.
+ /// This may cause coalescing with the previous and following intervals.
+ /// @param x New value.
+ void setValue(ValT x);
+
+ /// setStartUnchecked - Move the start of the current interval without
+ /// checking for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param a New start key.
+ void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+ /// setStopUnchecked - Move the end of the current interval without checking
+ /// for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param b New stop key.
+ void setStopUnchecked(KeyT b) {
+ this->unsafeStop() = b;
+ // Update keys in branch nodes as well.
+ if (this->path.atLastEntry(this->path.height()))
+ setNodeStop(this->path.height(), b);
+ }
+
+ /// setValueUnchecked - Change the mapped value of the current interval
+ /// without checking for coalescing.
+ /// @param x New value.
+ void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+ /// insert - Insert mapping [a;b] -> y before the current position.
+ void insert(KeyT a, KeyT b, ValT y);
+
+ /// erase - Erase the current interval.
+ void erase();
+
+ iterator &operator++() {
+ const_iterator::operator++();
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ iterator &operator--() {
+ const_iterator::operator--();
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+};
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ if (!this->branched()) {
+ unsigned i = P.leafOffset();
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return i && Node.value(i-1) == Value &&
+ Traits::adjacent(Node.stop(i-1), Start);
+ }
+ // Branched.
+ if (unsigned i = P.leafOffset()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+ } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+ unsigned i = NR.size() - 1;
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+ }
+ return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned i = P.leafOffset() + 1;
+ if (!this->branched()) {
+ if (i >= P.leafSize())
+ return false;
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ }
+ // Branched.
+ if (i < P.leafSize()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ } else if (NodeRef NR = P.getRightSibling(P.height())) {
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+ }
+ return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+ // There are no references to the root node, so nothing to update.
+ if (!Level)
+ return;
+ IntervalMapImpl::Path &P = this->path;
+ // Update nodes pointing to the current node.
+ while (--Level) {
+ P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+ if (!P.atLastEntry(Level))
+ return;
+ }
+ // Update root separately since it has a different layout.
+ P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+ assert(Traits::stopLess(a, this->stop()) && "Cannot move start beyond stop");
+ KeyT &CurStart = this->unsafeStart();
+ if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+ CurStart = a;
+ return;
+ }
+ // Coalesce with the interval to the left.
+ --*this;
+ a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+ assert(Traits::stopLess(this->start(), b) && "Cannot move stop beyond start");
+ if (Traits::startLess(b, this->stop()) ||
+ !canCoalesceRight(b, this->value())) {
+ setStopUnchecked(b);
+ return;
+ }
+ // Coalesce with interval to the right.
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+ setValueUnchecked(x);
+ if (canCoalesceRight(this->stop(), x)) {
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+ if (canCoalesceLeft(this->start(), x)) {
+ --*this;
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+ assert(Level && "Cannot insert next to the root");
+ bool SplitRoot = false;
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (Level == 1) {
+ // Insert into the root branch node.
+ if (IM.rootSize < RootBranch::Capacity) {
+ IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+ P.setSize(0, ++IM.rootSize);
+ P.reset(Level);
+ return SplitRoot;
+ }
+
+ // We need to split the root while keeping our position.
+ SplitRoot = true;
+ IdxPair Offset = IM.splitRoot(P.offset(0));
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Fall through to insert at the new higher level.
+ ++Level;
+ }
+
+ // When inserting before end(), make sure we have a valid path.
+ P.legalizeForInsert(--Level);
+
+ // Insert into the branch node at Level-1.
+ if (P.size(Level) == Branch::Capacity) {
+ // Branch node is full, handle the overflow.
+ assert(!SplitRoot && "Cannot overflow after splitting the root");
+ SplitRoot = overflow<Branch>(Level);
+ Level += SplitRoot;
+ }
+ P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+ P.setSize(Level, P.size(Level) + 1);
+ if (P.atLastEntry(Level))
+ setNodeStop(Level, Stop);
+ P.reset(Level + 1);
+ return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+ if (this->branched())
+ return treeInsert(a, b, y);
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ // Try simple root leaf insert.
+ unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+ // Was the root node insert successful?
+ if (Size <= RootLeaf::Capacity) {
+ P.setSize(0, IM.rootSize = Size);
+ return;
+ }
+
+ // Root leaf node is full, we must branch.
+ IdxPair Offset = IM.branchRoot(P.leafOffset());
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Now it fits in the new leaf.
+ treeInsert(a, b, y);
+}
+
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+
+ if (!P.valid())
+ P.legalizeForInsert(this->map->height);
+
+ // Check if this insertion will extend the node to the left.
+ if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+ // Node is growing to the left, will it affect a left sibling node?
+ if (NodeRef Sib = P.getLeftSibling(P.height())) {
+ Leaf &SibLeaf = Sib.get<Leaf>();
+ unsigned SibOfs = Sib.size() - 1;
+ if (SibLeaf.value(SibOfs) == y &&
+ Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+ // This insertion will coalesce with the last entry in SibLeaf. We can
+ // handle it in two ways:
+ // 1. Extend SibLeaf.stop to b and be done, or
+ // 2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+ // We prefer 1., but need 2. when coalescing to the right as well.
+ Leaf &CurLeaf = P.leaf<Leaf>();
+ P.moveLeft(P.height());
+ if (Traits::stopLess(b, CurLeaf.start(0)) &&
+ (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+ // Easy, just extend SibLeaf and we're done.
+ setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+ return;
+ } else {
+ // We have both left and right coalescing. Erase the old SibLeaf entry
+ // and continue inserting the larger interval.
+ a = SibLeaf.start(SibOfs);
+ treeErase(/* UpdateRoot= */false);
+ }
+ }
+ } else {
+ // No left sibling means we are at begin(). Update cached bound.
+ this->map->rootBranchStart() = a;
+ }
+ }
+
+ // When we are inserting at the end of a leaf node, we must update stops.
+ unsigned Size = P.leafSize();
+ bool Grow = P.leafOffset() == Size;
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+ // Leaf insertion unsuccessful? Overflow and try again.
+ if (Size > Leaf::Capacity) {
+ overflow<Leaf>(P.height());
+ Grow = P.leafOffset() == P.leafSize();
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+ assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+ }
+
+ // Inserted, update offset and leaf size.
+ P.setSize(P.height(), Size);
+
+ // Insert was the last node entry, update stops.
+ if (Grow)
+ setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ assert(P.valid() && "Cannot erase end()");
+ if (this->branched())
+ return treeErase();
+ IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ Leaf &Node = P.leaf<Leaf>();
+
+ // Nodes are not allowed to become empty.
+ if (P.leafSize() == 1) {
+ IM.deleteNode(&Node);
+ eraseNode(IM.height);
+ // Update rootBranchStart if we erased begin().
+ if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+ return;
+ }
+
+ // Erase current entry.
+ Node.erase(P.leafOffset(), P.leafSize());
+ unsigned NewSize = P.leafSize() - 1;
+ P.setSize(IM.height, NewSize);
+ // When we erase the last entry, update stop and move to a legal position.
+ if (P.leafOffset() == NewSize) {
+ setNodeStop(IM.height, Node.stop(NewSize - 1));
+ P.moveRight(IM.height);
+ } else if (UpdateRoot && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+ assert(Level && "Cannot erase root node");
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (--Level == 0) {
+ IM.rootBranch().erase(P.offset(0), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+ // If this cleared the root, switch to height=0.
+ if (IM.empty()) {
+ IM.switchRootToLeaf();
+ this->setRoot(0);
+ return;
+ }
+ } else {
+ // Remove node ref from branch node at Level.
+ Branch &Parent = P.node<Branch>(Level);
+ if (P.size(Level) == 1) {
+ // Branch node became empty, remove it recursively.
+ IM.deleteNode(&Parent);
+ eraseNode(Level);
+ } else {
+ // Branch node won't become empty.
+ Parent.erase(P.offset(Level), P.size(Level));
+ unsigned NewSize = P.size(Level) - 1;
+ P.setSize(Level, NewSize);
+ // If we removed the last branch, update stop and move to a legal pos.
+ if (P.offset(Level) == NewSize) {
+ setNodeStop(Level, Parent.stop(NewSize - 1));
+ P.moveRight(Level);
+ }
+ }
+ }
+ // Update path cache for the new right sibling position.
+ if (P.valid()) {
+ P.reset(Level + 1);
+ P.offset(Level + 1) = 0;
+ }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @tparam NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
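+///
+/// Sketch of the scheme (illustrative): the current node is pooled with its
+/// left and right siblings, up to 4 nodes including a newly allocated one
+/// when the pool is still too small, and distribute() computes an even split.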
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned CurSize[4];
+ NodeT *Node[4];
+ unsigned Nodes = 0;
+ unsigned Elements = 0;
+ unsigned Offset = P.offset(Level);
+
+ // Do we have a left sibling?
+ NodeRef LeftSib = P.getLeftSibling(Level);
+ if (LeftSib) {
+ Offset += Elements = CurSize[Nodes] = LeftSib.size();
+ Node[Nodes++] = &LeftSib.get<NodeT>();
+ }
+
+ // Current node.
+ Elements += CurSize[Nodes] = P.size(Level);
+ Node[Nodes++] = &P.node<NodeT>(Level);
+
+ // Do we have a right sibling?
+ NodeRef RightSib = P.getRightSibling(Level);
+ if (RightSib) {
+ Elements += CurSize[Nodes] = RightSib.size();
+ Node[Nodes++] = &RightSib.get<NodeT>();
+ }
+
+ // Do we need to allocate a new node?
+ unsigned NewNode = 0;
+ if (Elements + 1 > Nodes * NodeT::Capacity) {
+ // Insert NewNode at the penultimate position, or after a single node.
+ NewNode = Nodes == 1 ? 1 : Nodes - 1;
+ CurSize[Nodes] = CurSize[NewNode];
+ Node[Nodes] = Node[NewNode];
+ CurSize[NewNode] = 0;
+ Node[NewNode] = this->map->template newNode<NodeT>();
+ ++Nodes;
+ }
+
+ // Compute the new element distribution.
+ unsigned NewSize[4];
+ IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+ CurSize, NewSize, Offset, true);
+ adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+ // Move current location to the leftmost node.
+ if (LeftSib)
+ P.moveLeft(Level);
+
+ // Elements have been rearranged, now update node sizes and stops.
+ bool SplitRoot = false;
+ unsigned Pos = 0;
+ for (;;) {
+ KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+ if (NewNode && Pos == NewNode) {
+ SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+ Level += SplitRoot;
+ } else {
+ P.setSize(Level, NewSize[Pos]);
+ setNodeStop(Level, Stop);
+ }
+ if (Pos + 1 == Nodes)
+ break;
+ P.moveRight(Level);
+ ++Pos;
+ }
+
+ // Where was I? Find NewOffset.
+  while (Pos != NewOffset.first) {
+ P.moveLeft(Level);
+ --Pos;
+ }
+ P.offset(Level) = NewOffset.second;
+ return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapOverlaps ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+/// bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+/// for (IntervalMapOverlaps I(a, b); I.valid(); ++I) { ... }
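+///
+/// 3. Inspect each overlap's extent (illustrative; MapT stands for any
+/// IntervalMap instantiation with matching KeyT and Traits):
+/// for (IntervalMapOverlaps<MapT, MapT> I(a, b); I.valid(); ++I)
+///   use(I.start(), I.stop());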
+///
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+ typedef typename MapA::KeyType KeyType;
+ typedef typename MapA::KeyTraits Traits;
+ typename MapA::const_iterator posA;
+ typename MapB::const_iterator posB;
+
+ /// advance - Move posA and posB forward until reaching an overlap, or until
+ /// either meets end.
+ /// Don't move the iterators if they are already overlapping.
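+  /// For example (illustrative): if posA is at [10;20] and posB at [25;30],
+  /// posA.advanceTo(25) moves posA to its first interval ending at or after
+  /// 25; if that interval is [27;40] it overlaps [25;30], so advance stops.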
+ void advance() {
+ if (!valid())
+ return;
+
+ if (Traits::stopLess(posA.stop(), posB.start())) {
+ // A ends before B begins. Catch up.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ } else if (Traits::stopLess(posB.stop(), posA.start())) {
+ // B ends before A begins. Catch up.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ } else
+ // Already overlapping.
+ return;
+
+ for (;;) {
+ // Make a.end > b.start.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ // Make b.end > a.start.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ }
+ }
+
+public:
+ /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+ IntervalMapOverlaps(const MapA &a, const MapB &b)
+ : posA(b.empty() ? a.end() : a.find(b.start())),
+ posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+ /// valid - Return true if iterator is at an overlap.
+ bool valid() const {
+ return posA.valid() && posB.valid();
+ }
+
+ /// a - access the left hand side in the overlap.
+ const typename MapA::const_iterator &a() const { return posA; }
+
+ /// b - access the right hand side in the overlap.
+ const typename MapB::const_iterator &b() const { return posB; }
+
+ /// start - Beginning of the overlapping interval.
+ KeyType start() const {
+ KeyType ak = a().start();
+ KeyType bk = b().start();
+ return Traits::startLess(ak, bk) ? bk : ak;
+ }
+
+ /// stop - End of the overlapping interval.
+ KeyType stop() const {
+ KeyType ak = a().stop();
+ KeyType bk = b().stop();
+ return Traits::startLess(ak, bk) ? ak : bk;
+ }
+
+ /// skipA - Move to the next overlap that doesn't involve a().
+ void skipA() {
+ ++posA;
+ advance();
+ }
+
+ /// skipB - Move to the next overlap that doesn't involve b().
+ void skipB() {
+ ++posB;
+ advance();
+ }
+
+ /// Preincrement - Move to the next overlap.
+ IntervalMapOverlaps &operator++() {
+ // Bump the iterator that ends first. The other one may have more overlaps.
+ if (Traits::startLess(posB.stop(), posA.stop()))
+ skipB();
+ else
+ skipA();
+ return *this;
+ }
+
+ /// advanceTo - Move to the first overlapping interval with
+ /// stopLess(x, stop()).
+ void advanceTo(KeyType x) {
+ if (!valid())
+ return;
+ // Make sure advanceTo sees monotonic keys.
+ if (Traits::stopLess(posA.stop(), x))
+ posA.advanceTo(x);
+ if (Traits::stopLess(posB.stop(), x))
+ posB.advanceTo(x);
+ advance();
+ }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
new file mode 100644
index 0000000..8057ec1
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -0,0 +1,288 @@
+//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines IntrusiveRefCntPtr, a template class that
+// implements a "smart" pointer for objects that maintain their own
+// internal reference count, and RefCountedBase/RefCountedBaseVPTR, two
+// generic base classes for objects that wish to have their lifetimes
+// managed using reference counting.
+//
+// IntrusiveRefCntPtr is similar to Boost's intrusive_ptr with added
+// LLVM-style casting.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTRUSIVEREFCNTPTR_H
+#define LLVM_ADT_INTRUSIVEREFCNTPTR_H
+
+#include <atomic>
+#include <cassert>
+#include <cstddef>
+
+namespace llvm {
+
+ template <class T>
+ class IntrusiveRefCntPtr;
+
+//===----------------------------------------------------------------------===//
+/// RefCountedBase - A generic base class for objects that wish to
+/// have their lifetimes managed using reference counts. Classes
+/// subclass RefCountedBase to obtain such functionality, and are
+/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
+/// which automatically handle the management of reference counts.
+/// Objects that subclass RefCountedBase should not be allocated on
+/// the stack, as invoking "delete" (which is called when the
+/// reference count hits 0) on such objects is an error.
+//===----------------------------------------------------------------------===//
+ template <class Derived>
+ class RefCountedBase {
+ mutable unsigned ref_cnt;
+
+ public:
+ RefCountedBase() : ref_cnt(0) {}
+ RefCountedBase(const RefCountedBase &) : ref_cnt(0) {}
+
+ void Retain() const { ++ref_cnt; }
+ void Release() const {
+      assert(ref_cnt > 0 && "Reference count is already zero.");
+ if (--ref_cnt == 0) delete static_cast<const Derived*>(this);
+ }
+ };
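+
+  // Illustrative sketch (hypothetical type): a class opts into intrusive
+  // reference counting by deriving from RefCountedBase<Derived> and is then
+  // handled through IntrusiveRefCntPtr (defined below):
+  //
+  //   class Document : public RefCountedBase<Document> {};
+  //
+  //   IntrusiveRefCntPtr<Document> A(new Document()); // ref_cnt == 1
+  //   IntrusiveRefCntPtr<Document> B = A;             // ref_cnt == 2
+  //   // Both pointers leaving scope drops ref_cnt to 0 and deletes.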
+
+//===----------------------------------------------------------------------===//
+/// RefCountedBaseVPTR - A class that has the same function as
+/// RefCountedBase, but with a virtual destructor. Should be used
+/// instead of RefCountedBase for classes that already have virtual
+/// methods to enforce dynamic allocation via 'new'. Classes that
+/// inherit from RefCountedBaseVPTR can't be allocated on stack -
+/// attempting to do this will produce a compile error.
+//===----------------------------------------------------------------------===//
+ class RefCountedBaseVPTR {
+ mutable unsigned ref_cnt;
+ virtual void anchor();
+
+ protected:
+ RefCountedBaseVPTR() : ref_cnt(0) {}
+ RefCountedBaseVPTR(const RefCountedBaseVPTR &) : ref_cnt(0) {}
+
+ virtual ~RefCountedBaseVPTR() {}
+
+ void Retain() const { ++ref_cnt; }
+ void Release() const {
+      assert(ref_cnt > 0 && "Reference count is already zero.");
+ if (--ref_cnt == 0) delete this;
+ }
+
+ template <typename T>
+ friend struct IntrusiveRefCntPtrInfo;
+ };
+
+
+ template <typename T> struct IntrusiveRefCntPtrInfo {
+ static void retain(T *obj) { obj->Retain(); }
+ static void release(T *obj) { obj->Release(); }
+ };
+
+/// \brief A thread-safe version of \c llvm::RefCountedBase.
+///
+/// A generic base class for objects that wish to have their lifetimes managed
+/// using reference counts. Classes subclass \c ThreadSafeRefCountedBase to
+/// obtain such functionality, and are typically handled with
+/// \c IntrusiveRefCntPtr "smart pointers" which automatically handle the
+/// management of reference counts.
+template <class Derived>
+class ThreadSafeRefCountedBase {
+ mutable std::atomic<int> RefCount;
+
+protected:
+ ThreadSafeRefCountedBase() : RefCount(0) {}
+
+public:
+ void Retain() const { ++RefCount; }
+
+ void Release() const {
+ int NewRefCount = --RefCount;
+ assert(NewRefCount >= 0 && "Reference count was already zero.");
+ if (NewRefCount == 0)
+ delete static_cast<const Derived*>(this);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// IntrusiveRefCntPtr - A template class that implements a "smart pointer"
+/// that assumes the wrapped object has a reference count associated
+/// with it that can be managed via calls to Retain()/Release(). The
+/// smart pointers manage reference counts via the RAII idiom: upon
+/// creation of the smart pointer the reference count of the wrapped
+/// object is incremented, and upon destruction of the smart pointer the
+/// reference count is decremented. This class also safely handles
+/// wrapping NULL pointers.
+///
+/// Reference counting is implemented via calls to
+/// Obj->Retain()/Obj->Release(). Release() is required to destroy
+/// the object when the reference count reaches zero. Inheriting from
+/// RefCountedBase/RefCountedBaseVPTR takes care of this
+/// automatically.
+//===----------------------------------------------------------------------===//
+ template <typename T>
+ class IntrusiveRefCntPtr {
+ T* Obj;
+
+ public:
+ typedef T element_type;
+
+ explicit IntrusiveRefCntPtr() : Obj(nullptr) {}
+
+ IntrusiveRefCntPtr(T* obj) : Obj(obj) {
+ retain();
+ }
+
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr& S) : Obj(S.Obj) {
+ retain();
+ }
+
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
+ S.Obj = nullptr;
+ }
+
+ template <class X>
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.get()) {
+ S.Obj = nullptr;
+ }
+
+ template <class X>
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
+ : Obj(S.get()) {
+ retain();
+ }
+
+ IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
+ swap(S);
+ return *this;
+ }
+
+ ~IntrusiveRefCntPtr() { release(); }
+
+ T& operator*() const { return *Obj; }
+
+ T* operator->() const { return Obj; }
+
+ T* get() const { return Obj; }
+
+ explicit operator bool() const { return Obj; }
+
+ void swap(IntrusiveRefCntPtr& other) {
+ T* tmp = other.Obj;
+ other.Obj = Obj;
+ Obj = tmp;
+ }
+
+ void reset() {
+ release();
+ Obj = nullptr;
+ }
+
+ void resetWithoutRelease() {
+ Obj = nullptr;
+ }
+
+ private:
+ void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
+ void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
+
+ template <typename X>
+ friend class IntrusiveRefCntPtr;
+ };
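+
+  // Illustrative sketch: copies retain, moves transfer ownership without
+  // touching the count (Widget is a hypothetical RefCountedBase subclass).
+  //
+  //   IntrusiveRefCntPtr<Widget> A(new Widget()); // count == 1
+  //   IntrusiveRefCntPtr<Widget> B = A;           // copy: count == 2
+  //   IntrusiveRefCntPtr<Widget> C = std::move(B);// move: count still 2
+  //   A.reset();                                  // count == 1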
+
+ template<class T, class U>
+ inline bool operator==(const IntrusiveRefCntPtr<T>& A,
+ const IntrusiveRefCntPtr<U>& B)
+ {
+ return A.get() == B.get();
+ }
+
+ template<class T, class U>
+ inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
+ const IntrusiveRefCntPtr<U>& B)
+ {
+ return A.get() != B.get();
+ }
+
+ template<class T, class U>
+ inline bool operator==(const IntrusiveRefCntPtr<T>& A,
+ U* B)
+ {
+ return A.get() == B;
+ }
+
+ template<class T, class U>
+ inline bool operator!=(const IntrusiveRefCntPtr<T>& A,
+ U* B)
+ {
+ return A.get() != B;
+ }
+
+ template<class T, class U>
+ inline bool operator==(T* A,
+ const IntrusiveRefCntPtr<U>& B)
+ {
+ return A == B.get();
+ }
+
+ template<class T, class U>
+ inline bool operator!=(T* A,
+ const IntrusiveRefCntPtr<U>& B)
+ {
+ return A != B.get();
+ }
+
+ template <class T>
+ bool operator==(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+ return !B;
+ }
+
+ template <class T>
+ bool operator==(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return B == A;
+ }
+
+ template <class T>
+ bool operator!=(std::nullptr_t A, const IntrusiveRefCntPtr<T> &B) {
+ return !(A == B);
+ }
+
+ template <class T>
+ bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
+ return !(A == B);
+ }
+
+//===----------------------------------------------------------------------===//
+// LLVM-style downcasting support for IntrusiveRefCntPtr objects
+//===----------------------------------------------------------------------===//
+
+ template <typename From> struct simplify_type;
+
+ template<class T> struct simplify_type<IntrusiveRefCntPtr<T> > {
+ typedef T* SimpleType;
+ static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T>& Val) {
+ return Val.get();
+ }
+ };
+
+ template<class T> struct simplify_type<const IntrusiveRefCntPtr<T> > {
+ typedef /*const*/ T* SimpleType;
+ static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T>& Val) {
+ return Val.get();
+ }
+ };
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_INTRUSIVEREFCNTPTR_H
diff --git a/contrib/llvm/include/llvm/ADT/MapVector.h b/contrib/llvm/include/llvm/ADT/MapVector.h
new file mode 100644
index 0000000..f19a50b
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/MapVector.h
@@ -0,0 +1,200 @@
+//===- llvm/ADT/MapVector.h - Map w/ deterministic value order --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a map that provides insertion order iteration. The
+// interface is purposefully minimal. The key is assumed to be cheap to copy
+// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in
+// a std::vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_MAPVECTOR_H
+#define LLVM_ADT_MAPVECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace llvm {
+
+/// This class implements a map that also provides access to all stored values
+/// in a deterministic order. The values are kept in a std::vector and the
+/// mapping is done with DenseMap from Keys to indexes in that vector.
+template<typename KeyT, typename ValueT,
+ typename MapType = llvm::DenseMap<KeyT, unsigned>,
+ typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
+class MapVector {
+ typedef typename VectorType::size_type size_type;
+
+ MapType Map;
+ VectorType Vector;
+
+public:
+ typedef typename VectorType::iterator iterator;
+ typedef typename VectorType::const_iterator const_iterator;
+ typedef typename VectorType::reverse_iterator reverse_iterator;
+ typedef typename VectorType::const_reverse_iterator const_reverse_iterator;
+
+ size_type size() const { return Vector.size(); }
+
+ iterator begin() { return Vector.begin(); }
+ const_iterator begin() const { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+ const_iterator end() const { return Vector.end(); }
+
+ reverse_iterator rbegin() { return Vector.rbegin(); }
+ const_reverse_iterator rbegin() const { return Vector.rbegin(); }
+ reverse_iterator rend() { return Vector.rend(); }
+ const_reverse_iterator rend() const { return Vector.rend(); }
+
+ bool empty() const {
+ return Vector.empty();
+ }
+
+ std::pair<KeyT, ValueT> &front() { return Vector.front(); }
+ const std::pair<KeyT, ValueT> &front() const { return Vector.front(); }
+ std::pair<KeyT, ValueT> &back() { return Vector.back(); }
+ const std::pair<KeyT, ValueT> &back() const { return Vector.back(); }
+
+ void clear() {
+ Map.clear();
+ Vector.clear();
+ }
+
+ void swap(MapVector &RHS) {
+ std::swap(Map, RHS.Map);
+ std::swap(Vector, RHS.Vector);
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ unsigned &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::make_pair(Key, ValueT()));
+ I = Vector.size() - 1;
+ }
+ return Vector[I].second;
+ }
+
+ ValueT lookup(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? ValueT() : Vector[Pos->second].second;
+ }
+
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
+ std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
+ unsigned &I = Result.first->second;
+ if (Result.second) {
+ Vector.push_back(std::make_pair(KV.first, KV.second));
+ I = Vector.size() - 1;
+ return std::make_pair(std::prev(end()), true);
+ }
+ return std::make_pair(begin() + I, false);
+ }
+
+ size_type count(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? 0 : 1;
+ }
+
+ iterator find(const KeyT &Key) {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? Vector.end() :
+ (Vector.begin() + Pos->second);
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ typename MapType::const_iterator Pos = Map.find(Key);
+ return Pos == Map.end()? Vector.end() :
+ (Vector.begin() + Pos->second);
+ }
+
+ /// \brief Remove the last element from the vector.
+ void pop_back() {
+ typename MapType::iterator Pos = Map.find(Vector.back().first);
+ Map.erase(Pos);
+ Vector.pop_back();
+ }
+
+ /// \brief Remove the element given by Iterator.
+ ///
+ /// Returns an iterator to the element following the one which was removed,
+ /// which may be end().
+ ///
+ /// \note This is a deceivingly expensive operation (linear time). It's
+ /// usually better to use \a remove_if() if possible.
+ typename VectorType::iterator erase(typename VectorType::iterator Iterator) {
+ Map.erase(Iterator->first);
+ auto Next = Vector.erase(Iterator);
+ if (Next == Vector.end())
+ return Next;
+
+ // Update indices in the map.
+ size_t Index = Next - Vector.begin();
+ for (auto &I : Map) {
+ assert(I.second != Index && "Index was already erased!");
+ if (I.second > Index)
+ --I.second;
+ }
+ return Next;
+ }
+
+ /// \brief Remove all elements with the key value Key.
+ ///
+ /// Returns the number of elements removed.
+ size_type erase(const KeyT &Key) {
+ auto Iterator = find(Key);
+ if (Iterator == end())
+ return 0;
+ erase(Iterator);
+ return 1;
+ }
+
+ /// \brief Remove the elements that match the predicate.
+ ///
+ /// Erase all elements that match \c Pred in a single pass. Takes linear
+ /// time.
+ template <class Predicate> void remove_if(Predicate Pred);
+};
+
+template <typename KeyT, typename ValueT, typename MapType, typename VectorType>
+template <class Function>
+void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
+ auto O = Vector.begin();
+ for (auto I = O, E = Vector.end(); I != E; ++I) {
+ if (Pred(*I)) {
+ // Erase from the map.
+ Map.erase(I->first);
+ continue;
+ }
+
+ if (I != O) {
+ // Move the value and update the index in the map.
+ *O = std::move(*I);
+ Map[O->first] = O - Vector.begin();
+ }
+ ++O;
+ }
+ // Erase trailing entries in the vector.
+ Vector.erase(O, Vector.end());
+}
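+
+// Illustrative usage sketch (hypothetical values): iteration follows
+// insertion order while lookups go through the DenseMap index.
+//
+//   MapVector<int, const char *> MV;
+//   MV.insert(std::make_pair(3, "three"));
+//   MV[1] = "one";          // appended after key 3
+//   for (auto &KV : MV)     // visits (3, "three") then (1, "one")
+//     consume(KV.first, KV.second);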
+
+/// \brief A MapVector that performs no allocations if smaller than a certain
+/// size.
+template <typename KeyT, typename ValueT, unsigned N>
+struct SmallMapVector
+ : MapVector<KeyT, ValueT, SmallDenseMap<KeyT, unsigned, N>,
+ SmallVector<std::pair<KeyT, ValueT>, N>> {
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/None.h b/contrib/llvm/include/llvm/ADT/None.h
new file mode 100644
index 0000000..d69ec17
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/None.h
@@ -0,0 +1,26 @@
+//===-- None.h - Simple null value for implicit construction ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides None, an enumerator for use in implicit constructors
+// of various (usually templated) types to make such construction more
+// terse.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_NONE_H
+#define LLVM_ADT_NONE_H
+
+namespace llvm {
+/// \brief A simple null object to allow implicit construction of Optional<T>
+/// and similar types without having to spell out the specialization's name.
+enum class NoneType { None };
+const NoneType None = NoneType::None;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/Optional.h b/contrib/llvm/include/llvm/ADT/Optional.h
new file mode 100644
index 0000000..d9acaf6
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Optional.h
@@ -0,0 +1,228 @@
+//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Optional, a template class modeled in the spirit of
+// OCaml's 'opt' variant. The idea is to strongly type whether or not
+// a value can be optional.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_OPTIONAL_H
+#define LLVM_ADT_OPTIONAL_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <new>
+#include <utility>
+
+namespace llvm {
+
+template<typename T>
+class Optional {
+ AlignedCharArrayUnion<T> storage;
+ bool hasVal;
+public:
+ typedef T value_type;
+
+ Optional(NoneType) : hasVal(false) {}
+ explicit Optional() : hasVal(false) {}
+ Optional(const T &y) : hasVal(true) {
+ new (storage.buffer) T(y);
+ }
+ Optional(const Optional &O) : hasVal(O.hasVal) {
+ if (hasVal)
+ new (storage.buffer) T(*O);
+ }
+
+ Optional(T &&y) : hasVal(true) {
+ new (storage.buffer) T(std::forward<T>(y));
+ }
+ Optional(Optional<T> &&O) : hasVal(O) {
+ if (O) {
+ new (storage.buffer) T(std::move(*O));
+ O.reset();
+ }
+ }
+ Optional &operator=(T &&y) {
+ if (hasVal)
+ **this = std::move(y);
+ else {
+ new (storage.buffer) T(std::move(y));
+ hasVal = true;
+ }
+ return *this;
+ }
+ Optional &operator=(Optional &&O) {
+ if (!O)
+ reset();
+ else {
+ *this = std::move(*O);
+ O.reset();
+ }
+ return *this;
+ }
+
+ /// Create a new object by constructing it in place with the given arguments.
+ template<typename ...ArgTypes>
+ void emplace(ArgTypes &&...Args) {
+ reset();
+ hasVal = true;
+ new (storage.buffer) T(std::forward<ArgTypes>(Args)...);
+ }
+
+ static inline Optional create(const T* y) {
+ return y ? Optional(*y) : Optional();
+ }
+
+ // FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
+ // could be made more efficient by passing by value, possibly unifying them
+ // with the rvalue versions above - but this could place a different set of
+ // requirements (notably: the existence of a default ctor) when implemented
+ // in that way. Careful SFINAE to avoid such pitfalls would be required.
+ Optional &operator=(const T &y) {
+ if (hasVal)
+ **this = y;
+ else {
+ new (storage.buffer) T(y);
+ hasVal = true;
+ }
+ return *this;
+ }
+
+ Optional &operator=(const Optional &O) {
+ if (!O)
+ reset();
+ else
+ *this = *O;
+ return *this;
+ }
+
+ void reset() {
+ if (hasVal) {
+ (**this).~T();
+ hasVal = false;
+ }
+ }
+
+ ~Optional() {
+ reset();
+ }
+
+ const T* getPointer() const { assert(hasVal); return reinterpret_cast<const T*>(storage.buffer); }
+ T* getPointer() { assert(hasVal); return reinterpret_cast<T*>(storage.buffer); }
+ const T& getValue() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
+ T& getValue() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
+
+ explicit operator bool() const { return hasVal; }
+ bool hasValue() const { return hasVal; }
+ const T* operator->() const { return getPointer(); }
+ T* operator->() { return getPointer(); }
+ const T& operator*() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
+ T& operator*() LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
+
+ template <typename U>
+ LLVM_CONSTEXPR T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
+ return hasValue() ? getValue() : std::forward<U>(value);
+ }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ T&& getValue() && { assert(hasVal); return std::move(*getPointer()); }
+ T&& operator*() && { assert(hasVal); return std::move(*getPointer()); }
+
+ template <typename U>
+ T getValueOr(U &&value) && {
+ return hasValue() ? std::move(getValue()) : std::forward<U>(value);
+ }
+#endif
+};
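+
+// Illustrative sketch: Optional as a typed "value or nothing" result
+// (parseDigit and use are hypothetical).
+//
+//   Optional<unsigned> parseDigit(char C) {
+//     if (C >= '0' && C <= '9')
+//       return C - '0';
+//     return None;
+//   }
+//
+//   if (Optional<unsigned> D = parseDigit('7'))
+//     use(*D);                                   // 7
+//   unsigned V = parseDigit('x').getValueOr(0);  // 0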
+
+template <typename T> struct isPodLike;
+template <typename T> struct isPodLike<Optional<T> > {
+ // An Optional<T> is pod-like if T is.
+ static const bool value = isPodLike<T>::value;
+};
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator==(const Optional<T> &X, const Optional<U> &Y);
+
+template<typename T>
+bool operator==(const Optional<T> &X, NoneType) {
+ return !X.hasValue();
+}
+
+template<typename T>
+bool operator==(NoneType, const Optional<T> &X) {
+ return X == None;
+}
+
+template<typename T>
+bool operator!=(const Optional<T> &X, NoneType) {
+ return !(X == None);
+}
+
+template<typename T>
+bool operator!=(NoneType, const Optional<T> &X) {
+ return X != None;
+}
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator!=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>(const Optional<T> &X, const Optional<U> &Y);
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/PackedVector.h b/contrib/llvm/include/llvm/ADT/PackedVector.h
new file mode 100644
index 0000000..0926717
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/PackedVector.h
@@ -0,0 +1,149 @@
+//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PackedVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PACKEDVECTOR_H
+#define LLVM_ADT_PACKEDVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include <limits>
+
+namespace llvm {
+
+template <typename T, unsigned BitNum, typename BitVectorTy, bool isSigned>
+class PackedVectorBase;
+
+// This won't be necessary if we can specialize members without specializing
+// the parent template.
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, false> {
+protected:
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+ T val = T();
+ for (unsigned i = 0; i != BitNum; ++i)
+ val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+ return val;
+ }
+
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+ assert((val >> BitNum) == 0 && "value is too big");
+ for (unsigned i = 0; i != BitNum; ++i)
+ Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+ }
+};
+
+template <typename T, unsigned BitNum, typename BitVectorTy>
+class PackedVectorBase<T, BitNum, BitVectorTy, true> {
+protected:
+ static T getValue(const BitVectorTy &Bits, unsigned Idx) {
+ T val = T();
+ for (unsigned i = 0; i != BitNum-1; ++i)
+ val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
+ if (Bits[(Idx << (BitNum-1)) + BitNum-1])
+ val = ~val;
+ return val;
+ }
+
+ static void setValue(BitVectorTy &Bits, unsigned Idx, T val) {
+ if (val < 0) {
+ val = ~val;
+ Bits.set((Idx << (BitNum-1)) + BitNum-1);
+ }
+ assert((val >> (BitNum-1)) == 0 && "value is too big");
+ for (unsigned i = 0; i != BitNum-1; ++i)
+ Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
+ }
+};
+
+/// \brief Store a vector of values using a specific number of bits for each
+/// value. Both signed and unsigned types can be used, e.g.
+/// @code
+/// PackedVector<signed, 2> vec;
+/// @endcode
+/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
+/// an assertion.
+template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
+class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed> {
+ BitVectorTy Bits;
+ typedef PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed> base;
+
+public:
+ class reference {
+ PackedVector &Vec;
+ const unsigned Idx;
+
+ reference(); // Undefined
+ public:
+ reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) {}
+
+ reference &operator=(T val) {
+ Vec.setValue(Vec.Bits, Idx, val);
+ return *this;
+ }
+ operator T() const {
+ return Vec.getValue(Vec.Bits, Idx);
+ }
+ };
+
+ PackedVector() = default;
+ explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { }
+
+ bool empty() const { return Bits.empty(); }
+
+ unsigned size() const { return Bits.size() >> (BitNum - 1); }
+
+ void clear() { Bits.clear(); }
+
+ void resize(unsigned N) { Bits.resize(N << (BitNum - 1)); }
+
+ void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
+
+ PackedVector &reset() {
+ Bits.reset();
+ return *this;
+ }
+
+ void push_back(T val) {
+ resize(size()+1);
+ (*this)[size()-1] = val;
+ }
+
+ reference operator[](unsigned Idx) {
+ return reference(*this, Idx);
+ }
+
+ T operator[](unsigned Idx) const {
+ return base::getValue(Bits, Idx);
+ }
+
+ bool operator==(const PackedVector &RHS) const {
+ return Bits == RHS.Bits;
+ }
+
+ bool operator!=(const PackedVector &RHS) const {
+ return Bits != RHS.Bits;
+ }
+
+ PackedVector &operator|=(const PackedVector &RHS) {
+ Bits |= RHS.Bits;
+ return *this;
+ }
+};
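+
+// Illustrative sketch: a 2-bit signed PackedVector stores one magnitude bit
+// plus a sign bit per element, covering the values -2, -1, 0, 1.
+//
+//   PackedVector<signed, 2> Vec;
+//   Vec.push_back(-2);
+//   Vec.push_back(1);
+//   signed First = Vec[0];  // -2
+//   Vec[1] = 0;             // the proxy reference writes back into the bits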
+
+// Leave BitNum=0 undefined.
+template <typename T> class PackedVector<T, 0>;
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/PointerIntPair.h b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
new file mode 100644
index 0000000..0058d85
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/PointerIntPair.h
@@ -0,0 +1,218 @@
+//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerIntPair class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERINTPAIR_H
+#define LLVM_ADT_POINTERINTPAIR_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <limits>
+
+namespace llvm {
+
+template <typename T> struct DenseMapInfo;
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo;
+
+/// PointerIntPair - This class implements a pair of a pointer and small
+/// integer. It is designed to represent this in the space required by one
+/// pointer by bitmangling the integer into the low part of the pointer. This
+/// can only be done for small integers: typically up to 3 bits, but it depends
+/// on the number of bits available according to PointerLikeTypeTraits for the
+/// type.
+///
+/// Note that PointerIntPair always puts the IntVal part in the highest bits
+/// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for
+/// the bool into bit #2, not bit #0, which allows the low two bits to be used
+/// for something else. For example, this allows:
+/// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
+/// ... and the two bools will land in different bits.
+///
+template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
+ typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
+ typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
+class PointerIntPair {
+ intptr_t Value;
+
+public:
+ PointerIntPair() : Value(0) {}
+ PointerIntPair(PointerTy PtrVal, IntType IntVal) {
+ setPointerAndInt(PtrVal, IntVal);
+ }
+ explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
+
+ PointerTy getPointer() const { return Info::getPointer(Value); }
+
+ IntType getInt() const { return (IntType)Info::getInt(Value); }
+
+ void setPointer(PointerTy PtrVal) {
+ Value = Info::updatePointer(Value, PtrVal);
+ }
+
+ void setInt(IntType IntVal) { Value = Info::updateInt(Value, IntVal); }
+
+ void initWithPointer(PointerTy PtrVal) {
+ Value = Info::updatePointer(0, PtrVal);
+ }
+
+ void setPointerAndInt(PointerTy PtrVal, IntType IntVal) {
+ Value = Info::updateInt(Info::updatePointer(0, PtrVal), IntVal);
+ }
+
+ PointerTy const *getAddrOfPointer() const {
+ return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
+ }
+
+ PointerTy *getAddrOfPointer() {
+ assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+ "Can only return the address if IntBits is cleared and "
+ "PtrTraits doesn't change the pointer");
+ return reinterpret_cast<PointerTy *>(&Value);
+ }
+
+ void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
+ void setFromOpaqueValue(void *Val) {
+ Value = reinterpret_cast<intptr_t>(Val);
+ }
+
+ static PointerIntPair getFromOpaqueValue(void *V) {
+ PointerIntPair P;
+ P.setFromOpaqueValue(V);
+ return P;
+ }
+
+ // Allow PointerIntPairs to be created from const void * if and only if the
+ // pointer type could be created from a const void *.
+ static PointerIntPair getFromOpaqueValue(const void *V) {
+ (void)PtrTraits::getFromVoidPointer(V);
+ return getFromOpaqueValue(const_cast<void *>(V));
+ }
+
+ bool operator==(const PointerIntPair &RHS) const {
+ return Value == RHS.Value;
+ }
+ bool operator!=(const PointerIntPair &RHS) const {
+ return Value != RHS.Value;
+ }
+ bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
+ bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
+ bool operator<=(const PointerIntPair &RHS) const {
+ return Value <= RHS.Value;
+ }
+ bool operator>=(const PointerIntPair &RHS) const {
+ return Value >= RHS.Value;
+ }
+};
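+
+// Illustrative sketch: stash a boolean in the low bits of a suitably aligned
+// pointer while the pair still occupies a single pointer-sized word.
+//
+//   int *P = new int(42);                    // int-aligned, low bits free
+//   PointerIntPair<int *, 1, bool> Pair(P, true);
+//   bool Flag = Pair.getInt();               // true
+//   *Pair.getPointer() += 1;                 // 43; the tag bit is masked off
+//   static_assert(sizeof(Pair) == sizeof(int *), "one word of storage");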
+
+template <typename PointerT, unsigned IntBits, typename PtrTraits>
+struct PointerIntPairInfo {
+ static_assert(PtrTraits::NumLowBitsAvailable <
+ std::numeric_limits<uintptr_t>::digits,
+ "cannot use a pointer type that has all bits free");
+ static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
+ "PointerIntPair with integer size too large for pointer");
+ enum : uintptr_t {
+ /// PointerBitMask - The bits that come from the pointer.
+ PointerBitMask =
+ ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
+
+ /// IntShift - The number of low bits that we reserve for other uses, and
+ /// keep zero.
+ IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
+
+ /// IntMask - This is the unshifted mask for valid bits of the int type.
+ IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
+
+ // ShiftedIntMask - This is the bits for the integer shifted in place.
+ ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
+ };
+
+ static PointerT getPointer(intptr_t Value) {
+ return PtrTraits::getFromVoidPointer(
+ reinterpret_cast<void *>(Value & PointerBitMask));
+ }
+
+ static intptr_t getInt(intptr_t Value) {
+ return (Value >> IntShift) & IntMask;
+ }
+
+ static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
+ intptr_t PtrWord =
+ reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
+ assert((PtrWord & ~PointerBitMask) == 0 &&
+ "Pointer is not sufficiently aligned");
+ // Preserve all low bits, just update the pointer.
+ return PtrWord | (OrigValue & ~PointerBitMask);
+ }
+
+ static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
+ intptr_t IntWord = static_cast<intptr_t>(Int);
+ assert((IntWord & ~IntMask) == 0 && "Integer too large for field");
+
+ // Preserve all bits other than the ones we are updating.
+ return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
+ }
+};
+
+template <typename T> struct isPodLike;
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct isPodLike<PointerIntPair<PointerTy, IntBits, IntType>> {
+ static const bool value = true;
+};
+
+// Provide specialization of DenseMapInfo for PointerIntPair.
+template <typename PointerTy, unsigned IntBits, typename IntType>
+struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>> {
+ typedef PointerIntPair<PointerTy, IntBits, IntType> Ty;
+ static Ty getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
+ return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+ }
+ static Ty getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
+ return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
+ }
+ static unsigned getHashValue(Ty V) {
+ uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
+ return unsigned(IV) ^ unsigned(IV >> 9);
+ }
+ static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
+};
+
+// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
+template <typename PointerTy, unsigned IntBits, typename IntType,
+ typename PtrTraits>
+class PointerLikeTypeTraits<
+ PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
+public:
+ static inline void *
+ getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
+ return P.getOpaqueValue();
+ }
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(const void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
+ enum { NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits };
+};
+
+} // end namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/PointerUnion.h b/contrib/llvm/include/llvm/ADT/PointerUnion.h
new file mode 100644
index 0000000..6b3fe57
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/PointerUnion.h
@@ -0,0 +1,474 @@
+//===- llvm/ADT/PointerUnion.h - Discriminated Union of 2 Ptrs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerUnion class, which is a discriminated union of
+// pointer types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POINTERUNION_H
+#define LLVM_ADT_POINTERUNION_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+template <typename T> struct PointerUnionTypeSelectorReturn {
+ typedef T Return;
+};
+
+/// Get a type based on whether two types are the same or not.
+///
+/// For:
+///
+/// \code
+/// typedef typename PointerUnionTypeSelector<T1, T2, EQ, NE>::Return Ret;
+/// \endcode
+///
+/// Ret will be EQ type if T1 is same as T2 or NE type otherwise.
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector {
+ typedef typename PointerUnionTypeSelectorReturn<RET_NE>::Return Return;
+};
+
+template <typename T, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelector<T, T, RET_EQ, RET_NE> {
+ typedef typename PointerUnionTypeSelectorReturn<RET_EQ>::Return Return;
+};
+
+template <typename T1, typename T2, typename RET_EQ, typename RET_NE>
+struct PointerUnionTypeSelectorReturn<
+ PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>> {
+ typedef
+ typename PointerUnionTypeSelector<T1, T2, RET_EQ, RET_NE>::Return Return;
+};
+
+/// Provide PointerLikeTypeTraits for void* that is used by PointerUnion
+/// for the two template arguments.
+template <typename PT1, typename PT2> class PointerUnionUIntTraits {
+public:
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ enum {
+ PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+ PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
+ NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
+ };
+};
+
+/// A discriminated union of two pointer types, with the discriminator in the
+/// low bit of the pointer.
+///
+/// This implementation is extremely efficient in space due to leveraging the
+/// low bits of the pointer, while exposing a natural and type-safe API.
+///
+/// Common use patterns would be something like this:
+/// PointerUnion<int*, float*> P;
+/// P = (int*)0;
+/// printf("%d %d", P.is<int*>(), P.is<float*>()); // prints "1 0"
+/// X = P.get<int*>(); // ok.
+/// Y = P.get<float*>(); // runtime assertion failure.
+/// Z = P.get<double*>(); // compile time failure.
+/// P = (float*)0;
+/// Y = P.get<float*>(); // ok.
+/// X = P.get<int*>(); // runtime assertion failure.
+template <typename PT1, typename PT2> class PointerUnion {
+public:
+ typedef PointerIntPair<void *, 1, bool, PointerUnionUIntTraits<PT1, PT2>>
+ ValTy;
+
+private:
+ ValTy Val;
+
+ struct IsPT1 {
+ static const int Num = 0;
+ };
+ struct IsPT2 {
+ static const int Num = 1;
+ };
+ template <typename T> struct UNION_DOESNT_CONTAIN_TYPE {};
+
+public:
+ PointerUnion() {}
+
+ PointerUnion(PT1 V)
+ : Val(const_cast<void *>(
+ PointerLikeTypeTraits<PT1>::getAsVoidPointer(V))) {}
+ PointerUnion(PT2 V)
+ : Val(const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(V)),
+ 1) {}
+
+ /// Test if the pointer held in the union is null, regardless of
+ /// which type it is.
+ bool isNull() const {
+ // Convert from the void* to one of the pointer types, to make sure that
+ // we recursively strip off low bits if we have a nested PointerUnion.
+ return !PointerLikeTypeTraits<PT1>::getFromVoidPointer(Val.getPointer());
+ }
+ explicit operator bool() const { return !isNull(); }
+
+ /// Test if the Union currently holds the type matching T.
+ template <typename T> int is() const {
+ typedef typename ::llvm::PointerUnionTypeSelector<
+ PT1, T, IsPT1, ::llvm::PointerUnionTypeSelector<
+ PT2, T, IsPT2, UNION_DOESNT_CONTAIN_TYPE<T>>>::Return
+ Ty;
+ int TyNo = Ty::Num;
+ return static_cast<int>(Val.getInt()) == TyNo;
+ }
+
+ /// Returns the value of the specified pointer type.
+ ///
+ /// If the specified pointer type is incorrect, assert.
+ template <typename T> T get() const {
+ assert(is<T>() && "Invalid accessor called");
+ return PointerLikeTypeTraits<T>::getFromVoidPointer(Val.getPointer());
+ }
+
+ /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+ template <typename T> T dyn_cast() const {
+ if (is<T>())
+ return get<T>();
+ return T();
+ }
+
+ /// If the union is set to the first pointer type get an address pointing to
+ /// it.
+ PT1 const *getAddrOfPtr1() const {
+ return const_cast<PointerUnion *>(this)->getAddrOfPtr1();
+ }
+
+ /// If the union is set to the first pointer type get an address pointing to
+ /// it.
+ PT1 *getAddrOfPtr1() {
+ assert(is<PT1>() && "Val is not the first pointer");
+ assert(
+ get<PT1>() == Val.getPointer() &&
+ "Can't get the address because PointerLikeTypeTraits changes the ptr");
+ return (PT1 *)Val.getAddrOfPointer();
+ }
+
+ /// Assignment from nullptr which just clears the union.
+ const PointerUnion &operator=(std::nullptr_t) {
+ Val.initWithPointer(nullptr);
+ return *this;
+ }
+
+ /// Assignment operators - Allow assigning into this union from either
+ /// pointer type, setting the discriminator to remember what it came from.
+ const PointerUnion &operator=(const PT1 &RHS) {
+ Val.initWithPointer(
+ const_cast<void *>(PointerLikeTypeTraits<PT1>::getAsVoidPointer(RHS)));
+ return *this;
+ }
+ const PointerUnion &operator=(const PT2 &RHS) {
+ Val.setPointerAndInt(
+ const_cast<void *>(PointerLikeTypeTraits<PT2>::getAsVoidPointer(RHS)),
+ 1);
+ return *this;
+ }
+
+ void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+ static inline PointerUnion getFromOpaqueValue(void *VP) {
+ PointerUnion V;
+ V.Val = ValTy::getFromOpaqueValue(VP);
+ return V;
+ }
+};
+
+template <typename PT1, typename PT2>
+static bool operator==(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+ return lhs.getOpaqueValue() == rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+static bool operator!=(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+ return lhs.getOpaqueValue() != rhs.getOpaqueValue();
+}
+
+template <typename PT1, typename PT2>
+static bool operator<(PointerUnion<PT1, PT2> lhs, PointerUnion<PT1, PT2> rhs) {
+ return lhs.getOpaqueValue() < rhs.getOpaqueValue();
+}
+
+// Teach SmallPtrSet that PointerUnion is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits)-1.
+template <typename PT1, typename PT2>
+class PointerLikeTypeTraits<PointerUnion<PT1, PT2>> {
+public:
+ static inline void *getAsVoidPointer(const PointerUnion<PT1, PT2> &P) {
+ return P.getOpaqueValue();
+ }
+ static inline PointerUnion<PT1, PT2> getFromVoidPointer(void *P) {
+ return PointerUnion<PT1, PT2>::getFromOpaqueValue(P);
+ }
+
+  // The number of bits available is the min of the two pointer types, less
+  // the discriminator bit.
+ enum {
+ NumLowBitsAvailable = PointerLikeTypeTraits<
+ typename PointerUnion<PT1, PT2>::ValTy>::NumLowBitsAvailable
+ };
+};
+
+/// A pointer union of three pointer types. See documentation for PointerUnion
+/// for usage.
+template <typename PT1, typename PT2, typename PT3> class PointerUnion3 {
+public:
+ typedef PointerUnion<PT1, PT2> InnerUnion;
+ typedef PointerUnion<InnerUnion, PT3> ValTy;
+
+private:
+ ValTy Val;
+
+ struct IsInnerUnion {
+ ValTy Val;
+ IsInnerUnion(ValTy val) : Val(val) {}
+ template <typename T> int is() const {
+ return Val.template is<InnerUnion>() &&
+ Val.template get<InnerUnion>().template is<T>();
+ }
+ template <typename T> T get() const {
+ return Val.template get<InnerUnion>().template get<T>();
+ }
+ };
+
+ struct IsPT3 {
+ ValTy Val;
+ IsPT3(ValTy val) : Val(val) {}
+ template <typename T> int is() const { return Val.template is<T>(); }
+ template <typename T> T get() const { return Val.template get<T>(); }
+ };
+
+public:
+ PointerUnion3() {}
+
+ PointerUnion3(PT1 V) { Val = InnerUnion(V); }
+ PointerUnion3(PT2 V) { Val = InnerUnion(V); }
+ PointerUnion3(PT3 V) { Val = V; }
+
+ /// Test if the pointer held in the union is null, regardless of
+ /// which type it is.
+ bool isNull() const { return Val.isNull(); }
+ explicit operator bool() const { return !isNull(); }
+
+ /// Test if the Union currently holds the type matching T.
+ template <typename T> int is() const {
+ // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
+ typedef typename ::llvm::PointerUnionTypeSelector<
+ PT1, T, IsInnerUnion,
+ ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return
+ Ty;
+ return Ty(Val).template is<T>();
+ }
+
+ /// Returns the value of the specified pointer type.
+ ///
+ /// If the specified pointer type is incorrect, assert.
+ template <typename T> T get() const {
+ assert(is<T>() && "Invalid accessor called");
+ // If T is PT1/PT2 choose IsInnerUnion otherwise choose IsPT3.
+ typedef typename ::llvm::PointerUnionTypeSelector<
+ PT1, T, IsInnerUnion,
+ ::llvm::PointerUnionTypeSelector<PT2, T, IsInnerUnion, IsPT3>>::Return
+ Ty;
+ return Ty(Val).template get<T>();
+ }
+
+ /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+ template <typename T> T dyn_cast() const {
+ if (is<T>())
+ return get<T>();
+ return T();
+ }
+
+ /// Assignment from nullptr which just clears the union.
+ const PointerUnion3 &operator=(std::nullptr_t) {
+ Val = nullptr;
+ return *this;
+ }
+
+ /// Assignment operators - Allow assigning into this union from either
+ /// pointer type, setting the discriminator to remember what it came from.
+ const PointerUnion3 &operator=(const PT1 &RHS) {
+ Val = InnerUnion(RHS);
+ return *this;
+ }
+ const PointerUnion3 &operator=(const PT2 &RHS) {
+ Val = InnerUnion(RHS);
+ return *this;
+ }
+ const PointerUnion3 &operator=(const PT3 &RHS) {
+ Val = RHS;
+ return *this;
+ }
+
+ void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+ static inline PointerUnion3 getFromOpaqueValue(void *VP) {
+ PointerUnion3 V;
+ V.Val = ValTy::getFromOpaqueValue(VP);
+ return V;
+ }
+};
+
+// Teach SmallPtrSet that PointerUnion3 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits)-2.
+template <typename PT1, typename PT2, typename PT3>
+class PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3>> {
+public:
+ static inline void *getAsVoidPointer(const PointerUnion3<PT1, PT2, PT3> &P) {
+ return P.getOpaqueValue();
+ }
+ static inline PointerUnion3<PT1, PT2, PT3> getFromVoidPointer(void *P) {
+ return PointerUnion3<PT1, PT2, PT3>::getFromOpaqueValue(P);
+ }
+
+  // The number of bits available is the min of the three pointer types, less
+  // the discriminator bits.
+ enum {
+ NumLowBitsAvailable = PointerLikeTypeTraits<
+ typename PointerUnion3<PT1, PT2, PT3>::ValTy>::NumLowBitsAvailable
+ };
+};
+
+/// A pointer union of four pointer types. See documentation for PointerUnion
+/// for usage.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+class PointerUnion4 {
+public:
+ typedef PointerUnion<PT1, PT2> InnerUnion1;
+ typedef PointerUnion<PT3, PT4> InnerUnion2;
+ typedef PointerUnion<InnerUnion1, InnerUnion2> ValTy;
+
+private:
+ ValTy Val;
+
+public:
+ PointerUnion4() {}
+
+ PointerUnion4(PT1 V) { Val = InnerUnion1(V); }
+ PointerUnion4(PT2 V) { Val = InnerUnion1(V); }
+ PointerUnion4(PT3 V) { Val = InnerUnion2(V); }
+ PointerUnion4(PT4 V) { Val = InnerUnion2(V); }
+
+ /// Test if the pointer held in the union is null, regardless of
+ /// which type it is.
+ bool isNull() const { return Val.isNull(); }
+ explicit operator bool() const { return !isNull(); }
+
+ /// Test if the Union currently holds the type matching T.
+ template <typename T> int is() const {
+ // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
+ typedef typename ::llvm::PointerUnionTypeSelector<
+ PT1, T, InnerUnion1, ::llvm::PointerUnionTypeSelector<
+ PT2, T, InnerUnion1, InnerUnion2>>::Return Ty;
+ return Val.template is<Ty>() && Val.template get<Ty>().template is<T>();
+ }
+
+ /// Returns the value of the specified pointer type.
+ ///
+ /// If the specified pointer type is incorrect, assert.
+ template <typename T> T get() const {
+ assert(is<T>() && "Invalid accessor called");
+ // If T is PT1/PT2 choose InnerUnion1 otherwise choose InnerUnion2.
+ typedef typename ::llvm::PointerUnionTypeSelector<
+ PT1, T, InnerUnion1, ::llvm::PointerUnionTypeSelector<
+ PT2, T, InnerUnion1, InnerUnion2>>::Return Ty;
+ return Val.template get<Ty>().template get<T>();
+ }
+
+ /// Returns the current pointer if it is of the specified pointer type,
+  /// otherwise returns null.
+ template <typename T> T dyn_cast() const {
+ if (is<T>())
+ return get<T>();
+ return T();
+ }
+
+ /// Assignment from nullptr which just clears the union.
+ const PointerUnion4 &operator=(std::nullptr_t) {
+ Val = nullptr;
+ return *this;
+ }
+
+ /// Assignment operators - Allow assigning into this union from either
+ /// pointer type, setting the discriminator to remember what it came from.
+ const PointerUnion4 &operator=(const PT1 &RHS) {
+ Val = InnerUnion1(RHS);
+ return *this;
+ }
+ const PointerUnion4 &operator=(const PT2 &RHS) {
+ Val = InnerUnion1(RHS);
+ return *this;
+ }
+ const PointerUnion4 &operator=(const PT3 &RHS) {
+ Val = InnerUnion2(RHS);
+ return *this;
+ }
+ const PointerUnion4 &operator=(const PT4 &RHS) {
+ Val = InnerUnion2(RHS);
+ return *this;
+ }
+
+ void *getOpaqueValue() const { return Val.getOpaqueValue(); }
+ static inline PointerUnion4 getFromOpaqueValue(void *VP) {
+ PointerUnion4 V;
+ V.Val = ValTy::getFromOpaqueValue(VP);
+ return V;
+ }
+};
+
+// Teach SmallPtrSet that PointerUnion4 is "basically a pointer", that has
+// # low bits available = min(PT1bits,PT2bits,PT3bits,PT4bits)-2.
+template <typename PT1, typename PT2, typename PT3, typename PT4>
+class PointerLikeTypeTraits<PointerUnion4<PT1, PT2, PT3, PT4>> {
+public:
+ static inline void *
+ getAsVoidPointer(const PointerUnion4<PT1, PT2, PT3, PT4> &P) {
+ return P.getOpaqueValue();
+ }
+ static inline PointerUnion4<PT1, PT2, PT3, PT4> getFromVoidPointer(void *P) {
+ return PointerUnion4<PT1, PT2, PT3, PT4>::getFromOpaqueValue(P);
+ }
+
+  // The number of bits available is the min of the four pointer types, less
+  // the discriminator bits.
+ enum {
+ NumLowBitsAvailable = PointerLikeTypeTraits<
+ typename PointerUnion4<PT1, PT2, PT3, PT4>::ValTy>::NumLowBitsAvailable
+ };
+};
+
+// Teach DenseMap how to use PointerUnions as keys.
+template <typename T, typename U> struct DenseMapInfo<PointerUnion<T, U>> {
+ typedef PointerUnion<T, U> Pair;
+ typedef DenseMapInfo<T> FirstInfo;
+ typedef DenseMapInfo<U> SecondInfo;
+
+ static inline Pair getEmptyKey() { return Pair(FirstInfo::getEmptyKey()); }
+ static inline Pair getTombstoneKey() {
+ return Pair(FirstInfo::getTombstoneKey());
+ }
+ static unsigned getHashValue(const Pair &PairVal) {
+ intptr_t key = (intptr_t)PairVal.getOpaqueValue();
+ return DenseMapInfo<intptr_t>::getHashValue(key);
+ }
+ static bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return LHS.template is<T>() == RHS.template is<T>() &&
+ (LHS.template is<T>() ? FirstInfo::isEqual(LHS.template get<T>(),
+ RHS.template get<T>())
+ : SecondInfo::isEqual(LHS.template get<U>(),
+ RHS.template get<U>()));
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
new file mode 100644
index 0000000..ce343a1
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h
@@ -0,0 +1,300 @@
+//===- llvm/ADT/PostOrderIterator.h - PostOrder iterator --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file builds on the ADT/GraphTraits.h file to build a generic graph
+// post order iterator. This should work over any graph type that has a
+// GraphTraits specialization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_POSTORDERITERATOR_H
+#define LLVM_ADT_POSTORDERITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the Extended=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+// those nodes won't be visited again. This is useful for restarting a
+// post-order traversal on a graph with nodes that aren't dominated by a
+// single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+// by having the insert() function return false. This could for example
+// confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+// edges can be pruned by returning false in the insertEdge() function. This
+// could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
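+//
+// For illustration, a possible restart pattern with an external visited set
+// (a hedged sketch; it assumes a CFG-style graph such as Function &F with
+// BasicBlock nodes and the usual GraphTraits specialization):
+//
+//   SmallPtrSet<BasicBlock *, 8> Visited;
+//   for (BasicBlock *BB : post_order_ext(&F, Visited))
+//     ...; // Nodes already in 'Visited' (and their subtrees) are skipped.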
+
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
+class po_iterator_storage {
+ SetType Visited;
+public:
+ // Return true if edge destination should be visited.
+ template<typename NodeType>
+ bool insertEdge(NodeType *From, NodeType *To) {
+ return Visited.insert(To).second;
+ }
+
+ // Called after all children of BB have been visited.
+ template<typename NodeType>
+ void finishPostorder(NodeType *BB) {}
+};
+
+/// Specialization of po_iterator_storage that references an external set.
+template<class SetType>
+class po_iterator_storage<SetType, true> {
+ SetType &Visited;
+public:
+ po_iterator_storage(SetType &VSet) : Visited(VSet) {}
+ po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
+
+  // Return true if the edge destination should be visited, called with
+  // From = nullptr for the root node.
+ // Graph edges can be pruned by specializing this function.
+ template <class NodeType> bool insertEdge(NodeType *From, NodeType *To) {
+ return Visited.insert(To).second;
+ }
+
+ // Called after all children of BB have been visited.
+ template<class NodeType>
+ void finishPostorder(NodeType *BB) {}
+};
+
+template<class GraphT,
+ class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
+ bool ExtStorage = false,
+ class GT = GraphTraits<GraphT> >
+class po_iterator : public std::iterator<std::forward_iterator_tag,
+ typename GT::NodeType, ptrdiff_t>,
+ public po_iterator_storage<SetType, ExtStorage> {
+ typedef std::iterator<std::forward_iterator_tag,
+ typename GT::NodeType, ptrdiff_t> super;
+ typedef typename GT::NodeType NodeType;
+ typedef typename GT::ChildIteratorType ChildItTy;
+
+  // VisitStack - Used to maintain the ordering. The top of the stack is the
+  // current node; each entry pairs a node pointer with the 'next child'
+  // iterator to visit.
+ std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
+
+ void traverseChild() {
+ while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
+ NodeType *BB = *VisitStack.back().second++;
+ if (this->insertEdge(VisitStack.back().first, BB)) {
+ // If the block is not visited...
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ }
+ }
+ }
+
+ po_iterator(NodeType *BB) {
+ this->insertEdge((NodeType*)nullptr, BB);
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ traverseChild();
+ }
+ po_iterator() {} // End is when stack is empty.
+
+ po_iterator(NodeType *BB, SetType &S)
+ : po_iterator_storage<SetType, ExtStorage>(S) {
+ if (this->insertEdge((NodeType*)nullptr, BB)) {
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
+ traverseChild();
+ }
+ }
+
+ po_iterator(SetType &S)
+ : po_iterator_storage<SetType, ExtStorage>(S) {
+ } // End is when stack is empty.
+public:
+ typedef typename super::pointer pointer;
+
+ // Provide static "constructors"...
+ static po_iterator begin(GraphT G) {
+ return po_iterator(GT::getEntryNode(G));
+ }
+ static po_iterator end(GraphT G) { return po_iterator(); }
+
+ static po_iterator begin(GraphT G, SetType &S) {
+ return po_iterator(GT::getEntryNode(G), S);
+ }
+ static po_iterator end(GraphT G, SetType &S) { return po_iterator(S); }
+
+ bool operator==(const po_iterator &x) const {
+ return VisitStack == x.VisitStack;
+ }
+ bool operator!=(const po_iterator &x) const { return !(*this == x); }
+
+ pointer operator*() const { return VisitStack.back().first; }
+
+  // This is a nonstandard operator-> that dereferences the pointer an extra
+  // time... so that you can actually call methods ON the BasicBlock, because
+  // the contained type is a pointer. This allows BBIt->getTerminator(), e.g.
+ //
+ NodeType *operator->() const { return **this; }
+
+ po_iterator &operator++() { // Preincrement
+ this->finishPostorder(VisitStack.back().first);
+ VisitStack.pop_back();
+ if (!VisitStack.empty())
+ traverseChild();
+ return *this;
+ }
+
+ po_iterator operator++(int) { // Postincrement
+ po_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+// Provide global constructors that automatically figure out correct types...
+//
+template <class T>
+po_iterator<T> po_begin(const T &G) { return po_iterator<T>::begin(G); }
+template <class T>
+po_iterator<T> po_end (const T &G) { return po_iterator<T>::end(G); }
+
+template <class T> iterator_range<po_iterator<T>> post_order(const T &G) {
+ return make_range(po_begin(G), po_end(G));
+}
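+
+// For example, a hedged sketch visiting a function's CFG in post order
+// (assuming the usual GraphTraits<Function *> specialization):
+//
+//   for (BasicBlock *BB : post_order(&F))
+//     visit(BB); // 'visit' is a hypothetical callback.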
+
+// Provide global definitions of external postorder iterators...
+template<class T, class SetType=std::set<typename GraphTraits<T>::NodeType*> >
+struct po_ext_iterator : public po_iterator<T, SetType, true> {
+ po_ext_iterator(const po_iterator<T, SetType, true> &V) :
+ po_iterator<T, SetType, true>(V) {}
+};
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_begin(T G, SetType &S) {
+ return po_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template<class T, class SetType>
+po_ext_iterator<T, SetType> po_ext_end(T G, SetType &S) {
+ return po_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<po_ext_iterator<T, SetType>> post_order_ext(const T &G, SetType &S) {
+ return make_range(po_ext_begin(G, S), po_ext_end(G, S));
+}
+
+// Provide global definitions of inverse post order iterators...
+template <class T,
+ class SetType = std::set<typename GraphTraits<T>::NodeType*>,
+ bool External = false>
+struct ipo_iterator : public po_iterator<Inverse<T>, SetType, External > {
+ ipo_iterator(const po_iterator<Inverse<T>, SetType, External> &V) :
+ po_iterator<Inverse<T>, SetType, External> (V) {}
+};
+
+template <class T>
+ipo_iterator<T> ipo_begin(const T &G) {
+ return ipo_iterator<T>::begin(G);
+}
+
+template <class T>
+ipo_iterator<T> ipo_end(const T &G){
+ return ipo_iterator<T>::end(G);
+}
+
+template <class T>
+iterator_range<ipo_iterator<T>> inverse_post_order(const T &G) {
+ return make_range(ipo_begin(G), ipo_end(G));
+}
+
+// Provide global definitions of external inverse postorder iterators...
+template <class T,
+ class SetType = std::set<typename GraphTraits<T>::NodeType*> >
+struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
+ ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
+ ipo_iterator<T, SetType, true>(V) {}
+ ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
+ ipo_iterator<T, SetType, true>(V) {}
+};
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_begin(const T &G, SetType &S) {
+ return ipo_ext_iterator<T, SetType>::begin(G, S);
+}
+
+template <class T, class SetType>
+ipo_ext_iterator<T, SetType> ipo_ext_end(const T &G, SetType &S) {
+ return ipo_ext_iterator<T, SetType>::end(G, S);
+}
+
+template <class T, class SetType>
+iterator_range<ipo_ext_iterator<T, SetType>>
+inverse_post_order_ext(const T &G, SetType &S) {
+ return make_range(ipo_ext_begin(G, S), ipo_ext_end(G, S));
+}
+
+//===--------------------------------------------------------------------===//
+// Reverse Post Order CFG iterator code
+//===--------------------------------------------------------------------===//
+//
+// This is used to visit basic blocks in a method in reverse post order. This
+// class is awkward to use because I don't know a good incremental algorithm to
+// compute RPO from a graph. Because of this, the construction of the
+// ReversePostOrderTraversal object is expensive (it must walk the entire graph
+// with a postorder iterator to build the data structures). The moral of this
+// story is: Don't create more ReversePostOrderTraversal objects than necessary.
+//
+// This class should be used like this:
+// {
+// ReversePostOrderTraversal<Function*> RPOT(FuncPtr); // Expensive to create
+// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+// ...
+// }
+// for (rpo_iterator I = RPOT.begin(); I != RPOT.end(); ++I) {
+// ...
+// }
+// }
+//
+
+template<class GraphT, class GT = GraphTraits<GraphT> >
+class ReversePostOrderTraversal {
+ typedef typename GT::NodeType NodeType;
+ std::vector<NodeType*> Blocks; // Block list in normal PO order
+ void Initialize(NodeType *BB) {
+ std::copy(po_begin(BB), po_end(BB), std::back_inserter(Blocks));
+ }
+public:
+ typedef typename std::vector<NodeType*>::reverse_iterator rpo_iterator;
+
+ ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); }
+
+ // Because we want a reverse post order, use reverse iterators from the vector
+ rpo_iterator begin() { return Blocks.rbegin(); }
+ rpo_iterator end() { return Blocks.rend(); }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/PriorityQueue.h b/contrib/llvm/include/llvm/ADT/PriorityQueue.h
new file mode 100644
index 0000000..827d0b3
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/PriorityQueue.h
@@ -0,0 +1,84 @@
+//===- llvm/ADT/PriorityQueue.h - Priority queues ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PriorityQueue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_PRIORITYQUEUE_H
+#define LLVM_ADT_PRIORITYQUEUE_H
+
+#include <algorithm>
+#include <queue>
+
+namespace llvm {
+
+/// PriorityQueue - This class behaves like std::priority_queue and
+/// provides a few additional convenience functions.
+///
+template<class T,
+ class Sequence = std::vector<T>,
+ class Compare = std::less<typename Sequence::value_type> >
+class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
+public:
+ explicit PriorityQueue(const Compare &compare = Compare(),
+ const Sequence &sequence = Sequence())
+ : std::priority_queue<T, Sequence, Compare>(compare, sequence)
+ {}
+
+ template<class Iterator>
+ PriorityQueue(Iterator begin, Iterator end,
+ const Compare &compare = Compare(),
+ const Sequence &sequence = Sequence())
+ : std::priority_queue<T, Sequence, Compare>(begin, end, compare, sequence)
+ {}
+
+ /// erase_one - Erase one element from the queue, regardless of its
+ /// position. This operation performs a linear search to find an element
+  /// equal to t, but then uses logarithmic-time heap operations to perform
+  /// the erase itself.
+ ///
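+  /// For example (an illustrative sketch):
+  ///
+  ///   PriorityQueue<int> Q;
+  ///   Q.push(3); Q.push(1); Q.push(4);
+  ///   Q.erase_one(1); // Q now holds {3, 4}; Q.top() == 4.
+  ///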
+ void erase_one(const T &t) {
+ // Linear-search to find the element.
+ typename Sequence::size_type i =
+ std::find(this->c.begin(), this->c.end(), t) - this->c.begin();
+
+ // Logarithmic-time heap bubble-up.
+ while (i != 0) {
+ typename Sequence::size_type parent = (i - 1) / 2;
+ this->c[i] = this->c[parent];
+ i = parent;
+ }
+
+ // The element we want to remove is now at the root, so we can use
+ // priority_queue's plain pop to remove it.
+ this->pop();
+ }
+
+ /// reheapify - If an element in the queue has changed in a way that
+ /// affects its standing in the comparison function, the queue's
+ /// internal state becomes invalid. Calling reheapify() resets the
+ /// queue's state, making it valid again. This operation has time
+ /// complexity proportional to the number of elements in the queue,
+ /// so don't plan to use it a lot.
+ ///
+ void reheapify() {
+ std::make_heap(this->c.begin(), this->c.end(), this->comp);
+ }
+
+ /// clear - Erase all elements from the queue.
+ ///
+ void clear() {
+ this->c.clear();
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SCCIterator.h b/contrib/llvm/include/llvm/ADT/SCCIterator.h
new file mode 100644
index 0000000..bc74416
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SCCIterator.h
@@ -0,0 +1,245 @@
+//===---- ADT/SCCIterator.h - Strongly Connected Comp. Iter. ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This builds on the llvm/ADT/GraphTraits.h file to find the strongly
+/// connected components (SCCs) of a graph in O(N+E) time using Tarjan's DFS
+/// algorithm.
+///
+/// The SCC iterator has the important property that if a node in SCC S1 has an
+/// edge to a node in SCC S2, then it visits S1 *after* S2.
+///
+/// To visit S1 *before* S2, use the scc_iterator on the Inverse graph. (NOTE:
+/// This requires some simple wrappers and is not supported yet.)
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCCITERATOR_H
+#define LLVM_ADT_SCCITERATOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include <vector>
+
+namespace llvm {
+
+/// \brief Enumerate the SCCs of a directed graph in reverse topological order
+/// of the SCC DAG.
+///
+/// This is implemented using Tarjan's DFS algorithm using an internal stack to
+/// build up a vector of nodes in a particular SCC. Note that it is a forward
+/// iterator and thus you cannot backtrack or re-visit nodes.
+template <class GraphT, class GT = GraphTraits<GraphT>>
+class scc_iterator
+ : public iterator_facade_base<
+ scc_iterator<GraphT, GT>, std::forward_iterator_tag,
+ const std::vector<typename GT::NodeType *>, ptrdiff_t> {
+ typedef typename GT::NodeType NodeType;
+ typedef typename GT::ChildIteratorType ChildItTy;
+ typedef std::vector<NodeType *> SccTy;
+ typedef typename scc_iterator::reference reference;
+
+ /// Element of VisitStack during DFS.
+ struct StackElement {
+ NodeType *Node; ///< The current node pointer.
+ ChildItTy NextChild; ///< The next child, modified inplace during DFS.
+ unsigned MinVisited; ///< Minimum uplink value of all children of Node.
+
+ StackElement(NodeType *Node, const ChildItTy &Child, unsigned Min)
+ : Node(Node), NextChild(Child), MinVisited(Min) {}
+
+ bool operator==(const StackElement &Other) const {
+ return Node == Other.Node &&
+ NextChild == Other.NextChild &&
+ MinVisited == Other.MinVisited;
+ }
+ };
+
+ /// The visit counters used to detect when a complete SCC is on the stack.
+ /// visitNum is the global counter.
+ ///
+ /// nodeVisitNumbers are per-node visit numbers, also used as DFS flags.
+ unsigned visitNum;
+ DenseMap<NodeType *, unsigned> nodeVisitNumbers;
+
+ /// Stack holding nodes of the SCC.
+ std::vector<NodeType *> SCCNodeStack;
+
+ /// The current SCC, retrieved using operator*().
+ SccTy CurrentSCC;
+
+  /// DFS stack, used to maintain the ordering. The top contains the current
+  /// node, the next child to visit, and the minimum uplink value of all
+  /// children of the node.
+ std::vector<StackElement> VisitStack;
+
+ /// A single "visit" within the non-recursive DFS traversal.
+ void DFSVisitOne(NodeType *N);
+
+ /// The stack-based DFS traversal; defined below.
+ void DFSVisitChildren();
+
+ /// Compute the next SCC using the DFS traversal.
+ void GetNextSCC();
+
+ scc_iterator(NodeType *entryN) : visitNum(0) {
+ DFSVisitOne(entryN);
+ GetNextSCC();
+ }
+
+ /// End is when the DFS stack is empty.
+ scc_iterator() {}
+
+public:
+ static scc_iterator begin(const GraphT &G) {
+ return scc_iterator(GT::getEntryNode(G));
+ }
+ static scc_iterator end(const GraphT &) { return scc_iterator(); }
+
+ /// \brief Direct loop termination test which is more efficient than
+ /// comparison with \c end().
+ bool isAtEnd() const {
+ assert(!CurrentSCC.empty() || VisitStack.empty());
+ return CurrentSCC.empty();
+ }
+
+ bool operator==(const scc_iterator &x) const {
+ return VisitStack == x.VisitStack && CurrentSCC == x.CurrentSCC;
+ }
+
+ scc_iterator &operator++() {
+ GetNextSCC();
+ return *this;
+ }
+
+ reference operator*() const {
+ assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+ return CurrentSCC;
+ }
+
+ /// \brief Test if the current SCC has a loop.
+ ///
+ /// If the SCC has more than one node, this is trivially true. If not, it may
+ /// still contain a loop if the node has an edge back to itself.
+ bool hasLoop() const;
+
+ /// This informs the \c scc_iterator that the specified \c Old node
+ /// has been deleted, and \c New is to be used in its place.
+ void ReplaceNode(NodeType *Old, NodeType *New) {
+ assert(nodeVisitNumbers.count(Old) && "Old not in scc_iterator?");
+ nodeVisitNumbers[New] = nodeVisitNumbers[Old];
+ nodeVisitNumbers.erase(Old);
+ }
+};
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitOne(NodeType *N) {
+ ++visitNum;
+ nodeVisitNumbers[N] = visitNum;
+ SCCNodeStack.push_back(N);
+ VisitStack.push_back(StackElement(N, GT::child_begin(N), visitNum));
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Node " << N <<
+ " : visitNum = " << visitNum << "\n";
+#endif
+}
+
+template <class GraphT, class GT>
+void scc_iterator<GraphT, GT>::DFSVisitChildren() {
+ assert(!VisitStack.empty());
+ while (VisitStack.back().NextChild != GT::child_end(VisitStack.back().Node)) {
+ // TOS has at least one more child so continue DFS
+ NodeType *childN = *VisitStack.back().NextChild++;
+ typename DenseMap<NodeType *, unsigned>::iterator Visited =
+ nodeVisitNumbers.find(childN);
+ if (Visited == nodeVisitNumbers.end()) {
+ // this node has never been seen.
+ DFSVisitOne(childN);
+ continue;
+ }
+
+ unsigned childNum = Visited->second;
+ if (VisitStack.back().MinVisited > childNum)
+ VisitStack.back().MinVisited = childNum;
+ }
+}
+
+template <class GraphT, class GT> void scc_iterator<GraphT, GT>::GetNextSCC() {
+ CurrentSCC.clear(); // Prepare to compute the next SCC
+ while (!VisitStack.empty()) {
+ DFSVisitChildren();
+
+ // Pop the leaf on top of the VisitStack.
+ NodeType *visitingN = VisitStack.back().Node;
+ unsigned minVisitNum = VisitStack.back().MinVisited;
+ assert(VisitStack.back().NextChild == GT::child_end(visitingN));
+ VisitStack.pop_back();
+
+ // Propagate MinVisitNum to parent so we can detect the SCC starting node.
+ if (!VisitStack.empty() && VisitStack.back().MinVisited > minVisitNum)
+ VisitStack.back().MinVisited = minVisitNum;
+
+#if 0 // Enable if needed when debugging.
+ dbgs() << "TarjanSCC: Popped node " << visitingN <<
+ " : minVisitNum = " << minVisitNum << "; Node visit num = " <<
+ nodeVisitNumbers[visitingN] << "\n";
+#endif
+
+ if (minVisitNum != nodeVisitNumbers[visitingN])
+ continue;
+
+ // A full SCC is on the SCCNodeStack! It includes all nodes below
+ // visitingN on the stack. Copy those nodes to CurrentSCC,
+ // reset their minVisit values, and return (this suspends
+ // the DFS traversal till the next ++).
+ do {
+ CurrentSCC.push_back(SCCNodeStack.back());
+ SCCNodeStack.pop_back();
+ nodeVisitNumbers[CurrentSCC.back()] = ~0U;
+ } while (CurrentSCC.back() != visitingN);
+ return;
+ }
+}
+
+template <class GraphT, class GT>
+bool scc_iterator<GraphT, GT>::hasLoop() const {
+ assert(!CurrentSCC.empty() && "Dereferencing END SCC iterator!");
+ if (CurrentSCC.size() > 1)
+ return true;
+ NodeType *N = CurrentSCC.front();
+ for (ChildItTy CI = GT::child_begin(N), CE = GT::child_end(N); CI != CE;
+ ++CI)
+ if (*CI == N)
+ return true;
+ return false;
+}
+
+/// \brief Construct the begin iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_begin(const T &G) {
+ return scc_iterator<T>::begin(G);
+}
+
+/// \brief Construct the end iterator for a deduced graph type T.
+template <class T> scc_iterator<T> scc_end(const T &G) {
+ return scc_iterator<T>::end(G);
+}
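+
+// For example, a hedged sketch over a function's CFG (any graph with a
+// GraphTraits specialization works the same way):
+//
+//   for (auto I = scc_begin(&F); !I.isAtEnd(); ++I) {
+//     const std::vector<BasicBlock *> &SCC = *I;
+//     // Each SCC is visited after every SCC it has an edge into.
+//   }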
+
+/// \brief Construct the begin iterator for a deduced graph type T's Inverse<T>.
+template <class T> scc_iterator<Inverse<T> > scc_begin(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T> >::begin(G);
+}
+
+/// \brief Construct the end iterator for a deduced graph type T's Inverse<T>.
+template <class T> scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) {
+ return scc_iterator<Inverse<T> >::end(G);
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/STLExtras.h b/contrib/llvm/include/llvm/ADT/STLExtras.h
new file mode 100644
index 0000000..d4360fa
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/STLExtras.h
@@ -0,0 +1,472 @@
+//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some templates that are useful if you are working with the
+// STL at all.
+//
+// No library is required when using these functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STLEXTRAS_H
+#define LLVM_ADT_STLEXTRAS_H
+
+#include "llvm/Support/Compiler.h"
+#include <algorithm> // for std::all_of
+#include <cassert>
+#include <cstddef> // for std::size_t
+#include <cstdlib> // for qsort
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <utility> // for std::pair
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <functional>
+//===----------------------------------------------------------------------===//
+
+template<class Ty>
+struct identity : public std::unary_function<Ty, Ty> {
+ Ty &operator()(Ty &self) const {
+ return self;
+ }
+ const Ty &operator()(const Ty &self) const {
+ return self;
+ }
+};
+
+template<class Ty>
+struct less_ptr : public std::binary_function<Ty, Ty, bool> {
+ bool operator()(const Ty* left, const Ty* right) const {
+ return *left < *right;
+ }
+};
+
+template<class Ty>
+struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
+ bool operator()(const Ty* left, const Ty* right) const {
+ return *right < *left;
+ }
+};
+
+/// An efficient, type-erasing, non-owning reference to a callable. This is
+/// intended for use as the type of a function parameter that is not used
+/// after the function in question returns.
+///
+/// This class does not own the callable, so it is not in general safe to store
+/// a function_ref.
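+///
+/// A hedged usage sketch (forEachLine is a hypothetical caller):
+///
+///   void forEachLine(StringRef Text, function_ref<void(StringRef)> Fn);
+///   ...
+///   unsigned NumLines = 0;
+///   forEachLine(Buffer, [&](StringRef Line) { ++NumLines; });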
+template<typename Fn> class function_ref;
+
+template<typename Ret, typename ...Params>
+class function_ref<Ret(Params...)> {
+ Ret (*callback)(intptr_t callable, Params ...params);
+ intptr_t callable;
+
+ template<typename Callable>
+ static Ret callback_fn(intptr_t callable, Params ...params) {
+ return (*reinterpret_cast<Callable*>(callable))(
+ std::forward<Params>(params)...);
+ }
+
+public:
+ template <typename Callable>
+ function_ref(Callable &&callable,
+ typename std::enable_if<
+ !std::is_same<typename std::remove_reference<Callable>::type,
+ function_ref>::value>::type * = nullptr)
+ : callback(callback_fn<typename std::remove_reference<Callable>::type>),
+ callable(reinterpret_cast<intptr_t>(&callable)) {}
+ Ret operator()(Params ...params) const {
+ return callback(callable, std::forward<Params>(params)...);
+ }
+};
+
+// deleter - Very simple function that is used to invoke operator
+// delete on something. It is used like this:
+//
+//   for_each(V.begin(), V.end(), deleter<Interval>);
+//
+template <class T>
+inline void deleter(T *Ptr) {
+ delete Ptr;
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <iterator>
+//===----------------------------------------------------------------------===//
+
+// mapped_iterator - This is a simple iterator adapter that causes a function
+// to be applied whenever operator* is invoked on the iterator.
+//
+template <class RootIt, class UnaryFunc>
+class mapped_iterator {
+ RootIt current;
+ UnaryFunc Fn;
+public:
+ typedef typename std::iterator_traits<RootIt>::iterator_category
+ iterator_category;
+ typedef typename std::iterator_traits<RootIt>::difference_type
+ difference_type;
+ typedef typename UnaryFunc::result_type value_type;
+
+ typedef void pointer;
+ //typedef typename UnaryFunc::result_type *pointer;
+ typedef void reference; // Can't modify value returned by fn
+
+ typedef RootIt iterator_type;
+
+ inline const RootIt &getCurrent() const { return current; }
+ inline const UnaryFunc &getFunc() const { return Fn; }
+
+ inline explicit mapped_iterator(const RootIt &I, UnaryFunc F)
+ : current(I), Fn(F) {}
+
+ inline value_type operator*() const { // All this work to do this
+ return Fn(*current); // little change
+ }
+
+ mapped_iterator &operator++() {
+ ++current;
+ return *this;
+ }
+ mapped_iterator &operator--() {
+ --current;
+ return *this;
+ }
+ mapped_iterator operator++(int) {
+ mapped_iterator __tmp = *this;
+ ++current;
+ return __tmp;
+ }
+ mapped_iterator operator--(int) {
+ mapped_iterator __tmp = *this;
+ --current;
+ return __tmp;
+ }
+ mapped_iterator operator+(difference_type n) const {
+ return mapped_iterator(current + n, Fn);
+ }
+ mapped_iterator &operator+=(difference_type n) {
+ current += n;
+ return *this;
+ }
+ mapped_iterator operator-(difference_type n) const {
+ return mapped_iterator(current - n, Fn);
+ }
+ mapped_iterator &operator-=(difference_type n) {
+ current -= n;
+ return *this;
+ }
+ reference operator[](difference_type n) const { return *(*this + n); }
+
+ bool operator!=(const mapped_iterator &X) const { return !operator==(X); }
+ bool operator==(const mapped_iterator &X) const {
+ return current == X.current;
+ }
+ bool operator<(const mapped_iterator &X) const { return current < X.current; }
+
+ difference_type operator-(const mapped_iterator &X) const {
+ return current - X.current;
+ }
+};
+
+template <class Iterator, class Func>
+inline mapped_iterator<Iterator, Func>
+operator+(typename mapped_iterator<Iterator, Func>::difference_type N,
+ const mapped_iterator<Iterator, Func> &X) {
+  return mapped_iterator<Iterator, Func>(X.getCurrent() + N, X.getFunc());
+}
+
+
+// map_iterator - Provide a convenient way to create mapped_iterators, just like
+// make_pair is useful for creating pairs...
+//
+template <class ItTy, class FuncTy>
+inline mapped_iterator<ItTy, FuncTy> map_iterator(const ItTy &I, FuncTy F) {
+ return mapped_iterator<ItTy, FuncTy>(I, F);
+}
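+
+// For example, a hedged sketch (note the functor must expose result_type,
+// which a plain lambda does not):
+//
+//   struct Twice {
+//     typedef int result_type;
+//     int operator()(int X) const { return 2 * X; }
+//   };
+//   std::vector<int> V = {1, 2, 3};
+//   int Sum = 0;
+//   for (auto I = map_iterator(V.begin(), Twice()),
+//             E = map_iterator(V.end(), Twice()); I != E; ++I)
+//     Sum += *I; // Sum == 12.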
+
+/// \brief Metafunction to determine if type T has a member called rbegin().
+template <typename T> struct has_rbegin {
+ template <typename U> static char(&f(const U &, decltype(&U::rbegin)))[1];
+ static char(&f(...))[2];
+ const static bool value = sizeof(f(std::declval<T>(), nullptr)) == 1;
+};
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have rbegin()/rend() methods for this to work.
+template <typename ContainerTy>
+auto reverse(ContainerTy &&C,
+ typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
+ nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
+ return make_range(C.rbegin(), C.rend());
+}
+
+// Returns a std::reverse_iterator wrapped around the given iterator.
+template <typename IteratorTy>
+std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) {
+ return std::reverse_iterator<IteratorTy>(It);
+}
+
+// Returns an iterator_range over the given container which iterates in reverse.
+// Note that the container must have begin()/end() methods which return
+// bidirectional iterators for this to work.
+template <typename ContainerTy>
+auto reverse(
+ ContainerTy &&C,
+ typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
+ -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
+ llvm::make_reverse_iterator(std::begin(C)))) {
+ return make_range(llvm::make_reverse_iterator(std::end(C)),
+ llvm::make_reverse_iterator(std::begin(C)));
+}
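+
+// For example (an illustrative sketch; both overloads produce the same loop):
+//
+//   std::vector<int> V = {1, 2, 3};
+//   for (int X : reverse(V)) // visits 3, 2, 1
+//     ...;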
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <utility>
+//===----------------------------------------------------------------------===//
+
+/// \brief Function object to check whether the first component of a std::pair
+/// compares less than the first component of another std::pair.
+struct less_first {
+ template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+ return lhs.first < rhs.first;
+ }
+};
+
+/// \brief Function object to check whether the second component of a std::pair
+/// compares less than the second component of another std::pair.
+struct less_second {
+ template <typename T> bool operator()(const T &lhs, const T &rhs) const {
+ return lhs.second < rhs.second;
+ }
+};
+
+// A subset of N3658. More stuff can be added as needed.
+
+/// \brief Represents a compile-time sequence of integers.
+template <class T, T... I> struct integer_sequence {
+ typedef T value_type;
+
+ static LLVM_CONSTEXPR size_t size() { return sizeof...(I); }
+};
+
+/// \brief Alias for the common case of a sequence of size_ts.
+template <size_t... I>
+struct index_sequence : integer_sequence<std::size_t, I...> {};
+
+template <std::size_t N, std::size_t... I>
+struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
+template <std::size_t... I>
+struct build_index_impl<0, I...> : index_sequence<I...> {};
+
+/// \brief Creates a compile-time integer sequence for a parameter pack.
+template <class... Ts>
+struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
+
+//===----------------------------------------------------------------------===//
+// Extra additions for arrays
+//===----------------------------------------------------------------------===//
+
+/// Find the length of an array.
+template <class T, std::size_t N>
+LLVM_CONSTEXPR inline size_t array_lengthof(T (&)[N]) {
+ return N;
+}
+
+/// Adapt std::less<T> for array_pod_sort.
+template<typename T>
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+ if (std::less<T>()(*reinterpret_cast<const T*>(P1),
+ *reinterpret_cast<const T*>(P2)))
+ return -1;
+ if (std::less<T>()(*reinterpret_cast<const T*>(P2),
+ *reinterpret_cast<const T*>(P1)))
+ return 1;
+ return 0;
+}
+
+/// get_array_pod_sort_comparator - This is an internal helper function used to
+/// get type deduction of T right.
+template<typename T>
+inline int (*get_array_pod_sort_comparator(const T &))
+ (const void*, const void*) {
+ return array_pod_sort_comparator<T>;
+}
+
+
+/// array_pod_sort - This sorts an array with the specified start and end
+/// extent. This is just like std::sort, except that it calls qsort instead of
+/// using an inlined template. qsort is slightly slower than std::sort, but
+/// most sorts are not performance critical in LLVM and std::sort has to be
+/// template instantiated for each type, leading to significant measured code
+/// bloat. This function should generally be used instead of std::sort where
+/// possible.
+///
+/// This function assumes that you have simple POD-like types that can be
+/// compared with std::less and can be moved with memcpy. If this isn't true,
+/// you should use std::sort.
+///
+/// NOTE: If qsort_r were portable, we could allow a custom comparator and
+/// default to std::less.
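+///
+/// For example (an illustrative sketch):
+///
+///   std::vector<int> V = {3, 1, 2};
+///   array_pod_sort(V.begin(), V.end()); // V == {1, 2, 3}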
+template<class IteratorTy>
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+ // Don't inefficiently call qsort with one element or trigger undefined
+ // behavior with an empty sequence.
+ auto NElts = End - Start;
+ if (NElts <= 1) return;
+ qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
+}
+
+template <class IteratorTy>
+inline void array_pod_sort(
+ IteratorTy Start, IteratorTy End,
+ int (*Compare)(
+ const typename std::iterator_traits<IteratorTy>::value_type *,
+ const typename std::iterator_traits<IteratorTy>::value_type *)) {
+ // Don't inefficiently call qsort with one element or trigger undefined
+ // behavior with an empty sequence.
+ auto NElts = End - Start;
+ if (NElts <= 1) return;
+ qsort(&*Start, NElts, sizeof(*Start),
+ reinterpret_cast<int (*)(const void *, const void *)>(Compare));
+}
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <algorithm>
+//===----------------------------------------------------------------------===//
+
+/// For a container of pointers, deletes the pointers and then clears the
+/// container.
+template<typename Container>
+void DeleteContainerPointers(Container &C) {
+ for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ delete *I;
+ C.clear();
+}
+
+/// In a container of pairs (usually a map) whose second element is a pointer,
+/// deletes the second elements and then clears the container.
+template<typename Container>
+void DeleteContainerSeconds(Container &C) {
+ for (typename Container::iterator I = C.begin(), E = C.end(); I != E; ++I)
+ delete I->second;
+ C.clear();
+}
+
+/// Provide wrappers to std::all_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template<typename R, class UnaryPredicate>
+bool all_of(R &&Range, UnaryPredicate &&P) {
+ return std::all_of(Range.begin(), Range.end(),
+ std::forward<UnaryPredicate>(P));
+}
+
+/// Provide wrappers to std::any_of which take ranges instead of having to pass
+/// begin/end explicitly.
+template <typename R, class UnaryPredicate>
+bool any_of(R &&Range, UnaryPredicate &&P) {
+ return std::any_of(Range.begin(), Range.end(),
+ std::forward<UnaryPredicate>(P));
+}
+
+/// Provide wrappers to std::find which take ranges instead of having to pass
+/// begin/end explicitly.
+template<typename R, class T>
+auto find(R &&Range, const T &val) -> decltype(Range.begin()) {
+ return std::find(Range.begin(), Range.end(), val);
+}
+
+//===----------------------------------------------------------------------===//
+// Extra additions to <memory>
+//===----------------------------------------------------------------------===//
+
+// Implement make_unique according to N3656.
+
+/// \brief Constructs a `new T()` with the given args and returns a
+/// `unique_ptr<T>` which owns the object.
+///
+/// Example:
+///
+/// auto p = make_unique<int>();
+/// auto p = make_unique<std::tuple<int, int>>(0, 1);
+template <class T, class... Args>
+typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
+make_unique(Args &&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+/// \brief Constructs a `new T[n]` with the given args and returns a
+/// `unique_ptr<T[]>` which owns the object.
+///
+/// \param n size of the new array.
+///
+/// Example:
+///
+/// auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
+template <class T>
+typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
+ std::unique_ptr<T>>::type
+make_unique(size_t n) {
+ return std::unique_ptr<T>(new typename std::remove_extent<T>::type[n]());
+}
+
+/// This function isn't used and is only here to provide better compile errors.
+template <class T, class... Args>
+typename std::enable_if<std::extent<T>::value != 0>::type
+make_unique(Args &&...) = delete;
+
+struct FreeDeleter {
+ void operator()(void* v) {
+ ::free(v);
+ }
+};
+
+template<typename First, typename Second>
+struct pair_hash {
+ size_t operator()(const std::pair<First, Second> &P) const {
+ return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
+ }
+};
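+
+// For illustration, pair_hash can serve as the hasher of an unordered
+// container keyed on pairs (a hedged sketch):
+//
+//   std::unordered_map<std::pair<int, int>, bool,
+//                      pair_hash<int, int>> Seen;
+//   Seen[{1, 2}] = true;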
+
+/// A functor like C++14's std::less<void> in its absence.
+struct less {
+ template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+ return std::forward<A>(a) < std::forward<B>(b);
+ }
+};
+
+/// A functor like C++14's std::equal_to<void> in its absence.
+struct equal {
+ template <typename A, typename B> bool operator()(A &&a, B &&b) const {
+ return std::forward<A>(a) == std::forward<B>(b);
+ }
+};
+
+/// Binary functor that adapts to any other binary functor after dereferencing
+/// operands.
+template <typename T> struct deref {
+ T func;
+ // Could be further improved to cope with non-derivable functors and
+ // non-binary functors (should be a variadic template member function
+ // operator()).
+ template <typename A, typename B>
+ auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
+ assert(lhs);
+ assert(rhs);
+ return func(*lhs, *rhs);
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/ScopedHashTable.h b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
new file mode 100644
index 0000000..4af3d6d
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ScopedHashTable.h
@@ -0,0 +1,256 @@
+//===- ScopedHashTable.h - A simple scoped hash table -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an efficient scoped hash table, which is useful for
+// things like dominator-based optimizations. This allows clients to do things
+// like this:
+//
+// ScopedHashTable<int, int> HT;
+// {
+// ScopedHashTableScope<int, int> Scope1(HT);
+// HT.insert(0, 0);
+// HT.insert(1, 1);
+// {
+// ScopedHashTableScope<int, int> Scope2(HT);
+// HT.insert(0, 42);
+// }
+// }
+//
+// Looking up the value for "0" in the Scope2 block will return 42. Looking
+// up the value for 0 before 42 is inserted or after Scope2 is popped will
+// return 0.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
+#define LLVM_ADT_SCOPEDHASHTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
+class ScopedHashTable;
+
+template <typename K, typename V>
+class ScopedHashTableVal {
+ ScopedHashTableVal *NextInScope;
+ ScopedHashTableVal *NextForKey;
+ K Key;
+ V Val;
+ ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
+
+public:
+ const K &getKey() const { return Key; }
+ const V &getValue() const { return Val; }
+ V &getValue() { return Val; }
+
+ ScopedHashTableVal *getNextForKey() { return NextForKey; }
+ const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
+ ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+ template <typename AllocatorTy>
+ static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+ ScopedHashTableVal *nextForKey,
+ const K &key, const V &val,
+ AllocatorTy &Allocator) {
+ ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+ // Set up the value.
+ new (New) ScopedHashTableVal(key, val);
+ New->NextInScope = nextInScope;
+ New->NextForKey = nextForKey;
+ return New;
+ }
+
+ template <typename AllocatorTy> void Destroy(AllocatorTy &Allocator) {
+ // Free memory referenced by the item.
+ this->~ScopedHashTableVal();
+ Allocator.Deallocate(this);
+ }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
+class ScopedHashTableScope {
+ /// HT - The hashtable that we are active for.
+ ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
+
+ /// PrevScope - This is the scope that we are shadowing in HT.
+ ScopedHashTableScope *PrevScope;
+
+ /// LastValInScope - This is the last value that was inserted for this scope
+ /// or null if none have been inserted yet.
+ ScopedHashTableVal<K, V> *LastValInScope;
+ void operator=(ScopedHashTableScope &) = delete;
+ ScopedHashTableScope(ScopedHashTableScope &) = delete;
+
+public:
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
+ ~ScopedHashTableScope();
+
+ ScopedHashTableScope *getParentScope() { return PrevScope; }
+ const ScopedHashTableScope *getParentScope() const { return PrevScope; }
+
+private:
+ friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+ ScopedHashTableVal<K, V> *getLastValInScope() {
+ return LastValInScope;
+ }
+ void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
+ LastValInScope = Val;
+ }
+};
+
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>>
+class ScopedHashTableIterator {
+ ScopedHashTableVal<K, V> *Node;
+
+public:
+ ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
+
+ V &operator*() const {
+ assert(Node && "Dereference end()");
+ return Node->getValue();
+ }
+ V *operator->() const {
+ return &Node->getValue();
+ }
+
+ bool operator==(const ScopedHashTableIterator &RHS) const {
+ return Node == RHS.Node;
+ }
+ bool operator!=(const ScopedHashTableIterator &RHS) const {
+ return Node != RHS.Node;
+ }
+
+ inline ScopedHashTableIterator& operator++() { // Preincrement
+ assert(Node && "incrementing past end()");
+ Node = Node->getNextForKey();
+ return *this;
+ }
+ ScopedHashTableIterator operator++(int) { // Postincrement
+ ScopedHashTableIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
+class ScopedHashTable {
+public:
+ /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+ /// to the name of the scope for this hash table.
+ typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
+ typedef unsigned size_type;
+
+private:
+ typedef ScopedHashTableVal<K, V> ValTy;
+ DenseMap<K, ValTy*, KInfo> TopLevelMap;
+ ScopeTy *CurScope;
+
+ AllocatorTy Allocator;
+
+ ScopedHashTable(const ScopedHashTable &); // NOT YET IMPLEMENTED
+ void operator=(const ScopedHashTable &); // NOT YET IMPLEMENTED
+ friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+
+public:
+ ScopedHashTable() : CurScope(nullptr) {}
+  ScopedHashTable(AllocatorTy A) : CurScope(nullptr), Allocator(A) {}
+ ~ScopedHashTable() {
+ assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
+ }
+
+ /// Access to the allocator.
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
+
+ /// Return 1 if the specified key is in the table, 0 otherwise.
+ size_type count(const K &Key) const {
+ return TopLevelMap.count(Key);
+ }
+
+ V lookup(const K &Key) {
+ typename DenseMap<K, ValTy*, KInfo>::iterator I = TopLevelMap.find(Key);
+ if (I != TopLevelMap.end())
+ return I->second->getValue();
+
+ return V();
+ }
+
+ void insert(const K &Key, const V &Val) {
+ insertIntoScope(CurScope, Key, Val);
+ }
+
+ typedef ScopedHashTableIterator<K, V, KInfo> iterator;
+
+  iterator end() { return iterator(nullptr); }
+
+ iterator begin(const K &Key) {
+ typename DenseMap<K, ValTy*, KInfo>::iterator I =
+ TopLevelMap.find(Key);
+ if (I == TopLevelMap.end()) return end();
+ return iterator(I->second);
+ }
+
+ ScopeTy *getCurScope() { return CurScope; }
+ const ScopeTy *getCurScope() const { return CurScope; }
+
+ /// insertIntoScope - This inserts the specified key/value at the specified
+ /// (possibly not the current) scope. While it is ok to insert into a scope
+ /// that isn't the current one, it isn't ok to insert *underneath* an existing
+ /// value of the specified key.
+ void insertIntoScope(ScopeTy *S, const K &Key, const V &Val) {
+ assert(S && "No scope active!");
+ ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
+ KeyEntry = ValTy::Create(S->getLastValInScope(), KeyEntry, Key, Val,
+ Allocator);
+ S->setLastValInScope(KeyEntry);
+ }
+};
+
+/// ScopedHashTableScope ctor - Install this as the current scope for the hash
+/// table.
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
+ PrevScope = HT.CurScope;
+ HT.CurScope = this;
+ LastValInScope = nullptr;
+}
+
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
+ assert(HT.CurScope == this && "Scope imbalance!");
+ HT.CurScope = PrevScope;
+
+ // Pop and delete all values corresponding to this scope.
+ while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
+ // Pop this value out of the TopLevelMap.
+ if (!ThisEntry->getNextForKey()) {
+ assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
+ "Scope imbalance!");
+ HT.TopLevelMap.erase(ThisEntry->getKey());
+ } else {
+ ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
+ assert(KeyEntry == ThisEntry && "Scope imbalance!");
+ KeyEntry = ThisEntry->getNextForKey();
+ }
+
+ // Pop this value out of the scope.
+ LastValInScope = ThisEntry->getNextInScope();
+
+ // Delete this entry.
+ ThisEntry->Destroy(HT.getAllocator());
+ }
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SetOperations.h b/contrib/llvm/include/llvm/ADT/SetOperations.h
new file mode 100644
index 0000000..7c9f2fb
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SetOperations.h
@@ -0,0 +1,71 @@
+//===-- llvm/ADT/SetOperations.h - Generic Set Operations -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines generic set operations that may be used on sets of
+// different types and with different element types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETOPERATIONS_H
+#define LLVM_ADT_SETOPERATIONS_H
+
+namespace llvm {
+
+/// set_union(A, B) - Compute A := A u B, return whether A changed.
+///
+template <class S1Ty, class S2Ty>
+bool set_union(S1Ty &S1, const S2Ty &S2) {
+ bool Changed = false;
+
+ for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+ SI != SE; ++SI)
+ if (S1.insert(*SI).second)
+ Changed = true;
+
+ return Changed;
+}
+
+/// set_intersect(A, B) - Compute A := A ^ B
+/// Identical to set_intersection, except that it works on set<>'s and
+/// is nicer to use. Functionally, this iterates through S1, removing
+/// elements that are not contained in S2.
+///
+template <class S1Ty, class S2Ty>
+void set_intersect(S1Ty &S1, const S2Ty &S2) {
+ for (typename S1Ty::iterator I = S1.begin(); I != S1.end();) {
+ const auto &E = *I;
+ ++I;
+ if (!S2.count(E)) S1.erase(E); // Erase element if not in S2
+ }
+}
+
+/// set_difference(A, B) - Return A - B
+///
+template <class S1Ty, class S2Ty>
+S1Ty set_difference(const S1Ty &S1, const S2Ty &S2) {
+ S1Ty Result;
+ for (typename S1Ty::const_iterator SI = S1.begin(), SE = S1.end();
+ SI != SE; ++SI)
+ if (!S2.count(*SI)) // if the element is not in set2
+ Result.insert(*SI);
+ return Result;
+}
+
+/// set_subtract(A, B) - Compute A := A - B
+///
+template <class S1Ty, class S2Ty>
+void set_subtract(S1Ty &S1, const S2Ty &S2) {
+ for (typename S2Ty::const_iterator SI = S2.begin(), SE = S2.end();
+ SI != SE; ++SI)
+ S1.erase(*SI);
+}
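+
+// For illustration, a hedged sketch combining these helpers on std::set:
+//
+//   std::set<int> A = {1, 2, 3}, B = {2, 3, 4};
+//   set_union(A, B);                        // A == {1, 2, 3, 4}
+//   set_intersect(A, B);                    // A == {2, 3, 4}
+//   std::set<int> D = set_difference(A, B); // D == {} (A is now within B)
+//   set_subtract(A, B);                     // A == {}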
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h
new file mode 100644
index 0000000..bc56357
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SetVector.h
@@ -0,0 +1,255 @@
+//===- llvm/ADT/SetVector.h - Set with insert order iteration ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a set that has insertion order iteration
+// characteristics. This is useful for keeping a set of things that need to be
+// visited later but in a deterministic order (insertion order). The interface
+// is purposefully minimal.
+//
+// This file defines SetVector and SmallSetVector; the latter performs no
+// allocations while it holds fewer than a certain number of elements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SETVECTOR_H
+#define LLVM_ADT_SETVECTOR_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include <algorithm>
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+/// \brief A vector that has set insertion semantics.
+///
+/// This adapter class provides a way to keep a set of things that also has the
+/// property of a deterministic iteration order. The order of iteration is the
+/// order of insertion.
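+///
+/// For example (an illustrative sketch):
+///
+///   SetVector<Value *> Worklist;
+///   Worklist.insert(V1);
+///   Worklist.insert(V2);
+///   Worklist.insert(V1);      // No-op: V1 is already present.
+///   for (Value *V : Worklist) // Visits V1, then V2.
+///     ...;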
+template <typename T, typename Vector = std::vector<T>,
+ typename Set = DenseSet<T>>
+class SetVector {
+public:
+ typedef T value_type;
+ typedef T key_type;
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef Set set_type;
+ typedef Vector vector_type;
+ typedef typename vector_type::const_iterator iterator;
+ typedef typename vector_type::const_iterator const_iterator;
+ typedef typename vector_type::const_reverse_iterator reverse_iterator;
+ typedef typename vector_type::const_reverse_iterator const_reverse_iterator;
+ typedef typename vector_type::size_type size_type;
+
+ /// \brief Construct an empty SetVector
+ SetVector() {}
+
+ /// \brief Initialize a SetVector with a range of elements
+ template<typename It>
+ SetVector(It Start, It End) {
+ insert(Start, End);
+ }
+
+ ArrayRef<T> getArrayRef() const { return vector_; }
+
+ /// \brief Determine if the SetVector is empty or not.
+ bool empty() const {
+ return vector_.empty();
+ }
+
+ /// \brief Determine the number of elements in the SetVector.
+ size_type size() const {
+ return vector_.size();
+ }
+
+ /// \brief Get an iterator to the beginning of the SetVector.
+ iterator begin() {
+ return vector_.begin();
+ }
+
+ /// \brief Get a const_iterator to the beginning of the SetVector.
+ const_iterator begin() const {
+ return vector_.begin();
+ }
+
+ /// \brief Get an iterator to the end of the SetVector.
+ iterator end() {
+ return vector_.end();
+ }
+
+ /// \brief Get a const_iterator to the end of the SetVector.
+ const_iterator end() const {
+ return vector_.end();
+ }
+
+  /// \brief Get a reverse_iterator to the end of the SetVector.
+ reverse_iterator rbegin() {
+ return vector_.rbegin();
+ }
+
+ /// \brief Get a const_reverse_iterator to the end of the SetVector.
+ const_reverse_iterator rbegin() const {
+ return vector_.rbegin();
+ }
+
+ /// \brief Get a reverse_iterator to the beginning of the SetVector.
+ reverse_iterator rend() {
+ return vector_.rend();
+ }
+
+ /// \brief Get a const_reverse_iterator to the beginning of the SetVector.
+ const_reverse_iterator rend() const {
+ return vector_.rend();
+ }
+
+ /// \brief Return the last element of the SetVector.
+ const T &back() const {
+ assert(!empty() && "Cannot call back() on empty SetVector!");
+ return vector_.back();
+ }
+
+ /// \brief Index into the SetVector.
+ const_reference operator[](size_type n) const {
+ assert(n < vector_.size() && "SetVector access out of range!");
+ return vector_[n];
+ }
+
+ /// \brief Insert a new element into the SetVector.
+ /// \returns true iff the element was inserted into the SetVector.
+ bool insert(const value_type &X) {
+ bool result = set_.insert(X).second;
+ if (result)
+ vector_.push_back(X);
+ return result;
+ }
+
+ /// \brief Insert a range of elements into the SetVector.
+ template<typename It>
+ void insert(It Start, It End) {
+ for (; Start != End; ++Start)
+ if (set_.insert(*Start).second)
+ vector_.push_back(*Start);
+ }
+
+ /// \brief Remove an item from the set vector.
+ bool remove(const value_type& X) {
+ if (set_.erase(X)) {
+ typename vector_type::iterator I =
+ std::find(vector_.begin(), vector_.end(), X);
+ assert(I != vector_.end() && "Corrupted SetVector instances!");
+ vector_.erase(I);
+ return true;
+ }
+ return false;
+ }
+
+ /// \brief Remove items from the set vector based on a predicate function.
+ ///
+ /// This is intended to be equivalent to the following code, if we could
+ /// write it:
+ ///
+ /// \code
+ /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end());
+ /// \endcode
+ ///
+ /// However, SetVector doesn't expose non-const iterators, making any
+ /// algorithm like remove_if impossible to use.
+ ///
+ /// \returns true if any element is removed.
+ template <typename UnaryPredicate>
+ bool remove_if(UnaryPredicate P) {
+ typename vector_type::iterator I
+ = std::remove_if(vector_.begin(), vector_.end(),
+ TestAndEraseFromSet<UnaryPredicate>(P, set_));
+ if (I == vector_.end())
+ return false;
+ vector_.erase(I, vector_.end());
+ return true;
+ }
+
+ /// \brief Count the number of elements of a given key in the SetVector.
+ /// \returns 0 if the element is not in the SetVector, 1 if it is.
+ size_type count(const key_type &key) const {
+ return set_.count(key);
+ }
+
+ /// \brief Completely clear the SetVector
+ void clear() {
+ set_.clear();
+ vector_.clear();
+ }
+
+ /// \brief Remove the last element of the SetVector.
+ void pop_back() {
+ assert(!empty() && "Cannot remove an element from an empty SetVector!");
+ set_.erase(back());
+ vector_.pop_back();
+ }
+
+ T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
+ T Ret = back();
+ pop_back();
+ return Ret;
+ }
+
+ bool operator==(const SetVector &that) const {
+ return vector_ == that.vector_;
+ }
+
+ bool operator!=(const SetVector &that) const {
+ return vector_ != that.vector_;
+ }
+
+private:
+ /// \brief A wrapper predicate designed for use with std::remove_if.
+ ///
+ /// This predicate wraps a predicate suitable for use with std::remove_if to
+ /// call set_.erase(x) on each element which is slated for removal.
+ template <typename UnaryPredicate>
+ class TestAndEraseFromSet {
+ UnaryPredicate P;
+ set_type &set_;
+
+ public:
+ TestAndEraseFromSet(UnaryPredicate P, set_type &set_) : P(P), set_(set_) {}
+
+ template <typename ArgumentT>
+ bool operator()(const ArgumentT &Arg) {
+ if (P(Arg)) {
+ set_.erase(Arg);
+ return true;
+ }
+ return false;
+ }
+ };
+
+ set_type set_; ///< The set.
+ vector_type vector_; ///< The vector.
+};
+
+/// \brief A SetVector that performs no allocations if smaller than
+/// a certain size.
+template <typename T, unsigned N>
+class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > {
+public:
+ SmallSetVector() {}
+
+ /// \brief Initialize a SmallSetVector with a range of elements
+ template<typename It>
+ SmallSetVector(It Start, It End) {
+ this->insert(Start, End);
+ }
+};
+
+} // End llvm namespace
+
+// vim: sw=2 ai
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
new file mode 100644
index 0000000..4aa3bc2
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h
@@ -0,0 +1,600 @@
+//===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SmallBitVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLBITVECTOR_H
+#define LLVM_ADT_SMALLBITVECTOR_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+#include <climits>
+#include <limits>
+
+namespace llvm {
+
+/// SmallBitVector - This is a 'bitvector' (really, a variable-sized bit array),
+/// optimized for the case when the array is small. It contains one
+/// pointer-sized field, which is directly used as a plain collection of bits
+/// when possible, or as a pointer to a larger heap-allocated array when
+/// necessary. This allows normal "small" cases to be fast without losing
+/// generality for large inputs.
+///
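+/// A brief usage sketch (illustrative):
+/// \code
+///   SmallBitVector BV(10);  // 10 bits; stored inline on 32- and 64-bit hosts.
+///   BV.set(3);
+///   BV.resize(1000);        // Transparently switches to heap-allocated bits.
+///   assert(BV.test(3) && "Bit values survive the representation change");
+/// \endcode
+///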
+class SmallBitVector {
+ // TODO: In "large" mode, a pointer to a BitVector is used, leading to an
+ // unnecessary level of indirection. It would be more efficient to use a
+ // pointer to memory containing size, allocation size, and the array of bits.
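+  //
+  // X is a tagged union: in small mode the low bit is 1 and the rest of the
+  // word holds the inline size and data bits (see the enum below); in large
+  // mode the low bit is 0 and X is a suitably aligned BitVector pointer.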
+ uintptr_t X;
+
+ enum {
+ // The number of bits in this class.
+ NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
+
+ // One bit is used to discriminate between small and large mode. The
+ // remaining bits are used for the small-mode representation.
+ SmallNumRawBits = NumBaseBits - 1,
+
+ // A few more bits are used to store the size of the bit set in small mode.
+ // Theoretically this is a ceil-log2. These bits are encoded in the most
+ // significant bits of the raw bits.
+ SmallNumSizeBits = (NumBaseBits == 32 ? 5 :
+ NumBaseBits == 64 ? 6 :
+ SmallNumRawBits),
+
+ // The remaining bits are used to store the actual set in small mode.
+ SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits
+ };
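+
+  // A worked example of the layout above (illustrative): on a 64-bit host,
+  // NumBaseBits = 64, so one mode bit leaves SmallNumRawBits = 63, the size
+  // takes SmallNumSizeBits = 6, and SmallNumDataBits = 57 bits remain for
+  // inline data. A 32-bit host gets 31 - 5 = 26 inline data bits.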
+
+ static_assert(NumBaseBits == 64 || NumBaseBits == 32,
+ "Unsupported word size");
+
+public:
+ typedef unsigned size_type;
+ // Encapsulation of a single bit.
+ class reference {
+ SmallBitVector &TheVector;
+ unsigned BitPos;
+
+ public:
+ reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {}
+
+ reference(const reference&) = default;
+
+ reference& operator=(reference t) {
+ *this = bool(t);
+ return *this;
+ }
+
+ reference& operator=(bool t) {
+ if (t)
+ TheVector.set(BitPos);
+ else
+ TheVector.reset(BitPos);
+ return *this;
+ }
+
+ operator bool() const {
+ return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos);
+ }
+ };
+
+private:
+ bool isSmall() const {
+ return X & uintptr_t(1);
+ }
+
+ BitVector *getPointer() const {
+ assert(!isSmall());
+ return reinterpret_cast<BitVector *>(X);
+ }
+
+ void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) {
+ X = 1;
+ setSmallSize(NewSize);
+ setSmallBits(NewSmallBits);
+ }
+
+ void switchToLarge(BitVector *BV) {
+ X = reinterpret_cast<uintptr_t>(BV);
+ assert(!isSmall() && "Tried to use an unaligned pointer");
+ }
+
+ // Return all the bits used for the "small" representation; this includes
+ // bits for the size as well as the element bits.
+ uintptr_t getSmallRawBits() const {
+ assert(isSmall());
+ return X >> 1;
+ }
+
+ void setSmallRawBits(uintptr_t NewRawBits) {
+ assert(isSmall());
+ X = (NewRawBits << 1) | uintptr_t(1);
+ }
+
+ // Return the size.
+ size_t getSmallSize() const {
+ return getSmallRawBits() >> SmallNumDataBits;
+ }
+
+ void setSmallSize(size_t Size) {
+ setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits));
+ }
+
+ // Return the element bits.
+ uintptr_t getSmallBits() const {
+ return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize());
+ }
+
+ void setSmallBits(uintptr_t NewBits) {
+ setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) |
+ (getSmallSize() << SmallNumDataBits));
+ }
+
+public:
+ /// SmallBitVector default ctor - Creates an empty bitvector.
+ SmallBitVector() : X(1) {}
+
+  /// SmallBitVector ctor - Creates a bitvector of the specified number of
+  /// bits. All bits are initialized to the specified value.
+ explicit SmallBitVector(unsigned s, bool t = false) {
+ if (s <= SmallNumDataBits)
+ switchToSmall(t ? ~uintptr_t(0) : 0, s);
+ else
+ switchToLarge(new BitVector(s, t));
+ }
+
+ /// SmallBitVector copy ctor.
+ SmallBitVector(const SmallBitVector &RHS) {
+ if (RHS.isSmall())
+ X = RHS.X;
+ else
+ switchToLarge(new BitVector(*RHS.getPointer()));
+ }
+
+ SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+ RHS.X = 1;
+ }
+
+ ~SmallBitVector() {
+ if (!isSmall())
+ delete getPointer();
+ }
+
+ /// empty - Tests whether there are no bits in this bitvector.
+ bool empty() const {
+ return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
+ }
+
+ /// size - Returns the number of bits in this bitvector.
+ size_t size() const {
+ return isSmall() ? getSmallSize() : getPointer()->size();
+ }
+
+ /// count - Returns the number of bits which are set.
+ size_type count() const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ return countPopulation(Bits);
+ }
+ return getPointer()->count();
+ }
+
+ /// any - Returns true if any bit is set.
+ bool any() const {
+ if (isSmall())
+ return getSmallBits() != 0;
+ return getPointer()->any();
+ }
+
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ if (isSmall())
+ return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+ return getPointer()->all();
+ }
+
+ /// none - Returns true if none of the bits are set.
+ bool none() const {
+ if (isSmall())
+ return getSmallBits() == 0;
+ return getPointer()->none();
+ }
+
+ /// find_first - Returns the index of the first set bit, -1 if none
+ /// of the bits are set.
+ int find_first() const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ if (Bits == 0)
+ return -1;
+ return countTrailingZeros(Bits);
+ }
+ return getPointer()->find_first();
+ }
+
+ /// find_next - Returns the index of the next set bit following the
+ /// "Prev" bit. Returns -1 if the next set bit is not found.
+ int find_next(unsigned Prev) const {
+ if (isSmall()) {
+ uintptr_t Bits = getSmallBits();
+ // Mask off previous bits.
+ Bits &= ~uintptr_t(0) << (Prev + 1);
+ if (Bits == 0 || Prev + 1 >= getSmallSize())
+ return -1;
+ return countTrailingZeros(Bits);
+ }
+ return getPointer()->find_next(Prev);
+ }
+
+ /// clear - Clear all bits.
+ void clear() {
+ if (!isSmall())
+ delete getPointer();
+ switchToSmall(0, 0);
+ }
+
+ /// resize - Grow or shrink the bitvector.
+ void resize(unsigned N, bool t = false) {
+ if (!isSmall()) {
+ getPointer()->resize(N, t);
+ } else if (SmallNumDataBits >= N) {
+ uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0;
+ setSmallSize(N);
+ setSmallBits(NewBits | getSmallBits());
+ } else {
+ BitVector *BV = new BitVector(N, t);
+ uintptr_t OldBits = getSmallBits();
+ for (size_t i = 0, e = getSmallSize(); i != e; ++i)
+ (*BV)[i] = (OldBits >> i) & 1;
+ switchToLarge(BV);
+ }
+ }
+
+ void reserve(unsigned N) {
+ if (isSmall()) {
+ if (N > SmallNumDataBits) {
+ uintptr_t OldBits = getSmallRawBits();
+ size_t SmallSize = getSmallSize();
+ BitVector *BV = new BitVector(SmallSize);
+ for (size_t i = 0; i < SmallSize; ++i)
+ if ((OldBits >> i) & 1)
+ BV->set(i);
+ BV->reserve(N);
+ switchToLarge(BV);
+ }
+ } else {
+ getPointer()->reserve(N);
+ }
+ }
+
+ // Set, reset, flip
+ SmallBitVector &set() {
+ if (isSmall())
+ setSmallBits(~uintptr_t(0));
+ else
+ getPointer()->set();
+ return *this;
+ }
+
+ SmallBitVector &set(unsigned Idx) {
+ if (isSmall()) {
+ assert(Idx <= static_cast<unsigned>(
+ std::numeric_limits<uintptr_t>::digits) &&
+ "undefined behavior");
+ setSmallBits(getSmallBits() | (uintptr_t(1) << Idx));
+    } else
+      getPointer()->set(Idx);
+ return *this;
+ }
+
+ /// set - Efficiently set a range of bits in [I, E)
+ SmallBitVector &set(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to set backwards range!");
+ assert(E <= size() && "Attempted to set out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() | Mask);
+ } else
+ getPointer()->set(I, E);
+ return *this;
+ }
+
+ SmallBitVector &reset() {
+ if (isSmall())
+ setSmallBits(0);
+ else
+ getPointer()->reset();
+ return *this;
+ }
+
+ SmallBitVector &reset(unsigned Idx) {
+ if (isSmall())
+ setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx));
+ else
+ getPointer()->reset(Idx);
+ return *this;
+ }
+
+ /// reset - Efficiently reset a range of bits in [I, E)
+ SmallBitVector &reset(unsigned I, unsigned E) {
+ assert(I <= E && "Attempted to reset backwards range!");
+ assert(E <= size() && "Attempted to reset out-of-bounds range!");
+ if (I == E) return *this;
+ if (isSmall()) {
+ uintptr_t EMask = ((uintptr_t)1) << E;
+ uintptr_t IMask = ((uintptr_t)1) << I;
+ uintptr_t Mask = EMask - IMask;
+ setSmallBits(getSmallBits() & ~Mask);
+ } else
+ getPointer()->reset(I, E);
+ return *this;
+ }
+
+ SmallBitVector &flip() {
+ if (isSmall())
+ setSmallBits(~getSmallBits());
+ else
+ getPointer()->flip();
+ return *this;
+ }
+
+ SmallBitVector &flip(unsigned Idx) {
+ if (isSmall())
+ setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx));
+ else
+ getPointer()->flip(Idx);
+ return *this;
+ }
+
+ // No argument flip.
+ SmallBitVector operator~() const {
+ return SmallBitVector(*this).flip();
+ }
+
+ // Indexing.
+ reference operator[](unsigned Idx) {
+ assert(Idx < size() && "Out-of-bounds Bit access.");
+ return reference(*this, Idx);
+ }
+
+ bool operator[](unsigned Idx) const {
+ assert(Idx < size() && "Out-of-bounds Bit access.");
+ if (isSmall())
+ return ((getSmallBits() >> Idx) & 1) != 0;
+ return getPointer()->operator[](Idx);
+ }
+
+ bool test(unsigned Idx) const {
+ return (*this)[Idx];
+ }
+
+ /// Test if any common bits are set.
+ bool anyCommon(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->anyCommon(*RHS.getPointer());
+
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && RHS.test(i))
+ return true;
+ return false;
+ }
+
+ // Comparison operators.
+ bool operator==(const SmallBitVector &RHS) const {
+ if (size() != RHS.size())
+ return false;
+ if (isSmall())
+ return getSmallBits() == RHS.getSmallBits();
+ else
+ return *getPointer() == *RHS.getPointer();
+ }
+
+ bool operator!=(const SmallBitVector &RHS) const {
+ return !(*this == RHS);
+ }
+
+ // Intersection, union, disjoint union.
+ SmallBitVector &operator&=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall())
+ setSmallBits(getSmallBits() & RHS.getSmallBits());
+ else if (!RHS.isSmall())
+ getPointer()->operator&=(*RHS.getPointer());
+ else {
+ SmallBitVector Copy = RHS;
+ Copy.resize(size());
+ getPointer()->operator&=(*Copy.getPointer());
+ }
+ return *this;
+ }
+
+ /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS.
+ SmallBitVector &reset(const SmallBitVector &RHS) {
+ if (isSmall() && RHS.isSmall())
+ setSmallBits(getSmallBits() & ~RHS.getSmallBits());
+ else if (!isSmall() && !RHS.isSmall())
+ getPointer()->reset(*RHS.getPointer());
+ else
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (RHS.test(i))
+ reset(i);
+
+ return *this;
+ }
+
+  /// test - Check whether this set contains a bit that is not set in RHS,
+  /// i.e. whether (*this & ~RHS) is non-zero. Conceptually the same as
+  /// copying *this, calling reset(RHS), then any().
+ bool test(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & ~RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->test(*RHS.getPointer());
+
+ unsigned i, e;
+ for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && !RHS.test(i))
+ return true;
+
+ for (e = size(); i != e; ++i)
+ if (test(i))
+ return true;
+
+ return false;
+ }
+
+ SmallBitVector &operator|=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall())
+ setSmallBits(getSmallBits() | RHS.getSmallBits());
+ else if (!RHS.isSmall())
+ getPointer()->operator|=(*RHS.getPointer());
+ else {
+ SmallBitVector Copy = RHS;
+ Copy.resize(size());
+ getPointer()->operator|=(*Copy.getPointer());
+ }
+ return *this;
+ }
+
+ SmallBitVector &operator^=(const SmallBitVector &RHS) {
+ resize(std::max(size(), RHS.size()));
+ if (isSmall())
+ setSmallBits(getSmallBits() ^ RHS.getSmallBits());
+ else if (!RHS.isSmall())
+ getPointer()->operator^=(*RHS.getPointer());
+ else {
+ SmallBitVector Copy = RHS;
+ Copy.resize(size());
+ getPointer()->operator^=(*Copy.getPointer());
+ }
+ return *this;
+ }
+
+ // Assignment operator.
+ const SmallBitVector &operator=(const SmallBitVector &RHS) {
+ if (isSmall()) {
+ if (RHS.isSmall())
+ X = RHS.X;
+ else
+ switchToLarge(new BitVector(*RHS.getPointer()));
+ } else {
+ if (!RHS.isSmall())
+ *getPointer() = *RHS.getPointer();
+ else {
+ delete getPointer();
+ X = RHS.X;
+ }
+ }
+ return *this;
+ }
+
+ const SmallBitVector &operator=(SmallBitVector &&RHS) {
+ if (this != &RHS) {
+ clear();
+ swap(RHS);
+ }
+ return *this;
+ }
+
+ void swap(SmallBitVector &RHS) {
+ std::swap(X, RHS.X);
+ }
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, false>(Mask, MaskWords);
+ else
+ getPointer()->setBitsInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, false>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsInMask(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, true>(Mask, MaskWords);
+ else
+ getPointer()->setBitsNotInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, true>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsNotInMask(Mask, MaskWords);
+ }
+
+private:
+ template <bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!");
+ uintptr_t M = Mask[0];
+    // Avoid reading a second mask word that the caller did not provide.
+    if (NumBaseBits == 64 && MaskWords >= 2)
+      M |= uint64_t(Mask[1]) << 32;
+ if (InvertMask)
+ M = ~M;
+ if (AddBits)
+ setSmallBits(getSmallBits() | M);
+ else
+ setSmallBits(getSmallBits() & ~M);
+ }
+};
+
+inline SmallBitVector
+operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result &= RHS;
+ return Result;
+}
+
+inline SmallBitVector
+operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result |= RHS;
+ return Result;
+}
+
+inline SmallBitVector
+operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
+ SmallBitVector Result(LHS);
+ Result ^= RHS;
+ return Result;
+}
+
+} // End llvm namespace
+
+namespace std {
+ /// Implement std::swap in terms of BitVector swap.
+ inline void
+ swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
+ LHS.swap(RHS);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
new file mode 100644
index 0000000..3d98e8f
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h
@@ -0,0 +1,350 @@
+//===- llvm/ADT/SmallPtrSet.h - 'Normally small' pointer set ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallPtrSet class. See the doxygen comment for
+// SmallPtrSetImplBase for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLPTRSET_H
+#define LLVM_ADT_SMALLPTRSET_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <utility>
+
+namespace llvm {
+
+class SmallPtrSetIteratorImpl;
+
+/// SmallPtrSetImplBase - This is the common code shared among all the
+/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
+/// for small and one for large sets.
+///
+/// Small sets use an array of pointers allocated in the SmallPtrSet object,
+/// which is treated as a simple array of pointers. When a pointer is added to
+/// the set, the array is scanned to see if the element already exists; if not,
+/// the element is 'pushed back' onto the array. If we run out of space in the
+/// array, we grow into the 'large set' case. SmallPtrSet should be used when
+/// the sets are often small. In this case, no memory allocation is used, and
+/// only light-weight and cache-efficient scanning is used.
+///
+/// Large sets use a classic exponentially-probed hash table. Empty buckets are
+/// represented with an illegal pointer value (-1) to allow null pointers to be
+/// inserted. Tombstones are represented with another illegal pointer value
+/// (-2), to allow deletion. The hash table is resized when the table is 3/4 or
+/// more full. When this happens, the table is doubled in size.
+///
+class SmallPtrSetImplBase {
+ friend class SmallPtrSetIteratorImpl;
+
+protected:
+ /// SmallArray - Points to a fixed size set of buckets, used in 'small mode'.
+ const void **SmallArray;
+ /// CurArray - This is the current set of buckets. If equal to SmallArray,
+ /// then the set is in 'small mode'.
+ const void **CurArray;
+ /// CurArraySize - The allocated size of CurArray, always a power of two.
+ unsigned CurArraySize;
+
+ // If small, this is # elts allocated consecutively
+ unsigned NumElements;
+ unsigned NumTombstones;
+
+ // Helpers to copy and move construct a SmallPtrSet.
+ SmallPtrSetImplBase(const void **SmallStorage, const SmallPtrSetImplBase &that);
+ SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize,
+ SmallPtrSetImplBase &&that);
+ explicit SmallPtrSetImplBase(const void **SmallStorage, unsigned SmallSize) :
+ SmallArray(SmallStorage), CurArray(SmallStorage), CurArraySize(SmallSize) {
+ assert(SmallSize && (SmallSize & (SmallSize-1)) == 0 &&
+ "Initial size must be a power of two!");
+ clear();
+ }
+ ~SmallPtrSetImplBase();
+
+public:
+ typedef unsigned size_type;
+ bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return size() == 0; }
+ size_type size() const { return NumElements; }
+
+ void clear() {
+ // If the capacity of the array is huge, and the # elements used is small,
+ // shrink the array.
+ if (!isSmall() && NumElements*4 < CurArraySize && CurArraySize > 32)
+ return shrink_and_clear();
+
+ // Fill the array with empty markers.
+ memset(CurArray, -1, CurArraySize*sizeof(void*));
+ NumElements = 0;
+ NumTombstones = 0;
+ }
+
+protected:
+ static void *getTombstoneMarker() { return reinterpret_cast<void*>(-2); }
+ static void *getEmptyMarker() {
+ // Note that -1 is chosen to make clear() efficiently implementable with
+ // memset and because it's not a valid pointer value.
+ return reinterpret_cast<void*>(-1);
+ }
+
+ /// insert_imp - This returns true if the pointer was new to the set, false if
+ /// it was already in the set. This is hidden from the client so that the
+ /// derived class can check that the right type of pointer is passed in.
+ std::pair<const void *const *, bool> insert_imp(const void *Ptr);
+
+ /// erase_imp - If the set contains the specified pointer, remove it and
+ /// return true, otherwise return false. This is hidden from the client so
+ /// that the derived class can check that the right type of pointer is passed
+ /// in.
+ bool erase_imp(const void * Ptr);
+
+ bool count_imp(const void * Ptr) const {
+ if (isSmall()) {
+ // Linear search for the item.
+ for (const void *const *APtr = SmallArray,
+ *const *E = SmallArray+NumElements; APtr != E; ++APtr)
+ if (*APtr == Ptr)
+ return true;
+ return false;
+ }
+
+ // Big set case.
+ return *FindBucketFor(Ptr) == Ptr;
+ }
+
+private:
+ bool isSmall() const { return CurArray == SmallArray; }
+
+ const void * const *FindBucketFor(const void *Ptr) const;
+ void shrink_and_clear();
+
+ /// Grow - Allocate a larger backing store for the buckets and move it over.
+ void Grow(unsigned NewSize);
+
+ void operator=(const SmallPtrSetImplBase &RHS) = delete;
+
+protected:
+ /// swap - Swaps the elements of two sets.
+ /// Note: This method assumes that both sets have the same small size.
+ void swap(SmallPtrSetImplBase &RHS);
+
+ void CopyFrom(const SmallPtrSetImplBase &RHS);
+ void MoveFrom(unsigned SmallSize, SmallPtrSetImplBase &&RHS);
+};
+
+/// SmallPtrSetIteratorImpl - This is the common base class shared between all
+/// instances of SmallPtrSetIterator.
+class SmallPtrSetIteratorImpl {
+protected:
+ const void *const *Bucket;
+ const void *const *End;
+
+public:
+ explicit SmallPtrSetIteratorImpl(const void *const *BP, const void*const *E)
+ : Bucket(BP), End(E) {
+ AdvanceIfNotValid();
+ }
+
+ bool operator==(const SmallPtrSetIteratorImpl &RHS) const {
+ return Bucket == RHS.Bucket;
+ }
+ bool operator!=(const SmallPtrSetIteratorImpl &RHS) const {
+ return Bucket != RHS.Bucket;
+ }
+
+protected:
+ /// AdvanceIfNotValid - If the current bucket isn't valid, advance to a bucket
+ /// that is. This is guaranteed to stop because the end() bucket is marked
+ /// valid.
+ void AdvanceIfNotValid() {
+ assert(Bucket <= End);
+ while (Bucket != End &&
+ (*Bucket == SmallPtrSetImplBase::getEmptyMarker() ||
+ *Bucket == SmallPtrSetImplBase::getTombstoneMarker()))
+ ++Bucket;
+ }
+};
+
+/// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet.
+template<typename PtrTy>
+class SmallPtrSetIterator : public SmallPtrSetIteratorImpl {
+ typedef PointerLikeTypeTraits<PtrTy> PtrTraits;
+
+public:
+ typedef PtrTy value_type;
+ typedef PtrTy reference;
+ typedef PtrTy pointer;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ explicit SmallPtrSetIterator(const void *const *BP, const void *const *E)
+ : SmallPtrSetIteratorImpl(BP, E) {}
+
+  // Most methods are provided by the base class.
+
+ const PtrTy operator*() const {
+ assert(Bucket < End);
+ return PtrTraits::getFromVoidPointer(const_cast<void*>(*Bucket));
+ }
+
+ inline SmallPtrSetIterator& operator++() { // Preincrement
+ ++Bucket;
+ AdvanceIfNotValid();
+ return *this;
+ }
+
+ SmallPtrSetIterator operator++(int) { // Postincrement
+ SmallPtrSetIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+/// RoundUpToPowerOfTwo - This is a helper template that rounds N up to the next
+/// power of two (which means N itself if N is already a power of two).
+template<unsigned N>
+struct RoundUpToPowerOfTwo;
+
+/// RoundUpToPowerOfTwoH - If N is not a power of two, increase it. This is a
+/// helper template used to implement RoundUpToPowerOfTwo.
+template<unsigned N, bool isPowerTwo>
+struct RoundUpToPowerOfTwoH {
+ enum { Val = N };
+};
+template<unsigned N>
+struct RoundUpToPowerOfTwoH<N, false> {
+ enum {
+ // We could just use NextVal = N+1, but this converges faster. N|(N-1) sets
+ // the right-most zero bits to one all at once, e.g. 0b0011000 -> 0b0011111.
+ Val = RoundUpToPowerOfTwo<(N|(N-1)) + 1>::Val
+ };
+};
+
+template<unsigned N>
+struct RoundUpToPowerOfTwo {
+ enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
+};
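+
+// Illustrative compile-time checks of the recursion above (assumptions: none
+// beyond the templates just defined): powers of two map to themselves and
+// everything else rounds up.
+static_assert(RoundUpToPowerOfTwo<1>::Val == 1, "1 is already a power of two");
+static_assert(RoundUpToPowerOfTwo<5>::Val == 8, "5 rounds up to 8");
+static_assert(RoundUpToPowerOfTwo<8>::Val == 8, "8 is already a power of two");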
+
+/// \brief A templated base class for \c SmallPtrSet which provides the
+/// typesafe interface that is common across all small sizes.
+///
+/// This is particularly useful for passing around between interface boundaries
+/// to avoid encoding a particular small size in the interface boundary.
+template <typename PtrType>
+class SmallPtrSetImpl : public SmallPtrSetImplBase {
+ typedef PointerLikeTypeTraits<PtrType> PtrTraits;
+
+ SmallPtrSetImpl(const SmallPtrSetImpl &) = delete;
+
+protected:
+ // Constructors that forward to the base.
+ SmallPtrSetImpl(const void **SmallStorage, const SmallPtrSetImpl &that)
+ : SmallPtrSetImplBase(SmallStorage, that) {}
+ SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize,
+ SmallPtrSetImpl &&that)
+ : SmallPtrSetImplBase(SmallStorage, SmallSize, std::move(that)) {}
+ explicit SmallPtrSetImpl(const void **SmallStorage, unsigned SmallSize)
+ : SmallPtrSetImplBase(SmallStorage, SmallSize) {}
+
+public:
+ typedef SmallPtrSetIterator<PtrType> iterator;
+ typedef SmallPtrSetIterator<PtrType> const_iterator;
+
+ /// Inserts Ptr if and only if there is no element in the container equal to
+ /// Ptr. The bool component of the returned pair is true if and only if the
+ /// insertion takes place, and the iterator component of the pair points to
+ /// the element equal to Ptr.
+ std::pair<iterator, bool> insert(PtrType Ptr) {
+ auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+ return std::make_pair(iterator(p.first, CurArray + CurArraySize), p.second);
+ }
+
+ /// erase - If the set contains the specified pointer, remove it and return
+ /// true, otherwise return false.
+ bool erase(PtrType Ptr) {
+ return erase_imp(PtrTraits::getAsVoidPointer(Ptr));
+ }
+
+ /// count - Return 1 if the specified pointer is in the set, 0 otherwise.
+ size_type count(PtrType Ptr) const {
+ return count_imp(PtrTraits::getAsVoidPointer(Ptr)) ? 1 : 0;
+ }
+
+ template <typename IterT>
+ void insert(IterT I, IterT E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+ inline iterator begin() const {
+ return iterator(CurArray, CurArray+CurArraySize);
+ }
+ inline iterator end() const {
+ return iterator(CurArray+CurArraySize, CurArray+CurArraySize);
+ }
+};
+
+/// SmallPtrSet - This class implements a set which is optimized for holding
+/// SmallSize or fewer elements. This internally rounds up SmallSize to the next
+/// power of two if it is not already a power of two. See the comments above
+/// SmallPtrSetImplBase for details of the algorithm.
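+///
+/// A brief usage sketch (illustrative):
+/// \code
+///   SmallPtrSet<int *, 8> Visited;
+///   int X;
+///   if (Visited.insert(&X).second) {
+///     // &X was not yet in the set and has been added; the set only grows
+///     // into a heap-allocated hash table past 8 distinct pointers.
+///   }
+/// \endcode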
+template<class PtrType, unsigned SmallSize>
+class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
+ typedef SmallPtrSetImpl<PtrType> BaseT;
+
+ // Make sure that SmallSize is a power of two, round up if not.
+ enum { SmallSizePowTwo = RoundUpToPowerOfTwo<SmallSize>::Val };
+ /// SmallStorage - Fixed size storage used in 'small mode'.
+ const void *SmallStorage[SmallSizePowTwo];
+
+public:
+ SmallPtrSet() : BaseT(SmallStorage, SmallSizePowTwo) {}
+ SmallPtrSet(const SmallPtrSet &that) : BaseT(SmallStorage, that) {}
+ SmallPtrSet(SmallPtrSet &&that)
+ : BaseT(SmallStorage, SmallSizePowTwo, std::move(that)) {}
+
+ template<typename It>
+ SmallPtrSet(It I, It E) : BaseT(SmallStorage, SmallSizePowTwo) {
+ this->insert(I, E);
+ }
+
+ SmallPtrSet<PtrType, SmallSize> &
+ operator=(const SmallPtrSet<PtrType, SmallSize> &RHS) {
+ if (&RHS != this)
+ this->CopyFrom(RHS);
+ return *this;
+ }
+
+ SmallPtrSet<PtrType, SmallSize>&
+ operator=(SmallPtrSet<PtrType, SmallSize> &&RHS) {
+ if (&RHS != this)
+ this->MoveFrom(SmallSizePowTwo, std::move(RHS));
+ return *this;
+ }
+
+ /// swap - Swaps the elements of two sets.
+ void swap(SmallPtrSet<PtrType, SmallSize> &RHS) {
+ SmallPtrSetImplBase::swap(RHS);
+ }
+};
+}
+
+namespace std {
+ /// Implement std::swap in terms of SmallPtrSet swap.
+ template<class T, unsigned N>
+ inline void swap(llvm::SmallPtrSet<T, N> &LHS, llvm::SmallPtrSet<T, N> &RHS) {
+ LHS.swap(RHS);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SmallSet.h b/contrib/llvm/include/llvm/ADT/SmallSet.h
new file mode 100644
index 0000000..39a57b8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SmallSet.h
@@ -0,0 +1,131 @@
+//===- llvm/ADT/SmallSet.h - 'Normally small' sets --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallSet class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSET_H
+#define LLVM_ADT_SMALLSET_H
+
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include <set>
+
+namespace llvm {
+
+/// SmallSet - This maintains a set of unique values, optimizing for the case
+/// when the set is small (fewer than N elements). In this case, the set can be
+/// maintained with no mallocs. If the set gets large, we expand to using a
+/// std::set to maintain reasonable lookup times.
+///
+/// Note that this set does not provide a way to iterate over members in the
+/// set.
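+///
+/// A brief usage sketch (illustrative):
+/// \code
+///   SmallSet<unsigned, 4> Seen;
+///   for (unsigned ID : {1u, 2u, 1u})
+///     if (Seen.insert(ID).second)
+///       ; // First occurrence of ID; the duplicate 1u reports false.
+/// \endcode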
+template <typename T, unsigned N, typename C = std::less<T> >
+class SmallSet {
+ /// Use a SmallVector to hold the elements here (even though it will never
+ /// reach its 'large' stage) to avoid calling the default ctors of elements
+ /// we will never use.
+ SmallVector<T, N> Vector;
+ std::set<T, C> Set;
+ typedef typename SmallVector<T, N>::const_iterator VIterator;
+ typedef typename SmallVector<T, N>::iterator mutable_iterator;
+
+public:
+ typedef size_t size_type;
+ SmallSet() {}
+
+ bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
+ return Vector.empty() && Set.empty();
+ }
+
+ size_type size() const {
+ return isSmall() ? Vector.size() : Set.size();
+ }
+
+ /// count - Return 1 if the element is in the set, 0 otherwise.
+ size_type count(const T &V) const {
+ if (isSmall()) {
+ // Since the collection is small, just do a linear search.
+ return vfind(V) == Vector.end() ? 0 : 1;
+ } else {
+ return Set.count(V);
+ }
+ }
+
+ /// insert - Insert an element into the set if it isn't already there.
+ /// Returns true if the element is inserted (it was not in the set before).
+ /// The first value of the returned pair is unused and provided for
+ /// partial compatibility with the standard library self-associative container
+ /// concept.
+ // FIXME: Add iterators that abstract over the small and large form, and then
+ // return those here.
+ std::pair<NoneType, bool> insert(const T &V) {
+ if (!isSmall())
+ return std::make_pair(None, Set.insert(V).second);
+
+ VIterator I = vfind(V);
+ if (I != Vector.end()) // Don't reinsert if it already exists.
+ return std::make_pair(None, false);
+ if (Vector.size() < N) {
+ Vector.push_back(V);
+ return std::make_pair(None, true);
+ }
+
+ // Otherwise, grow from vector to set.
+ while (!Vector.empty()) {
+ Set.insert(Vector.back());
+ Vector.pop_back();
+ }
+ Set.insert(V);
+ return std::make_pair(None, true);
+ }
+
+ template <typename IterT>
+ void insert(IterT I, IterT E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+ bool erase(const T &V) {
+ if (!isSmall())
+ return Set.erase(V);
+ for (mutable_iterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+ if (*I == V) {
+ Vector.erase(I);
+ return true;
+ }
+ return false;
+ }
+
+ void clear() {
+ Vector.clear();
+ Set.clear();
+ }
+
+private:
+ bool isSmall() const { return Set.empty(); }
+
+ VIterator vfind(const T &V) const {
+ for (VIterator I = Vector.begin(), E = Vector.end(); I != E; ++I)
+ if (*I == V)
+ return I;
+ return Vector.end();
+ }
+};
+
+/// If this set is of pointer values, transparently switch over to using
+/// SmallPtrSet for performance.
+template <typename PointeeType, unsigned N>
+class SmallSet<PointeeType*, N> : public SmallPtrSet<PointeeType*, N> {};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SmallString.h b/contrib/llvm/include/llvm/ADT/SmallString.h
new file mode 100644
index 0000000..e569f54
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SmallString.h
@@ -0,0 +1,297 @@
+//===- llvm/ADT/SmallString.h - 'Normally small' strings --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallString class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLSTRING_H
+#define LLVM_ADT_SMALLSTRING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/// SmallString - A SmallString is just a SmallVector with methods and accessors
+/// that make it work better as a string (e.g. operator+=, str(), and implicit
+/// conversion to StringRef).
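+///
+/// A brief usage sketch (illustrative):
+/// \code
+///   SmallString<128> Path("foo");
+///   Path += "/bar";                     // Path == "foo/bar"
+///   StringRef Base = Path.substr(4);    // "bar"; no copy is made
+/// \endcode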
+template<unsigned InternalLen>
+class SmallString : public SmallVector<char, InternalLen> {
+public:
+ /// Default ctor - Initialize to empty.
+ SmallString() {}
+
+ /// Initialize from a StringRef.
+ SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
+ /// Initialize with a range.
+ template<typename ItTy>
+ SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
+
+ // Note that in order to add new overloads for append & assign, we have to
+ // duplicate the inherited versions so as not to inadvertently hide them.
+
+ /// @}
+ /// @name String Assignment
+ /// @{
+
+ /// Assign from a repeated element.
+ void assign(size_t NumElts, char Elt) {
+ this->SmallVectorImpl<char>::assign(NumElts, Elt);
+ }
+
+ /// Assign from an iterator pair.
+ template<typename in_iter>
+ void assign(in_iter S, in_iter E) {
+ this->clear();
+ SmallVectorImpl<char>::append(S, E);
+ }
+
+ /// Assign from a StringRef.
+ void assign(StringRef RHS) {
+ this->clear();
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// Assign from a SmallVector.
+ void assign(const SmallVectorImpl<char> &RHS) {
+ this->clear();
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// @}
+ /// @name String Concatenation
+ /// @{
+
+ /// Append from an iterator pair.
+ template<typename in_iter>
+ void append(in_iter S, in_iter E) {
+ SmallVectorImpl<char>::append(S, E);
+ }
+
+ void append(size_t NumInputs, char Elt) {
+ SmallVectorImpl<char>::append(NumInputs, Elt);
+ }
+
+
+ /// Append from a StringRef.
+ void append(StringRef RHS) {
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// Append from a SmallVector.
+ void append(const SmallVectorImpl<char> &RHS) {
+ SmallVectorImpl<char>::append(RHS.begin(), RHS.end());
+ }
+
+ /// @}
+ /// @name String Comparison
+ /// @{
+
+ /// Check for string equality. This is more efficient than compare() when
+  /// the relative ordering of unequal strings isn't needed.
+ bool equals(StringRef RHS) const {
+ return str().equals(RHS);
+ }
+
+ /// Check for string equality, ignoring case.
+ bool equals_lower(StringRef RHS) const {
+ return str().equals_lower(RHS);
+ }
+
+ /// Compare two strings; the result is -1, 0, or 1 if this string is
+ /// lexicographically less than, equal to, or greater than the \p RHS.
+ int compare(StringRef RHS) const {
+ return str().compare(RHS);
+ }
+
+ /// compare_lower - Compare two strings, ignoring case.
+ int compare_lower(StringRef RHS) const {
+ return str().compare_lower(RHS);
+ }
+
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ int compare_numeric(StringRef RHS) const {
+ return str().compare_numeric(RHS);
+ }
+
+ /// @}
+ /// @name String Predicates
+ /// @{
+
+ /// startswith - Check if this string starts with the given \p Prefix.
+ bool startswith(StringRef Prefix) const {
+ return str().startswith(Prefix);
+ }
+
+ /// endswith - Check if this string ends with the given \p Suffix.
+ bool endswith(StringRef Suffix) const {
+ return str().endswith(Suffix);
+ }
+
+ /// @}
+ /// @name String Searching
+ /// @{
+
+ /// find - Search for the first character \p C in the string.
+ ///
+ /// \return - The index of the first occurrence of \p C, or npos if not
+ /// found.
+ size_t find(char C, size_t From = 0) const {
+ return str().find(C, From);
+ }
+
+ /// Search for the first string \p Str in the string.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ size_t find(StringRef Str, size_t From = 0) const {
+ return str().find(Str, From);
+ }
+
+ /// Search for the last character \p C in the string.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ size_t rfind(char C, size_t From = StringRef::npos) const {
+ return str().rfind(C, From);
+ }
+
+ /// Search for the last string \p Str in the string.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ size_t rfind(StringRef Str) const {
+ return str().rfind(Str);
+ }
+
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
+ size_t find_first_of(char C, size_t From = 0) const {
+ return str().find_first_of(C, From);
+ }
+
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_of(Chars, From);
+ }
+
+ /// Find the first character in the string that is not \p C or npos if not
+ /// found.
+ size_t find_first_not_of(char C, size_t From = 0) const {
+ return str().find_first_not_of(C, From);
+ }
+
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_not_of(StringRef Chars, size_t From = 0) const {
+ return str().find_first_not_of(Chars, From);
+ }
+
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
+ size_t find_last_of(char C, size_t From = StringRef::npos) const {
+ return str().find_last_of(C, From);
+ }
+
+  /// Find the last character in the string that is in \p Chars, or npos if not
+ /// found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_last_of(
+ StringRef Chars, size_t From = StringRef::npos) const {
+ return str().find_last_of(Chars, From);
+ }
+
+ /// @}
+ /// @name Helpful Algorithms
+ /// @{
+
+ /// Return the number of occurrences of \p C in the string.
+ size_t count(char C) const {
+ return str().count(C);
+ }
+
+ /// Return the number of non-overlapped occurrences of \p Str in the
+ /// string.
+ size_t count(StringRef Str) const {
+ return str().count(Str);
+ }
+
+ /// @}
+ /// @name Substring Operations
+ /// @{
+
+ /// Return a reference to the substring from [Start, Start + N).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+  /// \param N The number of characters to include in the substring. If \p N
+ /// exceeds the number of characters remaining in the string, the string
+ /// suffix (starting with \p Start) will be returned.
+ StringRef substr(size_t Start, size_t N = StringRef::npos) const {
+ return str().substr(Start, N);
+ }
+
+ /// Return a reference to the substring from [Start, End).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos, or less than \p Start, or exceeds the
+ /// number of characters remaining in the string, the string suffix
+ /// (starting with \p Start) will be returned.
+ StringRef slice(size_t Start, size_t End) const {
+ return str().slice(Start, End);
+ }
+
+ // Extra methods.
+
+ /// Explicit conversion to StringRef.
+ StringRef str() const { return StringRef(this->begin(), this->size()); }
+
+ // TODO: Make this const, if it's safe...
+ const char* c_str() {
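+    // Temporarily push a NUL so the buffer is NUL-terminated, then pop it so
+    // size() is unchanged; the terminator byte stays behind in the buffer.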
+ this->push_back(0);
+ this->pop_back();
+ return this->data();
+ }
+
+ /// Implicit conversion to StringRef.
+ operator StringRef() const { return str(); }
+
+ // Extra operators.
+ const SmallString &operator=(StringRef RHS) {
+ this->clear();
+ return *this += RHS;
+ }
+
+ SmallString &operator+=(StringRef RHS) {
+ this->append(RHS.begin(), RHS.end());
+ return *this;
+ }
+ SmallString &operator+=(char C) {
+ this->push_back(C);
+ return *this;
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h
new file mode 100644
index 0000000..d1062ac
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SmallVector.h
@@ -0,0 +1,954 @@
+//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SmallVector class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SMALLVECTOR_H
+#define LLVM_ADT_SMALLVECTOR_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+
+namespace llvm {
+
+/// This is all the non-templated stuff common to all SmallVectors.
+class SmallVectorBase {
+protected:
+ void *BeginX, *EndX, *CapacityX;
+
+protected:
+ SmallVectorBase(void *FirstEl, size_t Size)
+ : BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
+
+ /// This is an implementation of the grow() method which only works
+ /// on POD-like data types and is out of line to reduce code duplication.
+ void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
+
+public:
+ /// This returns size()*sizeof(T).
+ size_t size_in_bytes() const {
+ return size_t((char*)EndX - (char*)BeginX);
+ }
+
+ /// capacity_in_bytes - This returns capacity()*sizeof(T).
+ size_t capacity_in_bytes() const {
+ return size_t((char*)CapacityX - (char*)BeginX);
+ }
+
+ bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const { return BeginX == EndX; }
+};
+
+template <typename T, unsigned N> struct SmallVectorStorage;
+
+/// This is the part of SmallVectorTemplateBase which does not depend on whether
+/// the type T is a POD. The extra dummy template argument is used by ArrayRef
+/// to avoid unnecessarily requiring T to be complete.
+template <typename T, typename = void>
+class SmallVectorTemplateCommon : public SmallVectorBase {
+private:
+ template <typename, unsigned> friend struct SmallVectorStorage;
+
+ // Allocate raw space for N elements of type T. If T has a ctor or dtor, we
+ // don't want it to be automatically run, so we need to represent the space as
+ // something else. Use an array of char of sufficient alignment.
+ typedef llvm::AlignedCharArrayUnion<T> U;
+ U FirstEl;
+ // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
+
+protected:
+ SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
+
+ void grow_pod(size_t MinSizeInBytes, size_t TSize) {
+ SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
+ }
+
+  /// Return true if this is a SmallVector which has not had dynamic
+ /// memory allocated for it.
+ bool isSmall() const {
+ return BeginX == static_cast<const void*>(&FirstEl);
+ }
+
+ /// Put this vector in a state of being small.
+ void resetToSmall() {
+ BeginX = EndX = CapacityX = &FirstEl;
+ }
+
+ void setEnd(T *P) { this->EndX = P; }
+public:
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T *iterator;
+ typedef const T *const_iterator;
+
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ typedef T &reference;
+ typedef const T &const_reference;
+ typedef T *pointer;
+ typedef const T *const_pointer;
+
+ // forward iterator creation methods.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ iterator begin() { return (iterator)this->BeginX; }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator begin() const { return (const_iterator)this->BeginX; }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ iterator end() { return (iterator)this->EndX; }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_iterator end() const { return (const_iterator)this->EndX; }
+protected:
+ iterator capacity_ptr() { return (iterator)this->CapacityX; }
+ const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
+public:
+
+ // reverse iterator creation methods.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ size_type size() const { return end()-begin(); }
+ size_type max_size() const { return size_type(-1) / sizeof(T); }
+
+ /// Return the total number of elements in the currently allocated buffer.
+ size_t capacity() const { return capacity_ptr() - begin(); }
+
+ /// Return a pointer to the vector's buffer, even if empty().
+ pointer data() { return pointer(begin()); }
+ /// Return a pointer to the vector's buffer, even if empty().
+ const_pointer data() const { return const_pointer(begin()); }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ reference operator[](size_type idx) {
+ assert(idx < size());
+ return begin()[idx];
+ }
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const_reference operator[](size_type idx) const {
+ assert(idx < size());
+ return begin()[idx];
+ }
+
+ reference front() {
+ assert(!empty());
+ return begin()[0];
+ }
+ const_reference front() const {
+ assert(!empty());
+ return begin()[0];
+ }
+
+ reference back() {
+ assert(!empty());
+ return end()[-1];
+ }
+ const_reference back() const {
+ assert(!empty());
+ return end()[-1];
+ }
+};
+
+/// SmallVectorTemplateBase<isPodLike = false> - This is where we put method
+/// implementations that are designed to work with non-POD-like T's.
+template <typename T, bool isPodLike>
+class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
+protected:
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+ static void destroy_range(T *S, T *E) {
+ while (S != E) {
+ --E;
+ E->~T();
+ }
+ }
+
+ /// Use move-assignment to move the range [I, E) onto the
+  /// objects starting with "Dest". This is just <algorithm>'s
+ /// std::move, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+ for (; I != E; ++I, ++Dest)
+ *Dest = ::std::move(*I);
+ return Dest;
+ }
+
+ /// Use move-assignment to move the range
+ /// [I, E) onto the objects ending at "Dest", moving objects
+ /// in reverse order. This is just <algorithm>'s
+ /// std::move_backward, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+ while (I != E)
+ *--Dest = ::std::move(*--E);
+ return Dest;
+ }
+
+ /// Move the range [I, E) into the uninitialized memory starting with "Dest",
+ /// constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ for (; I != E; ++I, ++Dest)
+ ::new ((void*) &*Dest) T(::std::move(*I));
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
+ /// constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+ std::uninitialized_copy(I, E, Dest);
+ }
+
+ /// Grow the allocated memory (without initializing new elements), doubling
+ /// the size of the allocated memory. Guarantees space for at least one more
+ /// element, or MinSize more elements if specified.
+ void grow(size_t MinSize = 0);
+
+public:
+ void push_back(const T &Elt) {
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ ::new ((void*) this->end()) T(Elt);
+ this->setEnd(this->end()+1);
+ }
+
+ void push_back(T &&Elt) {
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ ::new ((void*) this->end()) T(::std::move(Elt));
+ this->setEnd(this->end()+1);
+ }
+
+ void pop_back() {
+ this->setEnd(this->end()-1);
+ this->end()->~T();
+ }
+};
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T, bool isPodLike>
+void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
+ size_t CurCapacity = this->capacity();
+ size_t CurSize = this->size();
+ // Always grow, even from zero.
+ size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
+ if (NewCapacity < MinSize)
+ NewCapacity = MinSize;
+ T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
+
+ // Move the elements over.
+ this->uninitialized_move(this->begin(), this->end(), NewElts);
+
+ // Destroy the original elements.
+ destroy_range(this->begin(), this->end());
+
+ // If this wasn't grown from the inline copy, deallocate the old space.
+ if (!this->isSmall())
+ free(this->begin());
+
+ this->setEnd(NewElts+CurSize);
+ this->BeginX = NewElts;
+ this->CapacityX = this->begin()+NewCapacity;
+}
+
+
+/// SmallVectorTemplateBase<isPodLike = true> - This is where we put method
+/// implementations that are designed to work with POD-like T's.
+template <typename T>
+class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
+protected:
+ SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
+
+ // No need to do a destroy loop for POD's.
+ static void destroy_range(T *, T *) {}
+
+ /// Use move-assignment to move the range [I, E) onto the
+ /// objects starting with "Dest". For PODs, this is just memcpy.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+ return ::std::copy(I, E, Dest);
+ }
+
+ /// Use move-assignment to move the range [I, E) onto the objects ending at
+ /// "Dest", moving objects in reverse order.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+ return ::std::copy_backward(I, E, Dest);
+ }
+
+ /// Move the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ // Just do a copy.
+ uninitialized_copy(I, E, Dest);
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+ // Arbitrary iterator types; just use the basic implementation.
+ std::uninitialized_copy(I, E, Dest);
+ }
+
+ /// Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template <typename T1, typename T2>
+ static void uninitialized_copy(
+ T1 *I, T1 *E, T2 *Dest,
+ typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
+ T2>::value>::type * = nullptr) {
+ // Use memcpy for PODs iterated by pointers (which includes SmallVector
+ // iterators): std::uninitialized_copy optimizes to memmove, but we can
+ // use memcpy here. Note that I and E are iterators and thus might be
+ // invalid for memcpy if they are equal.
+ if (I != E)
+ memcpy(Dest, I, (E - I) * sizeof(T));
+ }
+
+ /// Double the size of the allocated memory, guaranteeing space for at
+ /// least one more element or MinSize if specified.
+ void grow(size_t MinSize = 0) {
+ this->grow_pod(MinSize*sizeof(T), sizeof(T));
+ }
+public:
+ void push_back(const T &Elt) {
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ memcpy(this->end(), &Elt, sizeof(T));
+ this->setEnd(this->end()+1);
+ }
+
+ void pop_back() {
+ this->setEnd(this->end()-1);
+ }
+};
+
+
+/// This class consists of common code factored out of the SmallVector class to
+/// reduce code duplication based on the SmallVector 'N' template parameter.
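+///
+/// A brief usage sketch (illustrative): take SmallVectorImpl<T>& in APIs so
+/// callers may pass a SmallVector<T, N> of any inline size N.
+/// \code
+///   void getItems(SmallVectorImpl<int> &Out) { Out.push_back(42); }
+///
+///   SmallVector<int, 8> Items;
+///   getItems(Items);   // Binds to the N-independent base class.
+/// \endcode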
+template <typename T>
+class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
+ typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
+
+ SmallVectorImpl(const SmallVectorImpl&) = delete;
+public:
+ typedef typename SuperClass::iterator iterator;
+ typedef typename SuperClass::size_type size_type;
+
+protected:
+ // Default ctor - Initialize to empty.
+ explicit SmallVectorImpl(unsigned N)
+ : SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
+ }
+
+public:
+ ~SmallVectorImpl() {
+ // Destroy the constructed elements in the vector.
+ this->destroy_range(this->begin(), this->end());
+
+ // If this wasn't grown from the inline copy, deallocate the old space.
+ if (!this->isSmall())
+ free(this->begin());
+ }
+
+
+ void clear() {
+ this->destroy_range(this->begin(), this->end());
+ this->EndX = this->BeginX;
+ }
+
+ void resize(size_type N) {
+ if (N < this->size()) {
+ this->destroy_range(this->begin()+N, this->end());
+ this->setEnd(this->begin()+N);
+ } else if (N > this->size()) {
+ if (this->capacity() < N)
+ this->grow(N);
+ for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
+ new (&*I) T();
+ this->setEnd(this->begin()+N);
+ }
+ }
+
+ void resize(size_type N, const T &NV) {
+ if (N < this->size()) {
+ this->destroy_range(this->begin()+N, this->end());
+ this->setEnd(this->begin()+N);
+ } else if (N > this->size()) {
+ if (this->capacity() < N)
+ this->grow(N);
+ std::uninitialized_fill(this->end(), this->begin()+N, NV);
+ this->setEnd(this->begin()+N);
+ }
+ }
+
+ void reserve(size_type N) {
+ if (this->capacity() < N)
+ this->grow(N);
+ }
+
+ T LLVM_ATTRIBUTE_UNUSED_RESULT pop_back_val() {
+ T Result = ::std::move(this->back());
+ this->pop_back();
+ return Result;
+ }
+
+ void swap(SmallVectorImpl &RHS);
+
+ /// Add the specified range to the end of the SmallVector.
+ template<typename in_iter>
+ void append(in_iter in_start, in_iter in_end) {
+ size_type NumInputs = std::distance(in_start, in_end);
+ // Grow allocated space if needed.
+ if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+ this->grow(this->size()+NumInputs);
+
+ // Copy the new elements over.
+ this->uninitialized_copy(in_start, in_end, this->end());
+ this->setEnd(this->end() + NumInputs);
+ }
+
+ /// Add the specified range to the end of the SmallVector.
+ void append(size_type NumInputs, const T &Elt) {
+ // Grow allocated space if needed.
+ if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+ this->grow(this->size()+NumInputs);
+
+ // Copy the new elements over.
+ std::uninitialized_fill_n(this->end(), NumInputs, Elt);
+ this->setEnd(this->end() + NumInputs);
+ }
+
+ void append(std::initializer_list<T> IL) {
+ append(IL.begin(), IL.end());
+ }
+
+ void assign(size_type NumElts, const T &Elt) {
+ clear();
+ if (this->capacity() < NumElts)
+ this->grow(NumElts);
+ this->setEnd(this->begin()+NumElts);
+ std::uninitialized_fill(this->begin(), this->end(), Elt);
+ }
+
+ void assign(std::initializer_list<T> IL) {
+ clear();
+ append(IL);
+ }
+
+ iterator erase(iterator I) {
+ assert(I >= this->begin() && "Iterator to erase is out of bounds.");
+ assert(I < this->end() && "Erasing at past-the-end iterator.");
+
+ iterator N = I;
+ // Shift all elts down one.
+ this->move(I+1, this->end(), I);
+ // Drop the last elt.
+ this->pop_back();
+    return N;
+ }
+
+ iterator erase(iterator S, iterator E) {
+ assert(S >= this->begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= this->end() && "Trying to erase past the end.");
+
+ iterator N = S;
+ // Shift all elts down.
+ iterator I = this->move(E, this->end(), S);
+ // Drop the last elts.
+ this->destroy_range(I, this->end());
+ this->setEnd(I);
+    return N;
+ }
+
+ iterator insert(iterator I, T &&Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ this->push_back(::std::move(Elt));
+ return this->end()-1;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ if (this->EndX >= this->CapacityX) {
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
+ }
+
+ ::new ((void*) this->end()) T(::std::move(this->back()));
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
+ this->setEnd(this->end()+1);
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = ::std::move(*EltPtr);
+ return I;
+ }
+
+ iterator insert(iterator I, const T &Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ this->push_back(Elt);
+ return this->end()-1;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ if (this->EndX >= this->CapacityX) {
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
+ }
+ ::new ((void*) this->end()) T(std::move(this->back()));
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
+ this->setEnd(this->end()+1);
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ const T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = *EltPtr;
+ return I;
+ }
+
+ iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ if (I == this->end()) { // Important special case for empty vector.
+ append(NumToInsert, Elt);
+ return this->begin()+InsertElt;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ // Ensure there is enough space.
+ reserve(this->size() + NumToInsert);
+
+    // Recompute the iterator, since the reserve() above may have invalidated it.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(std::move_iterator<iterator>(this->end() - NumToInsert),
+ std::move_iterator<iterator>(this->end()));
+
+      // Move the existing elements that get replaced.
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ std::fill_n(I, NumToInsert, Elt);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Move over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->setEnd(this->end() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+ // Replace the overwritten part.
+ std::fill_n(I, NumOverwritten, Elt);
+
+ // Insert the non-overwritten middle part.
+ std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
+ return I;
+ }
+
+ template<typename ItTy>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ if (I == this->end()) { // Important special case for empty vector.
+ append(From, To);
+ return this->begin()+InsertElt;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ size_t NumToInsert = std::distance(From, To);
+
+ // Ensure there is enough space.
+ reserve(this->size() + NumToInsert);
+
+    // Recompute the iterator, since the reserve() above may have invalidated it.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(std::move_iterator<iterator>(this->end() - NumToInsert),
+ std::move_iterator<iterator>(this->end()));
+
+      // Move the existing elements that get replaced.
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ std::copy(From, To, I);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Move over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->setEnd(this->end() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
+
+ // Replace the overwritten part.
+ for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+ *J = *From;
+ ++J; ++From;
+ }
+
+ // Insert the non-overwritten middle part.
+ this->uninitialized_copy(From, To, OldEnd);
+ return I;
+ }
+
+ void insert(iterator I, std::initializer_list<T> IL) {
+ insert(I, IL.begin(), IL.end());
+ }
+
+ template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
+ if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
+ this->grow();
+ ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
+ this->setEnd(this->end() + 1);
+ }
+
+ SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+ SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+
+ bool operator==(const SmallVectorImpl &RHS) const {
+ if (this->size() != RHS.size()) return false;
+ return std::equal(this->begin(), this->end(), RHS.begin());
+ }
+ bool operator!=(const SmallVectorImpl &RHS) const {
+ return !(*this == RHS);
+ }
+
+ bool operator<(const SmallVectorImpl &RHS) const {
+ return std::lexicographical_compare(this->begin(), this->end(),
+ RHS.begin(), RHS.end());
+ }
+
+ /// Set the array size to \p N, which the current array must have enough
+ /// capacity for.
+ ///
+ /// This does not construct or destroy any elements in the vector.
+ ///
+ /// Clients can use this in conjunction with capacity() to write past the end
+ /// of the buffer when they know that more elements are available, and only
+  /// update the size later. This avoids the cost of value-initializing elements
+ /// which will only be overwritten.
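+  ///
+  /// For example (an illustrative sketch; readRaw is a hypothetical producer):
+  /// \code
+  ///   SmallVector<char, 128> Buf;
+  ///   Buf.reserve(4096);
+  ///   size_t N = readRaw(Buf.begin(), Buf.capacity());
+  ///   Buf.set_size(N);
+  /// \endcode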
+ void set_size(size_type N) {
+ assert(N <= this->capacity());
+ this->setEnd(this->begin() + N);
+ }
+};
+
+
+template <typename T>
+void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
+ if (this == &RHS) return;
+
+ // We can only avoid copying elements if neither vector is small.
+ if (!this->isSmall() && !RHS.isSmall()) {
+ std::swap(this->BeginX, RHS.BeginX);
+ std::swap(this->EndX, RHS.EndX);
+ std::swap(this->CapacityX, RHS.CapacityX);
+ return;
+ }
+ if (RHS.size() > this->capacity())
+ this->grow(RHS.size());
+ if (this->size() > RHS.capacity())
+ RHS.grow(this->size());
+
+ // Swap the shared elements.
+ size_t NumShared = this->size();
+ if (NumShared > RHS.size()) NumShared = RHS.size();
+ for (size_type i = 0; i != NumShared; ++i)
+ std::swap((*this)[i], RHS[i]);
+
+ // Copy over the extra elts.
+ if (this->size() > RHS.size()) {
+ size_t EltDiff = this->size() - RHS.size();
+ this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
+ RHS.setEnd(RHS.end()+EltDiff);
+ this->destroy_range(this->begin()+NumShared, this->end());
+ this->setEnd(this->begin()+NumShared);
+ } else if (RHS.size() > this->size()) {
+ size_t EltDiff = RHS.size() - this->size();
+ this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
+ this->setEnd(this->end() + EltDiff);
+ this->destroy_range(RHS.begin()+NumShared, RHS.end());
+ RHS.setEnd(RHS.begin()+NumShared);
+ }
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::
+ operator=(const SmallVectorImpl<T> &RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd;
+ if (RHSSize)
+ NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
+ else
+ NewEnd = this->begin();
+
+ // Destroy excess elements.
+ this->destroy_range(NewEnd, this->end());
+
+ // Trim.
+ this->setEnd(NewEnd);
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: don't do this if they're efficiently moveable.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->destroy_range(this->begin(), this->end());
+ this->setEnd(this->begin());
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
+ }
+
+ // Copy construct the new elements in place.
+ this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->setEnd(this->begin()+RHSSize);
+ return *this;
+}
+
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If the RHS isn't small, clear this vector and then steal its buffer.
+ if (!RHS.isSmall()) {
+ this->destroy_range(this->begin(), this->end());
+ if (!this->isSmall()) free(this->begin());
+ this->BeginX = RHS.BeginX;
+ this->EndX = RHS.EndX;
+ this->CapacityX = RHS.CapacityX;
+ RHS.resetToSmall();
+ return *this;
+ }
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd = this->begin();
+ if (RHSSize)
+ NewEnd = this->move(RHS.begin(), RHS.end(), NewEnd);
+
+ // Destroy excess elements and trim the bounds.
+ this->destroy_range(NewEnd, this->end());
+ this->setEnd(NewEnd);
+
+ // Clear the RHS.
+ RHS.clear();
+
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: this may not actually make any sense if we can efficiently move
+ // elements.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->destroy_range(this->begin(), this->end());
+ this->setEnd(this->begin());
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ this->move(RHS.begin(), RHS.begin()+CurSize, this->begin());
+ }
+
+ // Move-construct the new elements in place.
+ this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->setEnd(this->begin()+RHSSize);
+
+ RHS.clear();
+ return *this;
+}
+
+/// Storage for the SmallVector elements which aren't contained in
+/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
+/// element is in the base class. This is specialized for the N=1 and N=0 cases
+/// to avoid allocating unnecessary storage.
+template <typename T, unsigned N>
+struct SmallVectorStorage {
+ typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
+};
+template <typename T> struct SmallVectorStorage<T, 1> {};
+template <typename T> struct SmallVectorStorage<T, 0> {};
+
+/// This is a 'vector' (really, a variable-sized array), optimized
+/// for the case when the array is small. It contains some number of elements
+/// in-place, which allows it to avoid heap allocation when the actual number of
+/// elements is below that threshold. This allows normal "small" cases to be
+/// fast without losing generality for large inputs.
+///
+/// Note that this does not attempt to be exception safe.
+///
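+/// A minimal usage sketch (illustrative only):
+/// \code
+///   SmallVector<int, 8> V;    // up to 8 elements stored inline
+///   for (int i = 0; i != 4; ++i)
+///     V.push_back(i);
+///   V.insert(V.begin(), 42);  // heap allocation only once 8 is exceeded
+/// \endcode
+///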
+template <typename T, unsigned N>
+class SmallVector : public SmallVectorImpl<T> {
+ /// Inline space for elements which aren't stored in the base class.
+ SmallVectorStorage<T, N> Storage;
+public:
+ SmallVector() : SmallVectorImpl<T>(N) {
+ }
+
+ explicit SmallVector(size_t Size, const T &Value = T())
+ : SmallVectorImpl<T>(N) {
+ this->assign(Size, Value);
+ }
+
+ template<typename ItTy>
+ SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
+ this->append(S, E);
+ }
+
+ template <typename RangeTy>
+ explicit SmallVector(const llvm::iterator_range<RangeTy> R)
+ : SmallVectorImpl<T>(N) {
+ this->append(R.begin(), R.end());
+ }
+
+ SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
+ this->assign(IL);
+ }
+
+ SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(RHS);
+ }
+
+ const SmallVector &operator=(const SmallVector &RHS) {
+ SmallVectorImpl<T>::operator=(RHS);
+ return *this;
+ }
+
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+
+ SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+
+ const SmallVector &operator=(std::initializer_list<T> IL) {
+ this->assign(IL);
+ return *this;
+ }
+};
+
+template<typename T, unsigned N>
+static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
+ return X.capacity_in_bytes();
+}
+
+} // End llvm namespace
+
+namespace std {
+ /// Implement std::swap in terms of SmallVector swap.
+ template<typename T>
+ inline void
+ swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
+ LHS.swap(RHS);
+ }
+
+ /// Implement std::swap in terms of SmallVector swap.
+ template<typename T, unsigned N>
+ inline void
+ swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
+ LHS.swap(RHS);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
new file mode 100644
index 0000000..e6e7241
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h
@@ -0,0 +1,904 @@
+//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector -*- C++ -*- ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseBitVector class. See the doxygen comment for
+// SparseBitVector for more details on the algorithm used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEBITVECTOR_H
+#define LLVM_ADT_SPARSEBITVECTOR_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <climits>
+
+namespace llvm {
+
+/// SparseBitVector is an implementation of a bitvector that is sparse by only
+/// storing the elements that have non-zero bits set. In order to make this
+/// fast for the most common cases, SparseBitVector is implemented as a linked
+/// list of SparseBitVectorElements. We maintain a pointer to the last
+/// SparseBitVectorElement accessed (in the form of a list iterator), in order
+/// to make multiple in-order test/set constant time after the first one is
+/// executed. Note that using vectors to store SparseBitVectorElements does
+/// not work out very well because it causes insertion in the middle to take
+/// enormous amounts of time with a large amount of bits. Other structures that
+/// have better worst cases for insertion in the middle (various balanced trees,
+/// etc) do not perform as well in practice as a linked list with this iterator
+/// kept up to date. They are also significantly more memory intensive.
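+///
+/// A brief usage sketch (illustrative only):
+/// \code
+///   SparseBitVector<> Live;
+///   Live.set(5);
+///   Live.set(100000);             // far-apart bits remain cheap to store
+///   SparseBitVector<> Gen;
+///   Gen.set(5);
+///   bool Changed = (Live |= Gen); // union; false here, 5 was already set
+///   bool IsSet = Live.test(100000);
+/// \endcode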
+
+template <unsigned ElementSize = 128>
+struct SparseBitVectorElement
+ : public ilist_node<SparseBitVectorElement<ElementSize> > {
+public:
+ typedef unsigned long BitWord;
+ typedef unsigned size_type;
+ enum {
+ BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT,
+ BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE,
+ BITS_PER_ELEMENT = ElementSize
+ };
+
+private:
+ // Index of Element in terms of where first bit starts.
+ unsigned ElementIndex;
+ BitWord Bits[BITWORDS_PER_ELEMENT];
+ // Needed for sentinels
+ friend struct ilist_sentinel_traits<SparseBitVectorElement>;
+ SparseBitVectorElement() {
+ ElementIndex = ~0U;
+ memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+ }
+
+public:
+ explicit SparseBitVectorElement(unsigned Idx) {
+ ElementIndex = Idx;
+ memset(&Bits[0], 0, sizeof (BitWord) * BITWORDS_PER_ELEMENT);
+ }
+
+ // Comparison.
+ bool operator==(const SparseBitVectorElement &RHS) const {
+ if (ElementIndex != RHS.ElementIndex)
+ return false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != RHS.Bits[i])
+ return false;
+ return true;
+ }
+
+ bool operator!=(const SparseBitVectorElement &RHS) const {
+ return !(*this == RHS);
+ }
+
+ // Return the bits that make up word Idx in our element.
+ BitWord word(unsigned Idx) const {
+    assert(Idx < BITWORDS_PER_ELEMENT);
+ return Bits[Idx];
+ }
+
+ unsigned index() const {
+ return ElementIndex;
+ }
+
+ bool empty() const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i])
+ return false;
+ return true;
+ }
+
+ void set(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE);
+ }
+
+  bool test_and_set(unsigned Idx) {
+ bool old = test(Idx);
+ if (!old) {
+ set(Idx);
+ return true;
+ }
+ return false;
+ }
+
+ void reset(unsigned Idx) {
+ Bits[Idx / BITWORD_SIZE] &= ~(1L << (Idx % BITWORD_SIZE));
+ }
+
+ bool test(unsigned Idx) const {
+ return Bits[Idx / BITWORD_SIZE] & (1L << (Idx % BITWORD_SIZE));
+ }
+
+ size_type count() const {
+ unsigned NumBits = 0;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ NumBits += countPopulation(Bits[i]);
+ return NumBits;
+ }
+
+ /// find_first - Returns the index of the first set bit.
+ int find_first() const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ llvm_unreachable("Illegal empty element");
+ }
+
+ /// find_next - Returns the index of the next set bit starting from the
+ /// "Curr" bit. Returns -1 if the next set bit is not found.
+ int find_next(unsigned Curr) const {
+ if (Curr >= BITS_PER_ELEMENT)
+ return -1;
+
+ unsigned WordPos = Curr / BITWORD_SIZE;
+ unsigned BitPos = Curr % BITWORD_SIZE;
+ BitWord Copy = Bits[WordPos];
+    assert(WordPos < BITWORDS_PER_ELEMENT &&
+           "Word position outside of element");
+
+ // Mask off previous bits.
+ Copy &= ~0UL << BitPos;
+
+ if (Copy != 0)
+ return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
+
+ // Check subsequent words.
+ for (unsigned i = WordPos+1; i < BITWORDS_PER_ELEMENT; ++i)
+ if (Bits[i] != 0)
+ return i * BITWORD_SIZE + countTrailingZeros(Bits[i]);
+ return -1;
+ }
+
+ // Union this element with RHS and return true if this one changed.
+ bool unionWith(const SparseBitVectorElement &RHS) {
+ bool changed = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] |= RHS.Bits[i];
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ return changed;
+ }
+
+ // Return true if we have any bits in common with RHS
+ bool intersects(const SparseBitVectorElement &RHS) const {
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ if (RHS.Bits[i] & Bits[i])
+ return true;
+ }
+ return false;
+ }
+
+ // Intersect this Element with RHS and return true if this one changed.
+ // BecameZero is set to true if this element became all-zero bits.
+ bool intersectWith(const SparseBitVectorElement &RHS,
+ bool &BecameZero) {
+ bool changed = false;
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] &= RHS.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ BecameZero = allzero;
+ return changed;
+ }
+
+ // Intersect this Element with the complement of RHS and return true if this
+ // one changed. BecameZero is set to true if this element became all-zero
+ // bits.
+ bool intersectWithComplement(const SparseBitVectorElement &RHS,
+ bool &BecameZero) {
+ bool changed = false;
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ BitWord old = changed ? 0 : Bits[i];
+
+ Bits[i] &= ~RHS.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+
+ if (!changed && old != Bits[i])
+ changed = true;
+ }
+ BecameZero = allzero;
+ return changed;
+ }
+
+ // Three argument version of intersectWithComplement that intersects
+ // RHS1 & ~RHS2 into this element
+ void intersectWithComplement(const SparseBitVectorElement &RHS1,
+ const SparseBitVectorElement &RHS2,
+ bool &BecameZero) {
+ bool allzero = true;
+
+ BecameZero = false;
+ for (unsigned i = 0; i < BITWORDS_PER_ELEMENT; ++i) {
+ Bits[i] = RHS1.Bits[i] & ~RHS2.Bits[i];
+ if (Bits[i] != 0)
+ allzero = false;
+ }
+ BecameZero = allzero;
+ }
+};
+
+template <unsigned ElementSize>
+struct ilist_traits<SparseBitVectorElement<ElementSize> >
+ : public ilist_default_traits<SparseBitVectorElement<ElementSize> > {
+ typedef SparseBitVectorElement<ElementSize> Element;
+
+ Element *createSentinel() const { return static_cast<Element *>(&Sentinel); }
+ static void destroySentinel(Element *) {}
+
+ Element *provideInitialHead() const { return createSentinel(); }
+ Element *ensureHead(Element *) const { return createSentinel(); }
+ static void noteHead(Element *, Element *) {}
+
+private:
+ mutable ilist_half_node<Element> Sentinel;
+};
+
+template <unsigned ElementSize = 128>
+class SparseBitVector {
+ typedef ilist<SparseBitVectorElement<ElementSize> > ElementList;
+ typedef typename ElementList::iterator ElementListIter;
+ typedef typename ElementList::const_iterator ElementListConstIter;
+ enum {
+ BITWORD_SIZE = SparseBitVectorElement<ElementSize>::BITWORD_SIZE
+ };
+
+ // Pointer to our current Element.
+ ElementListIter CurrElementIter;
+ ElementList Elements;
+
+ // This is like std::lower_bound, except we do linear searching from the
+ // current position.
+ ElementListIter FindLowerBound(unsigned ElementIndex) {
+
+ if (Elements.empty()) {
+ CurrElementIter = Elements.begin();
+ return Elements.begin();
+ }
+
+ // Make sure our current iterator is valid.
+ if (CurrElementIter == Elements.end())
+ --CurrElementIter;
+
+ // Search from our current iterator, either backwards or forwards,
+ // depending on what element we are looking for.
+ ElementListIter ElementIter = CurrElementIter;
+ if (CurrElementIter->index() == ElementIndex) {
+ return ElementIter;
+ } else if (CurrElementIter->index() > ElementIndex) {
+ while (ElementIter != Elements.begin()
+ && ElementIter->index() > ElementIndex)
+ --ElementIter;
+ } else {
+ while (ElementIter != Elements.end() &&
+ ElementIter->index() < ElementIndex)
+ ++ElementIter;
+ }
+ CurrElementIter = ElementIter;
+ return ElementIter;
+ }
+
+  // Iterator to walk the set bits in the bitmap. This iterator is a lot
+  // uglier than it otherwise would be, in order to be efficient.
+ class SparseBitVectorIterator {
+ private:
+ bool AtEnd;
+
+ const SparseBitVector<ElementSize> *BitVector;
+
+ // Current element inside of bitmap.
+ ElementListConstIter Iter;
+
+ // Current bit number inside of our bitmap.
+ unsigned BitNumber;
+
+ // Current word number inside of our element.
+ unsigned WordNumber;
+
+ // Current bits from the element.
+ typename SparseBitVectorElement<ElementSize>::BitWord Bits;
+
+ // Move our iterator to the first non-zero bit in the bitmap.
+ void AdvanceToFirstNonZero() {
+ if (AtEnd)
+ return;
+ if (BitVector->Elements.empty()) {
+ AtEnd = true;
+ return;
+ }
+ Iter = BitVector->Elements.begin();
+ BitNumber = Iter->index() * ElementSize;
+ unsigned BitPos = Iter->find_first();
+ BitNumber += BitPos;
+ WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= BitPos % BITWORD_SIZE;
+ }
+
+ // Move our iterator to the next non-zero bit.
+ void AdvanceToNextNonZero() {
+ if (AtEnd)
+ return;
+
+ while (Bits && !(Bits & 1)) {
+ Bits >>= 1;
+ BitNumber += 1;
+ }
+
+ // See if we ran out of Bits in this word.
+ if (!Bits) {
+        int NextSetBitNumber = Iter->find_next(BitNumber % ElementSize);
+ // If we ran out of set bits in this element, move to next element.
+ if (NextSetBitNumber == -1 || (BitNumber % ElementSize == 0)) {
+ ++Iter;
+ WordNumber = 0;
+
+ // We may run out of elements in the bitmap.
+ if (Iter == BitVector->Elements.end()) {
+ AtEnd = true;
+ return;
+ }
+ // Set up for next non-zero word in bitmap.
+ BitNumber = Iter->index() * ElementSize;
+ NextSetBitNumber = Iter->find_first();
+ BitNumber += NextSetBitNumber;
+ WordNumber = (BitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= NextSetBitNumber % BITWORD_SIZE;
+ } else {
+ WordNumber = (NextSetBitNumber % ElementSize) / BITWORD_SIZE;
+ Bits = Iter->word(WordNumber);
+ Bits >>= NextSetBitNumber % BITWORD_SIZE;
+ BitNumber = Iter->index() * ElementSize;
+ BitNumber += NextSetBitNumber;
+ }
+ }
+ }
+ public:
+ // Preincrement.
+ inline SparseBitVectorIterator& operator++() {
+ ++BitNumber;
+ Bits >>= 1;
+ AdvanceToNextNonZero();
+ return *this;
+ }
+
+ // Postincrement.
+ inline SparseBitVectorIterator operator++(int) {
+ SparseBitVectorIterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // Return the current set bit number.
+ unsigned operator*() const {
+ return BitNumber;
+ }
+
+ bool operator==(const SparseBitVectorIterator &RHS) const {
+ // If they are both at the end, ignore the rest of the fields.
+ if (AtEnd && RHS.AtEnd)
+ return true;
+ // Otherwise they are the same if they have the same bit number and
+ // bitmap.
+ return AtEnd == RHS.AtEnd && RHS.BitNumber == BitNumber;
+ }
+
+ bool operator!=(const SparseBitVectorIterator &RHS) const {
+ return !(*this == RHS);
+ }
+
+ SparseBitVectorIterator(): BitVector(nullptr) {
+ }
+
+ SparseBitVectorIterator(const SparseBitVector<ElementSize> *RHS,
+ bool end = false):BitVector(RHS) {
+ Iter = BitVector->Elements.begin();
+ BitNumber = 0;
+ Bits = 0;
+ WordNumber = ~0;
+ AtEnd = end;
+ AdvanceToFirstNonZero();
+ }
+ };
+public:
+ typedef SparseBitVectorIterator iterator;
+
+  SparseBitVector() {
+    CurrElementIter = Elements.begin();
+  }
+
+  ~SparseBitVector() {}
+
+ // SparseBitVector copy ctor.
+ SparseBitVector(const SparseBitVector &RHS) {
+ ElementListConstIter ElementIter = RHS.Elements.begin();
+ while (ElementIter != RHS.Elements.end()) {
+ Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+ ++ElementIter;
+ }
+
+    CurrElementIter = Elements.begin();
+ }
+
+ // Clear.
+ void clear() {
+ Elements.clear();
+ }
+
+ // Assignment
+ SparseBitVector& operator=(const SparseBitVector& RHS) {
+ if (this == &RHS)
+ return *this;
+
+ Elements.clear();
+
+ ElementListConstIter ElementIter = RHS.Elements.begin();
+ while (ElementIter != RHS.Elements.end()) {
+ Elements.push_back(SparseBitVectorElement<ElementSize>(*ElementIter));
+ ++ElementIter;
+ }
+
+    CurrElementIter = Elements.begin();
+
+ return *this;
+ }
+
+ // Test, Reset, and Set a bit in the bitmap.
+ bool test(unsigned Idx) {
+ if (Elements.empty())
+ return false;
+
+ unsigned ElementIndex = Idx / ElementSize;
+ ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+ // If we can't find an element that is supposed to contain this bit, there
+ // is nothing more to do.
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex)
+ return false;
+ return ElementIter->test(Idx % ElementSize);
+ }
+
+ void reset(unsigned Idx) {
+ if (Elements.empty())
+ return;
+
+ unsigned ElementIndex = Idx / ElementSize;
+ ElementListIter ElementIter = FindLowerBound(ElementIndex);
+
+ // If we can't find an element that is supposed to contain this bit, there
+ // is nothing more to do.
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex)
+ return;
+ ElementIter->reset(Idx % ElementSize);
+
+ // When the element is zeroed out, delete it.
+ if (ElementIter->empty()) {
+ ++CurrElementIter;
+ Elements.erase(ElementIter);
+ }
+ }
+
+ void set(unsigned Idx) {
+ unsigned ElementIndex = Idx / ElementSize;
+ SparseBitVectorElement<ElementSize> *Element;
+ ElementListIter ElementIter;
+ if (Elements.empty()) {
+ Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
+ ElementIter = Elements.insert(Elements.end(), Element);
+
+ } else {
+ ElementIter = FindLowerBound(ElementIndex);
+
+ if (ElementIter == Elements.end() ||
+ ElementIter->index() != ElementIndex) {
+ Element = new SparseBitVectorElement<ElementSize>(ElementIndex);
+        // FindLowerBound may have stopped at an element whose index is smaller
+        // than ours (e.g. at the beginning of the list), in which case we need
+        // to insert right after it; move the current iterator forward one,
+        // because insert inserts before its iterator argument.
+ if (ElementIter != Elements.end() &&
+ ElementIter->index() < ElementIndex)
+ ElementIter = Elements.insert(++ElementIter, Element);
+ else
+ ElementIter = Elements.insert(ElementIter, Element);
+ }
+ }
+ CurrElementIter = ElementIter;
+
+ ElementIter->set(Idx % ElementSize);
+ }
+
+  bool test_and_set(unsigned Idx) {
+ bool old = test(Idx);
+ if (!old) {
+ set(Idx);
+ return true;
+ }
+ return false;
+ }
+
+ bool operator!=(const SparseBitVector &RHS) const {
+ return !(*this == RHS);
+ }
+
+ bool operator==(const SparseBitVector &RHS) const {
+ ElementListConstIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ for (; Iter1 != Elements.end() && Iter2 != RHS.Elements.end();
+ ++Iter1, ++Iter2) {
+ if (*Iter1 != *Iter2)
+ return false;
+ }
+ return Iter1 == Elements.end() && Iter2 == RHS.Elements.end();
+ }
+
+ // Union our bitmap with the RHS and return true if we changed.
+ bool operator|=(const SparseBitVector &RHS) {
+ if (this == &RHS)
+ return false;
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // If RHS is empty, we are done
+ if (RHS.Elements.empty())
+ return false;
+
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end() || Iter1->index() > Iter2->index()) {
+ Elements.insert(Iter1,
+ new SparseBitVectorElement<ElementSize>(*Iter2));
+ ++Iter2;
+ changed = true;
+ } else if (Iter1->index() == Iter2->index()) {
+ changed |= Iter1->unionWith(*Iter2);
+ ++Iter1;
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ // Intersect our bitmap with the RHS and return true if ours changed.
+ bool operator&=(const SparseBitVector &RHS) {
+ if (this == &RHS)
+ return false;
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // Check if both bitmaps are empty.
+ if (Elements.empty() && RHS.Elements.empty())
+ return false;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end()) {
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero;
+ changed |= Iter1->intersectWith(*Iter2, BecameZero);
+ if (BecameZero) {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ } else {
+ ++Iter1;
+ }
+ ++Iter2;
+ } else {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ changed = true;
+ }
+ }
+ if (Iter1 != Elements.end()) {
+ Elements.erase(Iter1, Elements.end());
+ changed = true;
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ // Intersect our bitmap with the complement of the RHS and return true
+ // if ours changed.
+ bool intersectWithComplement(const SparseBitVector &RHS) {
+ if (this == &RHS) {
+ if (!empty()) {
+ clear();
+ return true;
+ }
+ return false;
+ }
+
+ bool changed = false;
+ ElementListIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // If either our bitmap or RHS is empty, we are done
+ if (Elements.empty() || RHS.Elements.empty())
+ return false;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end()) {
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero;
+ changed |= Iter1->intersectWithComplement(*Iter2, BecameZero);
+ if (BecameZero) {
+ ElementListIter IterTmp = Iter1;
+ ++Iter1;
+ Elements.erase(IterTmp);
+ } else {
+ ++Iter1;
+ }
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ CurrElementIter = Elements.begin();
+ return changed;
+ }
+
+  bool intersectWithComplement(const SparseBitVector<ElementSize> *RHS) {
+ return intersectWithComplement(*RHS);
+ }
+
+ // Three argument version of intersectWithComplement.
+ // Result of RHS1 & ~RHS2 is stored into this bitmap.
+ void intersectWithComplement(const SparseBitVector<ElementSize> &RHS1,
+ const SparseBitVector<ElementSize> &RHS2)
+ {
+ if (this == &RHS1) {
+ intersectWithComplement(RHS2);
+ return;
+ } else if (this == &RHS2) {
+ SparseBitVector RHS2Copy(RHS2);
+ intersectWithComplement(RHS1, RHS2Copy);
+ return;
+ }
+
+ Elements.clear();
+ CurrElementIter = Elements.begin();
+ ElementListConstIter Iter1 = RHS1.Elements.begin();
+ ElementListConstIter Iter2 = RHS2.Elements.begin();
+
+ // If RHS1 is empty, we are done
+ // If RHS2 is empty, we still have to copy RHS1
+ if (RHS1.Elements.empty())
+ return;
+
+ // Loop through, intersecting as we go, erasing elements when necessary.
+ while (Iter2 != RHS2.Elements.end()) {
+ if (Iter1 == RHS1.Elements.end())
+ return;
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ bool BecameZero = false;
+ SparseBitVectorElement<ElementSize> *NewElement =
+ new SparseBitVectorElement<ElementSize>(Iter1->index());
+ NewElement->intersectWithComplement(*Iter1, *Iter2, BecameZero);
+ if (!BecameZero) {
+ Elements.push_back(NewElement);
+        } else
+          delete NewElement;
+ ++Iter1;
+ ++Iter2;
+ } else {
+ SparseBitVectorElement<ElementSize> *NewElement =
+ new SparseBitVectorElement<ElementSize>(*Iter1);
+ Elements.push_back(NewElement);
+ ++Iter1;
+ }
+ }
+
+    // Copy the remaining elements of RHS1.
+ while (Iter1 != RHS1.Elements.end()) {
+ SparseBitVectorElement<ElementSize> *NewElement =
+ new SparseBitVectorElement<ElementSize>(*Iter1);
+ Elements.push_back(NewElement);
+ ++Iter1;
+ }
+ }
+
+ void intersectWithComplement(const SparseBitVector<ElementSize> *RHS1,
+ const SparseBitVector<ElementSize> *RHS2) {
+ intersectWithComplement(*RHS1, *RHS2);
+ }
+
+ bool intersects(const SparseBitVector<ElementSize> *RHS) const {
+ return intersects(*RHS);
+ }
+
+ // Return true if we share any bits in common with RHS
+ bool intersects(const SparseBitVector<ElementSize> &RHS) const {
+ ElementListConstIter Iter1 = Elements.begin();
+ ElementListConstIter Iter2 = RHS.Elements.begin();
+
+ // Check if both bitmaps are empty.
+ if (Elements.empty() && RHS.Elements.empty())
+ return false;
+
+    // Loop through, intersecting as we go, stopping when we hit bits in common.
+ while (Iter2 != RHS.Elements.end()) {
+ if (Iter1 == Elements.end())
+ return false;
+
+ if (Iter1->index() > Iter2->index()) {
+ ++Iter2;
+ } else if (Iter1->index() == Iter2->index()) {
+ if (Iter1->intersects(*Iter2))
+ return true;
+ ++Iter1;
+ ++Iter2;
+ } else {
+ ++Iter1;
+ }
+ }
+ return false;
+ }
+
+ // Return true iff all bits set in this SparseBitVector are
+ // also set in RHS.
+ bool contains(const SparseBitVector<ElementSize> &RHS) const {
+ SparseBitVector<ElementSize> Result(*this);
+ Result &= RHS;
+ return (Result == RHS);
+ }
+
+ // Return the first set bit in the bitmap. Return -1 if no bits are set.
+ int find_first() const {
+ if (Elements.empty())
+ return -1;
+ const SparseBitVectorElement<ElementSize> &First = *(Elements.begin());
+ return (First.index() * ElementSize) + First.find_first();
+ }
+
+ // Return true if the SparseBitVector is empty
+ bool empty() const {
+ return Elements.empty();
+ }
+
+ unsigned count() const {
+ unsigned BitCount = 0;
+ for (ElementListConstIter Iter = Elements.begin();
+ Iter != Elements.end();
+ ++Iter)
+ BitCount += Iter->count();
+
+ return BitCount;
+ }
+ iterator begin() const {
+ return iterator(this);
+ }
+
+ iterator end() const {
+ return iterator(this, true);
+ }
+};
+
+// Convenience overloads that allow |= and &= to be used with a pointer on
+// either side, without explicit dereferencing in user code.
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> *RHS) {
+ return LHS |= *RHS;
+}
+
+template <unsigned ElementSize>
+inline bool operator |=(SparseBitVector<ElementSize> *LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ return LHS->operator|=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> *LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ return LHS->operator&=(RHS);
+}
+
+template <unsigned ElementSize>
+inline bool operator &=(SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> *RHS) {
+ return LHS &= *RHS;
+}
+
+// Convenience functions for infix union, intersection, difference operators.
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator|(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result(LHS);
+ Result |= RHS;
+ return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator&(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result(LHS);
+ Result &= RHS;
+ return Result;
+}
+
+template <unsigned ElementSize>
+inline SparseBitVector<ElementSize>
+operator-(const SparseBitVector<ElementSize> &LHS,
+ const SparseBitVector<ElementSize> &RHS) {
+ SparseBitVector<ElementSize> Result;
+ Result.intersectWithComplement(LHS, RHS);
+ return Result;
+}
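+
+// For example (illustrative only):
+//
+//   SparseBitVector<> A, B;
+//   A.set(1); A.set(2);
+//   B.set(2);
+//   SparseBitVector<> U = A | B; // contains {1, 2}
+//   SparseBitVector<> D = A - B; // contains {1}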
+
+// Dump a SparseBitVector to a stream
+template <unsigned ElementSize>
+void dump(const SparseBitVector<ElementSize> &LHS, raw_ostream &out) {
+ out << "[";
+
+ typename SparseBitVector<ElementSize>::iterator bi = LHS.begin(),
+ be = LHS.end();
+ if (bi != be) {
+ out << *bi;
+ for (++bi; bi != be; ++bi) {
+ out << " " << *bi;
+ }
+ }
+ out << "]\n";
+}
+} // end namespace llvm
+
+#endif // LLVM_ADT_SPARSEBITVECTOR_H
diff --git a/contrib/llvm/include/llvm/ADT/SparseMultiSet.h b/contrib/llvm/include/llvm/ADT/SparseMultiSet.h
new file mode 100644
index 0000000..e3aa258
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SparseMultiSet.h
@@ -0,0 +1,518 @@
+//===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseMultiSet class, which adds multiset behavior to
+// the SparseSet.
+//
+// A sparse multiset holds a small number of objects identified by integer keys
+// from a moderately sized universe. The sparse multiset uses more memory than
+// other containers in order to provide faster operations. Any key can map to
+// multiple values. A SparseMultiSetNode class is provided, which serves as a
+// convenient base class for the contents of a SparseMultiSet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSEMULTISET_H
+#define LLVM_ADT_SPARSEMULTISET_H
+
+#include "llvm/ADT/SparseSet.h"
+
+namespace llvm {
+
+/// Fast multiset implementation for objects that can be identified by small
+/// unsigned keys.
+///
+/// SparseMultiSet allocates memory proportional to the size of the key
+/// universe, so it is not recommended for building composite data structures.
+/// It is useful for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseMultiSet provides a constant-time
+/// clear() that is as cheap as clearing a vector. The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table. The iteration order doesn't depend on numerical key values; it
+/// depends only on the order of insert() and erase() operations, so iteration
+/// order is insertion order. Iteration is only provided over elements with
+/// equivalent keys, but iterators are bidirectional.
+///
+/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast iteration
+/// independent of the size of the universe.
+///
+/// SparseMultiSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector. Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT template
+/// parameter provides a space/speed tradeoff for sets holding many elements.
+///
+/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
+/// sparse array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
+/// lines, but the sparse array is 4x smaller. N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// Multiset behavior is implemented with doubly linked lists of values that
+/// are inlined in the dense vector. SparseMultiSet is a good choice when
+/// one desires a growable number of entries per key, as it will retain the
+/// SparseSet algorithmic properties despite being growable. Thus, it is often a
+/// better choice than a SparseSet of growable containers or a vector of
+/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
+/// the iterators don't point to the element erased), allowing for more
+/// intuitive and fast removal.
+///
+/// @tparam ValueT The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT An unsigned integer type. See above.
+///
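+/// A brief usage sketch (illustrative only; the default identity key functor
+/// is assumed):
+/// \code
+///   SparseMultiSet<unsigned> Set;
+///   Set.setUniverse(1024);     // all keys must be < 1024
+///   Set.insert(7);
+///   Set.insert(7);             // duplicate keys are allowed
+///   unsigned N = Set.count(7); // 2; linear in the entries for the key
+///   Set.eraseAll(7);
+/// \endcode
+///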
+template<typename ValueT,
+ typename KeyFunctorT = llvm::identity<unsigned>,
+ typename SparseT = uint8_t>
+class SparseMultiSet {
+ static_assert(std::numeric_limits<SparseT>::is_integer &&
+ !std::numeric_limits<SparseT>::is_signed,
+ "SparseT must be an unsigned integer type");
+
+ /// The actual data that's stored, as a doubly-linked list implemented via
+  /// indices into the DenseVector. The doubly linked list is circular in its
+  /// Prev indices and INVALID-terminated in its Next indices. This
+ /// provides efficient access to list tails. These nodes can also be
+ /// tombstones, in which case they are actually nodes in a single-linked
+ /// freelist of recyclable slots.
+ struct SMSNode {
+ static const unsigned INVALID = ~0U;
+
+ ValueT Data;
+ unsigned Prev;
+ unsigned Next;
+
+ SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { }
+
+ /// List tails have invalid Nexts.
+ bool isTail() const {
+ return Next == INVALID;
+ }
+
+ /// Whether this node is a tombstone node, and thus is in our freelist.
+ bool isTombstone() const {
+ return Prev == INVALID;
+ }
+
+ /// Since the list is circular in Prev, all non-tombstone nodes have a valid
+ /// Prev.
+ bool isValid() const { return Prev != INVALID; }
+ };
+
+ typedef typename KeyFunctorT::argument_type KeyT;
+ typedef SmallVector<SMSNode, 8> DenseT;
+ DenseT Dense;
+ SparseT *Sparse;
+ unsigned Universe;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+  /// We have a built-in recycler for reusing tombstone slots. The recycler
+  /// threads a singly-linked free list through the tombstone slots, allowing
+  /// quick erasure, iterator preservation, and a constant-time size().
+ unsigned FreelistIdx;
+ unsigned NumFree;
+
+ unsigned sparseIndex(const ValueT &Val) const {
+ assert(ValIndexOf(Val) < Universe &&
+ "Invalid key in set. Did object mutate?");
+ return ValIndexOf(Val);
+ }
+ unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
+
+ // Disable copy construction and assignment.
+ // This data structure is not meant to be used that way.
+ SparseMultiSet(const SparseMultiSet&) = delete;
+ SparseMultiSet &operator=(const SparseMultiSet&) = delete;
+
+  /// Whether the given entry is the head of the list. A list head's previous
+  /// pointer points to the tail of the list, allowing for efficient access to
+  /// the list tail. D must be a valid entry node.
+ bool isHead(const SMSNode &D) const {
+ assert(D.isValid() && "Invalid node for head");
+ return Dense[D.Prev].isTail();
+ }
+
+ /// Whether the given entry is a singleton entry, i.e. the only entry with
+ /// that key.
+ bool isSingleton(const SMSNode &N) const {
+ assert(N.isValid() && "Invalid node for singleton");
+ // Is N its own predecessor?
+ return &Dense[N.Prev] == &N;
+ }
+
+ /// Add in the given SMSNode. Uses a free entry in our freelist if
+ /// available. Returns the index of the added node.
+ unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
+ if (NumFree == 0) {
+ Dense.push_back(SMSNode(V, Prev, Next));
+ return Dense.size() - 1;
+ }
+
+ // Peel off a free slot
+ unsigned Idx = FreelistIdx;
+ unsigned NextFree = Dense[Idx].Next;
+ assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
+
+ Dense[Idx] = SMSNode(V, Prev, Next);
+ FreelistIdx = NextFree;
+ --NumFree;
+ return Idx;
+ }
+
+ /// Make the current index a new tombstone. Pushes it onto the freelist.
+ void makeTombstone(unsigned Idx) {
+ Dense[Idx].Prev = SMSNode::INVALID;
+ Dense[Idx].Next = FreelistIdx;
+ FreelistIdx = Idx;
+ ++NumFree;
+ }
+
+public:
+ typedef ValueT value_type;
+ typedef ValueT &reference;
+ typedef const ValueT &const_reference;
+ typedef ValueT *pointer;
+ typedef const ValueT *const_pointer;
+ typedef unsigned size_type;
+
+ SparseMultiSet()
+ : Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}
+
+ ~SparseMultiSet() { free(Sparse); }
+
+ /// Set the universe size which determines the largest key the set can hold.
+ /// The universe must be sized before any elements can be added.
+ ///
+ /// @param U Universe size. All object keys must be less than U.
+ ///
+ void setUniverse(unsigned U) {
+ // It's not hard to resize the universe on a non-empty set, but it doesn't
+ // seem like a likely use case, so we can add that code when we need it.
+ assert(empty() && "Can only resize universe on an empty map");
+ // Hysteresis prevents needless reallocations.
+ if (U >= Universe/4 && U <= Universe)
+ return;
+ free(Sparse);
+ // The Sparse array doesn't actually need to be initialized, so malloc
+ // would be enough here, but that will cause tools like valgrind to
+ // complain about branching on uninitialized data.
+ Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
+ Universe = U;
+ }
+
+ /// Our iterators are iterators over the collection of objects that share a
+ /// key.
+ template<typename SMSPtrTy>
+ class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
+ ValueT> {
+ friend class SparseMultiSet;
+ SMSPtrTy SMS;
+ unsigned Idx;
+ unsigned SparseIdx;
+
+ iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
+ : SMS(P), Idx(I), SparseIdx(SI) { }
+
+ /// Whether our iterator has fallen outside our dense vector.
+ bool isEnd() const {
+ if (Idx == SMSNode::INVALID)
+ return true;
+
+ assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
+ return false;
+ }
+
+ /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
+ bool isKeyed() const { return SparseIdx < SMS->Universe; }
+
+ unsigned Prev() const { return SMS->Dense[Idx].Prev; }
+ unsigned Next() const { return SMS->Dense[Idx].Next; }
+
+ void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
+ void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
+
+ public:
+ typedef std::iterator<std::bidirectional_iterator_tag, ValueT> super;
+ typedef typename super::value_type value_type;
+ typedef typename super::difference_type difference_type;
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+ reference operator*() const {
+ assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
+ "Dereferencing iterator of invalid key or index");
+
+ return SMS->Dense[Idx].Data;
+ }
+ pointer operator->() const { return &operator*(); }
+
+ /// Comparison operators
+ bool operator==(const iterator_base &RHS) const {
+ // end compares equal
+ if (SMS == RHS.SMS && Idx == RHS.Idx) {
+ assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
+ "Same dense entry, but different keys?");
+ return true;
+ }
+
+ return false;
+ }
+
+ bool operator!=(const iterator_base &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// Increment and decrement operators
+ iterator_base &operator--() { // predecrement - Back up
+ assert(isKeyed() && "Decrementing an invalid iterator");
+ assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
+ "Decrementing head of list");
+
+ // If we're at the end, then issue a new find()
+ if (isEnd())
+ Idx = SMS->findIndex(SparseIdx).Prev();
+ else
+ Idx = Prev();
+
+ return *this;
+ }
+ iterator_base &operator++() { // preincrement - Advance
+ assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
+ Idx = Next();
+ return *this;
+ }
+ iterator_base operator--(int) { // postdecrement
+ iterator_base I(*this);
+ --*this;
+ return I;
+ }
+ iterator_base operator++(int) { // postincrement
+ iterator_base I(*this);
+ ++*this;
+ return I;
+ }
+ };
+ typedef iterator_base<SparseMultiSet *> iterator;
+ typedef iterator_base<const SparseMultiSet *> const_iterator;
+
+ // Convenience types
+ typedef std::pair<iterator, iterator> RangePair;
+
+  /// Returns an iterator past the end of this container. Note that such an
+  /// iterator cannot be decremented, but will compare equal to other end
+  /// iterators.
+ iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
+ const_iterator end() const {
+ return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
+ }
+
+ /// Returns true if the set is empty.
+ ///
+ /// This is not the same as BitVector::empty().
+ ///
+ bool empty() const { return size() == 0; }
+
+ /// Returns the number of elements in the set.
+ ///
+ /// This is not the same as BitVector::size() which returns the size of the
+ /// universe.
+ ///
+ size_type size() const {
+ assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
+ return Dense.size() - NumFree;
+ }
+
+ /// Clears the set. This is a very fast constant time operation.
+ ///
+ void clear() {
+ // Sparse does not need to be cleared, see find().
+ Dense.clear();
+ NumFree = 0;
+ FreelistIdx = SMSNode::INVALID;
+ }
+
+ /// Find an element by its index.
+ ///
+ /// @param Idx A valid index to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
+ const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+ for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
+ const unsigned FoundIdx = sparseIndex(Dense[i]);
+ // Check that we're pointing at the correct entry and that it is the head
+ // of a valid list.
+ if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
+ return iterator(this, i, Idx);
+ // Stride is 0 when SparseT >= unsigned. We don't need to loop.
+ if (!Stride)
+ break;
+ }
+ return end();
+ }
+
+ /// Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
+ return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
+ }
+
+  /// Returns the number of elements identified by Key. This will be linear in
+  /// the number of elements with that key.
+ size_type count(const KeyT &Key) const {
+ unsigned Ret = 0;
+ for (const_iterator It = find(Key); It != end(); ++It)
+ ++Ret;
+
+ return Ret;
+ }
+
+ /// Returns true if this set contains an element identified by Key.
+ bool contains(const KeyT &Key) const {
+ return find(Key) != end();
+ }
+
+  /// Return the head (getHead) or tail (getTail) of the list of entries for
+  /// Key, or end() if there are no entries for that key.
+ iterator getHead(const KeyT &Key) { return find(Key); }
+ iterator getTail(const KeyT &Key) {
+ iterator I = find(Key);
+ if (I != end())
+ I = iterator(this, I.Prev(), KeyIndexOf(Key));
+ return I;
+ }
+
+ /// The bounds of the range of items sharing Key K. First member is the head
+ /// of the list, and the second member is a decrementable end iterator for
+ /// that key.
+ RangePair equal_range(const KeyT &K) {
+ iterator B = find(K);
+ iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
+ return make_pair(B, E);
+ }
+
+ /// Insert a new element at the tail of the subset list. Returns an iterator
+ /// to the newly added entry.
+ iterator insert(const ValueT &Val) {
+ unsigned Idx = sparseIndex(Val);
+ iterator I = findIndex(Idx);
+
+ unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
+
+ if (I == end()) {
+ // Make a singleton list
+ Sparse[Idx] = NodeIdx;
+ Dense[NodeIdx].Prev = NodeIdx;
+ return iterator(this, NodeIdx, Idx);
+ }
+
+ // Stick it at the end.
+ unsigned HeadIdx = I.Idx;
+ unsigned TailIdx = I.Prev();
+ Dense[TailIdx].Next = NodeIdx;
+ Dense[HeadIdx].Prev = NodeIdx;
+ Dense[NodeIdx].Prev = TailIdx;
+
+ return iterator(this, NodeIdx, Idx);
+ }
+
+ /// Erases an existing element identified by a valid iterator.
+ ///
+ /// This invalidates iterators pointing at the same entry, but erase() returns
+ /// an iterator pointing to the next element in the subset's list. This makes
+ /// it possible to erase selected elements while iterating over the subset:
+ ///
+ /// tie(I, E) = Set.equal_range(Key);
+ /// while (I != E)
+ /// if (test(*I))
+ /// I = Set.erase(I);
+ /// else
+ /// ++I;
+ ///
+ /// Note that if the last element in the subset list is erased, this will
+ /// return an end iterator which can be decremented to get the new tail (if it
+ /// exists):
+ ///
+ /// tie(B, I) = Set.equal_range(Key);
+ /// for (bool isBegin = B == I; !isBegin; /* empty */) {
+ /// isBegin = (--I) == B;
+  ///     if (test(*I))
+ /// break;
+ /// I = erase(I);
+ /// }
+ iterator erase(iterator I) {
+ assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
+ "erasing invalid/end/tombstone iterator");
+
+    // First, unlink the node from its list, then turn its slot into a
+    // tombstone on the freelist.
+ iterator NextI = unlink(Dense[I.Idx]);
+
+ // Put in a tombstone.
+ makeTombstone(I.Idx);
+
+ return NextI;
+ }
+
+ /// Erase all elements with the given key. This invalidates all
+ /// iterators of that key.
+ void eraseAll(const KeyT &K) {
+ for (iterator I = find(K); I != end(); /* empty */)
+ I = erase(I);
+ }
+
+private:
+ /// Unlink the node from its list. Returns the next node in the list.
+ iterator unlink(const SMSNode &N) {
+ if (isSingleton(N)) {
+ // Singleton is already unlinked
+ assert(N.Next == SMSNode::INVALID && "Singleton has next?");
+ return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
+ }
+
+ if (isHead(N)) {
+ // If we're the head, then update the sparse array and our next.
+ Sparse[sparseIndex(N)] = N.Next;
+ Dense[N.Next].Prev = N.Prev;
+ return iterator(this, N.Next, ValIndexOf(N.Data));
+ }
+
+ if (N.isTail()) {
+ // If we're the tail, then update our head and our previous.
+ findIndex(sparseIndex(N)).setPrev(N.Prev);
+ Dense[N.Prev].Next = N.Next;
+
+ // Give back an end iterator that can be decremented
+ iterator I(this, N.Prev, ValIndexOf(N.Data));
+ return ++I;
+ }
+
+ // Otherwise, just drop us
+ Dense[N.Next].Prev = N.Prev;
+ Dense[N.Prev].Next = N.Next;
+ return iterator(this, N.Next, ValIndexOf(N.Data));
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm/include/llvm/ADT/SparseSet.h
new file mode 100644
index 0000000..a45d1c8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/SparseSet.h
@@ -0,0 +1,311 @@
+//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SparseSet class derived from the version described in
+// Briggs, Torczon, "An efficient representation for sparse sets", ACM Letters
+// on Programming Languages and Systems, Volume 2 Issue 1-4, March-Dec. 1993.
+//
+// A sparse set holds a small number of objects identified by integer keys from
+// a moderately sized universe. The sparse set uses more memory than other
+// containers in order to provide faster operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_SPARSESET_H
+#define LLVM_ADT_SPARSESET_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <limits>
+
+namespace llvm {
+
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
+///
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
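+///
+/// An illustrative "fat value" sketch (editor's example; the struct and its
+/// fields are hypothetical):
+/// \code
+///   struct Node {
+///     unsigned ID;      // key, must be less than the set's universe
+///     int Payload;
+///     unsigned getSparseSetIndex() const { return ID; }
+///   };
+///   // SparseSet<Node> then keys elements by Node::ID.
+/// \endcode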
+///
+template<typename ValueT>
+struct SparseSetValTraits {
+ static unsigned getValIndex(const ValueT &Val) {
+ return Val.getSparseSetIndex();
+ }
+};
+
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
+///
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+ unsigned operator()(const ValueT &Val) const {
+ return SparseSetValTraits<ValueT>::getValIndex(Val);
+ }
+};
+
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+ unsigned operator()(const KeyT &Key) const {
+ return KeyFunctorT()(Key);
+ }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
+/// small unsigned keys.
+///
+/// SparseSet allocates memory proportional to the size of the key universe, so
+/// it is not recommended for building composite data structures. It is useful
+/// for algorithms that require a single set with fast operations.
+///
+/// Compared to DenseSet and DenseMap, SparseSet provides constant-time fast
+/// clear() and iteration as fast as a vector. The find(), insert(), and
+/// erase() operations are all constant time, and typically faster than a hash
+/// table. The iteration order doesn't depend on numerical key values, it only
+/// depends on the order of insert() and erase() operations. When no elements
+/// have been erased, the iteration order is the insertion order.
+///
+/// Compared to BitVector, SparseSet<unsigned> uses 8x-40x more memory, but
+/// offers constant-time clear() and size() operations as well as fast
+/// iteration independent of the size of the universe.
+///
+/// SparseSet contains a dense vector holding all the objects and a sparse
+/// array holding indexes into the dense vector. Most of the memory is used by
+/// the sparse array which is the size of the key universe. The SparseT
+/// template parameter provides a space/speed tradeoff for sets holding many
+/// elements.
+///
+/// When SparseT is uint32_t, find() only touches 2 cache lines, but the sparse
+/// array uses 4 x Universe bytes.
+///
+/// When SparseT is uint8_t (the default), find() touches up to 2+[N/256] cache
+/// lines, but the sparse array is 4x smaller. N is the number of elements in
+/// the set.
+///
+/// For sets that may grow to thousands of elements, SparseT should be set to
+/// uint16_t or uint32_t.
+///
+/// @tparam ValueT The type of objects in the set.
+/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
+/// @tparam SparseT An unsigned integer type. See above.
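+///
+/// A minimal usage sketch (editor's illustration, not part of the original
+/// header):
+/// \code
+///   SparseSet<unsigned> Set;
+///   Set.setUniverse(128); // all keys must be < 128
+///   Set.insert(5);
+///   if (Set.count(5))
+///     Set.erase(5);
+///   Set.clear();          // constant time
+/// \endcode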
+///
+template<typename ValueT,
+ typename KeyFunctorT = llvm::identity<unsigned>,
+ typename SparseT = uint8_t>
+class SparseSet {
+ static_assert(std::numeric_limits<SparseT>::is_integer &&
+ !std::numeric_limits<SparseT>::is_signed,
+ "SparseT must be an unsigned integer type");
+
+ typedef typename KeyFunctorT::argument_type KeyT;
+ typedef SmallVector<ValueT, 8> DenseT;
+ typedef unsigned size_type;
+ DenseT Dense;
+ SparseT *Sparse;
+ unsigned Universe;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
+
+ // Disable copy construction and assignment.
+ // This data structure is not meant to be used that way.
+ SparseSet(const SparseSet&) = delete;
+ SparseSet &operator=(const SparseSet&) = delete;
+
+public:
+ typedef ValueT value_type;
+ typedef ValueT &reference;
+ typedef const ValueT &const_reference;
+ typedef ValueT *pointer;
+ typedef const ValueT *const_pointer;
+
+ SparseSet() : Sparse(nullptr), Universe(0) {}
+ ~SparseSet() { free(Sparse); }
+
+ /// setUniverse - Set the universe size which determines the largest key the
+ /// set can hold. The universe must be sized before any elements can be
+ /// added.
+ ///
+ /// @param U Universe size. All object keys must be less than U.
+ ///
+ void setUniverse(unsigned U) {
+ // It's not hard to resize the universe on a non-empty set, but it doesn't
+ // seem like a likely use case, so we can add that code when we need it.
+ assert(empty() && "Can only resize universe on an empty map");
+ // Hysteresis prevents needless reallocations.
+ if (U >= Universe/4 && U <= Universe)
+ return;
+ free(Sparse);
+ // The Sparse array doesn't actually need to be initialized, so malloc
+ // would be enough here, but that will cause tools like valgrind to
+ // complain about branching on uninitialized data.
+ Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
+ Universe = U;
+ }
+
+ // Import trivial vector stuff from DenseT.
+ typedef typename DenseT::iterator iterator;
+ typedef typename DenseT::const_iterator const_iterator;
+
+ const_iterator begin() const { return Dense.begin(); }
+ const_iterator end() const { return Dense.end(); }
+ iterator begin() { return Dense.begin(); }
+ iterator end() { return Dense.end(); }
+
+ /// empty - Returns true if the set is empty.
+ ///
+ /// This is not the same as BitVector::empty().
+ ///
+ bool empty() const { return Dense.empty(); }
+
+ /// size - Returns the number of elements in the set.
+ ///
+ /// This is not the same as BitVector::size() which returns the size of the
+ /// universe.
+ ///
+ size_type size() const { return Dense.size(); }
+
+ /// clear - Clears the set. This is a very fast constant time operation.
+ ///
+ void clear() {
+ // Sparse does not need to be cleared, see find().
+ Dense.clear();
+ }
+
+ /// findIndex - Find an element by its index.
+ ///
+ /// @param Idx A valid index to find.
+  /// @returns An iterator to the element identified by Idx, or end().
+ ///
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
+ const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
+ for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+ const unsigned FoundIdx = ValIndexOf(Dense[i]);
+ assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+ if (Idx == FoundIdx)
+ return begin() + i;
+ // Stride is 0 when SparseT >= unsigned. We don't need to loop.
+ if (!Stride)
+ break;
+ }
+ return end();
+ }
+
+ /// find - Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
+ }
+
+ /// count - Returns 1 if this set contains an element identified by Key,
+ /// 0 otherwise.
+ ///
+ size_type count(const KeyT &Key) const {
+ return find(Key) == end() ? 0 : 1;
+ }
+
+ /// insert - Attempts to insert a new element.
+ ///
+ /// If Val is successfully inserted, return (I, true), where I is an iterator
+ /// pointing to the newly inserted element.
+ ///
+ /// If the set already contains an element with the same key as Val, return
+ /// (I, false), where I is an iterator pointing to the existing element.
+ ///
+ /// Insertion invalidates all iterators.
+ ///
+ std::pair<iterator, bool> insert(const ValueT &Val) {
+ unsigned Idx = ValIndexOf(Val);
+ iterator I = findIndex(Idx);
+ if (I != end())
+ return std::make_pair(I, false);
+ Sparse[Idx] = size();
+ Dense.push_back(Val);
+ return std::make_pair(end() - 1, true);
+ }
+
+ /// array subscript - If an element already exists with this key, return it.
+ /// Otherwise, automatically construct a new value from Key, insert it,
+ /// and return the newly inserted element.
+ ValueT &operator[](const KeyT &Key) {
+ return *insert(ValueT(Key)).first;
+ }
+
+ /// erase - Erases an existing element identified by a valid iterator.
+ ///
+ /// This invalidates all iterators, but erase() returns an iterator pointing
+ /// to the next element. This makes it possible to erase selected elements
+ /// while iterating over the set:
+ ///
+ /// for (SparseSet::iterator I = Set.begin(); I != Set.end();)
+ /// if (test(*I))
+ /// I = Set.erase(I);
+ /// else
+ /// ++I;
+ ///
+ /// Note that end() changes when elements are erased, unlike std::list.
+ ///
+ iterator erase(iterator I) {
+ assert(unsigned(I - begin()) < size() && "Invalid iterator");
+ if (I != end() - 1) {
+ *I = Dense.back();
+ unsigned BackIdx = ValIndexOf(Dense.back());
+ assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+ Sparse[BackIdx] = I - begin();
+ }
+ // This depends on SmallVector::pop_back() not invalidating iterators.
+ // std::vector::pop_back() doesn't give that guarantee.
+ Dense.pop_back();
+ return I;
+ }
+
+ /// erase - Erases an element identified by Key, if it exists.
+ ///
+ /// @param Key The key identifying the element to erase.
+ /// @returns True when an element was erased, false if no element was found.
+ ///
+ bool erase(const KeyT &Key) {
+ iterator I = find(Key);
+ if (I == end())
+ return false;
+ erase(I);
+ return true;
+ }
+
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/Statistic.h b/contrib/llvm/include/llvm/ADT/Statistic.h
new file mode 100644
index 0000000..7c84e3e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Statistic.h
@@ -0,0 +1,186 @@
+//===-- llvm/ADT/Statistic.h - Easy way to expose stats ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the 'Statistic' class, which is designed to be an easy way
+// to expose various metrics from passes. These statistics are printed at the
+// end of a run (from llvm_shutdown), when the -stats command line option is
+// passed on the command line.
+//
+// This is useful for reporting information like the number of instructions
+// simplified, optimized or removed by various transformations, like this:
+//
+// static Statistic NumInstsKilled("gcse", "Number of instructions killed");
+//
+// Later, in the code: ++NumInstsKilled;
+//
+// NOTE: Statistics *must* be declared as global variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STATISTIC_H
+#define LLVM_ADT_STATISTIC_H
+
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Valgrind.h"
+#include <memory>
+
+namespace llvm {
+class raw_ostream;
+class raw_fd_ostream;
+
+class Statistic {
+public:
+ const char *Name;
+ const char *Desc;
+ volatile llvm::sys::cas_flag Value;
+ bool Initialized;
+
+ llvm::sys::cas_flag getValue() const { return Value; }
+ const char *getName() const { return Name; }
+ const char *getDesc() const { return Desc; }
+
+ /// construct - This should only be called for non-global statistics.
+ void construct(const char *name, const char *desc) {
+ Name = name; Desc = desc;
+ Value = 0; Initialized = false;
+ }
+
+ // Allow use of this class as the value itself.
+ operator unsigned() const { return Value; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
+ const Statistic &operator=(unsigned Val) {
+ Value = Val;
+ return init();
+ }
+
+ const Statistic &operator++() {
+ // FIXME: This function and all those that follow carefully use an
+ // atomic operation to update the value safely in the presence of
+ // concurrent accesses, but not to read the return value, so the
+ // return value is not thread safe.
+ sys::AtomicIncrement(&Value);
+ return init();
+ }
+
+ unsigned operator++(int) {
+ init();
+ unsigned OldValue = Value;
+ sys::AtomicIncrement(&Value);
+ return OldValue;
+ }
+
+ const Statistic &operator--() {
+ sys::AtomicDecrement(&Value);
+ return init();
+ }
+
+ unsigned operator--(int) {
+ init();
+ unsigned OldValue = Value;
+ sys::AtomicDecrement(&Value);
+ return OldValue;
+ }
+
+ const Statistic &operator+=(const unsigned &V) {
+ if (!V) return *this;
+ sys::AtomicAdd(&Value, V);
+ return init();
+ }
+
+ const Statistic &operator-=(const unsigned &V) {
+ if (!V) return *this;
+ sys::AtomicAdd(&Value, -V);
+ return init();
+ }
+
+ const Statistic &operator*=(const unsigned &V) {
+ sys::AtomicMul(&Value, V);
+ return init();
+ }
+
+ const Statistic &operator/=(const unsigned &V) {
+ sys::AtomicDiv(&Value, V);
+ return init();
+ }
+
+#else // Statistics are disabled in release builds.
+
+ const Statistic &operator=(unsigned Val) {
+ return *this;
+ }
+
+ const Statistic &operator++() {
+ return *this;
+ }
+
+ unsigned operator++(int) {
+ return 0;
+ }
+
+ const Statistic &operator--() {
+ return *this;
+ }
+
+ unsigned operator--(int) {
+ return 0;
+ }
+
+ const Statistic &operator+=(const unsigned &V) {
+ return *this;
+ }
+
+ const Statistic &operator-=(const unsigned &V) {
+ return *this;
+ }
+
+ const Statistic &operator*=(const unsigned &V) {
+ return *this;
+ }
+
+ const Statistic &operator/=(const unsigned &V) {
+ return *this;
+ }
+
+#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
+
+protected:
+ Statistic &init() {
+ bool tmp = Initialized;
+ sys::MemoryFence();
+ if (!tmp) RegisterStatistic();
+ TsanHappensAfter(this);
+ return *this;
+ }
+ void RegisterStatistic();
+};
+
+// STATISTIC - A macro to make definition of statistics really simple. This
+// automatically passes the DEBUG_TYPE of the file into the statistic.
+#define STATISTIC(VARNAME, DESC) \
+ static llvm::Statistic VARNAME = { DEBUG_TYPE, DESC, 0, 0 }
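+
+// A typical usage sketch (editor's illustration; the DEBUG_TYPE string and
+// counter name are hypothetical):
+//
+//   #define DEBUG_TYPE "mypass"
+//   STATISTIC(NumWidgets, "Number of widgets processed");
+//   ...
+//   ++NumWidgets; // counted, and printed at shutdown under -stats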
+
+/// \brief Enable the collection and printing of statistics.
+void EnableStatistics();
+
+/// \brief Check if statistics are enabled.
+bool AreStatisticsEnabled();
+
+/// \brief Return a file stream to print our output on.
+std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
+
+/// \brief Print statistics to the file returned by CreateInfoOutputFile().
+void PrintStatistics();
+
+/// \brief Print statistics to the given output stream.
+void PrintStatistics(raw_ostream &OS);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h
new file mode 100644
index 0000000..0992f5d
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/StringExtras.h
@@ -0,0 +1,212 @@
+//===-- llvm/ADT/StringExtras.h - Useful string functions -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful when dealing with strings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGEXTRAS_H
+#define LLVM_ADT_STRINGEXTRAS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+template<typename T> class SmallVectorImpl;
+
+/// hexdigit - Return the hexadecimal character for the
+/// given number \p X (which should be less than 16).
+static inline char hexdigit(unsigned X, bool LowerCase = false) {
+ const char HexChar = LowerCase ? 'a' : 'A';
+ return X < 10 ? '0' + X : HexChar + X - 10;
+}
+
+/// Construct a string ref from a boolean.
+static inline StringRef toStringRef(bool B) {
+ return StringRef(B ? "true" : "false");
+}
+
+/// Interpret the given character \p C as a hexadecimal digit and return its
+/// value.
+///
+/// If \p C is not a valid hex digit, -1U is returned.
+static inline unsigned hexDigitValue(char C) {
+ if (C >= '0' && C <= '9') return C-'0';
+ if (C >= 'a' && C <= 'f') return C-'a'+10U;
+ if (C >= 'A' && C <= 'F') return C-'A'+10U;
+ return -1U;
+}
+
+/// utohex_buffer - Emit the specified number into the buffer specified by
+/// BufferEnd, returning a pointer to the start of the string. The buffer must
+/// be large enough to hold any number, for example:
+/// char Buffer[40];
+/// printf("0x%s", utohex_buffer(X, Buffer+40));
+///
+/// This should only be used with unsigned types.
+///
+template<typename IntTy>
+static inline char *utohex_buffer(IntTy X, char *BufferEnd, bool LowerCase = false) {
+ char *BufPtr = BufferEnd;
+ *--BufPtr = 0; // Null terminate buffer.
+ if (X == 0) {
+ *--BufPtr = '0'; // Handle special case.
+ return BufPtr;
+ }
+
+ while (X) {
+ unsigned char Mod = static_cast<unsigned char>(X) & 15;
+ *--BufPtr = hexdigit(Mod, LowerCase);
+ X >>= 4;
+ }
+ return BufPtr;
+}
+
+static inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
+ char Buffer[17];
+ return utohex_buffer(X, Buffer+17, LowerCase);
+}
+
+static inline std::string utostr_32(uint32_t X, bool isNeg = false) {
+ char Buffer[11];
+ char *BufPtr = Buffer+11;
+
+ if (X == 0) *--BufPtr = '0'; // Handle special case...
+
+ while (X) {
+ *--BufPtr = '0' + char(X % 10);
+ X /= 10;
+ }
+
+ if (isNeg) *--BufPtr = '-'; // Add negative sign...
+
+ return std::string(BufPtr, Buffer+11);
+}
+
+static inline std::string utostr(uint64_t X, bool isNeg = false) {
+ char Buffer[21];
+ char *BufPtr = Buffer+21;
+
+ if (X == 0) *--BufPtr = '0'; // Handle special case...
+
+ while (X) {
+ *--BufPtr = '0' + char(X % 10);
+ X /= 10;
+ }
+
+ if (isNeg) *--BufPtr = '-'; // Add negative sign...
+ return std::string(BufPtr, Buffer+21);
+}
+
+
+static inline std::string itostr(int64_t X) {
+  if (X < 0)
+    // Negate after the cast to avoid signed-overflow UB when X == INT64_MIN.
+    return utostr(-static_cast<uint64_t>(X), true);
+ else
+ return utostr(static_cast<uint64_t>(X));
+}
+
+/// StrInStrNoCase - Portable version of strcasestr. Locates the first
+/// occurrence of string 's2' in string 's1', ignoring case. Returns
+/// the offset of s2 in s1, or npos if s2 cannot be found.
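+/// For example (illustrative): StrInStrNoCase("ABCdef", "cde") == 2.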
+StringRef::size_type StrInStrNoCase(StringRef s1, StringRef s2);
+
+/// getToken - This function extracts one token from source, ignoring any
+/// leading characters that appear in the Delimiters string, and ending the
+/// token at any of the characters that appear in the Delimiters string. If
+/// there are no tokens in the source string, an empty string is returned.
+/// The function returns a pair containing the extracted token and the
+/// remaining tail string.
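+///
+/// For example (editor's illustration):
+///   getToken("  alpha beta").first == "alpha"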
+std::pair<StringRef, StringRef> getToken(StringRef Source,
+ StringRef Delimiters = " \t\n\v\f\r");
+
+/// SplitString - Split up the specified string according to the specified
+/// delimiters, appending the result fragments to the output list.
+void SplitString(StringRef Source,
+ SmallVectorImpl<StringRef> &OutFragments,
+ StringRef Delimiters = " \t\n\v\f\r");
+
+/// HashString - Hash function for strings.
+///
+/// This is the Bernstein hash function.
+//
+// FIXME: Investigate whether a modified bernstein hash function performs
+// better: http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
+// X*33+c -> X*33^c
+static inline unsigned HashString(StringRef Str, unsigned Result = 0) {
+ for (StringRef::size_type i = 0, e = Str.size(); i != e; ++i)
+ Result = Result * 33 + (unsigned char)Str[i];
+ return Result;
+}
+
+/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
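+/// For example (illustrative): 1 -> "st", 12 -> "th", 22 -> "nd", 113 -> "th".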
+static inline StringRef getOrdinalSuffix(unsigned Val) {
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
+ switch (Val % 100) {
+ case 11:
+ case 12:
+ case 13:
+ return "th";
+ default:
+ switch (Val % 10) {
+ case 1: return "st";
+ case 2: return "nd";
+ case 3: return "rd";
+ default: return "th";
+ }
+ }
+}
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+ StringRef Separator, std::input_iterator_tag) {
+ std::string S;
+ if (Begin == End)
+ return S;
+
+ S += (*Begin);
+ while (++Begin != End) {
+ S += Separator;
+ S += (*Begin);
+ }
+ return S;
+}
+
+template <typename IteratorT>
+inline std::string join_impl(IteratorT Begin, IteratorT End,
+ StringRef Separator, std::forward_iterator_tag) {
+ std::string S;
+ if (Begin == End)
+ return S;
+
+ size_t Len = (std::distance(Begin, End) - 1) * Separator.size();
+ for (IteratorT I = Begin; I != End; ++I)
+    Len += (*I).size();
+ S.reserve(Len);
+ S += (*Begin);
+ while (++Begin != End) {
+ S += Separator;
+ S += (*Begin);
+ }
+ return S;
+}
+
+/// Joins the strings in the range [Begin, End), adding Separator between
+/// the elements.
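+///
+/// For example (editor's illustration):
+/// \code
+///   std::vector<std::string> V = {"a", "b", "c"};
+///   join(V.begin(), V.end(), ", "); // yields "a, b, c"
+/// \endcode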
+template <typename IteratorT>
+inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
+ typedef typename std::iterator_traits<IteratorT>::iterator_category tag;
+ return join_impl(Begin, End, Separator, tag());
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/StringMap.h b/contrib/llvm/include/llvm/ADT/StringMap.h
new file mode 100644
index 0000000..700bb9e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/StringMap.h
@@ -0,0 +1,463 @@
+//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StringMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGMAP_H
+#define LLVM_ADT_STRINGMAP_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include <cstring>
+#include <utility>
+
+namespace llvm {
+ template<typename ValueT>
+ class StringMapConstIterator;
+ template<typename ValueT>
+ class StringMapIterator;
+ template<typename ValueTy>
+ class StringMapEntry;
+
+/// StringMapEntryBase - Shared base class of StringMapEntry instances.
+class StringMapEntryBase {
+ unsigned StrLen;
+
+public:
+ explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {}
+
+ unsigned getKeyLength() const { return StrLen; }
+};
+
+/// StringMapImpl - This is the base class of StringMap that is shared among
+/// all of its instantiations.
+class StringMapImpl {
+protected:
+  // Array of NumBuckets pointers to entries; null pointers are holes.
+  // TheTable[NumBuckets] contains a sentinel value for easy iteration. It is
+  // followed by an array of the actual hash values as unsigned integers.
+ StringMapEntryBase **TheTable;
+ unsigned NumBuckets;
+ unsigned NumItems;
+ unsigned NumTombstones;
+ unsigned ItemSize;
+
+protected:
+ explicit StringMapImpl(unsigned itemSize)
+ : TheTable(nullptr),
+        // Initialize the map with zero buckets to avoid allocation.
+ NumBuckets(0), NumItems(0), NumTombstones(0), ItemSize(itemSize) {}
+ StringMapImpl(StringMapImpl &&RHS)
+ : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets),
+ NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones),
+ ItemSize(RHS.ItemSize) {
+ RHS.TheTable = nullptr;
+ RHS.NumBuckets = 0;
+ RHS.NumItems = 0;
+ RHS.NumTombstones = 0;
+ }
+
+ StringMapImpl(unsigned InitSize, unsigned ItemSize);
+ unsigned RehashTable(unsigned BucketNo = 0);
+
+ /// LookupBucketFor - Look up the bucket that the specified string should end
+ /// up in. If it already exists as a key in the map, the Item pointer for the
+ /// specified bucket will be non-null. Otherwise, it will be null. In either
+ /// case, the FullHashValue field of the bucket will be set to the hash value
+ /// of the string.
+ unsigned LookupBucketFor(StringRef Key);
+
+ /// FindKey - Look up the bucket that contains the specified key. If it exists
+ /// in the map, return the bucket number of the key. Otherwise return -1.
+ /// This does not modify the map.
+ int FindKey(StringRef Key) const;
+
+ /// RemoveKey - Remove the specified StringMapEntry from the table, but do not
+ /// delete it. This aborts if the value isn't in the table.
+ void RemoveKey(StringMapEntryBase *V);
+
+ /// RemoveKey - Remove the StringMapEntry for the specified key from the
+ /// table, returning it. If the key is not in the table, this returns null.
+ StringMapEntryBase *RemoveKey(StringRef Key);
+
+private:
+ void init(unsigned Size);
+
+public:
+ static StringMapEntryBase *getTombstoneVal() {
+ return (StringMapEntryBase*)-1;
+ }
+
+ unsigned getNumBuckets() const { return NumBuckets; }
+ unsigned getNumItems() const { return NumItems; }
+
+ bool empty() const { return NumItems == 0; }
+ unsigned size() const { return NumItems; }
+
+ void swap(StringMapImpl &Other) {
+ std::swap(TheTable, Other.TheTable);
+ std::swap(NumBuckets, Other.NumBuckets);
+ std::swap(NumItems, Other.NumItems);
+ std::swap(NumTombstones, Other.NumTombstones);
+ }
+};
+
+/// StringMapEntry - This is used to represent one value that is inserted into
+/// a StringMap. It contains the Value itself and the key: the string length
+/// and data.
+template<typename ValueTy>
+class StringMapEntry : public StringMapEntryBase {
+ StringMapEntry(StringMapEntry &E) = delete;
+
+public:
+ ValueTy second;
+
+ explicit StringMapEntry(unsigned strLen)
+ : StringMapEntryBase(strLen), second() {}
+ template <class InitTy>
+ StringMapEntry(unsigned strLen, InitTy &&V)
+ : StringMapEntryBase(strLen), second(std::forward<InitTy>(V)) {}
+
+ StringRef getKey() const {
+ return StringRef(getKeyData(), getKeyLength());
+ }
+
+ const ValueTy &getValue() const { return second; }
+ ValueTy &getValue() { return second; }
+
+ void setValue(const ValueTy &V) { second = V; }
+
+ /// getKeyData - Return the start of the string data that is the key for this
+ /// value. The string data is always stored immediately after the
+ /// StringMapEntry object.
+ const char *getKeyData() const {return reinterpret_cast<const char*>(this+1);}
+
+ StringRef first() const { return StringRef(getKeyData(), getKeyLength()); }
+
+  /// Create - Create a StringMapEntry for the specified key, constructing the
+  /// value from \p InitVal.
+ template <typename AllocatorTy, typename InitType>
+ static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
+ InitType &&InitVal) {
+ unsigned KeyLength = Key.size();
+
+ // Allocate a new item with space for the string at the end and a null
+ // terminator.
+ unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
+ KeyLength+1;
+ unsigned Alignment = alignOf<StringMapEntry>();
+
+ StringMapEntry *NewItem =
+ static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
+
+    // Construct the entry and its value in place, forwarding InitVal.
+ new (NewItem) StringMapEntry(KeyLength, std::forward<InitType>(InitVal));
+
+ // Copy the string information.
+ char *StrBuffer = const_cast<char*>(NewItem->getKeyData());
+ if (KeyLength > 0)
+ memcpy(StrBuffer, Key.data(), KeyLength);
+ StrBuffer[KeyLength] = 0; // Null terminate for convenience of clients.
+ return NewItem;
+ }
+
+ template<typename AllocatorTy>
+ static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator) {
+ return Create(Key, Allocator, ValueTy());
+ }
+
+ /// Create - Create a StringMapEntry with normal malloc/free.
+ template<typename InitType>
+ static StringMapEntry *Create(StringRef Key, InitType &&InitVal) {
+ MallocAllocator A;
+ return Create(Key, A, std::forward<InitType>(InitVal));
+ }
+
+ static StringMapEntry *Create(StringRef Key) {
+ return Create(Key, ValueTy());
+ }
+
+ /// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
+ /// into a StringMapEntry, return the StringMapEntry itself.
+ static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
+ char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
+ return *reinterpret_cast<StringMapEntry*>(Ptr);
+ }
+
+ /// Destroy - Destroy this StringMapEntry, releasing memory back to the
+ /// specified allocator.
+ template<typename AllocatorTy>
+ void Destroy(AllocatorTy &Allocator) {
+ // Free memory referenced by the item.
+ unsigned AllocSize =
+ static_cast<unsigned>(sizeof(StringMapEntry)) + getKeyLength() + 1;
+ this->~StringMapEntry();
+ Allocator.Deallocate(static_cast<void *>(this), AllocSize);
+ }
+
+ /// Destroy this object, releasing memory back to the malloc allocator.
+ void Destroy() {
+ MallocAllocator A;
+ Destroy(A);
+ }
+};
+
+/// StringMap - This is an unconventional map that is specialized for handling
+/// keys that are "strings", which are basically ranges of bytes. This does some
+/// funky memory allocation and hashing things to make it extremely efficient,
+/// storing the string data *after* the value in the map.
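+///
+/// A minimal usage sketch (editor's illustration, not part of the original
+/// header):
+/// \code
+///   StringMap<int> Counts;
+///   Counts["apple"] = 1;           // default-constructs, then assigns
+///   ++Counts["apple"];             // now 2
+///   int N = Counts.lookup("pear"); // 0; missing keys yield ValueTy()
+/// \endcode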
+template<typename ValueTy, typename AllocatorTy = MallocAllocator>
+class StringMap : public StringMapImpl {
+ AllocatorTy Allocator;
+
+public:
+ typedef StringMapEntry<ValueTy> MapEntryTy;
+
+ StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
+ explicit StringMap(unsigned InitialSize)
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
+
+ explicit StringMap(AllocatorTy A)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
+
+ StringMap(unsigned InitialSize, AllocatorTy A)
+ : StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))),
+ Allocator(A) {}
+
+ StringMap(std::initializer_list<std::pair<StringRef, ValueTy>> List)
+ : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
+ for (const auto &P : List) {
+ insert(P);
+ }
+ }
+
+ StringMap(StringMap &&RHS)
+ : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {}
+
+ StringMap &operator=(StringMap RHS) {
+ StringMapImpl::swap(RHS);
+ std::swap(Allocator, RHS.Allocator);
+ return *this;
+ }
+
+ // FIXME: Implement copy operations if/when they're needed.
+
+ AllocatorTy &getAllocator() { return Allocator; }
+ const AllocatorTy &getAllocator() const { return Allocator; }
+
+ typedef const char* key_type;
+ typedef ValueTy mapped_type;
+ typedef StringMapEntry<ValueTy> value_type;
+ typedef size_t size_type;
+
+ typedef StringMapConstIterator<ValueTy> const_iterator;
+ typedef StringMapIterator<ValueTy> iterator;
+
+ iterator begin() {
+ return iterator(TheTable, NumBuckets == 0);
+ }
+ iterator end() {
+ return iterator(TheTable+NumBuckets, true);
+ }
+ const_iterator begin() const {
+ return const_iterator(TheTable, NumBuckets == 0);
+ }
+ const_iterator end() const {
+ return const_iterator(TheTable+NumBuckets, true);
+ }
+
+ iterator find(StringRef Key) {
+ int Bucket = FindKey(Key);
+ if (Bucket == -1) return end();
+ return iterator(TheTable+Bucket, true);
+ }
+
+ const_iterator find(StringRef Key) const {
+ int Bucket = FindKey(Key);
+ if (Bucket == -1) return end();
+ return const_iterator(TheTable+Bucket, true);
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueTy lookup(StringRef Key) const {
+ const_iterator it = find(Key);
+ if (it != end())
+ return it->second;
+ return ValueTy();
+ }
+
+ ValueTy &operator[](StringRef Key) {
+ return insert(std::make_pair(Key, ValueTy())).first->second;
+ }
+
+ /// count - Return 1 if the element is in the map, 0 otherwise.
+ size_type count(StringRef Key) const {
+ return find(Key) == end() ? 0 : 1;
+ }
+
+ /// insert - Insert the specified key/value pair into the map. If the key
+ /// already exists in the map, return false and ignore the request, otherwise
+ /// insert it and return true.
+ bool insert(MapEntryTy *KeyValue) {
+ unsigned BucketNo = LookupBucketFor(KeyValue->getKey());
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
+ return false; // Already exists in map.
+
+ if (Bucket == getTombstoneVal())
+ --NumTombstones;
+ Bucket = KeyValue;
+ ++NumItems;
+ assert(NumItems + NumTombstones <= NumBuckets);
+
+ RehashTable();
+ return true;
+ }
+
+ /// insert - Inserts the specified key/value pair into the map if the key
+ /// isn't already in the map. The bool component of the returned pair is true
+ /// if and only if the insertion takes place, and the iterator component of
+ /// the pair points to the element with key equivalent to the key of the pair.
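+  ///
+  /// For example (editor's illustration):
+  /// \code
+  ///   auto R = Map.insert(std::make_pair("key", 1));
+  ///   if (!R.second)
+  ///     R.first->second = 2; // key already existed; update in place
+  /// \endcode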
+ std::pair<iterator, bool> insert(std::pair<StringRef, ValueTy> KV) {
+ unsigned BucketNo = LookupBucketFor(KV.first);
+ StringMapEntryBase *&Bucket = TheTable[BucketNo];
+ if (Bucket && Bucket != getTombstoneVal())
+ return std::make_pair(iterator(TheTable + BucketNo, false),
+ false); // Already exists in map.
+
+ if (Bucket == getTombstoneVal())
+ --NumTombstones;
+ Bucket =
+ MapEntryTy::Create(KV.first, Allocator, std::move(KV.second));
+ ++NumItems;
+ assert(NumItems + NumTombstones <= NumBuckets);
+
+ BucketNo = RehashTable(BucketNo);
+ return std::make_pair(iterator(TheTable + BucketNo, false), true);
+ }
+
+ // clear - Empties out the StringMap
+ void clear() {
+ if (empty()) return;
+
+ // Zap all values, resetting the keys back to non-present (not tombstone),
+ // which is safe because we're removing all elements.
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *&Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
+ static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+ }
+ Bucket = nullptr;
+ }
+
+ NumItems = 0;
+ NumTombstones = 0;
+ }
+
+ /// remove - Remove the specified key/value pair from the map, but do not
+ /// erase it. This aborts if the key is not in the map.
+ void remove(MapEntryTy *KeyValue) {
+ RemoveKey(KeyValue);
+ }
+
+ void erase(iterator I) {
+ MapEntryTy &V = *I;
+ remove(&V);
+ V.Destroy(Allocator);
+ }
+
+ bool erase(StringRef Key) {
+ iterator I = find(Key);
+ if (I == end()) return false;
+ erase(I);
+ return true;
+ }
+
+ ~StringMap() {
+ // Delete all the elements in the map, but don't reset the elements
+ // to default values. This is a copy of clear(), but avoids unnecessary
+ // work not required in the destructor.
+ if (!empty()) {
+ for (unsigned I = 0, E = NumBuckets; I != E; ++I) {
+ StringMapEntryBase *Bucket = TheTable[I];
+ if (Bucket && Bucket != getTombstoneVal()) {
+ static_cast<MapEntryTy*>(Bucket)->Destroy(Allocator);
+ }
+ }
+ }
+ free(TheTable);
+ }
+};
+
+template <typename ValueTy> class StringMapConstIterator {
+protected:
+ StringMapEntryBase **Ptr;
+
+public:
+ typedef StringMapEntry<ValueTy> value_type;
+
+ StringMapConstIterator() : Ptr(nullptr) { }
+
+ explicit StringMapConstIterator(StringMapEntryBase **Bucket,
+ bool NoAdvance = false)
+ : Ptr(Bucket) {
+ if (!NoAdvance) AdvancePastEmptyBuckets();
+ }
+
+ const value_type &operator*() const {
+ return *static_cast<StringMapEntry<ValueTy>*>(*Ptr);
+ }
+ const value_type *operator->() const {
+ return static_cast<StringMapEntry<ValueTy>*>(*Ptr);
+ }
+
+ bool operator==(const StringMapConstIterator &RHS) const {
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const StringMapConstIterator &RHS) const {
+ return Ptr != RHS.Ptr;
+ }
+
+ inline StringMapConstIterator& operator++() { // Preincrement
+ ++Ptr;
+ AdvancePastEmptyBuckets();
+ return *this;
+ }
+ StringMapConstIterator operator++(int) { // Postincrement
+ StringMapConstIterator tmp = *this; ++*this; return tmp;
+ }
+
+private:
+ void AdvancePastEmptyBuckets() {
+ while (*Ptr == nullptr || *Ptr == StringMapImpl::getTombstoneVal())
+ ++Ptr;
+ }
+};
+
+template<typename ValueTy>
+class StringMapIterator : public StringMapConstIterator<ValueTy> {
+public:
+ StringMapIterator() {}
+ explicit StringMapIterator(StringMapEntryBase **Bucket,
+ bool NoAdvance = false)
+ : StringMapConstIterator<ValueTy>(Bucket, NoAdvance) {
+ }
+ StringMapEntry<ValueTy> &operator*() const {
+ return *static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
+ }
+ StringMapEntry<ValueTy> *operator->() const {
+ return static_cast<StringMapEntry<ValueTy>*>(*this->Ptr);
+ }
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h
new file mode 100644
index 0000000..350032b
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/StringRef.h
@@ -0,0 +1,606 @@
+//===--- StringRef.h - Constant String Reference Wrapper --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGREF_H
+#define LLVM_ADT_STRINGREF_H
+
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <utility>
+
+namespace llvm {
+ template <typename T>
+ class SmallVectorImpl;
+ class APInt;
+ class hash_code;
+ class StringRef;
+
+ /// Helper functions for StringRef::getAsInteger.
+ bool getAsUnsignedInteger(StringRef Str, unsigned Radix,
+ unsigned long long &Result);
+
+ bool getAsSignedInteger(StringRef Str, unsigned Radix, long long &Result);
+
+ /// StringRef - Represent a constant reference to a string, i.e. a character
+ /// array and a length, which need not be null terminated.
+ ///
+ /// This class does not own the string data, it is expected to be used in
+ /// situations where the character data resides in some other buffer, whose
+ /// lifetime extends past that of the StringRef. For this reason, it is not in
+ /// general safe to store a StringRef.
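+  ///
+  /// An illustrative lifetime hazard (editor's sketch):
+  /// \code
+  ///   StringRef S;
+  ///   {
+  ///     std::string Tmp = "hello";
+  ///     S = Tmp; // S points into Tmp's buffer
+  ///   }          // Tmp is destroyed; S now dangles
+  /// \endcode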
+ class StringRef {
+ public:
+ typedef const char *iterator;
+ typedef const char *const_iterator;
+ static const size_t npos = ~size_t(0);
+ typedef size_t size_type;
+
+ private:
+ /// The start of the string, in an external buffer.
+ const char *Data;
+
+ /// The length of the string.
+ size_t Length;
+
+ // Workaround memcmp issue with null pointers (undefined behavior)
+ // by providing a specialized version
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
+ if (Length == 0) { return 0; }
+ return ::memcmp(Lhs,Rhs,Length);
+ }
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty string ref.
+ /*implicit*/ StringRef() : Data(nullptr), Length(0) {}
+
+ /// Construct a string ref from a cstring.
+ /*implicit*/ StringRef(const char *Str)
+ : Data(Str) {
+ assert(Str && "StringRef cannot be built from a NULL argument");
+ Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior
+ }
+
+ /// Construct a string ref from a pointer and length.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ /*implicit*/ StringRef(const char *data, size_t length)
+ : Data(data), Length(length) {
+ assert((data || length == 0) &&
+ "StringRef cannot be built from a NULL argument with non-null length");
+ }
+
+ /// Construct a string ref from an std::string.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ /*implicit*/ StringRef(const std::string &Str)
+ : Data(Str.data()), Length(Str.length()) {}
+
+ /// @}
+ /// @name Iterators
+ /// @{
+
+ iterator begin() const { return Data; }
+
+ iterator end() const { return Data + Length; }
+
+ const unsigned char *bytes_begin() const {
+ return reinterpret_cast<const unsigned char *>(begin());
+ }
+ const unsigned char *bytes_end() const {
+ return reinterpret_cast<const unsigned char *>(end());
+ }
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+ /// data - Get a pointer to the start of the string (which may not be null
+ /// terminated).
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ const char *data() const { return Data; }
+
+ /// empty - Check if the string is empty.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool empty() const { return Length == 0; }
+
+ /// size - Get the string size.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ size_t size() const { return Length; }
+
+ /// front - Get the first character in the string.
+ char front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last character in the string.
+ char back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+ // copy - Allocate copy in Allocator and return StringRef to it.
+ template <typename Allocator> StringRef copy(Allocator &A) const {
+ char *S = A.template Allocate<char>(Length);
+ std::copy(begin(), end(), S);
+ return StringRef(S, Length);
+ }
+
+    /// equals - Check for string equality. This is more efficient than
+    /// compare() when the relative ordering of unequal strings isn't needed.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool equals(StringRef RHS) const {
+ return (Length == RHS.Length &&
+ compareMemory(Data, RHS.Data, RHS.Length) == 0);
+ }
+
+ /// equals_lower - Check for string equality, ignoring case.
+ bool equals_lower(StringRef RHS) const {
+ return Length == RHS.Length && compare_lower(RHS) == 0;
+ }
+
+ /// compare - Compare two strings; the result is -1, 0, or 1 if this string
+ /// is lexicographically less than, equal to, or greater than the \p RHS.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ int compare(StringRef RHS) const {
+ // Check the prefix for a mismatch.
+ if (int Res = compareMemory(Data, RHS.Data, std::min(Length, RHS.Length)))
+ return Res < 0 ? -1 : 1;
+
+ // Otherwise the prefixes match, so we only need to check the lengths.
+ if (Length == RHS.Length)
+ return 0;
+ return Length < RHS.Length ? -1 : 1;
+ }
+
+ /// compare_lower - Compare two strings, ignoring case.
+ int compare_lower(StringRef RHS) const;
+
+ /// compare_numeric - Compare two strings, treating sequences of digits as
+ /// numbers.
+ int compare_numeric(StringRef RHS) const;
+
+ /// \brief Determine the edit distance between this string and another
+ /// string.
+ ///
+ /// \param Other the string to compare this string against.
+ ///
+ /// \param AllowReplacements whether to allow character
+ /// replacements (change one character into another) as a single
+ /// operation, rather than as two operations (an insertion and a
+ /// removal).
+ ///
+ /// \param MaxEditDistance If non-zero, the maximum edit distance that
+ /// this routine is allowed to compute. If the edit distance will exceed
+ /// that maximum, returns \c MaxEditDistance+1.
+ ///
+ /// \returns the minimum number of character insertions, removals,
+ /// or (if \p AllowReplacements is \c true) replacements needed to
+ /// transform one of the given strings into the other. If zero,
+ /// the strings are identical.
+ unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0) const;
+
+ /// str - Get the contents as an std::string.
+ std::string str() const {
+ if (!Data) return std::string();
+ return std::string(Data, Length);
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+
+ char operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// @}
+ /// @name Type Conversions
+ /// @{
+
+ operator std::string() const {
+ return str();
+ }
+
+ /// @}
+ /// @name String Predicates
+ /// @{
+
+ /// Check if this string starts with the given \p Prefix.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool startswith(StringRef Prefix) const {
+ return Length >= Prefix.Length &&
+ compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
+ }
+
+ /// Check if this string starts with the given \p Prefix, ignoring case.
+ bool startswith_lower(StringRef Prefix) const;
+
+ /// Check if this string ends with the given \p Suffix.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ bool endswith(StringRef Suffix) const {
+ return Length >= Suffix.Length &&
+ compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
+ }
+
+ /// Check if this string ends with the given \p Suffix, ignoring case.
+ bool endswith_lower(StringRef Suffix) const;
+
+ /// @}
+ /// @name String Searching
+ /// @{
+
+ /// Search for the first character \p C in the string.
+ ///
+ /// \returns The index of the first occurrence of \p C, or npos if not
+ /// found.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ size_t find(char C, size_t From = 0) const {
+ size_t FindBegin = std::min(From, Length);
+ if (FindBegin < Length) { // Avoid calling memchr with nullptr.
+ // Just forward to memchr, which is faster than a hand-rolled loop.
+ if (const void *P = ::memchr(Data + FindBegin, C, Length - FindBegin))
+ return static_cast<const char *>(P) - Data;
+ }
+ return npos;
+ }
+
+ /// Search for the first string \p Str in the string.
+ ///
+ /// \returns The index of the first occurrence of \p Str, or npos if not
+ /// found.
+ size_t find(StringRef Str, size_t From = 0) const;
+
+ /// Search for the last character \p C in the string.
+ ///
+ /// \returns The index of the last occurrence of \p C, or npos if not
+ /// found.
+ size_t rfind(char C, size_t From = npos) const {
+ From = std::min(From, Length);
+ size_t i = From;
+ while (i != 0) {
+ --i;
+ if (Data[i] == C)
+ return i;
+ }
+ return npos;
+ }
+
+ /// Search for the last string \p Str in the string.
+ ///
+ /// \returns The index of the last occurrence of \p Str, or npos if not
+ /// found.
+ size_t rfind(StringRef Str) const;
+
+ /// Find the first character in the string that is \p C, or npos if not
+ /// found. Same as find.
+ size_t find_first_of(char C, size_t From = 0) const {
+ return find(C, From);
+ }
+
+ /// Find the first character in the string that is in \p Chars, or npos if
+ /// not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_of(StringRef Chars, size_t From = 0) const;
+
+    /// Find the first character in the string that is not \p C, or npos if
+    /// not found.
+ size_t find_first_not_of(char C, size_t From = 0) const;
+
+ /// Find the first character in the string that is not in the string
+ /// \p Chars, or npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_first_not_of(StringRef Chars, size_t From = 0) const;
+
+ /// Find the last character in the string that is \p C, or npos if not
+ /// found.
+ size_t find_last_of(char C, size_t From = npos) const {
+ return rfind(C, From);
+ }
+
+    /// Find the last character in the string that is in \p Chars, or npos if not
+ /// found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_last_of(StringRef Chars, size_t From = npos) const;
+
+ /// Find the last character in the string that is not \p C, or npos if not
+ /// found.
+ size_t find_last_not_of(char C, size_t From = npos) const;
+
+ /// Find the last character in the string that is not in \p Chars, or
+ /// npos if not found.
+ ///
+ /// Complexity: O(size() + Chars.size())
+ size_t find_last_not_of(StringRef Chars, size_t From = npos) const;
+
+ /// @}
+ /// @name Helpful Algorithms
+ /// @{
+
+ /// Return the number of occurrences of \p C in the string.
+ size_t count(char C) const {
+ size_t Count = 0;
+ for (size_t i = 0, e = Length; i != e; ++i)
+ if (Data[i] == C)
+ ++Count;
+ return Count;
+ }
+
+ /// Return the number of non-overlapped occurrences of \p Str in
+ /// the string.
+ size_t count(StringRef Str) const;
+
+ /// Parse the current string as an integer of the specified radix. If
+ /// \p Radix is specified as zero, this does radix autosensing using
+ /// extended C rules: 0 is octal, 0x is hex, 0b is binary.
+ ///
+ /// If the string is invalid or if only a subset of the string is valid,
+ /// this returns true to signify the error. The string is considered
+ /// erroneous if empty or if it overflows T.
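+    ///
+    /// For example (editor's illustration):
+    /// \code
+    ///   unsigned V;
+    ///   bool Err = StringRef("0x1F").getAsInteger(0, V); // false; V == 31
+    /// \endcode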
+ template <typename T>
+ typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
+ getAsInteger(unsigned Radix, T &Result) const {
+ long long LLVal;
+ if (getAsSignedInteger(*this, Radix, LLVal) ||
+ static_cast<T>(LLVal) != LLVal)
+ return true;
+ Result = LLVal;
+ return false;
+ }
+
+ template <typename T>
+ typename std::enable_if<!std::numeric_limits<T>::is_signed, bool>::type
+ getAsInteger(unsigned Radix, T &Result) const {
+ unsigned long long ULLVal;
+ // The additional cast to unsigned long long is required to avoid the
+ // Visual C++ warning C4805: '!=' : unsafe mix of type 'bool' and type
+ // 'unsigned __int64' when instantiating getAsInteger with T = bool.
+ if (getAsUnsignedInteger(*this, Radix, ULLVal) ||
+ static_cast<unsigned long long>(static_cast<T>(ULLVal)) != ULLVal)
+ return true;
+ Result = ULLVal;
+ return false;
+ }
+
+ /// Parse the current string as an integer of the specified \p Radix, or of
+ /// an autosensed radix if the \p Radix given is 0. The current value in
+ /// \p Result is discarded, and the storage is changed to be wide enough to
+ /// store the parsed integer.
+ ///
+ /// \returns true if the string does not solely consist of a valid
+ /// non-empty number in the appropriate base.
+ ///
+ /// APInt::fromString is superficially similar but assumes the
+ /// string is well-formed in the given radix.
+ bool getAsInteger(unsigned Radix, APInt &Result) const;
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+ // Convert the given ASCII string to lowercase.
+ std::string lower() const;
+
+ /// Convert the given ASCII string to uppercase.
+ std::string upper() const;
+
+ /// @}
+ /// @name Substring Operations
+ /// @{
+
+ /// Return a reference to the substring from [Start, Start + N).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+    /// \param N The number of characters to include in the substring. If N
+ /// exceeds the number of characters remaining in the string, the string
+ /// suffix (starting with \p Start) will be returned.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef substr(size_t Start, size_t N = npos) const {
+ Start = std::min(Start, Length);
+ return StringRef(Data + Start, std::min(N, Length - Start));
+ }
+
+ /// Return a StringRef equal to 'this' but with the first \p N elements
+ /// dropped.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef drop_front(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(N);
+ }
+
+ /// Return a StringRef equal to 'this' but with the last \p N elements
+ /// dropped.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef drop_back(size_t N = 1) const {
+ assert(size() >= N && "Dropping more elements than exist");
+ return substr(0, size()-N);
+ }
+
+ /// Return a reference to the substring from [Start, End).
+ ///
+ /// \param Start The index of the starting character in the substring; if
+ /// the index is npos or greater than the length of the string then the
+ /// empty substring will be returned.
+ ///
+ /// \param End The index following the last character to include in the
+ /// substring. If this is npos, or less than \p Start, or exceeds the
+ /// number of characters remaining in the string, the string suffix
+ /// (starting with \p Start) will be returned.
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringRef slice(size_t Start, size_t End) const {
+ Start = std::min(Start, Length);
+ End = std::min(std::max(Start, End), Length);
+ return StringRef(Data + Start, End - Start);
+ }
+
+ /// Split into two substrings around the first occurrence of a separator
+ /// character.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// maximal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator The character to split on.
+ /// \returns The split substrings.
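+    ///
+    /// For example (editor's illustration):
+    ///   StringRef("a,b,c").split(',') // yields ("a", "b,c")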
+ std::pair<StringRef, StringRef> split(char Separator) const {
+ size_t Idx = find(Separator);
+ if (Idx == npos)
+ return std::make_pair(*this, StringRef());
+ return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+ }
+
+ /// Split into two substrings around the first occurrence of a separator
+ /// string.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// maximal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator - The string to split on.
+ /// \return - The split substrings.
+ std::pair<StringRef, StringRef> split(StringRef Separator) const {
+ size_t Idx = find(Separator);
+ if (Idx == npos)
+ return std::make_pair(*this, StringRef());
+ return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
+ }
+
+ /// Split into substrings around the occurrences of a separator string.
+ ///
+ /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+ /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+ /// elements are added to A.
+ /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+ ///
+ /// \param A - Where to put the substrings.
+ /// \param Separator - The string to split on.
+ /// \param MaxSplit - The maximum number of times the string is split.
+ /// \param KeepEmpty - True if empty substring should be added.
+ void split(SmallVectorImpl<StringRef> &A,
+ StringRef Separator, int MaxSplit = -1,
+ bool KeepEmpty = true) const;
+
+ /// Split into substrings around the occurrences of a separator character.
+ ///
+ /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
+ /// \p MaxSplit splits are done and consequently <= \p MaxSplit + 1
+ /// elements are added to A.
+ /// If \p KeepEmpty is false, empty strings are not added to \p A. They
+    /// still count when considering \p MaxSplit.
+    /// A useful invariant is that
+    /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true.
+ ///
+ /// \param A - Where to put the substrings.
+ /// \param Separator - The string to split on.
+ /// \param MaxSplit - The maximum number of times the string is split.
+ /// \param KeepEmpty - True if empty substring should be added.
+ void split(SmallVectorImpl<StringRef> &A, char Separator, int MaxSplit = -1,
+ bool KeepEmpty = true) const;
+
+ /// Split into two substrings around the last occurrence of a separator
+ /// character.
+ ///
+ /// If \p Separator is in the string, then the result is a pair (LHS, RHS)
+ /// such that (*this == LHS + Separator + RHS) is true and RHS is
+ /// minimal. If \p Separator is not in the string, then the result is a
+ /// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
+ ///
+ /// \param Separator - The character to split on.
+ /// \return - The split substrings.
+ std::pair<StringRef, StringRef> rsplit(char Separator) const {
+ size_t Idx = rfind(Separator);
+ if (Idx == npos)
+ return std::make_pair(*this, StringRef());
+ return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left removed.
+ StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_front(std::min(Length, find_first_not_of(Chars)));
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the right removed.
+ StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+ }
+
+ /// Return string with consecutive characters in \p Chars starting from
+ /// the left and right removed.
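+    ///
+    /// For illustration only (hypothetical value):
+    /// \code
+    ///   StringRef("  key \n").trim(); // == "key"
+    /// \endcode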
+ StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+ return ltrim(Chars).rtrim(Chars);
+ }
+
+ /// @}
+ };
+
+ /// @name StringRef Comparison Operators
+ /// @{
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ inline bool operator==(StringRef LHS, StringRef RHS) {
+ return LHS.equals(RHS);
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ inline bool operator!=(StringRef LHS, StringRef RHS) {
+ return !(LHS == RHS);
+ }
+
+ inline bool operator<(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) == -1;
+ }
+
+ inline bool operator<=(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) != 1;
+ }
+
+ inline bool operator>(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) == 1;
+ }
+
+ inline bool operator>=(StringRef LHS, StringRef RHS) {
+ return LHS.compare(RHS) != -1;
+ }
+
+ inline std::string &operator+=(std::string &buffer, StringRef string) {
+ return buffer.append(string.data(), string.size());
+ }
+
+ /// @}
+
+ /// \brief Compute a hash_code for a StringRef.
+ hash_code hash_value(StringRef S);
+
+ // StringRefs can be treated like a POD type.
+ template <typename T> struct isPodLike;
+ template <> struct isPodLike<StringRef> { static const bool value = true; };
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/StringSet.h b/contrib/llvm/include/llvm/ADT/StringSet.h
new file mode 100644
index 0000000..08626dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/StringSet.h
@@ -0,0 +1,39 @@
+//===--- StringSet.h - A set-like wrapper for StringMap ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open
+// Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// StringSet - A set-like wrapper for the StringMap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_STRINGSET_H
+#define LLVM_ADT_STRINGSET_H
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+ /// StringSet - A wrapper for StringMap that provides set-like functionality.
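+  ///
+  /// For illustration only, a usage sketch with hypothetical values (not part
+  /// of the original header):
+  /// \code
+  ///   StringSet<> Days = {"mon", "tue"};
+  ///   bool Inserted = Days.insert("wed").second; // true: newly added
+  ///   Days.insert("mon").second;                 // false: already present
+  /// \endcode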
+ template <class AllocatorTy = llvm::MallocAllocator>
+ class StringSet : public llvm::StringMap<char, AllocatorTy> {
+ typedef llvm::StringMap<char, AllocatorTy> base;
+ public:
+ StringSet() = default;
+ StringSet(std::initializer_list<StringRef> S) {
+ for (StringRef X : S)
+ insert(X);
+ }
+
+ std::pair<typename base::iterator, bool> insert(StringRef Key) {
+ assert(!Key.empty());
+ return base::insert(std::make_pair(Key, '\0'));
+ }
+ };
+}
+
+#endif // LLVM_ADT_STRINGSET_H
diff --git a/contrib/llvm/include/llvm/ADT/StringSwitch.h b/contrib/llvm/include/llvm/ADT/StringSwitch.h
new file mode 100644
index 0000000..42b0fc4
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/StringSwitch.h
@@ -0,0 +1,166 @@
+//===--- StringSwitch.h - Switch-on-literal-string Construct --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StringSwitch template, which mimics a switch()
+// statement whose cases are string literals.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ADT_STRINGSWITCH_H
+#define LLVM_ADT_STRINGSWITCH_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+/// \brief A switch()-like statement whose cases are string literals.
+///
+/// The StringSwitch class is a simple form of a switch() statement that
+/// determines whether the given string matches one of the given string
+/// literals. The template type parameter \p T is the type of the value that
+/// will be returned from the string-switch expression. For example,
+/// the following code switches on the name of a color in \c argv[i]:
+///
+/// \code
+/// Color color = StringSwitch<Color>(argv[i])
+/// .Case("red", Red)
+/// .Case("orange", Orange)
+/// .Case("yellow", Yellow)
+/// .Case("green", Green)
+/// .Case("blue", Blue)
+/// .Case("indigo", Indigo)
+/// .Cases("violet", "purple", Violet)
+/// .Default(UnknownColor);
+/// \endcode
+template<typename T, typename R = T>
+class StringSwitch {
+ /// \brief The string we are matching.
+ StringRef Str;
+
+  /// \brief A pointer to the result of this switch statement, once known;
+  /// null before that.
+ const T *Result;
+
+public:
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ explicit StringSwitch(StringRef S)
+ : Str(S), Result(nullptr) { }
+
+ template<unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& Case(const char (&S)[N], const T& Value) {
+ if (!Result && N-1 == Str.size() &&
+ (std::memcmp(S, Str.data(), N-1) == 0)) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& EndsWith(const char (&S)[N], const T &Value) {
+ if (!Result && Str.size() >= N-1 &&
+ std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& StartsWith(const char (&S)[N], const T &Value) {
+ if (!Result && Str.size() >= N-1 &&
+ std::memcmp(S, Str.data(), N-1) == 0) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N0, unsigned N1>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const T& Value) {
+ if (!Result && (
+ (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
+ (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0))) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N0, unsigned N1, unsigned N2>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const T& Value) {
+ if (!Result && (
+ (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
+ (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
+ (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0))) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N0, unsigned N1, unsigned N2, unsigned N3>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const T& Value) {
+ if (!Result && (
+ (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
+ (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
+ (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
+ (N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0))) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ template<unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ StringSwitch& Cases(const char (&S0)[N0], const char (&S1)[N1],
+ const char (&S2)[N2], const char (&S3)[N3],
+ const char (&S4)[N4], const T& Value) {
+ if (!Result && (
+ (N0-1 == Str.size() && std::memcmp(S0, Str.data(), N0-1) == 0) ||
+ (N1-1 == Str.size() && std::memcmp(S1, Str.data(), N1-1) == 0) ||
+ (N2-1 == Str.size() && std::memcmp(S2, Str.data(), N2-1) == 0) ||
+ (N3-1 == Str.size() && std::memcmp(S3, Str.data(), N3-1) == 0) ||
+ (N4-1 == Str.size() && std::memcmp(S4, Str.data(), N4-1) == 0))) {
+ Result = &Value;
+ }
+
+ return *this;
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ R Default(const T& Value) const {
+ if (Result)
+ return *Result;
+
+ return Value;
+ }
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE
+ operator R() const {
+ assert(Result && "Fell off the end of a string-switch");
+ return *Result;
+ }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_STRINGSWITCH_H
diff --git a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
new file mode 100644
index 0000000..487aa46
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h
@@ -0,0 +1,306 @@
+//===- llvm/ADT/TinyPtrVector.h - 'Normally tiny' vectors -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TINYPTRVECTOR_H
+#define LLVM_ADT_TINYPTRVECTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+/// TinyPtrVector - This class is specialized for cases where there are
+/// normally 0 or 1 element in a vector, but is general enough to go beyond that
+/// when required.
+///
+/// NOTE: This container doesn't allow you to store a null pointer into it.
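+///
+/// For illustration only, a usage sketch with hypothetical variables X and Y
+/// (not part of the original header):
+/// \code
+///   int X, Y;
+///   TinyPtrVector<int *> Vec; // empty, no heap allocation
+///   Vec.push_back(&X);        // one element still fits inline
+///   Vec.push_back(&Y);        // now spills into a heap SmallVector
+/// \endcode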
+///
+template <typename EltTy>
+class TinyPtrVector {
+public:
+ typedef llvm::SmallVector<EltTy, 4> VecTy;
+ typedef typename VecTy::value_type value_type;
+ typedef llvm::PointerUnion<EltTy, VecTy *> PtrUnion;
+
+private:
+ PtrUnion Val;
+
+public:
+ TinyPtrVector() {}
+ ~TinyPtrVector() {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ delete V;
+ }
+
+ TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ Val = new VecTy(*V);
+ }
+ TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // Try to squeeze into the single slot. If it won't fit, allocate a copied
+ // vector.
+ if (Val.template is<EltTy>()) {
+ if (RHS.size() == 1)
+ Val = RHS.front();
+ else
+ Val = new VecTy(*RHS.Val.template get<VecTy*>());
+ return *this;
+ }
+
+ // If we have a full vector allocated, try to re-use it.
+ if (RHS.Val.template is<EltTy>()) {
+ Val.template get<VecTy*>()->clear();
+ Val.template get<VecTy*>()->push_back(RHS.front());
+ } else {
+ *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+ }
+ return *this;
+ }
+
+ TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+ RHS.Val = (EltTy)nullptr;
+ }
+ TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // If this vector has been allocated on the heap, re-use it if cheap. If it
+ // would require more copying, just delete it and we'll steal the other
+ // side.
+ if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+ if (RHS.Val.template is<EltTy>()) {
+ V->clear();
+ V->push_back(RHS.front());
+ return *this;
+ }
+ delete V;
+ }
+
+ Val = RHS.Val;
+ RHS.Val = (EltTy)nullptr;
+ return *this;
+ }
+
+ /// Constructor from an ArrayRef.
+ ///
+ /// This also is a constructor for individual array elements due to the single
+ /// element constructor for ArrayRef.
+ explicit TinyPtrVector(ArrayRef<EltTy> Elts)
+ : Val(Elts.size() == 1 ? PtrUnion(Elts[0])
+ : PtrUnion(new VecTy(Elts.begin(), Elts.end()))) {}
+
+ // implicit conversion operator to ArrayRef.
+ operator ArrayRef<EltTy>() const {
+ if (Val.isNull())
+ return None;
+ if (Val.template is<EltTy>())
+ return *Val.getAddrOfPtr1();
+ return *Val.template get<VecTy*>();
+ }
+
+ // implicit conversion operator to MutableArrayRef.
+ operator MutableArrayRef<EltTy>() {
+ if (Val.isNull())
+ return None;
+ if (Val.template is<EltTy>())
+ return *Val.getAddrOfPtr1();
+ return *Val.template get<VecTy*>();
+ }
+
+ bool empty() const {
+ // This vector can be empty if it contains no element, or if it
+ // contains a pointer to an empty vector.
+ if (Val.isNull()) return true;
+ if (VecTy *Vec = Val.template dyn_cast<VecTy*>())
+ return Vec->empty();
+ return false;
+ }
+
+ unsigned size() const {
+ if (empty())
+ return 0;
+ if (Val.template is<EltTy>())
+ return 1;
+ return Val.template get<VecTy*>()->size();
+ }
+
+ typedef const EltTy *const_iterator;
+ typedef EltTy *iterator;
+
+ iterator begin() {
+ if (Val.template is<EltTy>())
+ return Val.getAddrOfPtr1();
+
+ return Val.template get<VecTy *>()->begin();
+ }
+ iterator end() {
+ if (Val.template is<EltTy>())
+ return begin() + (Val.isNull() ? 0 : 1);
+
+ return Val.template get<VecTy *>()->end();
+ }
+
+ const_iterator begin() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->begin();
+ }
+
+ const_iterator end() const {
+ return (const_iterator)const_cast<TinyPtrVector*>(this)->end();
+ }
+
+ EltTy operator[](unsigned i) const {
+ assert(!Val.isNull() && "can't index into an empty vector");
+ if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ assert(i == 0 && "tinyvector index out of range");
+ return V;
+ }
+
+ assert(i < Val.template get<VecTy*>()->size() &&
+ "tinyvector index out of range");
+ return (*Val.template get<VecTy*>())[i];
+ }
+
+ EltTy front() const {
+ assert(!empty() && "vector empty");
+ if (EltTy V = Val.template dyn_cast<EltTy>())
+ return V;
+ return Val.template get<VecTy*>()->front();
+ }
+
+ EltTy back() const {
+ assert(!empty() && "vector empty");
+ if (EltTy V = Val.template dyn_cast<EltTy>())
+ return V;
+ return Val.template get<VecTy*>()->back();
+ }
+
+ void push_back(EltTy NewVal) {
+ assert(NewVal && "Can't add a null value");
+
+ // If we have nothing, add something.
+ if (Val.isNull()) {
+ Val = NewVal;
+ return;
+ }
+
+ // If we have a single value, convert to a vector.
+ if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+
+ // Add the new value, we know we have a vector.
+ Val.template get<VecTy*>()->push_back(NewVal);
+ }
+
+ void pop_back() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>())
+ Val = (EltTy)nullptr;
+ else if (VecTy *Vec = Val.template get<VecTy*>())
+ Vec->pop_back();
+ }
+
+ void clear() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>()) {
+ Val = (EltTy)nullptr;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ // If we have a vector form, just clear it.
+ Vec->clear();
+ }
+ // Otherwise, we're already empty.
+ }
+
+ iterator erase(iterator I) {
+ assert(I >= begin() && "Iterator to erase is out of bounds.");
+ assert(I < end() && "Erasing at past-the-end iterator.");
+
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>()) {
+ if (I == begin())
+ Val = (EltTy)nullptr;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ // multiple items in a vector; just do the erase, there is no
+ // benefit to collapsing back to a pointer
+ return Vec->erase(I);
+ }
+ return end();
+ }
+
+ iterator erase(iterator S, iterator E) {
+ assert(S >= begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= end() && "Trying to erase past the end.");
+
+ if (Val.template is<EltTy>()) {
+ if (S == begin() && S != E)
+ Val = (EltTy)nullptr;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ return Vec->erase(S, E);
+ }
+ return end();
+ }
+
+ iterator insert(iterator I, const EltTy &Elt) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (I == end()) {
+ push_back(Elt);
+ return std::prev(end());
+ }
+ assert(!Val.isNull() && "Null value with non-end insert iterator.");
+ if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ assert(I == begin());
+ Val = Elt;
+ push_back(V);
+ return begin();
+ }
+
+ return Val.template get<VecTy*>()->insert(I, Elt);
+ }
+
+ template<typename ItTy>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (From == To)
+ return I;
+
+ // If we have a single value, convert to a vector.
+ ptrdiff_t Offset = I - begin();
+ if (Val.isNull()) {
+ if (std::next(From) == To) {
+ Val = *From;
+ return begin();
+ }
+
+ Val = new VecTy();
+ } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+ return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
+ }
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/Triple.h b/contrib/llvm/include/llvm/ADT/Triple.h
new file mode 100644
index 0000000..e01db0a
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Triple.h
@@ -0,0 +1,674 @@
+//===-- llvm/ADT/Triple.h - Target triple helper class ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TRIPLE_H
+#define LLVM_ADT_TRIPLE_H
+
+#include "llvm/ADT/Twine.h"
+
+// Some system headers or GCC predefined macros conflict with identifiers in
+// this file. Undefine them here.
+#undef NetBSD
+#undef mips
+#undef sparc
+
+namespace llvm {
+
+/// Triple - Helper class for working with autoconf configuration names. For
+/// historical reasons, we also call these 'triples' (they used to contain
+/// exactly three fields).
+///
+/// Configuration names are strings in the canonical form:
+/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM
+/// or
+/// ARCHITECTURE-VENDOR-OPERATING_SYSTEM-ENVIRONMENT
+///
+/// This class is used for clients which want to support arbitrary
+/// configuration names, but also want to implement certain special
+/// behavior for particular configurations. This class isolates the mapping
+/// from the components of the configuration name to well known IDs.
+///
+/// At its core the Triple class is designed to be a wrapper for a triple
+/// string; the constructor does not change or normalize the triple string.
+/// Clients that need to handle the non-canonical triples that users often
+/// specify should use the normalize method.
+///
+/// See autoconf/config.guess for a glimpse into what configuration names
+/// look like in practice.
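+///
+/// For illustration only, a usage sketch with a hypothetical triple string
+/// (not part of the original header):
+/// \code
+///   Triple T("x86_64-apple-darwin15");
+///   T.getArch();    // Triple::x86_64
+///   T.isOSDarwin(); // true
+/// \endcode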
+class Triple {
+public:
+ enum ArchType {
+ UnknownArch,
+
+ arm, // ARM (little endian): arm, armv.*, xscale
+ armeb, // ARM (big endian): armeb
+ aarch64, // AArch64 (little endian): aarch64
+ aarch64_be, // AArch64 (big endian): aarch64_be
+ avr, // AVR: Atmel AVR microcontroller
+ bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
+ bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
+ hexagon, // Hexagon: hexagon
+ mips, // MIPS: mips, mipsallegrex
+ mipsel, // MIPSEL: mipsel, mipsallegrexel
+ mips64, // MIPS64: mips64
+ mips64el, // MIPS64EL: mips64el
+ msp430, // MSP430: msp430
+ ppc, // PPC: powerpc
+ ppc64, // PPC64: powerpc64, ppu
+ ppc64le, // PPC64LE: powerpc64le
+ r600, // R600: AMD GPUs HD2XXX - HD6XXX
+ amdgcn, // AMDGCN: AMD GCN GPUs
+ sparc, // Sparc: sparc
+ sparcv9, // Sparcv9: Sparcv9
+ sparcel, // Sparc: (endianness = little). NB: 'Sparcle' is a CPU variant
+ systemz, // SystemZ: s390x
+ tce, // TCE (http://tce.cs.tut.fi/): tce
+ thumb, // Thumb (little endian): thumb, thumbv.*
+ thumbeb, // Thumb (big endian): thumbeb
+ x86, // X86: i[3-9]86
+ x86_64, // X86-64: amd64, x86_64
+ xcore, // XCore: xcore
+ nvptx, // NVPTX: 32-bit
+ nvptx64, // NVPTX: 64-bit
+ le32, // le32: generic little-endian 32-bit CPU (PNaCl)
+ le64, // le64: generic little-endian 64-bit CPU (PNaCl)
+ amdil, // AMDIL
+ amdil64, // AMDIL with 64-bit pointers
+ hsail, // AMD HSAIL
+ hsail64, // AMD HSAIL with 64-bit pointers
+ spir, // SPIR: standard portable IR for OpenCL 32-bit version
+ spir64, // SPIR: standard portable IR for OpenCL 64-bit version
+ kalimba, // Kalimba: generic kalimba
+ shave, // SHAVE: Movidius vector VLIW processors
+ wasm32, // WebAssembly with 32-bit pointers
+ wasm64, // WebAssembly with 64-bit pointers
+ LastArchType = wasm64
+ };
+ enum SubArchType {
+ NoSubArch,
+
+ ARMSubArch_v8_2a,
+ ARMSubArch_v8_1a,
+ ARMSubArch_v8,
+ ARMSubArch_v7,
+ ARMSubArch_v7em,
+ ARMSubArch_v7m,
+ ARMSubArch_v7s,
+ ARMSubArch_v7k,
+ ARMSubArch_v6,
+ ARMSubArch_v6m,
+ ARMSubArch_v6k,
+ ARMSubArch_v6t2,
+ ARMSubArch_v5,
+ ARMSubArch_v5te,
+ ARMSubArch_v4t,
+
+ KalimbaSubArch_v3,
+ KalimbaSubArch_v4,
+ KalimbaSubArch_v5
+ };
+ enum VendorType {
+ UnknownVendor,
+
+ Apple,
+ PC,
+ SCEI,
+ BGP,
+ BGQ,
+ Freescale,
+ IBM,
+ ImaginationTechnologies,
+ MipsTechnologies,
+ NVIDIA,
+ CSR,
+ Myriad,
+ LastVendorType = Myriad
+ };
+ enum OSType {
+ UnknownOS,
+
+ CloudABI,
+ Darwin,
+ DragonFly,
+ FreeBSD,
+ IOS,
+ KFreeBSD,
+ Linux,
+ Lv2, // PS3
+ MacOSX,
+ NetBSD,
+ OpenBSD,
+ Solaris,
+ Win32,
+ Haiku,
+ Minix,
+ RTEMS,
+ NaCl, // Native Client
+ CNK, // BG/P Compute-Node Kernel
+ Bitrig,
+ AIX,
+ CUDA, // NVIDIA CUDA
+ NVCL, // NVIDIA OpenCL
+ AMDHSA, // AMD HSA Runtime
+ PS4,
+ ELFIAMCU,
+ TvOS, // Apple tvOS
+ WatchOS, // Apple watchOS
+ LastOSType = WatchOS
+ };
+ enum EnvironmentType {
+ UnknownEnvironment,
+
+ GNU,
+ GNUEABI,
+ GNUEABIHF,
+ GNUX32,
+ CODE16,
+ EABI,
+ EABIHF,
+ Android,
+
+ MSVC,
+ Itanium,
+ Cygnus,
+ AMDOpenCL,
+ CoreCLR,
+ LastEnvironmentType = CoreCLR
+ };
+ enum ObjectFormatType {
+ UnknownObjectFormat,
+
+ COFF,
+ ELF,
+ MachO,
+ };
+
+private:
+ std::string Data;
+
+ /// The parsed arch type.
+ ArchType Arch;
+
+ /// The parsed subarchitecture type.
+ SubArchType SubArch;
+
+ /// The parsed vendor type.
+ VendorType Vendor;
+
+ /// The parsed OS type.
+ OSType OS;
+
+ /// The parsed Environment type.
+ EnvironmentType Environment;
+
+ /// The object format type.
+ ObjectFormatType ObjectFormat;
+
+public:
+ /// @name Constructors
+ /// @{
+
+ /// Default constructor is the same as an empty string and leaves all
+ /// triple fields unknown.
+  Triple() : Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
+             ObjectFormat() {}
+
+ explicit Triple(const Twine &Str);
+ Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
+ Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr,
+ const Twine &EnvironmentStr);
+
+ bool operator==(const Triple &Other) const {
+ return Arch == Other.Arch && SubArch == Other.SubArch &&
+ Vendor == Other.Vendor && OS == Other.OS &&
+ Environment == Other.Environment &&
+ ObjectFormat == Other.ObjectFormat;
+ }
+
+ /// @}
+ /// @name Normalization
+ /// @{
+
+ /// normalize - Turn an arbitrary machine specification into the canonical
+ /// triple form (or something sensible that the Triple class understands if
+ /// nothing better can reasonably be done). In particular, it handles the
+ /// common case in which otherwise valid components are in the wrong order.
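+  ///
+  /// For illustration only (the exact output is an assumption based on the
+  /// description above):
+  /// \code
+  ///   Triple::normalize("i386-mingw32");
+  ///   // e.g. "i386-pc-windows-gnu", with missing components canonicalized
+  /// \endcode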
+ static std::string normalize(StringRef Str);
+
+ /// Return the normalized form of this triple's string.
+ std::string normalize() const { return normalize(Data); }
+
+ /// @}
+ /// @name Typed Component Access
+ /// @{
+
+ /// getArch - Get the parsed architecture type of this triple.
+ ArchType getArch() const { return Arch; }
+
+ /// getSubArch - get the parsed subarchitecture type for this triple.
+ SubArchType getSubArch() const { return SubArch; }
+
+ /// getVendor - Get the parsed vendor type of this triple.
+ VendorType getVendor() const { return Vendor; }
+
+ /// getOS - Get the parsed operating system type of this triple.
+ OSType getOS() const { return OS; }
+
+ /// hasEnvironment - Does this triple have the optional environment
+ /// (fourth) component?
+ bool hasEnvironment() const {
+ return getEnvironmentName() != "";
+ }
+
+ /// getEnvironment - Get the parsed environment type of this triple.
+ EnvironmentType getEnvironment() const { return Environment; }
+
+  /// Parse the version number from the environment component of the triple,
+  /// if present.
+  ///
+  /// For example, an environment of "gnueabihf1.2.3" would return (1, 2, 3).
+ ///
+ /// If an entry is not defined, it will be returned as 0.
+ void getEnvironmentVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
+ /// getFormat - Get the object format for this triple.
+ ObjectFormatType getObjectFormat() const { return ObjectFormat; }
+
+ /// getOSVersion - Parse the version number from the OS name component of the
+ /// triple, if present.
+ ///
+ /// For example, "fooos1.2.3" would return (1, 2, 3).
+ ///
+ /// If an entry is not defined, it will be returned as 0.
+ void getOSVersion(unsigned &Major, unsigned &Minor, unsigned &Micro) const;
+
+  /// getOSMajorVersion - Return just the major version number; this is
+  /// specialized because it is a common query.
+ unsigned getOSMajorVersion() const {
+ unsigned Maj, Min, Micro;
+ getOSVersion(Maj, Min, Micro);
+ return Maj;
+ }
+
+ /// getMacOSXVersion - Parse the version number as with getOSVersion and then
+ /// translate generic "darwin" versions to the corresponding OS X versions.
+ /// This may also be called with IOS triples but the OS X version number is
+ /// just set to a constant 10.4.0 in that case. Returns true if successful.
+ bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
+ /// getiOSVersion - Parse the version number as with getOSVersion. This should
+ /// only be called with IOS or generic triples.
+ void getiOSVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
+ /// getWatchOSVersion - Parse the version number as with getOSVersion. This
+ /// should only be called with WatchOS or generic triples.
+ void getWatchOSVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
+ /// @}
+ /// @name Direct Component Access
+ /// @{
+
+ const std::string &str() const { return Data; }
+
+ const std::string &getTriple() const { return Data; }
+
+ /// getArchName - Get the architecture (first) component of the
+ /// triple.
+ StringRef getArchName() const;
+
+ /// getVendorName - Get the vendor (second) component of the triple.
+ StringRef getVendorName() const;
+
+ /// getOSName - Get the operating system (third) component of the
+ /// triple.
+ StringRef getOSName() const;
+
+ /// getEnvironmentName - Get the optional environment (fourth)
+ /// component of the triple, or "" if empty.
+ StringRef getEnvironmentName() const;
+
+ /// getOSAndEnvironmentName - Get the operating system and optional
+ /// environment components as a single string (separated by a '-'
+ /// if the environment component is present).
+ StringRef getOSAndEnvironmentName() const;
+
+ /// @}
+ /// @name Convenience Predicates
+ /// @{
+
+ /// Test whether the architecture is 64-bit
+ ///
+  /// Note that this tests for 64-bit pointer width, and nothing else. Note
+  /// that we intentionally expose only three predicates, 64-bit, 32-bit, and
+  /// 16-bit. The inner details of pointer width for particular architectures
+  /// are not summed up in the triple, and so only a coarse grained predicate
+  /// system is provided.
+ bool isArch64Bit() const;
+
+ /// Test whether the architecture is 32-bit
+ ///
+ /// Note that this tests for 32-bit pointer width, and nothing else.
+ bool isArch32Bit() const;
+
+ /// Test whether the architecture is 16-bit
+ ///
+ /// Note that this tests for 16-bit pointer width, and nothing else.
+ bool isArch16Bit() const;
+
+ /// isOSVersionLT - Helper function for doing comparisons against version
+ /// numbers included in the target triple.
+ bool isOSVersionLT(unsigned Major, unsigned Minor = 0,
+ unsigned Micro = 0) const {
+ unsigned LHS[3];
+ getOSVersion(LHS[0], LHS[1], LHS[2]);
+
+ if (LHS[0] != Major)
+ return LHS[0] < Major;
+ if (LHS[1] != Minor)
+ return LHS[1] < Minor;
+ if (LHS[2] != Micro)
+      return LHS[2] < Micro;
+
+ return false;
+ }
+
+ bool isOSVersionLT(const Triple &Other) const {
+ unsigned RHS[3];
+ Other.getOSVersion(RHS[0], RHS[1], RHS[2]);
+ return isOSVersionLT(RHS[0], RHS[1], RHS[2]);
+ }
+
+ /// isMacOSXVersionLT - Comparison function for checking OS X version
+ /// compatibility, which handles supporting skewed version numbering schemes
+ /// used by the "darwin" triples.
+  bool isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
+ unsigned Micro = 0) const {
+ assert(isMacOSX() && "Not an OS X triple!");
+
+ // If this is OS X, expect a sane version number.
+ if (getOS() == Triple::MacOSX)
+ return isOSVersionLT(Major, Minor, Micro);
+
+ // Otherwise, compare to the "Darwin" number.
+ assert(Major == 10 && "Unexpected major version");
+ return isOSVersionLT(Minor + 4, Micro, 0);
+ }
+
+ /// isMacOSX - Is this a Mac OS X triple. For legacy reasons, we support both
+ /// "darwin" and "osx" as OS X triples.
+ bool isMacOSX() const {
+ return getOS() == Triple::Darwin || getOS() == Triple::MacOSX;
+ }
+
+ /// Is this an iOS triple.
+ /// Note: This identifies tvOS as a variant of iOS. If that ever
+ /// changes, i.e., if the two operating systems diverge or their version
+ /// numbers get out of sync, that will need to be changed.
+ /// watchOS has completely different version numbers so it is not included.
+ bool isiOS() const {
+ return getOS() == Triple::IOS || isTvOS();
+ }
+
+ /// Is this an Apple tvOS triple.
+ bool isTvOS() const {
+ return getOS() == Triple::TvOS;
+ }
+
+ /// Is this an Apple watchOS triple.
+ bool isWatchOS() const {
+ return getOS() == Triple::WatchOS;
+ }
+
+ /// isOSDarwin - Is this a "Darwin" OS (OS X, iOS, or watchOS).
+ bool isOSDarwin() const {
+ return isMacOSX() || isiOS() || isWatchOS();
+ }
+
+ bool isOSNetBSD() const {
+ return getOS() == Triple::NetBSD;
+ }
+
+ bool isOSOpenBSD() const {
+ return getOS() == Triple::OpenBSD;
+ }
+
+ bool isOSFreeBSD() const {
+ return getOS() == Triple::FreeBSD;
+ }
+
+ bool isOSDragonFly() const { return getOS() == Triple::DragonFly; }
+
+ bool isOSSolaris() const {
+ return getOS() == Triple::Solaris;
+ }
+
+ bool isOSBitrig() const {
+ return getOS() == Triple::Bitrig;
+ }
+
+ bool isOSIAMCU() const {
+ return getOS() == Triple::ELFIAMCU;
+ }
+
+ /// Checks if the environment could be MSVC.
+ bool isWindowsMSVCEnvironment() const {
+ return getOS() == Triple::Win32 &&
+ (getEnvironment() == Triple::UnknownEnvironment ||
+ getEnvironment() == Triple::MSVC);
+ }
+
+ /// Checks if the environment is MSVC.
+ bool isKnownWindowsMSVCEnvironment() const {
+ return getOS() == Triple::Win32 && getEnvironment() == Triple::MSVC;
+ }
+
+ bool isWindowsCoreCLREnvironment() const {
+ return getOS() == Triple::Win32 && getEnvironment() == Triple::CoreCLR;
+ }
+
+ bool isWindowsItaniumEnvironment() const {
+ return getOS() == Triple::Win32 && getEnvironment() == Triple::Itanium;
+ }
+
+ bool isWindowsCygwinEnvironment() const {
+ return getOS() == Triple::Win32 && getEnvironment() == Triple::Cygnus;
+ }
+
+ bool isWindowsGNUEnvironment() const {
+ return getOS() == Triple::Win32 && getEnvironment() == Triple::GNU;
+ }
+
+ /// Tests for either Cygwin or MinGW OS
+ bool isOSCygMing() const {
+ return isWindowsCygwinEnvironment() || isWindowsGNUEnvironment();
+ }
+
+ /// Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
+ bool isOSMSVCRT() const {
+ return isWindowsMSVCEnvironment() || isWindowsGNUEnvironment() ||
+ isWindowsItaniumEnvironment();
+ }
+
+ /// Tests whether the OS is Windows.
+ bool isOSWindows() const {
+ return getOS() == Triple::Win32;
+ }
+
+ /// Tests whether the OS is NaCl (Native Client)
+ bool isOSNaCl() const {
+ return getOS() == Triple::NaCl;
+ }
+
+ /// Tests whether the OS is Linux.
+ bool isOSLinux() const {
+ return getOS() == Triple::Linux;
+ }
+
+ /// Tests whether the OS uses the ELF binary format.
+ bool isOSBinFormatELF() const {
+ return getObjectFormat() == Triple::ELF;
+ }
+
+ /// Tests whether the OS uses the COFF binary format.
+ bool isOSBinFormatCOFF() const {
+ return getObjectFormat() == Triple::COFF;
+ }
+
+ /// Tests whether the environment is MachO.
+ bool isOSBinFormatMachO() const {
+ return getObjectFormat() == Triple::MachO;
+ }
+
+ /// Tests whether the target is the PS4 CPU
+ bool isPS4CPU() const {
+ return getArch() == Triple::x86_64 &&
+ getVendor() == Triple::SCEI &&
+ getOS() == Triple::PS4;
+ }
+
+ /// Tests whether the target is the PS4 platform
+ bool isPS4() const {
+ return getVendor() == Triple::SCEI &&
+ getOS() == Triple::PS4;
+ }
+
+ /// Tests whether the target is Android
+ bool isAndroid() const { return getEnvironment() == Triple::Android; }
+
+ /// @}
+ /// @name Mutators
+ /// @{
+
+ /// setArch - Set the architecture (first) component of the triple
+ /// to a known type.
+ void setArch(ArchType Kind);
+
+ /// setVendor - Set the vendor (second) component of the triple to a
+ /// known type.
+ void setVendor(VendorType Kind);
+
+ /// setOS - Set the operating system (third) component of the triple
+ /// to a known type.
+ void setOS(OSType Kind);
+
+ /// setEnvironment - Set the environment (fourth) component of the triple
+ /// to a known type.
+ void setEnvironment(EnvironmentType Kind);
+
+ /// setObjectFormat - Set the object file format
+ void setObjectFormat(ObjectFormatType Kind);
+
+ /// setTriple - Set all components to the new triple \p Str.
+ void setTriple(const Twine &Str);
+
+ /// setArchName - Set the architecture (first) component of the
+ /// triple by name.
+ void setArchName(StringRef Str);
+
+ /// setVendorName - Set the vendor (second) component of the triple
+ /// by name.
+ void setVendorName(StringRef Str);
+
+ /// setOSName - Set the operating system (third) component of the
+ /// triple by name.
+ void setOSName(StringRef Str);
+
+ /// setEnvironmentName - Set the optional environment (fourth)
+ /// component of the triple by name.
+ void setEnvironmentName(StringRef Str);
+
+ /// setOSAndEnvironmentName - Set the operating system and optional
+ /// environment components with a single string.
+ void setOSAndEnvironmentName(StringRef Str);
+
+ /// @}
+ /// @name Helpers to build variants of a particular triple.
+ /// @{
+
+ /// Form a triple with a 32-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 32-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get32BitArchVariant() const;
+
+ /// Form a triple with a 64-bit variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a 64-bit architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple get64BitArchVariant() const;
+
+ /// Form a triple with a big endian variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a big endian architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple getBigEndianArchVariant() const;
+
+ /// Form a triple with a little endian variant of the current architecture.
+ ///
+ /// This can be used to move across "families" of architectures where useful.
+ ///
+ /// \returns A new triple with a little endian architecture or an unknown
+ /// architecture if no such variant can be found.
+ llvm::Triple getLittleEndianArchVariant() const;
+
+ /// Get the (LLVM) name of the minimum ARM CPU for the arch we are targeting.
+ ///
+ /// \param Arch the architecture name (e.g., "armv7s"). If it is an empty
+ /// string then the triple's arch name is used.
+ StringRef getARMCPUForArch(StringRef Arch = StringRef()) const;
+
+ /// @}
+ /// @name Static helpers for IDs.
+ /// @{
+
+ /// getArchTypeName - Get the canonical name for the \p Kind architecture.
+ static const char *getArchTypeName(ArchType Kind);
+
+ /// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind
+ /// architecture. This is the prefix used by the architecture specific
+ /// builtins, and is suitable for passing to \see
+ /// Intrinsic::getIntrinsicForGCCBuiltin().
+ ///
+ /// \return - The architecture prefix, or 0 if none is defined.
+ static const char *getArchTypePrefix(ArchType Kind);
+
+ /// getVendorTypeName - Get the canonical name for the \p Kind vendor.
+ static const char *getVendorTypeName(VendorType Kind);
+
+ /// getOSTypeName - Get the canonical name for the \p Kind operating system.
+ static const char *getOSTypeName(OSType Kind);
+
+ /// getEnvironmentTypeName - Get the canonical name for the \p Kind
+ /// environment.
+ static const char *getEnvironmentTypeName(EnvironmentType Kind);
+
+ /// @}
+ /// @name Static helpers for converting alternate architecture names.
+ /// @{
+
+ /// getArchTypeForLLVMName - The canonical type for the given LLVM
+ /// architecture name (e.g., "x86").
+ static ArchType getArchTypeForLLVMName(StringRef Str);
+
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/Twine.h b/contrib/llvm/include/llvm/ADT/Twine.h
new file mode 100644
index 0000000..db0bf4b
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/Twine.h
@@ -0,0 +1,542 @@
+//===-- Twine.h - Fast Temporary String Concatenation -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_TWINE_H
+#define LLVM_ADT_TWINE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+ class raw_ostream;
+
+ /// Twine - A lightweight data structure for efficiently representing the
+ /// concatenation of temporary values as strings.
+ ///
+  /// A Twine is a kind of rope; it represents a concatenated string using a
+  /// binary tree, where the string is the preorder of the nodes. Since the
+  /// Twine can be efficiently rendered into a buffer when its result is used,
+  /// it avoids the cost of generating temporary values for intermediate string
+  /// results -- particularly in cases when the Twine result is never
+  /// required. By explicitly tracking the type of leaf nodes, we can also
+  /// avoid the creation of temporary strings for conversion operations (such
+  /// as appending an integer to a string).
+ ///
+  /// A Twine is not intended for direct use and should not be stored; its
+  /// implementation relies on the ability to store pointers to temporary stack
+  /// objects which may be deallocated at the end of a statement. Twines should
+  /// only be accepted as const reference arguments when an API wishes to
+  /// accept possibly-concatenated strings.
+ ///
+ /// Twines support a special 'null' value, which always concatenates to form
+ /// itself, and renders as an empty string. This can be returned from APIs to
+ /// effectively nullify any concatenations performed on the result.
+ ///
+ /// \b Implementation
+ ///
+ /// Given the nature of a Twine, it is not possible for the Twine's
+ /// concatenation method to construct interior nodes; the result must be
+ /// represented inside the returned value. For this reason a Twine object
+ /// actually holds two values, the left- and right-hand sides of a
+ /// concatenation. We also have nullary Twine objects, which are effectively
+ /// sentinel values that represent empty strings.
+ ///
+ /// Thus, a Twine can effectively have zero, one, or two children. The \see
+ /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for
+ /// testing the number of children.
+ ///
+ /// We maintain a number of invariants on Twine objects (FIXME: Why):
+ /// - Nullary twines are always represented with their Kind on the left-hand
+ /// side, and the Empty kind on the right-hand side.
+ /// - Unary twines are always represented with the value on the left-hand
+ /// side, and the Empty kind on the right-hand side.
+ /// - If a Twine has another Twine as a child, that child should always be
+ /// binary (otherwise it could have been folded into the parent).
+ ///
+  /// These invariants are checked by \see isValid().
+ ///
+ /// \b Efficiency Considerations
+ ///
+ /// The Twine is designed to yield efficient and small code for common
+ /// situations. For this reason, the concat() method is inlined so that
+ /// concatenations of leaf nodes can be optimized into stores directly into a
+ /// single stack allocated object.
+ ///
+ /// In practice, not all compilers can be trusted to optimize concat() fully,
+ /// so we provide two additional methods (and accompanying operator+
+ /// overloads) to guarantee that particularly important cases (cstring plus
+ /// StringRef) codegen as desired.
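+  ///
+  /// For illustration only, a sketch against a hypothetical API taking a
+  /// Twine (not part of the original header):
+  /// \code
+  ///   void emit(const Twine &Name); // hypothetical consumer
+  ///   for (int I = 0; I != 3; ++I)
+  ///     emit("block." + Twine(I)); // rendered once inside emit(), no
+  ///                                // intermediate std::string temporaries
+  /// \endcode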
+ class Twine {
+ /// NodeKind - Represent the type of an argument.
+ enum NodeKind : unsigned char {
+ /// An empty string; the result of concatenating anything with it is also
+ /// empty.
+ NullKind,
+
+ /// The empty string.
+ EmptyKind,
+
+ /// A pointer to a Twine instance.
+ TwineKind,
+
+ /// A pointer to a C string instance.
+ CStringKind,
+
+ /// A pointer to an std::string instance.
+ StdStringKind,
+
+ /// A pointer to a StringRef instance.
+ StringRefKind,
+
+ /// A pointer to a SmallString instance.
+ SmallStringKind,
+
+ /// A char value reinterpreted as a pointer, to render as a character.
+ CharKind,
+
+ /// An unsigned int value reinterpreted as a pointer, to render as an
+ /// unsigned decimal integer.
+ DecUIKind,
+
+ /// An int value reinterpreted as a pointer, to render as a signed
+ /// decimal integer.
+ DecIKind,
+
+ /// A pointer to an unsigned long value, to render as an unsigned decimal
+ /// integer.
+ DecULKind,
+
+ /// A pointer to a long value, to render as a signed decimal integer.
+ DecLKind,
+
+ /// A pointer to an unsigned long long value, to render as an unsigned
+ /// decimal integer.
+ DecULLKind,
+
+ /// A pointer to a long long value, to render as a signed decimal integer.
+ DecLLKind,
+
+ /// A pointer to a uint64_t value, to render as an unsigned hexadecimal
+ /// integer.
+ UHexKind
+ };
+
+ union Child
+ {
+ const Twine *twine;
+ const char *cString;
+ const std::string *stdString;
+ const StringRef *stringRef;
+ const SmallVectorImpl<char> *smallString;
+ char character;
+ unsigned int decUI;
+ int decI;
+ const unsigned long *decUL;
+ const long *decL;
+ const unsigned long long *decULL;
+ const long long *decLL;
+ const uint64_t *uHex;
+ };
+
+ private:
+ /// LHS - The prefix in the concatenation, which may be uninitialized for
+ /// Null or Empty kinds.
+ Child LHS;
+ /// RHS - The suffix in the concatenation, which may be uninitialized for
+ /// Null or Empty kinds.
+ Child RHS;
+ /// LHSKind - The NodeKind of the left hand side, \see getLHSKind().
+ NodeKind LHSKind;
+ /// RHSKind - The NodeKind of the right hand side, \see getRHSKind().
+ NodeKind RHSKind;
+
+ private:
+ /// Construct a nullary twine; the kind must be NullKind or EmptyKind.
+ explicit Twine(NodeKind Kind)
+ : LHSKind(Kind), RHSKind(EmptyKind) {
+ assert(isNullary() && "Invalid kind!");
+ }
+
+ /// Construct a binary twine.
+ explicit Twine(const Twine &LHS, const Twine &RHS)
+ : LHSKind(TwineKind), RHSKind(TwineKind) {
+ this->LHS.twine = &LHS;
+ this->RHS.twine = &RHS;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct a twine from explicit values.
+ explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind)
+ : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) {
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Since the intended use of twines is as temporary objects, assignments
+    /// when concatenating might cause undefined behavior or stack corruption.
+ Twine &operator=(const Twine &Other) = delete;
+
+ /// Check for the null twine.
+ bool isNull() const {
+ return getLHSKind() == NullKind;
+ }
+
+ /// Check for the empty twine.
+ bool isEmpty() const {
+ return getLHSKind() == EmptyKind;
+ }
+
+ /// Check if this is a nullary twine (null or empty).
+ bool isNullary() const {
+ return isNull() || isEmpty();
+ }
+
+ /// Check if this is a unary twine.
+ bool isUnary() const {
+ return getRHSKind() == EmptyKind && !isNullary();
+ }
+
+ /// Check if this is a binary twine.
+ bool isBinary() const {
+ return getLHSKind() != NullKind && getRHSKind() != EmptyKind;
+ }
+
+ /// Check if this is a valid twine (satisfying the invariants on
+ /// order and number of arguments).
+ bool isValid() const {
+ // Nullary twines always have Empty on the RHS.
+ if (isNullary() && getRHSKind() != EmptyKind)
+ return false;
+
+ // Null should never appear on the RHS.
+ if (getRHSKind() == NullKind)
+ return false;
+
+ // The RHS cannot be non-empty if the LHS is empty.
+ if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind)
+ return false;
+
+ // A twine child should always be binary.
+ if (getLHSKind() == TwineKind &&
+ !LHS.twine->isBinary())
+ return false;
+ if (getRHSKind() == TwineKind &&
+ !RHS.twine->isBinary())
+ return false;
+
+ return true;
+ }
+
+ /// Get the NodeKind of the left-hand side.
+ NodeKind getLHSKind() const { return LHSKind; }
+
+ /// Get the NodeKind of the right-hand side.
+ NodeKind getRHSKind() const { return RHSKind; }
+
+ /// Print one child from a twine.
+ void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const;
+
+ /// Print the representation of one child from a twine.
+ void printOneChildRepr(raw_ostream &OS, Child Ptr,
+ NodeKind Kind) const;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct from an empty string.
+ /*implicit*/ Twine() : LHSKind(EmptyKind), RHSKind(EmptyKind) {
+ assert(isValid() && "Invalid twine!");
+ }
+
+ Twine(const Twine &) = default;
+
+ /// Construct from a C string.
+ ///
+ /// We take care here to optimize "" into the empty twine -- this will be
+    /// optimized out for string constants. This allows Twine arguments to have
+    /// default "" values, without introducing unnecessary string constants.
+ /*implicit*/ Twine(const char *Str)
+ : RHSKind(EmptyKind) {
+ if (Str[0] != '\0') {
+ LHS.cString = Str;
+ LHSKind = CStringKind;
+ } else
+ LHSKind = EmptyKind;
+
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from an std::string.
+ /*implicit*/ Twine(const std::string &Str)
+ : LHSKind(StdStringKind), RHSKind(EmptyKind) {
+ LHS.stdString = &Str;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a StringRef.
+ /*implicit*/ Twine(const StringRef &Str)
+ : LHSKind(StringRefKind), RHSKind(EmptyKind) {
+ LHS.stringRef = &Str;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a SmallString.
+ /*implicit*/ Twine(const SmallVectorImpl<char> &Str)
+ : LHSKind(SmallStringKind), RHSKind(EmptyKind) {
+ LHS.smallString = &Str;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct from a char.
+ explicit Twine(char Val)
+ : LHSKind(CharKind), RHSKind(EmptyKind) {
+ LHS.character = Val;
+ }
+
+ /// Construct from a signed char.
+ explicit Twine(signed char Val)
+ : LHSKind(CharKind), RHSKind(EmptyKind) {
+ LHS.character = static_cast<char>(Val);
+ }
+
+ /// Construct from an unsigned char.
+ explicit Twine(unsigned char Val)
+ : LHSKind(CharKind), RHSKind(EmptyKind) {
+ LHS.character = static_cast<char>(Val);
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(unsigned Val)
+ : LHSKind(DecUIKind), RHSKind(EmptyKind) {
+ LHS.decUI = Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(int Val)
+ : LHSKind(DecIKind), RHSKind(EmptyKind) {
+ LHS.decI = Val;
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(const unsigned long &Val)
+ : LHSKind(DecULKind), RHSKind(EmptyKind) {
+ LHS.decUL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(const long &Val)
+ : LHSKind(DecLKind), RHSKind(EmptyKind) {
+ LHS.decL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as an unsigned decimal integer.
+ explicit Twine(const unsigned long long &Val)
+ : LHSKind(DecULLKind), RHSKind(EmptyKind) {
+ LHS.decULL = &Val;
+ }
+
+ /// Construct a twine to print \p Val as a signed decimal integer.
+ explicit Twine(const long long &Val)
+ : LHSKind(DecLLKind), RHSKind(EmptyKind) {
+ LHS.decLL = &Val;
+ }
+
+ // FIXME: Unfortunately, to make sure this is as efficient as possible we
+ // need extra binary constructors from particular types. We can't rely on
+ // the compiler to be smart enough to fold operator+()/concat() down to the
+ // right thing. Yet.
+
+ /// Construct as the concatenation of a C string and a StringRef.
+ /*implicit*/ Twine(const char *LHS, const StringRef &RHS)
+ : LHSKind(CStringKind), RHSKind(StringRefKind) {
+ this->LHS.cString = LHS;
+ this->RHS.stringRef = &RHS;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Construct as the concatenation of a StringRef and a C string.
+ /*implicit*/ Twine(const StringRef &LHS, const char *RHS)
+ : LHSKind(StringRefKind), RHSKind(CStringKind) {
+ this->LHS.stringRef = &LHS;
+ this->RHS.cString = RHS;
+ assert(isValid() && "Invalid twine!");
+ }
+
+ /// Create a 'null' string, which is an empty string that always
+ /// concatenates to form another empty string.
+ static Twine createNull() {
+ return Twine(NullKind);
+ }
+
+ /// @}
+ /// @name Numeric Conversions
+ /// @{
+
+    /// Construct a twine to print \p Val as an unsigned hexadecimal integer.
+ static Twine utohexstr(const uint64_t &Val) {
+ Child LHS, RHS;
+ LHS.uHex = &Val;
+ RHS.twine = nullptr;
+ return Twine(LHS, UHexKind, RHS, EmptyKind);
+ }
+
+ /// @}
+ /// @name Predicate Operations
+ /// @{
+
+ /// Check if this twine is trivially empty; a false return value does not
+ /// necessarily mean the twine is empty.
+ bool isTriviallyEmpty() const {
+ return isNullary();
+ }
+
+ /// Return true if this twine can be dynamically accessed as a single
+ /// StringRef value with getSingleStringRef().
+ bool isSingleStringRef() const {
+ if (getRHSKind() != EmptyKind) return false;
+
+ switch (getLHSKind()) {
+ case EmptyKind:
+ case CStringKind:
+ case StdStringKind:
+ case StringRefKind:
+ case SmallStringKind:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// @}
+ /// @name String Operations
+ /// @{
+
+ Twine concat(const Twine &Suffix) const;
+
+ /// @}
+ /// @name Output & Conversion.
+ /// @{
+
+ /// Return the twine contents as a std::string.
+ std::string str() const;
+
+ /// Append the concatenated string into the given SmallString or SmallVector.
+ void toVector(SmallVectorImpl<char> &Out) const;
+
+ /// This returns the twine as a single StringRef. This method is only valid
+ /// if isSingleStringRef() is true.
+ StringRef getSingleStringRef() const {
+      assert(isSingleStringRef() &&
+             "This cannot be had as a single stringref!");
+ switch (getLHSKind()) {
+ default: llvm_unreachable("Out of sync with isSingleStringRef");
+ case EmptyKind: return StringRef();
+ case CStringKind: return StringRef(LHS.cString);
+ case StdStringKind: return StringRef(*LHS.stdString);
+ case StringRefKind: return *LHS.stringRef;
+ case SmallStringKind:
+ return StringRef(LHS.smallString->data(), LHS.smallString->size());
+ }
+ }
+
+ /// This returns the twine as a single StringRef if it can be
+ /// represented as such. Otherwise the twine is written into the given
+ /// SmallVector and a StringRef to the SmallVector's data is returned.
+ StringRef toStringRef(SmallVectorImpl<char> &Out) const {
+ if (isSingleStringRef())
+ return getSingleStringRef();
+ toVector(Out);
+ return StringRef(Out.data(), Out.size());
+ }
+
+ /// This returns the twine as a single null terminated StringRef if it
+ /// can be represented as such. Otherwise the twine is written into the
+ /// given SmallVector and a StringRef to the SmallVector's data is returned.
+ ///
+ /// The returned StringRef's size does not include the null terminator.
+ StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
+ /// Write the concatenated string represented by this twine to the
+ /// stream \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// Dump the concatenated string represented by this twine to stderr.
+ void dump() const;
+
+ /// Write the representation of this twine to the stream \p OS.
+ void printRepr(raw_ostream &OS) const;
+
+ /// Dump the representation of this twine to stderr.
+ void dumpRepr() const;
+
+ /// @}
+ };
+
+ /// @name Twine Inline Implementations
+ /// @{
+
+ inline Twine Twine::concat(const Twine &Suffix) const {
+ // Concatenation with null is null.
+ if (isNull() || Suffix.isNull())
+ return Twine(NullKind);
+
+ // Concatenation with empty yields the other side.
+ if (isEmpty())
+ return Suffix;
+ if (Suffix.isEmpty())
+ return *this;
+
+ // Otherwise we need to create a new node, taking care to fold in unary
+ // twines.
+ Child NewLHS, NewRHS;
+ NewLHS.twine = this;
+ NewRHS.twine = &Suffix;
+ NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind;
+ if (isUnary()) {
+ NewLHS = LHS;
+ NewLHSKind = getLHSKind();
+ }
+ if (Suffix.isUnary()) {
+ NewRHS = Suffix.LHS;
+ NewRHSKind = Suffix.getLHSKind();
+ }
+
+ return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind);
+ }
+
+ inline Twine operator+(const Twine &LHS, const Twine &RHS) {
+ return LHS.concat(RHS);
+ }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent
+  /// to concat().
+ inline Twine operator+(const char *LHS, const StringRef &RHS) {
+ return Twine(LHS, RHS);
+ }
+
+  /// Additional overload to guarantee simplified codegen; this is equivalent
+  /// to concat().
+ inline Twine operator+(const StringRef &LHS, const char *RHS) {
+ return Twine(LHS, RHS);
+ }
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) {
+ RHS.print(OS);
+ return OS;
+ }
+
+ /// @}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/UniqueVector.h b/contrib/llvm/include/llvm/ADT/UniqueVector.h
new file mode 100644
index 0000000..e1ab4b5
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/UniqueVector.h
@@ -0,0 +1,107 @@
+//===-- llvm/ADT/UniqueVector.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_UNIQUEVECTOR_H
+#define LLVM_ADT_UNIQUEVECTOR_H
+
+#include <cassert>
+#include <cstddef>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// UniqueVector - This class produces a sequential ID number (base 1) for each
+/// unique entry that is added. T is the type of entries in the vector; it
+/// must provide implementations of operator== and operator<.
+/// Entries can be fetched using operator[] with the entry ID.
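+///
+/// For illustration only, a usage sketch with hypothetical values (not part
+/// of the original header):
+/// \code
+///   UniqueVector<std::string> Names;
+///   unsigned A = Names.insert("foo"); // 1 (first unique entry)
+///   unsigned B = Names.insert("bar"); // 2
+///   Names.insert("foo");              // 1 again: already present
+///   // Names[A] == "foo", Names[B] == "bar"
+/// \endcode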
+template<class T> class UniqueVector {
+public:
+ typedef typename std::vector<T> VectorType;
+ typedef typename VectorType::iterator iterator;
+ typedef typename VectorType::const_iterator const_iterator;
+
+private:
+ // Map - Used to handle the correspondence of entry to ID.
+ std::map<T, unsigned> Map;
+
+ // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1.
+ //
+ VectorType Vector;
+
+public:
+ /// insert - Append entry to the vector if it doesn't already exist. Returns
+ /// the entry's index + 1 to be used as a unique ID.
+ unsigned insert(const T &Entry) {
+ // Check if the entry is already in the map.
+ unsigned &Val = Map[Entry];
+
+ // See if entry exists, if so return prior ID.
+ if (Val) return Val;
+
+ // Compute ID for entry.
+ Val = static_cast<unsigned>(Vector.size()) + 1;
+
+ // Insert in vector.
+ Vector.push_back(Entry);
+ return Val;
+ }
+
+  /// idFor - Return the ID for an existing entry. Returns 0 if the entry is
+  /// not found.
+ unsigned idFor(const T &Entry) const {
+ // Search for entry in the map.
+ typename std::map<T, unsigned>::const_iterator MI = Map.find(Entry);
+
+ // See if entry exists, if so return ID.
+ if (MI != Map.end()) return MI->second;
+
+ // No luck.
+ return 0;
+ }
+
+ /// operator[] - Returns a reference to the entry with the specified ID.
+ ///
+ const T &operator[](unsigned ID) const {
+ assert(ID-1 < size() && "ID is 0 or out of range!");
+ return Vector[ID - 1];
+ }
+
+ /// \brief Return an iterator to the start of the vector.
+ iterator begin() { return Vector.begin(); }
+
+ /// \brief Return an iterator to the start of the vector.
+ const_iterator begin() const { return Vector.begin(); }
+
+ /// \brief Return an iterator to the end of the vector.
+ iterator end() { return Vector.end(); }
+
+ /// \brief Return an iterator to the end of the vector.
+ const_iterator end() const { return Vector.end(); }
+
+ /// size - Returns the number of entries in the vector.
+ ///
+ size_t size() const { return Vector.size(); }
+
+ /// empty - Returns true if the vector is empty.
+ ///
+ bool empty() const { return Vector.empty(); }
+
+ /// reset - Clears all the entries.
+ ///
+ void reset() {
+ Map.clear();
+ Vector.clear();
+ }
+};
+
+} // End of namespace llvm
+
+#endif // LLVM_ADT_UNIQUEVECTOR_H
diff --git a/contrib/llvm/include/llvm/ADT/VariadicFunction.h b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
new file mode 100644
index 0000000..403130c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/VariadicFunction.h
@@ -0,0 +1,331 @@
+//===--- VariadicFunction.h - Variadic Functions ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements compile-time type-safe variadic functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_VARIADICFUNCTION_H
+#define LLVM_ADT_VARIADICFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+// Define macros to aid in expanding a comma separated series with the index of
+// the series pasted onto the last token.
+#define LLVM_COMMA_JOIN1(x) x ## 0
+#define LLVM_COMMA_JOIN2(x) LLVM_COMMA_JOIN1(x), x ## 1
+#define LLVM_COMMA_JOIN3(x) LLVM_COMMA_JOIN2(x), x ## 2
+#define LLVM_COMMA_JOIN4(x) LLVM_COMMA_JOIN3(x), x ## 3
+#define LLVM_COMMA_JOIN5(x) LLVM_COMMA_JOIN4(x), x ## 4
+#define LLVM_COMMA_JOIN6(x) LLVM_COMMA_JOIN5(x), x ## 5
+#define LLVM_COMMA_JOIN7(x) LLVM_COMMA_JOIN6(x), x ## 6
+#define LLVM_COMMA_JOIN8(x) LLVM_COMMA_JOIN7(x), x ## 7
+#define LLVM_COMMA_JOIN9(x) LLVM_COMMA_JOIN8(x), x ## 8
+#define LLVM_COMMA_JOIN10(x) LLVM_COMMA_JOIN9(x), x ## 9
+#define LLVM_COMMA_JOIN11(x) LLVM_COMMA_JOIN10(x), x ## 10
+#define LLVM_COMMA_JOIN12(x) LLVM_COMMA_JOIN11(x), x ## 11
+#define LLVM_COMMA_JOIN13(x) LLVM_COMMA_JOIN12(x), x ## 12
+#define LLVM_COMMA_JOIN14(x) LLVM_COMMA_JOIN13(x), x ## 13
+#define LLVM_COMMA_JOIN15(x) LLVM_COMMA_JOIN14(x), x ## 14
+#define LLVM_COMMA_JOIN16(x) LLVM_COMMA_JOIN15(x), x ## 15
+#define LLVM_COMMA_JOIN17(x) LLVM_COMMA_JOIN16(x), x ## 16
+#define LLVM_COMMA_JOIN18(x) LLVM_COMMA_JOIN17(x), x ## 17
+#define LLVM_COMMA_JOIN19(x) LLVM_COMMA_JOIN18(x), x ## 18
+#define LLVM_COMMA_JOIN20(x) LLVM_COMMA_JOIN19(x), x ## 19
+#define LLVM_COMMA_JOIN21(x) LLVM_COMMA_JOIN20(x), x ## 20
+#define LLVM_COMMA_JOIN22(x) LLVM_COMMA_JOIN21(x), x ## 21
+#define LLVM_COMMA_JOIN23(x) LLVM_COMMA_JOIN22(x), x ## 22
+#define LLVM_COMMA_JOIN24(x) LLVM_COMMA_JOIN23(x), x ## 23
+#define LLVM_COMMA_JOIN25(x) LLVM_COMMA_JOIN24(x), x ## 24
+#define LLVM_COMMA_JOIN26(x) LLVM_COMMA_JOIN25(x), x ## 25
+#define LLVM_COMMA_JOIN27(x) LLVM_COMMA_JOIN26(x), x ## 26
+#define LLVM_COMMA_JOIN28(x) LLVM_COMMA_JOIN27(x), x ## 27
+#define LLVM_COMMA_JOIN29(x) LLVM_COMMA_JOIN28(x), x ## 28
+#define LLVM_COMMA_JOIN30(x) LLVM_COMMA_JOIN29(x), x ## 29
+#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
+#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
+
+/// \brief Class which can simulate a type-safe variadic function.
+///
+/// The VariadicFunction class template makes it easy to define
+/// type-safe variadic functions where all arguments have the same
+/// type.
+///
+/// Suppose we need a variadic function like this:
+///
+/// ResultT Foo(const ArgT &A_0, const ArgT &A_1, ..., const ArgT &A_N);
+///
+/// Instead of many overloads of Foo(), we only need to define a helper
+/// function that takes an array of arguments:
+///
+/// ResultT FooImpl(ArrayRef<const ArgT *> Args) {
+/// // 'Args[i]' is a pointer to the i-th argument passed to Foo().
+/// ...
+/// }
+///
+/// and then define Foo() like this:
+///
+/// const VariadicFunction<ResultT, ArgT, FooImpl> Foo;
+///
+/// VariadicFunction takes care of defining the overloads of Foo().
+///
+/// Actually, Foo is a function object (i.e. functor) instead of a plain
+/// function. This object is stateless, and its constructor and destructor do
+/// nothing, so it's safe to create global objects and call Foo(...) at
+/// any time.
+///
+/// Sometimes we need a variadic function to have some fixed leading
+/// arguments whose types may be different from those of the optional
+/// arguments. For example:
+///
+/// bool FullMatch(const StringRef &S, const RE &Regex,
+/// const ArgT &A_0, ..., const ArgT &A_N);
+///
+/// VariadicFunctionN is for such cases, where N is the number of fixed
+/// arguments. It is like VariadicFunction, except that it takes N more
+/// template arguments for the types of the fixed arguments:
+///
+/// bool FullMatchImpl(const StringRef &S, const RE &Regex,
+/// ArrayRef<const ArgT *> Args) { ... }
+/// const VariadicFunction2<bool, const StringRef&,
+/// const RE&, ArgT, FullMatchImpl>
+/// FullMatch;
+///
+/// Currently VariadicFunction and friends support up to 3
+/// fixed leading arguments and up to 32 optional arguments.
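+///
+/// As a concrete, illustrative sketch (JoinImpl and Join are hypothetical
+/// names; note the "= {}", since a const object of this type needs an
+/// explicit initializer):
+///
+/// \code
+///   std::string JoinImpl(ArrayRef<const std::string *> Args) {
+///     std::string Result;
+///     for (const std::string *S : Args)
+///       Result += *S;
+///     return Result;
+///   }
+///   const VariadicFunction<std::string, std::string, JoinImpl> Join = {};
+///   std::string CatDog = Join("cat", "dog"); // "catdog"
+/// \endcode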
+template <typename ResultT, typename ArgT,
+ ResultT (*Func)(ArrayRef<const ArgT *>)>
+struct VariadicFunction {
+ ResultT operator()() const {
+ return Func(None);
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename ArgT,
+ ResultT (*Func)(Param0T, ArrayRef<const ArgT *>)>
+struct VariadicFunction1 {
+ ResultT operator()(Param0T P0) const {
+ return Func(P0, None);
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T, typename ArgT,
+ ResultT (*Func)(Param0T, Param1T, ArrayRef<const ArgT *>)>
+struct VariadicFunction2 {
+ ResultT operator()(Param0T P0, Param1T P1) const {
+ return Func(P0, P1, None);
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, Param1T P1, \
+ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, P1, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+template <typename ResultT, typename Param0T, typename Param1T,
+ typename Param2T, typename ArgT,
+ ResultT (*Func)(Param0T, Param1T, Param2T, ArrayRef<const ArgT *>)>
+struct VariadicFunction3 {
+ ResultT operator()(Param0T P0, Param1T P1, Param2T P2) const {
+ return Func(P0, P1, P2, None);
+ }
+
+#define LLVM_DEFINE_OVERLOAD(N) \
+ ResultT operator()(Param0T P0, Param1T P1, Param2T P2, \
+ LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
+ const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
+ return Func(P0, P1, P2, makeArrayRef(Args)); \
+ }
+ LLVM_DEFINE_OVERLOAD(1)
+ LLVM_DEFINE_OVERLOAD(2)
+ LLVM_DEFINE_OVERLOAD(3)
+ LLVM_DEFINE_OVERLOAD(4)
+ LLVM_DEFINE_OVERLOAD(5)
+ LLVM_DEFINE_OVERLOAD(6)
+ LLVM_DEFINE_OVERLOAD(7)
+ LLVM_DEFINE_OVERLOAD(8)
+ LLVM_DEFINE_OVERLOAD(9)
+ LLVM_DEFINE_OVERLOAD(10)
+ LLVM_DEFINE_OVERLOAD(11)
+ LLVM_DEFINE_OVERLOAD(12)
+ LLVM_DEFINE_OVERLOAD(13)
+ LLVM_DEFINE_OVERLOAD(14)
+ LLVM_DEFINE_OVERLOAD(15)
+ LLVM_DEFINE_OVERLOAD(16)
+ LLVM_DEFINE_OVERLOAD(17)
+ LLVM_DEFINE_OVERLOAD(18)
+ LLVM_DEFINE_OVERLOAD(19)
+ LLVM_DEFINE_OVERLOAD(20)
+ LLVM_DEFINE_OVERLOAD(21)
+ LLVM_DEFINE_OVERLOAD(22)
+ LLVM_DEFINE_OVERLOAD(23)
+ LLVM_DEFINE_OVERLOAD(24)
+ LLVM_DEFINE_OVERLOAD(25)
+ LLVM_DEFINE_OVERLOAD(26)
+ LLVM_DEFINE_OVERLOAD(27)
+ LLVM_DEFINE_OVERLOAD(28)
+ LLVM_DEFINE_OVERLOAD(29)
+ LLVM_DEFINE_OVERLOAD(30)
+ LLVM_DEFINE_OVERLOAD(31)
+ LLVM_DEFINE_OVERLOAD(32)
+#undef LLVM_DEFINE_OVERLOAD
+};
+
+// Cleanup the macro namespace.
+#undef LLVM_COMMA_JOIN1
+#undef LLVM_COMMA_JOIN2
+#undef LLVM_COMMA_JOIN3
+#undef LLVM_COMMA_JOIN4
+#undef LLVM_COMMA_JOIN5
+#undef LLVM_COMMA_JOIN6
+#undef LLVM_COMMA_JOIN7
+#undef LLVM_COMMA_JOIN8
+#undef LLVM_COMMA_JOIN9
+#undef LLVM_COMMA_JOIN10
+#undef LLVM_COMMA_JOIN11
+#undef LLVM_COMMA_JOIN12
+#undef LLVM_COMMA_JOIN13
+#undef LLVM_COMMA_JOIN14
+#undef LLVM_COMMA_JOIN15
+#undef LLVM_COMMA_JOIN16
+#undef LLVM_COMMA_JOIN17
+#undef LLVM_COMMA_JOIN18
+#undef LLVM_COMMA_JOIN19
+#undef LLVM_COMMA_JOIN20
+#undef LLVM_COMMA_JOIN21
+#undef LLVM_COMMA_JOIN22
+#undef LLVM_COMMA_JOIN23
+#undef LLVM_COMMA_JOIN24
+#undef LLVM_COMMA_JOIN25
+#undef LLVM_COMMA_JOIN26
+#undef LLVM_COMMA_JOIN27
+#undef LLVM_COMMA_JOIN28
+#undef LLVM_COMMA_JOIN29
+#undef LLVM_COMMA_JOIN30
+#undef LLVM_COMMA_JOIN31
+#undef LLVM_COMMA_JOIN32
+
+} // end namespace llvm
+
+#endif // LLVM_ADT_VARIADICFUNCTION_H
diff --git a/contrib/llvm/include/llvm/ADT/edit_distance.h b/contrib/llvm/include/llvm/ADT/edit_distance.h
new file mode 100644
index 0000000..06a01b1
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/edit_distance.h
@@ -0,0 +1,103 @@
+//===-- llvm/ADT/edit_distance.h - Array edit distance function -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a Levenshtein distance function that works for any two
+// sequences, with each element of each sequence being analogous to a character
+// in a string.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_EDIT_DISTANCE_H
+#define LLVM_ADT_EDIT_DISTANCE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <algorithm>
+#include <memory>
+
+namespace llvm {
+
+/// \brief Determine the edit distance between two sequences.
+///
+/// \param FromArray the first sequence to compare.
+///
+/// \param ToArray the second sequence to compare.
+///
+/// \param AllowReplacements whether to allow element replacements (change one
+/// element into another) as a single operation, rather than as two operations
+/// (an insertion and a removal).
+///
+/// \param MaxEditDistance If non-zero, the maximum edit distance that this
+/// routine is allowed to compute. If the edit distance will exceed that
+/// maximum, returns \c MaxEditDistance+1.
+///
+/// \returns the minimum number of element insertions, removals, or (if
+/// \p AllowReplacements is \c true) replacements needed to transform one of
+/// the given sequences into the other. If zero, the sequences are identical.
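+///
+/// An illustrative sketch (assumes llvm/ADT/StringRef.h for StringRef): the
+/// classic "kitten" -> "sitting" distance is 3.
+///
+/// \code
+///   StringRef From = "kitten", To = "sitting";
+///   unsigned D = ComputeEditDistance(
+///       makeArrayRef(From.data(), From.size()),
+///       makeArrayRef(To.data(), To.size())); // D == 3
+/// \endcode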
+template<typename T>
+unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
+ bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0) {
+ // The algorithm implemented below is the "classic"
+ // dynamic-programming algorithm for computing the Levenshtein
+ // distance, which is described here:
+ //
+ // http://en.wikipedia.org/wiki/Levenshtein_distance
+ //
+ // Although the algorithm is typically described using an m x n
+ // array, only one row plus one element are used at a time, so this
+ // implementation just keeps one vector for the row. To update one entry,
+ // only the entries to the left, top, and top-left are needed. The left
+ // entry is in Row[x-1], the top entry is what's in Row[x] from the last
+ // iteration, and the top-left entry is stored in Previous.
+ typename ArrayRef<T>::size_type m = FromArray.size();
+ typename ArrayRef<T>::size_type n = ToArray.size();
+
+ const unsigned SmallBufferSize = 64;
+ unsigned SmallBuffer[SmallBufferSize];
+ std::unique_ptr<unsigned[]> Allocated;
+ unsigned *Row = SmallBuffer;
+ if (n + 1 > SmallBufferSize) {
+ Row = new unsigned[n + 1];
+ Allocated.reset(Row);
+ }
+
+ for (unsigned i = 1; i <= n; ++i)
+ Row[i] = i;
+
+ for (typename ArrayRef<T>::size_type y = 1; y <= m; ++y) {
+ Row[0] = y;
+ unsigned BestThisRow = Row[0];
+
+ unsigned Previous = y - 1;
+ for (typename ArrayRef<T>::size_type x = 1; x <= n; ++x) {
+ unsigned OldRow = Row[x];
+ if (AllowReplacements) {
+ Row[x] = std::min(
+ Previous + (FromArray[y-1] == ToArray[x-1] ? 0u : 1u),
+ std::min(Row[x-1], Row[x])+1);
+ } else {
+ if (FromArray[y-1] == ToArray[x-1]) Row[x] = Previous;
+ else Row[x] = std::min(Row[x-1], Row[x]) + 1;
+ }
+ Previous = OldRow;
+ BestThisRow = std::min(BestThisRow, Row[x]);
+ }
+
+ if (MaxEditDistance && BestThisRow > MaxEditDistance)
+ return MaxEditDistance + 1;
+ }
+
+ unsigned Result = Row[n];
+ return Result;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/ilist.h b/contrib/llvm/include/llvm/ADT/ilist.h
new file mode 100644
index 0000000..3044a6c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ilist.h
@@ -0,0 +1,800 @@
+//==-- llvm/ADT/ilist.h - Intrusive Linked List Template ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes to implement an intrusive doubly linked list class
+// (i.e. each node of the list must contain a next and previous field for the
+// list).
+//
+// The ilist_traits trait class is used to gain access to the next and previous
+// fields of the node type that the list is instantiated with. If it is not
+// specialized, the list defaults to using the getPrev(), getNext() method calls
+// to get the next and previous pointers.
+//
+// The ilist class itself should be a plug-in replacement for std::list,
+// assuming that the nodes contain next/prev pointers. This list replacement
+// does not provide a constant time size() method, so be careful to use empty()
+// when you really want to know if it's empty.
+//
+// The ilist class is implemented by allocating a 'tail' node when the list is
+// created (using ilist_traits<>::createSentinel()). This tail node is
+// absolutely required because the user must be able to compute end()-1. Because
+// of this, users of the direct next/prev links will see an extra link on the
+// end of the list, which should be ignored.
+//
+// Requirements for a user of this list:
+//
+// 1. The user must provide {g|s}et{Next|Prev} methods, or specialize
+// ilist_traits to provide an alternate way of getting and setting next and
+// prev links.
+//
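+// For example, an illustrative sketch (MyNode is a hypothetical type):
+//
+//   struct MyNode {
+//     MyNode *Prev = nullptr, *Next = nullptr;
+//     MyNode *getPrev() { return Prev; }
+//     const MyNode *getPrev() const { return Prev; }
+//     MyNode *getNext() { return Next; }
+//     const MyNode *getNext() const { return Next; }
+//     void setPrev(MyNode *N) { Prev = N; }
+//     void setNext(MyNode *N) { Next = N; }
+//   };
+//
+//   iplist<MyNode> List;
+//   List.push_back(new MyNode()); // the list takes ownership of the node
+//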
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_H
+#define LLVM_ADT_ILIST_H
+
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+template<typename NodeTy, typename Traits> class iplist;
+template<typename NodeTy> class ilist_iterator;
+
+/// ilist_nextprev_traits - A fragment for template traits for intrusive list
+/// that provides default next/prev implementations for common operations.
+///
+template<typename NodeTy>
+struct ilist_nextprev_traits {
+ static NodeTy *getPrev(NodeTy *N) { return N->getPrev(); }
+ static NodeTy *getNext(NodeTy *N) { return N->getNext(); }
+ static const NodeTy *getPrev(const NodeTy *N) { return N->getPrev(); }
+ static const NodeTy *getNext(const NodeTy *N) { return N->getNext(); }
+
+ static void setPrev(NodeTy *N, NodeTy *Prev) { N->setPrev(Prev); }
+ static void setNext(NodeTy *N, NodeTy *Next) { N->setNext(Next); }
+};
+
+template<typename NodeTy>
+struct ilist_traits;
+
+/// ilist_sentinel_traits - A fragment for template traits for intrusive list
+/// that provides default sentinel implementations for common operations.
+///
+/// ilist_sentinel_traits implements a lazy dynamic sentinel allocation
+/// strategy. The sentinel is stored in the prev field of ilist's Head.
+///
+template<typename NodeTy>
+struct ilist_sentinel_traits {
+ /// createSentinel - create the dynamic sentinel
+ static NodeTy *createSentinel() { return new NodeTy(); }
+
+ /// destroySentinel - deallocate the dynamic sentinel
+ static void destroySentinel(NodeTy *N) { delete N; }
+
+ /// provideInitialHead - when constructing an ilist, provide a starting
+ /// value for its Head
+ /// @return null node to indicate that it needs to be allocated later
+ static NodeTy *provideInitialHead() { return nullptr; }
+
+ /// ensureHead - make sure that Head is either already
+ /// initialized or assigned a fresh sentinel
+ /// @return the sentinel
+ static NodeTy *ensureHead(NodeTy *&Head) {
+ if (!Head) {
+ Head = ilist_traits<NodeTy>::createSentinel();
+ ilist_traits<NodeTy>::noteHead(Head, Head);
+ ilist_traits<NodeTy>::setNext(Head, nullptr);
+ return Head;
+ }
+ return ilist_traits<NodeTy>::getPrev(Head);
+ }
+
+ /// noteHead - stash the sentinel into its default location
+ static void noteHead(NodeTy *NewHead, NodeTy *Sentinel) {
+ ilist_traits<NodeTy>::setPrev(NewHead, Sentinel);
+ }
+};
+
+template <typename NodeTy> class ilist_half_node;
+template <typename NodeTy> class ilist_node;
+
+/// Traits with an embedded ilist_node as a sentinel.
+///
+/// FIXME: The downcast in createSentinel() is UB.
+template <typename NodeTy> struct ilist_embedded_sentinel_traits {
+ /// Get hold of the node that marks the end of the list.
+ NodeTy *createSentinel() const {
+ // Since i(p)lists always publicly derive from their corresponding traits,
+ // placing a data member in this class will augment the i(p)list. But since
+ // the NodeTy is expected to publicly derive from ilist_node<NodeTy>,
+ // there is a legal viable downcast from it to NodeTy. We use this trick to
+ // superimpose an i(p)list with a "ghostly" NodeTy, which becomes the
+ // sentinel. Dereferencing the sentinel is forbidden (save the
+ // ilist_node<NodeTy>), so no one will ever notice the superposition.
+ return static_cast<NodeTy *>(&Sentinel);
+ }
+ static void destroySentinel(NodeTy *) {}
+
+ NodeTy *provideInitialHead() const { return createSentinel(); }
+ NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
+ static void noteHead(NodeTy *, NodeTy *) {}
+
+private:
+ mutable ilist_node<NodeTy> Sentinel;
+};
+
+/// Trait with an embedded ilist_half_node as a sentinel.
+///
+/// FIXME: The downcast in createSentinel() is UB.
+template <typename NodeTy> struct ilist_half_embedded_sentinel_traits {
+ /// Get hold of the node that marks the end of the list.
+ NodeTy *createSentinel() const {
+ // See comment in ilist_embedded_sentinel_traits::createSentinel().
+ return static_cast<NodeTy *>(&Sentinel);
+ }
+ static void destroySentinel(NodeTy *) {}
+
+ NodeTy *provideInitialHead() const { return createSentinel(); }
+ NodeTy *ensureHead(NodeTy *) const { return createSentinel(); }
+ static void noteHead(NodeTy *, NodeTy *) {}
+
+private:
+ mutable ilist_half_node<NodeTy> Sentinel;
+};
+
+/// ilist_node_traits - A fragment for template traits for intrusive list
+/// that provides default node related operations.
+///
+template<typename NodeTy>
+struct ilist_node_traits {
+ static NodeTy *createNode(const NodeTy &V) { return new NodeTy(V); }
+ static void deleteNode(NodeTy *V) { delete V; }
+
+ void addNodeToList(NodeTy *) {}
+ void removeNodeFromList(NodeTy *) {}
+ void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
+ ilist_iterator<NodeTy> /*first*/,
+ ilist_iterator<NodeTy> /*last*/) {}
+};
+
+/// ilist_default_traits - Default template traits for intrusive list.
+/// By inheriting from this, you can easily use default implementations
+/// for all common operations.
+///
+template<typename NodeTy>
+struct ilist_default_traits : public ilist_nextprev_traits<NodeTy>,
+ public ilist_sentinel_traits<NodeTy>,
+ public ilist_node_traits<NodeTy> {
+};
+
+// Template traits for intrusive list. By specializing this template class, you
+// can change which next/prev fields are used to store the links...
+template<typename NodeTy>
+struct ilist_traits : public ilist_default_traits<NodeTy> {};
+
+// Const traits are the same as nonconst traits...
+template<typename Ty>
+struct ilist_traits<const Ty> : public ilist_traits<Ty> {};
+
+//===----------------------------------------------------------------------===//
+// ilist_iterator<Node> - Iterator for intrusive list.
+//
+template<typename NodeTy>
+class ilist_iterator
+ : public std::iterator<std::bidirectional_iterator_tag, NodeTy, ptrdiff_t> {
+
+public:
+ typedef ilist_traits<NodeTy> Traits;
+ typedef std::iterator<std::bidirectional_iterator_tag,
+ NodeTy, ptrdiff_t> super;
+
+ typedef typename super::value_type value_type;
+ typedef typename super::difference_type difference_type;
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+private:
+ pointer NodePtr;
+
+ // ilist_iterator is not a random-access iterator, but it has an
+ // implicit conversion to pointer-type, which is. Declare (but
+ // don't define) these functions as private to help catch
+ // accidental misuse.
+ void operator[](difference_type) const;
+ void operator+(difference_type) const;
+ void operator-(difference_type) const;
+ void operator+=(difference_type) const;
+ void operator-=(difference_type) const;
+ template<class T> void operator<(T) const;
+ template<class T> void operator<=(T) const;
+ template<class T> void operator>(T) const;
+ template<class T> void operator>=(T) const;
+ template<class T> void operator-(T) const;
+public:
+
+ explicit ilist_iterator(pointer NP) : NodePtr(NP) {}
+ explicit ilist_iterator(reference NR) : NodePtr(&NR) {}
+ ilist_iterator() : NodePtr(nullptr) {}
+
+ // This is templated so that we can allow constructing a const iterator from
+ // a nonconst iterator...
+ template<class node_ty>
+ ilist_iterator(const ilist_iterator<node_ty> &RHS)
+ : NodePtr(RHS.getNodePtrUnchecked()) {}
+
+ // This is templated so that we can allow assigning to a const iterator from
+ // a nonconst iterator...
+ template<class node_ty>
+ const ilist_iterator &operator=(const ilist_iterator<node_ty> &RHS) {
+ NodePtr = RHS.getNodePtrUnchecked();
+ return *this;
+ }
+
+ void reset(pointer NP) { NodePtr = NP; }
+
+ // Accessors...
+ explicit operator pointer() const {
+ return NodePtr;
+ }
+
+ reference operator*() const {
+ return *NodePtr;
+ }
+ pointer operator->() const { return &operator*(); }
+
+ // Comparison operators
+ template <class Y> bool operator==(const ilist_iterator<Y> &RHS) const {
+ return NodePtr == RHS.getNodePtrUnchecked();
+ }
+ template <class Y> bool operator!=(const ilist_iterator<Y> &RHS) const {
+ return NodePtr != RHS.getNodePtrUnchecked();
+ }
+
+ // Increment and decrement operators...
+ ilist_iterator &operator--() { // predecrement - Back up
+ NodePtr = Traits::getPrev(NodePtr);
+ assert(NodePtr && "--'d off the beginning of an ilist!");
+ return *this;
+ }
+ ilist_iterator &operator++() { // preincrement - Advance
+ NodePtr = Traits::getNext(NodePtr);
+ return *this;
+ }
+ ilist_iterator operator--(int) { // postdecrement operators...
+ ilist_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+ ilist_iterator operator++(int) { // postincrement operators...
+ ilist_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // Internal interface, do not use...
+ pointer getNodePtrUnchecked() const { return NodePtr; }
+};
+
+// These are to catch errors when people try to use them as random access
+// iterators.
+template<typename T>
+void operator-(int, ilist_iterator<T>) = delete;
+template<typename T>
+void operator-(ilist_iterator<T>,int) = delete;
+
+template<typename T>
+void operator+(int, ilist_iterator<T>) = delete;
+template<typename T>
+void operator+(ilist_iterator<T>,int) = delete;
+
+// operator!=/operator== - Allow mixed comparisons without dereferencing
+// the iterator, which could very likely be pointing to end().
+template<typename T>
+bool operator!=(const T* LHS, const ilist_iterator<const T> &RHS) {
+ return LHS != RHS.getNodePtrUnchecked();
+}
+template<typename T>
+bool operator==(const T* LHS, const ilist_iterator<const T> &RHS) {
+ return LHS == RHS.getNodePtrUnchecked();
+}
+template<typename T>
+bool operator!=(T* LHS, const ilist_iterator<T> &RHS) {
+ return LHS != RHS.getNodePtrUnchecked();
+}
+template<typename T>
+bool operator==(T* LHS, const ilist_iterator<T> &RHS) {
+ return LHS == RHS.getNodePtrUnchecked();
+}
+
+
+// Allow ilist_iterators to convert into pointers to a node automatically when
+// used by the dyn_cast, cast, isa mechanisms...
+
+template<typename From> struct simplify_type;
+
+template<typename NodeTy> struct simplify_type<ilist_iterator<NodeTy> > {
+ typedef NodeTy* SimpleType;
+
+ static SimpleType getSimplifiedValue(ilist_iterator<NodeTy> &Node) {
+ return &*Node;
+ }
+};
+template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > {
+ typedef /*const*/ NodeTy* SimpleType;
+
+ static SimpleType getSimplifiedValue(const ilist_iterator<NodeTy> &Node) {
+ return &*Node;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+//
+/// iplist - The subset of list functionality that can safely be used on nodes
+/// of polymorphic types, i.e. a heterogeneous list with a common base class that
+/// holds the next/prev pointers. The only state of the list itself is a single
+/// pointer to the head of the list.
+///
+/// This list can be in one of three interesting states:
+/// 1. The list may be completely unconstructed. In this case, the head
+/// pointer is null. When in this form, any query for an iterator (e.g.
+/// begin() or end()) causes the list to transparently change to state #2.
+/// 2. The list may be empty, but contain a sentinel for the end iterator. This
+/// sentinel is created by the Traits::createSentinel method and is a link
+/// in the list. When the list is empty, the pointer in the iplist points
+/// to the sentinel. Once the sentinel is constructed, it
+/// is not destroyed until the list is.
+/// 3. The list may contain actual objects in it, which are stored as a doubly
+/// linked list of nodes. One invariant of the list is that the predecessor
+/// of the first node in the list always points to the last node in the list,
+/// and the successor pointer for the sentinel (which always stays at the
+/// end of the list) is always null.
+///
+template<typename NodeTy, typename Traits=ilist_traits<NodeTy> >
+class iplist : public Traits {
+ mutable NodeTy *Head;
+
+ // Use the prev node pointer of 'head' as the tail pointer. This is really a
+ // circularly linked list where we snip the 'next' link from the sentinel node
+ // back to the first node in the list (to preserve assertions about going off
+ // the end of the list).
+ NodeTy *getTail() { return this->ensureHead(Head); }
+ const NodeTy *getTail() const { return this->ensureHead(Head); }
+ void setTail(NodeTy *N) const { this->noteHead(Head, N); }
+
+ /// CreateLazySentinel - This method verifies whether the sentinel for the
+ /// list has been created and lazily makes it if not.
+ void CreateLazySentinel() const {
+ this->ensureHead(Head);
+ }
+
+ static bool op_less(NodeTy &L, NodeTy &R) { return L < R; }
+ static bool op_equal(NodeTy &L, NodeTy &R) { return L == R; }
+
+ // No fundamental reason why iplist can't be copyable, but the default
+ // copy/copy-assign won't do.
+ iplist(const iplist &) = delete;
+ void operator=(const iplist &) = delete;
+
+public:
+ typedef NodeTy *pointer;
+ typedef const NodeTy *const_pointer;
+ typedef NodeTy &reference;
+ typedef const NodeTy &const_reference;
+ typedef NodeTy value_type;
+ typedef ilist_iterator<NodeTy> iterator;
+ typedef ilist_iterator<const NodeTy> const_iterator;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ iplist() : Head(this->provideInitialHead()) {}
+ ~iplist() {
+ if (!Head) return;
+ clear();
+ Traits::destroySentinel(getTail());
+ }
+
+ // Iterator creation methods.
+ iterator begin() {
+ CreateLazySentinel();
+ return iterator(Head);
+ }
+ const_iterator begin() const {
+ CreateLazySentinel();
+ return const_iterator(Head);
+ }
+ iterator end() {
+ CreateLazySentinel();
+ return iterator(getTail());
+ }
+ const_iterator end() const {
+ CreateLazySentinel();
+ return const_iterator(getTail());
+ }
+
+ // reverse iterator creation methods.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+
+ // Miscellaneous inspection routines.
+ size_type max_size() const { return size_type(-1); }
+ bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
+ return !Head || Head == getTail();
+ }
+
+ // Front and back accessor functions...
+ reference front() {
+ assert(!empty() && "Called front() on empty list!");
+ return *Head;
+ }
+ const_reference front() const {
+ assert(!empty() && "Called front() on empty list!");
+ return *Head;
+ }
+ reference back() {
+ assert(!empty() && "Called back() on empty list!");
+ return *this->getPrev(getTail());
+ }
+ const_reference back() const {
+ assert(!empty() && "Called back() on empty list!");
+ return *this->getPrev(getTail());
+ }
+
+ void swap(iplist &RHS) {
+ assert(0 && "Swap does not use list traits callback correctly yet!");
+ std::swap(Head, RHS.Head);
+ }
+
+ iterator insert(iterator where, NodeTy *New) {
+ NodeTy *CurNode = where.getNodePtrUnchecked();
+ NodeTy *PrevNode = this->getPrev(CurNode);
+ this->setNext(New, CurNode);
+ this->setPrev(New, PrevNode);
+
+ if (CurNode != Head) // Is PrevNode off the beginning of the list?
+ this->setNext(PrevNode, New);
+ else
+ Head = New;
+ this->setPrev(CurNode, New);
+
+ this->addNodeToList(New); // Notify traits that we added a node...
+ return iterator(New);
+ }
+
+ iterator insertAfter(iterator where, NodeTy *New) {
+ if (empty())
+ return insert(begin(), New);
+ else
+ return insert(++where, New);
+ }
+
+ NodeTy *remove(iterator &IT) {
+ assert(IT != end() && "Cannot remove end of list!");
+ NodeTy *Node = &*IT;
+ NodeTy *NextNode = this->getNext(Node);
+ NodeTy *PrevNode = this->getPrev(Node);
+
+ if (Node != Head) // Is PrevNode off the beginning of the list?
+ this->setNext(PrevNode, NextNode);
+ else
+ Head = NextNode;
+ this->setPrev(NextNode, PrevNode);
+ IT.reset(NextNode);
+ this->removeNodeFromList(Node); // Notify traits that we removed a node...
+
+ // Set the next/prev pointers of the current node to null. This isn't
+ // strictly required, but this catches errors where a node is removed from
+ // an ilist (and potentially deleted) with iterators still pointing at it.
+ // When those iterators are incremented or decremented, they will assert on
+ // the null next/prev pointer instead of "usually working".
+ this->setNext(Node, nullptr);
+ this->setPrev(Node, nullptr);
+ return Node;
+ }
+
+ NodeTy *remove(const iterator &IT) {
+ iterator MutIt = IT;
+ return remove(MutIt);
+ }
+
+ NodeTy *remove(NodeTy *IT) { return remove(iterator(IT)); }
+ NodeTy *remove(NodeTy &IT) { return remove(iterator(IT)); }
+
+ // erase - remove a node from the controlled sequence... and delete it.
+ iterator erase(iterator where) {
+ this->deleteNode(remove(where));
+ return where;
+ }
+
+ iterator erase(NodeTy *IT) { return erase(iterator(IT)); }
+ iterator erase(NodeTy &IT) { return erase(iterator(IT)); }
+
+ /// Remove all nodes from the list like clear(), but do not call
+ /// removeNodeFromList() or deleteNode().
+ ///
+ /// This should only be used immediately before freeing nodes in bulk to
+ /// avoid traversing the list and bringing all the nodes into cache.
+ void clearAndLeakNodesUnsafely() {
+ if (Head) {
+ Head = getTail();
+ this->setPrev(Head, Head);
+ }
+ }
+
+private:
+ // transfer - The heart of the splice function. Move linked list nodes from
+ // [first, last) into position.
+ //
+ void transfer(iterator position, iplist &L2, iterator first, iterator last) {
+ assert(first != last && "Should be checked by callers");
+ // Position cannot be contained in the range to be transferred.
+ // Check for the most common mistake.
+ assert(position != first &&
+ "Insertion point can't be one of the transferred nodes");
+
+ if (position != last) {
+ // Note: we have to be careful about the case when we move the first node
+ // in the list. This node is the list sentinel node and we can't move it.
+ NodeTy *ThisSentinel = getTail();
+ setTail(nullptr);
+ NodeTy *L2Sentinel = L2.getTail();
+ L2.setTail(nullptr);
+
+ // Remove [first, last) from its old position.
+ NodeTy *First = &*first, *Prev = this->getPrev(First);
+ NodeTy *Next = last.getNodePtrUnchecked(), *Last = this->getPrev(Next);
+ if (Prev)
+ this->setNext(Prev, Next);
+ else
+ L2.Head = Next;
+ this->setPrev(Next, Prev);
+
+ // Splice [first, last) into its new position.
+ NodeTy *PosNext = position.getNodePtrUnchecked();
+ NodeTy *PosPrev = this->getPrev(PosNext);
+
+ // Fix head of list...
+ if (PosPrev)
+ this->setNext(PosPrev, First);
+ else
+ Head = First;
+ this->setPrev(First, PosPrev);
+
+ // Fix end of list...
+ this->setNext(Last, PosNext);
+ this->setPrev(PosNext, Last);
+
+ this->transferNodesFromList(L2, iterator(First), iterator(PosNext));
+
+ // Now that everything is set, restore the pointers to the list sentinels.
+ L2.setTail(L2Sentinel);
+ setTail(ThisSentinel);
+ }
+ }
+
+public:
+
+ //===----------------------------------------------------------------------===
+ // Functionality derived from other functions defined above...
+ //
+
+ size_type LLVM_ATTRIBUTE_UNUSED_RESULT size() const {
+ if (!Head) return 0; // Don't require construction of sentinel if empty.
+ return std::distance(begin(), end());
+ }
+
+ iterator erase(iterator first, iterator last) {
+ while (first != last)
+ first = erase(first);
+ return last;
+ }
+
+ void clear() { if (Head) erase(begin(), end()); }
+
+ // Front and back inserters...
+ void push_front(NodeTy *val) { insert(begin(), val); }
+ void push_back(NodeTy *val) { insert(end(), val); }
+ void pop_front() {
+ assert(!empty() && "pop_front() on empty list!");
+ erase(begin());
+ }
+ void pop_back() {
+ assert(!empty() && "pop_back() on empty list!");
+ iterator t = end(); erase(--t);
+ }
+
+ // Special forms of insert...
+ template<class InIt> void insert(iterator where, InIt first, InIt last) {
+ for (; first != last; ++first) insert(where, *first);
+ }
+
+ // Splice members - defined in terms of transfer...
+ void splice(iterator where, iplist &L2) {
+ if (!L2.empty())
+ transfer(where, L2, L2.begin(), L2.end());
+ }
+ void splice(iterator where, iplist &L2, iterator first) {
+ iterator last = first; ++last;
+ if (where == first || where == last) return; // No change
+ transfer(where, L2, first, last);
+ }
+ void splice(iterator where, iplist &L2, iterator first, iterator last) {
+ if (first != last) transfer(where, L2, first, last);
+ }
+ void splice(iterator where, iplist &L2, NodeTy &N) {
+ splice(where, L2, iterator(N));
+ }
+ void splice(iterator where, iplist &L2, NodeTy *N) {
+ splice(where, L2, iterator(N));
+ }
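+
+ // An illustrative note: splice moves nodes between lists by relinking them,
+ // never by copying or allocating, so (with hypothetical lists A and B)
+ // B.splice(B.begin(), A, A.begin()) transfers A's first node into B in O(1).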
+
+ template <class Compare>
+ void merge(iplist &Right, Compare comp) {
+ if (this == &Right)
+ return;
+ iterator First1 = begin(), Last1 = end();
+ iterator First2 = Right.begin(), Last2 = Right.end();
+ while (First1 != Last1 && First2 != Last2) {
+ if (comp(*First2, *First1)) {
+ iterator Next = First2;
+ transfer(First1, Right, First2, ++Next);
+ First2 = Next;
+ } else {
+ ++First1;
+ }
+ }
+ if (First2 != Last2)
+ transfer(Last1, Right, First2, Last2);
+ }
+ void merge(iplist &Right) { return merge(Right, op_less); }
+
+ template <class Compare>
+ void sort(Compare comp) {
+ // The list is empty, vacuously sorted.
+ if (empty())
+ return;
+ // The list has a single element, vacuously sorted.
+ if (std::next(begin()) == end())
+ return;
+ // Find the split point for the list.
+ iterator Center = begin(), End = begin();
+ while (End != end() && std::next(End) != end()) {
+ Center = std::next(Center);
+ End = std::next(std::next(End));
+ }
+ // Split the list into two.
+ iplist RightHalf;
+ RightHalf.splice(RightHalf.begin(), *this, Center, end());
+
+ // Sort the two sublists.
+ sort(comp);
+ RightHalf.sort(comp);
+
+ // Merge the two sublists back together.
+ merge(RightHalf, comp);
+ }
+ void sort() { sort(op_less); }
+
+ /// \brief Get the previous node, or \c nullptr for the list head.
+ NodeTy *getPrevNode(NodeTy &N) const {
+ auto I = N.getIterator();
+ if (I == begin())
+ return nullptr;
+ return &*std::prev(I);
+ }
+ /// \brief Get the previous node, or \c nullptr for the list head.
+ const NodeTy *getPrevNode(const NodeTy &N) const {
+ return getPrevNode(const_cast<NodeTy &>(N));
+ }
+
+ /// \brief Get the next node, or \c nullptr for the list tail.
+ NodeTy *getNextNode(NodeTy &N) const {
+ auto Next = std::next(N.getIterator());
+ if (Next == end())
+ return nullptr;
+ return &*Next;
+ }
+ /// \brief Get the next node, or \c nullptr for the list tail.
+ const NodeTy *getNextNode(const NodeTy &N) const {
+ return getNextNode(const_cast<NodeTy &>(N));
+ }
+};
+
+
+template<typename NodeTy>
+struct ilist : public iplist<NodeTy> {
+ typedef typename iplist<NodeTy>::size_type size_type;
+ typedef typename iplist<NodeTy>::iterator iterator;
+
+ ilist() {}
+ ilist(const ilist &right) {
+ insert(this->begin(), right.begin(), right.end());
+ }
+ explicit ilist(size_type count) {
+ insert(this->begin(), count, NodeTy());
+ }
+ ilist(size_type count, const NodeTy &val) {
+ insert(this->begin(), count, val);
+ }
+ template<class InIt> ilist(InIt first, InIt last) {
+ insert(this->begin(), first, last);
+ }
+
+ // bring hidden functions into scope
+ using iplist<NodeTy>::insert;
+ using iplist<NodeTy>::push_front;
+ using iplist<NodeTy>::push_back;
+
+ // Main implementation here - Insert for a node passed by value...
+ iterator insert(iterator where, const NodeTy &val) {
+ return insert(where, this->createNode(val));
+ }
+
+
+ // Front and back inserters...
+ void push_front(const NodeTy &val) { insert(this->begin(), val); }
+ void push_back(const NodeTy &val) { insert(this->end(), val); }
+
+ void insert(iterator where, size_type count, const NodeTy &val) {
+ for (; count != 0; --count) insert(where, val);
+ }
+
+ // Assign special forms...
+ void assign(size_type count, const NodeTy &val) {
+ iterator I = this->begin();
+ for (; I != this->end() && count != 0; ++I, --count)
+ *I = val;
+ if (count != 0)
+ insert(this->end(), val, val);
+ else
+ erase(I, this->end());
+ }
+ template<class InIt> void assign(InIt first1, InIt last1) {
+ iterator first2 = this->begin(), last2 = this->end();
+ for ( ; first1 != last1 && first2 != last2; ++first1, ++first2)
+ *first1 = *first2;
+ if (first2 == last2)
+ erase(first1, last1);
+ else
+ insert(last1, first2, last2);
+ }
+
+
+ // Resize members...
+ void resize(size_type newsize, NodeTy val) {
+ iterator i = this->begin();
+ size_type len = 0;
+ for ( ; i != this->end() && len < newsize; ++i, ++len) /* empty*/ ;
+
+ if (len == newsize)
+ erase(i, this->end());
+ else // i == end()
+ insert(this->end(), newsize - len, val);
+ }
+ void resize(size_type newsize) { resize(newsize, NodeTy()); }
+};
+
+} // End llvm namespace
+
+namespace std {
+ // Ensure that swap uses the fast list swap...
+ template<class Ty>
+ void swap(llvm::iplist<Ty> &Left, llvm::iplist<Ty> &Right) {
+ Left.swap(Right);
+ }
+} // End 'std' extensions...
+
+#endif // LLVM_ADT_ILIST_H
diff --git a/contrib/llvm/include/llvm/ADT/ilist_node.h b/contrib/llvm/include/llvm/ADT/ilist_node.h
new file mode 100644
index 0000000..7e5a0e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/ilist_node.h
@@ -0,0 +1,123 @@
+//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ilist_node class template, which is a convenient
+// base class for creating classes that can be used with ilists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ILIST_NODE_H
+#define LLVM_ADT_ILIST_NODE_H
+
+namespace llvm {
+
+template<typename NodeTy>
+struct ilist_traits;
+template <typename NodeTy> struct ilist_embedded_sentinel_traits;
+template <typename NodeTy> struct ilist_half_embedded_sentinel_traits;
+
+/// ilist_half_node - Base class that provides prev services for sentinels.
+///
+template<typename NodeTy>
+class ilist_half_node {
+ friend struct ilist_traits<NodeTy>;
+ friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
+ NodeTy *Prev;
+protected:
+ NodeTy *getPrev() { return Prev; }
+ const NodeTy *getPrev() const { return Prev; }
+ void setPrev(NodeTy *P) { Prev = P; }
+ ilist_half_node() : Prev(nullptr) {}
+};
+
+template<typename NodeTy>
+struct ilist_nextprev_traits;
+
+template <typename NodeTy> class ilist_iterator;
+
+/// ilist_node - Base class that provides next/prev services for nodes
+/// that use ilist_nextprev_traits or ilist_default_traits.
+///
+template<typename NodeTy>
+class ilist_node : private ilist_half_node<NodeTy> {
+ friend struct ilist_nextprev_traits<NodeTy>;
+ friend struct ilist_traits<NodeTy>;
+ friend struct ilist_half_embedded_sentinel_traits<NodeTy>;
+ friend struct ilist_embedded_sentinel_traits<NodeTy>;
+ NodeTy *Next;
+ NodeTy *getNext() { return Next; }
+ const NodeTy *getNext() const { return Next; }
+ void setNext(NodeTy *N) { Next = N; }
+protected:
+ ilist_node() : Next(nullptr) {}
+
+public:
+ ilist_iterator<NodeTy> getIterator() {
+ // FIXME: Stop downcasting to create the iterator (potential UB).
+ return ilist_iterator<NodeTy>(static_cast<NodeTy *>(this));
+ }
+ ilist_iterator<const NodeTy> getIterator() const {
+ // FIXME: Stop downcasting to create the iterator (potential UB).
+ return ilist_iterator<const NodeTy>(static_cast<const NodeTy *>(this));
+ }
+};
+
+/// An ilist node that can access its parent list.
+///
+/// Requires \c NodeTy to have \a getParent() to find the parent node, and the
+/// \c ParentTy to have \a getSublistAccess() to get a reference to the list.
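+///
+/// An illustrative pairing (Func and Instr are hypothetical names):
+///
+/// \code
+///   class Func;
+///   class Instr : public ilist_node_with_parent<Instr, Func> {
+///     Func *Parent;
+///   public:
+///     Func *getParent() const { return Parent; }
+///   };
+///   class Func {
+///     iplist<Instr> Insts;
+///   public:
+///     static iplist<Instr> Func::*getSublistAccess(Instr *) {
+///       return &Func::Insts;
+///     }
+///   };
+/// \endcode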
+template <typename NodeTy, typename ParentTy>
+class ilist_node_with_parent : public ilist_node<NodeTy> {
+protected:
+ ilist_node_with_parent() = default;
+
+private:
+ /// Forward to NodeTy::getParent().
+ ///
+ /// Note: do not use the name "getParent()". We want a compile error
+ /// (instead of recursion) when the subclass fails to implement \a
+ /// getParent().
+ const ParentTy *getNodeParent() const {
+ return static_cast<const NodeTy *>(this)->getParent();
+ }
+
+public:
+ /// @name Adjacent Node Accessors
+ /// @{
+ /// \brief Get the previous node, or \c nullptr for the list head.
+ NodeTy *getPrevNode() {
+ // Should be factored into a reusable function, but then we couldn't use auto
+ // (and would need the type of the list).
+ const auto &List =
+ getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+ return List.getPrevNode(*static_cast<NodeTy *>(this));
+ }
+ /// \brief Get the previous node, or \c nullptr for the list head.
+ const NodeTy *getPrevNode() const {
+ return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
+ }
+
+ /// \brief Get the next node, or \c nullptr for the list tail.
+ NodeTy *getNextNode() {
+ // Should be factored into a reusable function, but then we couldn't use auto
+ // (and would need the type of the list).
+ const auto &List =
+ getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
+ return List.getNextNode(*static_cast<NodeTy *>(this));
+ }
+ /// \brief Get the next node, or \c nullptr for the list tail.
+ const NodeTy *getNextNode() const {
+ return const_cast<ilist_node_with_parent *>(this)->getNextNode();
+ }
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/iterator.h b/contrib/llvm/include/llvm/ADT/iterator.h
new file mode 100644
index 0000000..c307928
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/iterator.h
@@ -0,0 +1,246 @@
+//===- iterator.h - Utilities for using and defining iterators --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_H
+#define LLVM_ADT_ITERATOR_H
+
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+/// \brief CRTP base class which implements the entire standard iterator facade
+/// in terms of a minimal subset of the interface.
+///
+/// Use this when it is reasonable to implement most of the iterator
+/// functionality in terms of a core subset. If you need special behavior or
+/// there are performance implications for this, you may want to override the
+/// relevant members instead.
+///
+/// Note: one abstraction that this does *not* provide is implementing
+/// subtraction in terms of addition by negating the difference. Negation isn't
+/// always information preserving, and I can see very reasonable iterator
+/// designs where this doesn't work well. It doesn't really force much added
+/// boilerplate anyway.
+///
+/// Another abstraction that this doesn't provide is implementing increment in
+/// terms of addition of one. These aren't equivalent for all iterator
+/// categories, and respecting that adds a lot of complexity for little gain.
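+///
+/// A minimal illustrative sketch (IntIter is a hypothetical iterator): a
+/// forward iterator only needs to provide operator==, operator*, and
+/// preincrement; the facade derives operator!= and operator-> from those.
+///
+/// \code
+///   struct IntIter
+///       : iterator_facade_base<IntIter, std::forward_iterator_tag, int> {
+///     int *P = nullptr;
+///     bool operator==(const IntIter &RHS) const { return P == RHS.P; }
+///     int &operator*() const { return *P; }
+///     IntIter &operator++() { ++P; return *this; }
+///     // Re-expose the facade's postincrement, hidden by the line above.
+///     using iterator_facade_base::operator++;
+///   };
+/// \endcode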
+template <typename DerivedT, typename IteratorCategoryT, typename T,
+ typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
+ typename ReferenceT = T &>
+class iterator_facade_base
+ : public std::iterator<IteratorCategoryT, T, DifferenceTypeT, PointerT,
+ ReferenceT> {
+protected:
+ enum {
+ IsRandomAccess =
+ std::is_base_of<std::random_access_iterator_tag, IteratorCategoryT>::value,
+ IsBidirectional =
+ std::is_base_of<std::bidirectional_iterator_tag, IteratorCategoryT>::value,
+ };
+
+public:
+ DerivedT operator+(DifferenceTypeT n) const {
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp += n;
+ return tmp;
+ }
+ friend DerivedT operator+(DifferenceTypeT n, const DerivedT &i) {
+ static_assert(
+ IsRandomAccess,
+ "The '+' operator is only defined for random access iterators.");
+ return i + n;
+ }
+ DerivedT operator-(DifferenceTypeT n) const {
+ static_assert(
+ IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ DerivedT tmp = *static_cast<const DerivedT *>(this);
+ tmp -= n;
+ return tmp;
+ }
+
+ DerivedT &operator++() {
+ return static_cast<DerivedT *>(this)->operator+=(1);
+ }
+ DerivedT operator++(int) {
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ ++*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+ DerivedT &operator--() {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ return static_cast<DerivedT *>(this)->operator-=(1);
+ }
+ DerivedT operator--(int) {
+ static_assert(
+ IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ DerivedT tmp = *static_cast<DerivedT *>(this);
+ --*static_cast<DerivedT *>(this);
+ return tmp;
+ }
+
+ bool operator!=(const DerivedT &RHS) const {
+ return !static_cast<const DerivedT *>(this)->operator==(RHS);
+ }
+
+ bool operator>(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator<(RHS) &&
+ !static_cast<const DerivedT *>(this)->operator==(RHS);
+ }
+ bool operator<=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator>(RHS);
+ }
+ bool operator>=(const DerivedT &RHS) const {
+ static_assert(
+ IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return !static_cast<const DerivedT *>(this)->operator<(RHS);
+ }
+
+ PointerT operator->() const {
+ return &static_cast<const DerivedT *>(this)->operator*();
+ }
+ ReferenceT operator[](DifferenceTypeT n) const {
+ static_assert(IsRandomAccess,
+ "Subscripting is only defined for random access iterators.");
+ return *static_cast<const DerivedT *>(this)->operator+(n);
+ }
+};
+
+/// \brief CRTP base class for adapting an iterator to a different type.
+///
+/// This class can be used through CRTP to adapt one iterator into another.
+/// Typically this is done through providing in the derived class a custom \c
+/// operator* implementation. Other methods can be overridden as well.
+template <
+ typename DerivedT, typename WrappedIteratorT,
+ typename IteratorCategoryT =
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ typename T = typename std::iterator_traits<WrappedIteratorT>::value_type,
+ typename DifferenceTypeT =
+ typename std::iterator_traits<WrappedIteratorT>::difference_type,
+ typename PointerT = T *, typename ReferenceT = T &,
+ // Don't provide these, they are mostly to act as aliases below.
+ typename WrappedTraitsT = std::iterator_traits<WrappedIteratorT>>
+class iterator_adaptor_base
+ : public iterator_facade_base<DerivedT, IteratorCategoryT, T,
+ DifferenceTypeT, PointerT, ReferenceT> {
+ typedef typename iterator_adaptor_base::iterator_facade_base BaseT;
+
+protected:
+ WrappedIteratorT I;
+
+ iterator_adaptor_base() = default;
+
+ template <typename U>
+ explicit iterator_adaptor_base(
+ U &&u,
+ typename std::enable_if<
+ !std::is_base_of<typename std::remove_cv<
+ typename std::remove_reference<U>::type>::type,
+ DerivedT>::value,
+ int>::type = 0)
+ : I(std::forward<U &&>(u)) {}
+
+ const WrappedIteratorT &wrapped() const { return I; }
+
+public:
+ typedef DifferenceTypeT difference_type;
+
+ DerivedT &operator+=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '+=' operator is only defined for random access iterators.");
+ I += n;
+ return *static_cast<DerivedT *>(this);
+ }
+ DerivedT &operator-=(difference_type n) {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-=' operator is only defined for random access iterators.");
+ I -= n;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator-;
+ difference_type operator-(const DerivedT &RHS) const {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "The '-' operator is only defined for random access iterators.");
+ return I - RHS.I;
+ }
+
+ // We have to explicitly provide ++ and -- rather than letting the facade
+ // forward to += because WrappedIteratorT might not support +=.
+ using BaseT::operator++;
+ DerivedT &operator++() {
+ ++I;
+ return *static_cast<DerivedT *>(this);
+ }
+ using BaseT::operator--;
+ DerivedT &operator--() {
+ static_assert(
+ BaseT::IsBidirectional,
+ "The decrement operator is only defined for bidirectional iterators.");
+ --I;
+ return *static_cast<DerivedT *>(this);
+ }
+
+ bool operator==(const DerivedT &RHS) const { return I == RHS.I; }
+ bool operator<(const DerivedT &RHS) const {
+ static_assert(
+ BaseT::IsRandomAccess,
+ "Relational operators are only defined for random access iterators.");
+ return I < RHS.I;
+ }
+
+ ReferenceT operator*() const { return *I; }
+};
+
+/// \brief An iterator type that allows iterating over the pointees via some
+/// other iterator.
+///
+/// The typical usage of this is to expose a type that iterates over Ts, but
+/// which is implemented with some iterator over T*s:
+///
+/// \code
+/// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator;
+/// \endcode
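+///
+/// An illustrative usage sketch (V is a hypothetical container):
+///
+/// \code
+///   std::vector<int *> V = makePointers();
+///   pointee_iterator<std::vector<int *>::iterator> I(V.begin()), E(V.end());
+///   for (; I != E; ++I)
+///     *I += 1; // *I is an int&, not an int*
+/// \endcode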
+template <typename WrappedIteratorT,
+ typename T = typename std::remove_reference<
+ decltype(**std::declval<WrappedIteratorT>())>::type>
+struct pointee_iterator
+ : iterator_adaptor_base<
+ pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
+ typename std::iterator_traits<WrappedIteratorT>::iterator_category,
+ T> {
+ pointee_iterator() = default;
+ template <typename U>
+ pointee_iterator(U &&u)
+ : pointee_iterator::iterator_adaptor_base(std::forward<U &&>(u)) {}
+
+ T &operator*() const { return **this->I; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ADT/iterator_range.h b/contrib/llvm/include/llvm/ADT/iterator_range.h
new file mode 100644
index 0000000..3dd679b
--- /dev/null
+++ b/contrib/llvm/include/llvm/ADT/iterator_range.h
@@ -0,0 +1,68 @@
+//===- iterator_range.h - A range adaptor for iterators ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This provides a very simple, boring adaptor for a begin and end iterator
+/// into a range type. This should be used to build range views that work well
+/// with range based for loops and range based constructors.
+///
+/// Note that code here follows more standards-based coding conventions, as it
+/// mirrors interfaces proposed for standardization.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ITERATOR_RANGE_H
+#define LLVM_ADT_ITERATOR_RANGE_H
+
+#include <utility>
+#include <iterator>
+
+namespace llvm {
+
+/// \brief A range adaptor for a pair of iterators.
+///
+/// This just wraps two iterators into a range-compatible interface. Nothing
+/// fancy at all.
+template <typename IteratorT>
+class iterator_range {
+ IteratorT begin_iterator, end_iterator;
+
+public:
+ //TODO: Add SFINAE to test that the Container's iterators match the range's
+ // iterators.
+ template <typename Container>
+ iterator_range(Container &&c)
+ //TODO: Consider ADL/non-member begin/end calls.
+ : begin_iterator(c.begin()), end_iterator(c.end()) {}
+ iterator_range(IteratorT begin_iterator, IteratorT end_iterator)
+ : begin_iterator(std::move(begin_iterator)),
+ end_iterator(std::move(end_iterator)) {}
+
+ IteratorT begin() const { return begin_iterator; }
+ IteratorT end() const { return end_iterator; }
+};
+
+/// \brief Convenience function for iterating over sub-ranges.
+///
+/// This provides a bit of syntactic sugar to make using sub-ranges
+/// in for loops a bit easier. Analogous to std::make_pair().
+template <class T> iterator_range<T> make_range(T x, T y) {
+ return iterator_range<T>(std::move(x), std::move(y));
+}
+
+template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
+ return iterator_range<T>(std::move(p.first), std::move(p.second));
+}
+
+template<typename T>
+iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {
+ return make_range(std::next(begin(t), n), end(t));
+}
+}
+
+#endif
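
A usage sketch (V and the consume callback are hypothetical):

    std::vector<int> V = {1, 2, 3, 4};
    // Wrap a sub-range so it works with range-based for loops:
    for (int X : make_range(V.begin() + 1, V.end()))
      consume(X); // sees 2, 3, 4
    // drop_begin builds the same kind of view, skipping the first N elements:
    for (int X : drop_begin(V, 2))
      consume(X); // sees 3, 4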
diff --git a/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
new file mode 100644
index 0000000..5cc840a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/AliasAnalysis.h
@@ -0,0 +1,1072 @@
+//===- llvm/Analysis/AliasAnalysis.h - Alias Analysis Interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the generic AliasAnalysis interface, which is used as the
+// common interface used by all clients of alias analysis information, and
+// implemented by all alias analysis implementations. Mod/Ref information is
+// also captured by this interface.
+//
+// Implementations of this interface must implement the various virtual
+// methods; doing so automatically provides functionality for the entire suite
+// of client APIs.
+//
+// This API identifies memory regions with the MemoryLocation class. The pointer
+// component specifies the base memory address of the region. The Size specifies
+// the maximum size (in address units) of the memory region, or
+// MemoryLocation::UnknownSize if the size is not known. The TBAA tag
+// identifies the "type" of the memory reference; see the
+// TypeBasedAliasAnalysis class for details.
+//
+// Some non-obvious details include:
+// - Pointers that point to two completely different objects in memory never
+// alias, regardless of the value of the Size component.
+//  - NoAlias doesn't imply unequal pointers. The most obvious example of this
+// is two pointers to constant memory. Even if they are equal, constant
+// memory is never stored to, so there will never be any dependencies.
+// In this and other situations, the pointers may be both NoAlias and
+// MustAlias at the same time. The current API can only return one result,
+// though this is rarely a problem in practice.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ALIASANALYSIS_H
+#define LLVM_ANALYSIS_ALIASANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Analysis/MemoryLocation.h"
+
+namespace llvm {
+class BasicAAResult;
+class LoadInst;
+class StoreInst;
+class VAArgInst;
+class DataLayout;
+class TargetLibraryInfo;
+class Pass;
+class AnalysisUsage;
+class MemTransferInst;
+class MemIntrinsic;
+class DominatorTree;
+class OrderedBasicBlock;
+
+/// The possible results of an alias query.
+///
+/// These results are always computed between two MemoryLocation objects as
+/// a query to some alias analysis.
+///
+/// Note that these are unscoped enumerations because we would like to support
+/// implicitly testing a result for the existence of any possible aliasing with
+/// a conversion to bool, but an "enum class" doesn't support this. The
+/// canonical names from the literature are suffixed and unique anyway, and so
+/// they serve as global constants in LLVM for these results.
+///
+/// See docs/AliasAnalysis.html for more information on the specific meanings
+/// of these values.
+enum AliasResult {
+ /// The two locations do not alias at all.
+ ///
+ /// This value is arranged to convert to false, while all other values
+ /// convert to true. This allows a boolean context to convert the result to
+ /// a binary flag indicating whether there is the possibility of aliasing.
+ NoAlias = 0,
+ /// The two locations may or may not alias. This is the least precise result.
+ MayAlias,
+ /// The two locations alias, but only due to a partial overlap.
+ PartialAlias,
+ /// The two locations precisely alias each other.
+ MustAlias,
+};
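
Because NoAlias is zero, a query result can gate a conservative path
directly; a sketch (AA, LocA, and LocB are assumed to be in scope):

    if (AA.alias(LocA, LocB)) {
      // MayAlias, PartialAlias, or MustAlias: assume the accesses interfere.
    } else {
      // NoAlias: the two locations are disjoint.
    }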
+
+/// Flags indicating whether a memory access modifies or references memory.
+///
+/// This is no access at all, a modification, a reference, or both
+/// a modification and a reference. These are specifically structured such that
+/// they form a two-bit matrix, and bit-tests for 'mod' or 'ref' work with any
+/// of the possible values.
+enum ModRefInfo {
+ /// The access neither references nor modifies the value stored in memory.
+ MRI_NoModRef = 0,
+ /// The access references the value stored in memory.
+ MRI_Ref = 1,
+ /// The access modifies the value stored in memory.
+ MRI_Mod = 2,
+ /// The access both references and modifies the value stored in memory.
+ MRI_ModRef = MRI_Ref | MRI_Mod
+};
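
The two-bit layout makes the mod and ref components independently testable;
a sketch:

    ModRefInfo MRI = MRI_ModRef;
    bool MayWrite = (MRI & MRI_Mod) != 0; // true for MRI_Mod and MRI_ModRef
    bool MayRead = (MRI & MRI_Ref) != 0;  // true for MRI_Ref and MRI_ModRef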
+
+/// The locations at which a function might access memory.
+///
+/// These are primarily used in conjunction with the \c AccessKind bits to
+/// describe both the nature of access and the locations of access for a
+/// function call.
+enum FunctionModRefLocation {
+ /// Base case is no access to memory.
+ FMRL_Nowhere = 0,
+ /// Access to memory via argument pointers.
+ FMRL_ArgumentPointees = 4,
+ /// Access to any memory.
+ FMRL_Anywhere = 8 | FMRL_ArgumentPointees
+};
+
+/// Summary of how a function affects memory in the program.
+///
+/// Loads from constant globals are not considered memory accesses for this
+/// interface. Also, functions may freely modify stack space local to their
+/// invocation without having to report it through these interfaces.
+enum FunctionModRefBehavior {
+ /// This function does not perform any non-local loads or stores to memory.
+ ///
+ /// This property corresponds to the GCC 'const' attribute.
+ /// This property corresponds to the LLVM IR 'readnone' attribute.
+ /// This property corresponds to the IntrNoMem LLVM intrinsic flag.
+ FMRB_DoesNotAccessMemory = FMRL_Nowhere | MRI_NoModRef,
+
+ /// The only memory references in this function (if it has any) are
+ /// non-volatile loads from objects pointed to by its pointer-typed
+ /// arguments, with arbitrary offsets.
+ ///
+ /// This property corresponds to the IntrReadArgMem LLVM intrinsic flag.
+ FMRB_OnlyReadsArgumentPointees = FMRL_ArgumentPointees | MRI_Ref,
+
+ /// The only memory references in this function (if it has any) are
+ /// non-volatile loads and stores from objects pointed to by its
+ /// pointer-typed arguments, with arbitrary offsets.
+ ///
+ /// This property corresponds to the IntrReadWriteArgMem LLVM intrinsic flag.
+ FMRB_OnlyAccessesArgumentPointees = FMRL_ArgumentPointees | MRI_ModRef,
+
+ /// This function does not perform any non-local stores or volatile loads,
+ /// but may read from any memory location.
+ ///
+ /// This property corresponds to the GCC 'pure' attribute.
+ /// This property corresponds to the LLVM IR 'readonly' attribute.
+ /// This property corresponds to the IntrReadMem LLVM intrinsic flag.
+ FMRB_OnlyReadsMemory = FMRL_Anywhere | MRI_Ref,
+
+ /// This indicates that the function could not be classified into one of the
+ /// behaviors above.
+ FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
+};
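
Each behavior is the OR of a location part and a mod/ref part, so
classification reduces to bit tests, mirroring the static helpers defined on
AAResults below; a sketch:

    FunctionModRefBehavior MRB = FMRB_OnlyReadsArgumentPointees;
    bool ReadsOnly = !(MRB & MRI_Mod);      // no stores anywhere
    bool ArgsOnly = !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);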
+
+class AAResults {
+public:
+  // Make these results default constructible and movable. We have to spell
+ // these out because MSVC won't synthesize them.
+ AAResults() {}
+ AAResults(AAResults &&Arg);
+ AAResults &operator=(AAResults &&Arg);
+ ~AAResults();
+
+ /// Register a specific AA result.
+ template <typename AAResultT> void addAAResult(AAResultT &AAResult) {
+ // FIXME: We should use a much lighter weight system than the usual
+ // polymorphic pattern because we don't own AAResult. It should
+ // ideally involve two pointers and no separate allocation.
+ AAs.emplace_back(new Model<AAResultT>(AAResult, *this));
+ }
+
+ //===--------------------------------------------------------------------===//
+ /// \name Alias Queries
+ /// @{
+
+ /// The main low level interface to the alias analysis implementation.
+ /// Returns an AliasResult indicating whether the two pointers are aliased to
+ /// each other. This is the interface that must be implemented by specific
+ /// alias analysis implementations.
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+ /// A convenience wrapper around the primary \c alias interface.
+ AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
+ uint64_t V2Size) {
+ return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
+ }
+
+ /// A convenience wrapper around the primary \c alias interface.
+ AliasResult alias(const Value *V1, const Value *V2) {
+ return alias(V1, MemoryLocation::UnknownSize, V2,
+ MemoryLocation::UnknownSize);
+ }
+
+ /// A trivial helper function to check to see if the specified pointers are
+ /// no-alias.
+ bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return alias(LocA, LocB) == NoAlias;
+ }
+
+ /// A convenience wrapper around the \c isNoAlias helper interface.
+ bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
+ uint64_t V2Size) {
+ return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
+ }
+
+ /// A convenience wrapper around the \c isNoAlias helper interface.
+ bool isNoAlias(const Value *V1, const Value *V2) {
+ return isNoAlias(MemoryLocation(V1), MemoryLocation(V2));
+ }
+
+ /// A trivial helper function to check to see if the specified pointers are
+ /// must-alias.
+ bool isMustAlias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return alias(LocA, LocB) == MustAlias;
+ }
+
+ /// A convenience wrapper around the \c isMustAlias helper interface.
+ bool isMustAlias(const Value *V1, const Value *V2) {
+ return alias(V1, 1, V2, 1) == MustAlias;
+ }
+
+ /// Checks whether the given location points to constant memory, or if
+ /// \p OrLocal is true whether it points to a local alloca.
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal = false);
+
+ /// A convenience wrapper around the primary \c pointsToConstantMemory
+ /// interface.
+ bool pointsToConstantMemory(const Value *P, bool OrLocal = false) {
+ return pointsToConstantMemory(MemoryLocation(P), OrLocal);
+ }
+
+ /// @}
+ //===--------------------------------------------------------------------===//
+ /// \name Simple mod/ref information
+ /// @{
+
+ /// Get the ModRef info associated with a pointer argument of a callsite. The
+ /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
+ /// that these bits do not necessarily account for the overall behavior of
+ /// the function, but rather only provide additional per-argument
+ /// information.
+ ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+
+ /// Return the behavior of the given call site.
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+ /// Return the behavior when calling the given function.
+ FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+ /// Checks if the specified call is known to never read or write memory.
+ ///
+ /// Note that if the call only reads from known-constant memory, it is also
+ /// legal to return true. Also, calls that unwind the stack are legal for
+ /// this predicate.
+ ///
+ /// Many optimizations (such as CSE and LICM) can be performed on such calls
+ /// without worrying about aliasing properties, and many calls have this
+ /// property (e.g. calls to 'sin' and 'cos').
+ ///
+ /// This property corresponds to the GCC 'const' attribute.
+ bool doesNotAccessMemory(ImmutableCallSite CS) {
+ return getModRefBehavior(CS) == FMRB_DoesNotAccessMemory;
+ }
+
+ /// Checks if the specified function is known to never read or write memory.
+ ///
+ /// Note that if the function only reads from known-constant memory, it is
+  /// also legal to return true. Also, functions that unwind the stack are
+  /// legal for this predicate.
+ ///
+ /// Many optimizations (such as CSE and LICM) can be performed on such calls
+ /// to such functions without worrying about aliasing properties, and many
+ /// functions have this property (e.g. 'sin' and 'cos').
+ ///
+ /// This property corresponds to the GCC 'const' attribute.
+ bool doesNotAccessMemory(const Function *F) {
+ return getModRefBehavior(F) == FMRB_DoesNotAccessMemory;
+ }
+
+ /// Checks if the specified call is known to only read from non-volatile
+ /// memory (or not access memory at all).
+ ///
+ /// Calls that unwind the stack are legal for this predicate.
+ ///
+ /// This property allows many common optimizations to be performed in the
+ /// absence of interfering store instructions, such as CSE of strlen calls.
+ ///
+ /// This property corresponds to the GCC 'pure' attribute.
+ bool onlyReadsMemory(ImmutableCallSite CS) {
+ return onlyReadsMemory(getModRefBehavior(CS));
+ }
+
+ /// Checks if the specified function is known to only read from non-volatile
+ /// memory (or not access memory at all).
+ ///
+ /// Functions that unwind the stack are legal for this predicate.
+ ///
+ /// This property allows many common optimizations to be performed in the
+ /// absence of interfering store instructions, such as CSE of strlen calls.
+ ///
+ /// This property corresponds to the GCC 'pure' attribute.
+ bool onlyReadsMemory(const Function *F) {
+ return onlyReadsMemory(getModRefBehavior(F));
+ }
+
+ /// Checks if functions with the specified behavior are known to only read
+ /// from non-volatile memory (or not access memory at all).
+ static bool onlyReadsMemory(FunctionModRefBehavior MRB) {
+ return !(MRB & MRI_Mod);
+ }
+
+ /// Checks if functions with the specified behavior are known to read and
+ /// write at most from objects pointed to by their pointer-typed arguments
+ /// (with arbitrary offsets).
+ static bool onlyAccessesArgPointees(FunctionModRefBehavior MRB) {
+ return !(MRB & FMRL_Anywhere & ~FMRL_ArgumentPointees);
+ }
+
+ /// Checks if functions with the specified behavior are known to potentially
+  /// read or write from objects pointed to by their pointer-typed arguments
+ /// (with arbitrary offsets).
+ static bool doesAccessArgPointees(FunctionModRefBehavior MRB) {
+ return (MRB & MRI_ModRef) && (MRB & FMRL_ArgumentPointees);
+ }
+
+ /// getModRefInfo (for call sites) - Return information about whether
+ /// a particular call site modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for call sites) - A convenience wrapper.
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
+ uint64_t Size) {
+ return getModRefInfo(CS, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for calls) - Return information about whether
+ /// a particular call modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const CallInst *C, const MemoryLocation &Loc) {
+ return getModRefInfo(ImmutableCallSite(C), Loc);
+ }
+
+ /// getModRefInfo (for calls) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
+ return getModRefInfo(C, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for invokes) - Return information about whether
+ /// a particular invoke modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const InvokeInst *I, const MemoryLocation &Loc) {
+ return getModRefInfo(ImmutableCallSite(I), Loc);
+ }
+
+ /// getModRefInfo (for invokes) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
+ return getModRefInfo(I, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for loads) - Return information about whether
+ /// a particular load modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for loads) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
+ return getModRefInfo(L, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for stores) - Return information about whether
+ /// a particular store modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for stores) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
+ return getModRefInfo(S, MemoryLocation(P, Size));
+ }
+
+  /// getModRefInfo (for fences) - Return information about whether
+  /// a particular fence modifies or reads the specified memory location.
+  ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc) {
+    // Conservatively correct. (We could possibly be a bit smarter if
+    // Loc is an alloca that doesn't escape.)
+ return MRI_ModRef;
+ }
+
+ /// getModRefInfo (for fences) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
+ return getModRefInfo(S, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for cmpxchges) - Return information about whether
+ /// a particular cmpxchg modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc);
+
+ /// getModRefInfo (for cmpxchges) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,
+ unsigned Size) {
+ return getModRefInfo(CX, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for atomicrmws) - Return information about whether
+ /// a particular atomicrmw modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for atomicrmws) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,
+ unsigned Size) {
+ return getModRefInfo(RMW, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for va_args) - Return information about whether
+ /// a particular va_arg modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for va_args) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
+ return getModRefInfo(I, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for catchpads) - Return information about whether
+ /// a particular catchpad modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const CatchPadInst *I, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for catchpads) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
+ uint64_t Size) {
+ return getModRefInfo(I, MemoryLocation(P, Size));
+ }
+
+ /// getModRefInfo (for catchrets) - Return information about whether
+ /// a particular catchret modifies or reads the specified memory location.
+ ModRefInfo getModRefInfo(const CatchReturnInst *I, const MemoryLocation &Loc);
+
+ /// getModRefInfo (for catchrets) - A convenience wrapper.
+ ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
+ uint64_t Size) {
+ return getModRefInfo(I, MemoryLocation(P, Size));
+ }
+
+ /// Check whether or not an instruction may read or write memory (without
+ /// regard to a specific location).
+ ///
+ /// For function calls, this delegates to the alias-analysis specific
+ /// call-site mod-ref behavior queries. Otherwise it delegates to the generic
+ /// mod ref information query without a location.
+ ModRefInfo getModRefInfo(const Instruction *I) {
+ if (auto CS = ImmutableCallSite(I)) {
+ auto MRB = getModRefBehavior(CS);
+ if (MRB & MRI_ModRef)
+ return MRI_ModRef;
+ else if (MRB & MRI_Ref)
+ return MRI_Ref;
+ else if (MRB & MRI_Mod)
+ return MRI_Mod;
+ return MRI_NoModRef;
+ }
+
+ return getModRefInfo(I, MemoryLocation());
+ }
+
+ /// Check whether or not an instruction may read or write the specified
+ /// memory location.
+ ///
+ /// An instruction that doesn't read or write memory may be trivially LICM'd
+ /// for example.
+ ///
+ /// This primarily delegates to specific helpers above.
+ ModRefInfo getModRefInfo(const Instruction *I, const MemoryLocation &Loc) {
+ switch (I->getOpcode()) {
+ case Instruction::VAArg: return getModRefInfo((const VAArgInst*)I, Loc);
+ case Instruction::Load: return getModRefInfo((const LoadInst*)I, Loc);
+ case Instruction::Store: return getModRefInfo((const StoreInst*)I, Loc);
+ case Instruction::Fence: return getModRefInfo((const FenceInst*)I, Loc);
+ case Instruction::AtomicCmpXchg:
+ return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);
+ case Instruction::AtomicRMW:
+ return getModRefInfo((const AtomicRMWInst*)I, Loc);
+ case Instruction::Call: return getModRefInfo((const CallInst*)I, Loc);
+ case Instruction::Invoke: return getModRefInfo((const InvokeInst*)I,Loc);
+ case Instruction::CatchPad:
+ return getModRefInfo((const CatchPadInst *)I, Loc);
+ case Instruction::CatchRet:
+ return getModRefInfo((const CatchReturnInst *)I, Loc);
+ default:
+ return MRI_NoModRef;
+ }
+ }
+
+ /// A convenience wrapper for constructing the memory location.
+ ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
+ uint64_t Size) {
+ return getModRefInfo(I, MemoryLocation(P, Size));
+ }
+
+ /// Return information about whether a call and an instruction may refer to
+ /// the same memory locations.
+ ModRefInfo getModRefInfo(Instruction *I, ImmutableCallSite Call);
+
+ /// Return information about whether two call sites may refer to the same set
+ /// of memory locations. See the AA documentation for details:
+ /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+ /// \brief Return information about whether a particular call site modifies
+ /// or reads the specified memory location \p MemLoc before instruction \p I
+  /// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
+ /// instruction ordering queries inside the BasicBlock containing \p I.
+ ModRefInfo callCapturesBefore(const Instruction *I,
+ const MemoryLocation &MemLoc, DominatorTree *DT,
+ OrderedBasicBlock *OBB = nullptr);
+
+ /// \brief A convenience wrapper to synthesize a memory location.
+ ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
+ uint64_t Size, DominatorTree *DT,
+ OrderedBasicBlock *OBB = nullptr) {
+ return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
+ }
+
+ /// @}
+ //===--------------------------------------------------------------------===//
+ /// \name Higher level methods for querying mod/ref information.
+ /// @{
+
+ /// Check if it is possible for execution of the specified basic block to
+ /// modify the location Loc.
+ bool canBasicBlockModify(const BasicBlock &BB, const MemoryLocation &Loc);
+
+ /// A convenience wrapper synthesizing a memory location.
+ bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
+ uint64_t Size) {
+ return canBasicBlockModify(BB, MemoryLocation(P, Size));
+ }
+
+ /// Check if it is possible for the execution of the specified instructions
+  /// to mod/ref (according to the mode) the location Loc.
+ ///
+ /// The instructions to consider are all of the instructions in the range of
+ /// [I1,I2] INCLUSIVE. I1 and I2 must be in the same basic block.
+ bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
+ const MemoryLocation &Loc,
+ const ModRefInfo Mode);
+
+ /// A convenience wrapper synthesizing a memory location.
+ bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
+ const Value *Ptr, uint64_t Size,
+ const ModRefInfo Mode) {
+ return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
+ }
+
+private:
+ class Concept;
+ template <typename T> class Model;
+
+ template <typename T> friend class AAResultBase;
+
+ std::vector<std::unique_ptr<Concept>> AAs;
+};
+
+/// Temporary typedef for legacy code that uses a generic \c AliasAnalysis
+/// pointer or reference.
+typedef AAResults AliasAnalysis;
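
A sketch of how the aggregation is used (BasicAA is an assumed, separately
constructed BasicAAResult; PtrA and PtrB are hypothetical):

    AAResults AA;
    AA.addAAResult(BasicAA); // results are consulted in registration order
    if (AA.isNoAlias(PtrA, PtrB)) {
      // The two pointers provably refer to disjoint memory.
    }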
+
+/// A private abstract base class describing the concept of an individual alias
+/// analysis implementation.
+///
+/// This interface is implemented by any \c Model instantiation. It is also the
+/// interface which a type used to instantiate the model must provide.
+///
+/// Each of these methods models a method of the same name in the \c AAResults
+/// class. Only differences in, and specifics of, how the implementations are
+/// called are documented here.
+class AAResults::Concept {
+public:
+ virtual ~Concept() = 0;
+
+ /// An update API used internally by the AAResults to provide
+ /// a handle back to the top level aggregation.
+ virtual void setAAResults(AAResults *NewAAR) = 0;
+
+ //===--------------------------------------------------------------------===//
+ /// \name Alias Queries
+ /// @{
+
+ /// The main low level interface to the alias analysis implementation.
+ /// Returns an AliasResult indicating whether the two pointers are aliased to
+ /// each other. This is the interface that must be implemented by specific
+ /// alias analysis implementations.
+ virtual AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) = 0;
+
+ /// Checks whether the given location points to constant memory, or if
+ /// \p OrLocal is true whether it points to a local alloca.
+ virtual bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) = 0;
+
+ /// @}
+ //===--------------------------------------------------------------------===//
+ /// \name Simple mod/ref information
+ /// @{
+
+ /// Get the ModRef info associated with a pointer argument of a callsite. The
+ /// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
+ /// that these bits do not necessarily account for the overall behavior of
+ /// the function, but rather only provide additional per-argument
+ /// information.
+ virtual ModRefInfo getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) = 0;
+
+ /// Return the behavior of the given call site.
+ virtual FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) = 0;
+
+ /// Return the behavior when calling the given function.
+ virtual FunctionModRefBehavior getModRefBehavior(const Function *F) = 0;
+
+ /// getModRefInfo (for call sites) - Return information about whether
+ /// a particular call site modifies or reads the specified memory location.
+ virtual ModRefInfo getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) = 0;
+
+ /// Return information about whether two call sites may refer to the same set
+ /// of memory locations. See the AA documentation for details:
+ /// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
+ virtual ModRefInfo getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) = 0;
+
+ /// @}
+};
+
+/// A private class template which derives from \c Concept and wraps some other
+/// type.
+///
+/// This models the concept by directly forwarding each interface point to the
+/// wrapped type which must implement a compatible interface. This provides
+/// a type erased binding.
+template <typename AAResultT> class AAResults::Model final : public Concept {
+ AAResultT &Result;
+
+public:
+ explicit Model(AAResultT &Result, AAResults &AAR) : Result(Result) {
+ Result.setAAResults(&AAR);
+ }
+ ~Model() override {}
+
+ void setAAResults(AAResults *NewAAR) override { Result.setAAResults(NewAAR); }
+
+ AliasResult alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) override {
+ return Result.alias(LocA, LocB);
+ }
+
+ bool pointsToConstantMemory(const MemoryLocation &Loc,
+ bool OrLocal) override {
+ return Result.pointsToConstantMemory(Loc, OrLocal);
+ }
+
+ ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) override {
+ return Result.getArgModRefInfo(CS, ArgIdx);
+ }
+
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) override {
+ return Result.getModRefBehavior(CS);
+ }
+
+ FunctionModRefBehavior getModRefBehavior(const Function *F) override {
+ return Result.getModRefBehavior(F);
+ }
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) override {
+ return Result.getModRefInfo(CS, Loc);
+ }
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) override {
+ return Result.getModRefInfo(CS1, CS2);
+ }
+};
+
+/// A CRTP-driven "mixin" base class to help implement the function alias
+/// analysis results concept.
+///
+/// Because of the nature of many alias analysis implementations, they often
+/// only implement a subset of the interface. This base class will attempt to
+/// implement the remaining portions of the interface in terms of simpler forms
+/// of the interface where possible, and otherwise provide conservatively
+/// correct fallback implementations.
+///
+/// Implementors of an alias analysis should derive from this CRTP, and then
+/// override specific methods that they wish to customize. There is no need to
+/// use virtual anywhere; the CRTP base class does static dispatch to the
+/// derived type passed into it.
+template <typename DerivedT> class AAResultBase {
+ // Expose some parts of the interface only to the AAResults::Model
+ // for wrapping. Specifically, this allows the model to call our
+ // setAAResults method without exposing it as a fully public API.
+ friend class AAResults::Model<DerivedT>;
+
+ /// A pointer to the AAResults object that this AAResult is
+ /// aggregated within. May be null if not aggregated.
+ AAResults *AAR;
+
+ /// Helper to dispatch calls back through the derived type.
+ DerivedT &derived() { return static_cast<DerivedT &>(*this); }
+
+ /// A setter for the AAResults pointer, which is used to satisfy the
+ /// AAResults::Model contract.
+ void setAAResults(AAResults *NewAAR) { AAR = NewAAR; }
+
+protected:
+ /// This proxy class models a common pattern where we delegate to either the
+ /// top-level \c AAResults aggregation if one is registered, or to the
+ /// current result if none are registered.
+ class AAResultsProxy {
+ AAResults *AAR;
+ DerivedT &CurrentResult;
+
+ public:
+ AAResultsProxy(AAResults *AAR, DerivedT &CurrentResult)
+ : AAR(AAR), CurrentResult(CurrentResult) {}
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return AAR ? AAR->alias(LocA, LocB) : CurrentResult.alias(LocA, LocB);
+ }
+
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+ return AAR ? AAR->pointsToConstantMemory(Loc, OrLocal)
+ : CurrentResult.pointsToConstantMemory(Loc, OrLocal);
+ }
+
+ ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+      return AAR ? AAR->getArgModRefInfo(CS, ArgIdx)
+                 : CurrentResult.getArgModRefInfo(CS, ArgIdx);
+ }
+
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+      return AAR ? AAR->getModRefBehavior(CS)
+                 : CurrentResult.getModRefBehavior(CS);
+ }
+
+ FunctionModRefBehavior getModRefBehavior(const Function *F) {
+      return AAR ? AAR->getModRefBehavior(F)
+                 : CurrentResult.getModRefBehavior(F);
+ }
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc) {
+ return AAR ? AAR->getModRefInfo(CS, Loc)
+ : CurrentResult.getModRefInfo(CS, Loc);
+ }
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+      return AAR ? AAR->getModRefInfo(CS1, CS2)
+                 : CurrentResult.getModRefInfo(CS1, CS2);
+ }
+ };
+
+ const TargetLibraryInfo &TLI;
+
+ explicit AAResultBase(const TargetLibraryInfo &TLI) : TLI(TLI) {}
+
+ // Provide all the copy and move constructors so that derived types aren't
+ // constrained.
+ AAResultBase(const AAResultBase &Arg) : TLI(Arg.TLI) {}
+ AAResultBase(AAResultBase &&Arg) : TLI(Arg.TLI) {}
+
+ /// Get a proxy for the best AA result set to query at this time.
+ ///
+ /// When this result is part of a larger aggregation, this will proxy to that
+ /// aggregation. When this result is used in isolation, it will just delegate
+ /// back to the derived class's implementation.
+ AAResultsProxy getBestAAResults() { return AAResultsProxy(AAR, derived()); }
+
+public:
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ return MayAlias;
+ }
+
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal) {
+ return false;
+ }
+
+ ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
+ return MRI_ModRef;
+ }
+
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS) {
+ if (!CS.hasOperandBundles())
+ // If CS has operand bundles then aliasing attributes from the function it
+ // calls do not directly apply to the CallSite. This can be made more
+ // precise in the future.
+ if (const Function *F = CS.getCalledFunction())
+ return getBestAAResults().getModRefBehavior(F);
+
+ return FMRB_UnknownModRefBehavior;
+ }
+
+ FunctionModRefBehavior getModRefBehavior(const Function *F) {
+ return FMRB_UnknownModRefBehavior;
+ }
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+};
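
A sketch of a minimal result built on this mixin (a hypothetical class, not
part of this header): it overrides only alias() and inherits the conservative
defaults for everything else.

    class TrivialAAResult : public AAResultBase<TrivialAAResult> {
    public:
      explicit TrivialAAResult(const TargetLibraryInfo &TLI)
          : AAResultBase(TLI) {}

      AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
        // Identical pointer and size trivially must-alias.
        if (LocA.Ptr == LocB.Ptr && LocA.Size == LocB.Size)
          return MustAlias;
        return MayAlias; // defer; other results in the aggregation may refine
      }
    };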
+
+/// Synthesize \c ModRefInfo for a call site and memory location by examining
+/// the general behavior of the call site and any specific information for its
+/// arguments.
+///
+/// This essentially delegates across the alias analysis interface to collect
+/// information which may be enough to (conservatively) fulfill the query.
+template <typename DerivedT>
+ModRefInfo AAResultBase<DerivedT>::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
+ auto MRB = getBestAAResults().getModRefBehavior(CS);
+ if (MRB == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
+
+ ModRefInfo Mask = MRI_ModRef;
+ if (AAResults::onlyReadsMemory(MRB))
+ Mask = MRI_Ref;
+
+ if (AAResults::onlyAccessesArgPointees(MRB)) {
+ bool DoesAlias = false;
+ ModRefInfo AllArgsMask = MRI_NoModRef;
+ if (AAResults::doesAccessArgPointees(MRB)) {
+ for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(),
+ AE = CS.arg_end();
+ AI != AE; ++AI) {
+ const Value *Arg = *AI;
+ if (!Arg->getType()->isPointerTy())
+ continue;
+ unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
+ MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
+ AliasResult ArgAlias = getBestAAResults().alias(ArgLoc, Loc);
+ if (ArgAlias != NoAlias) {
+ ModRefInfo ArgMask = getBestAAResults().getArgModRefInfo(CS, ArgIdx);
+ DoesAlias = true;
+ AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
+ }
+ }
+ }
+ if (!DoesAlias)
+ return MRI_NoModRef;
+ Mask = ModRefInfo(Mask & AllArgsMask);
+ }
+
+ // If Loc is a constant memory location, the call definitely could not
+ // modify the memory location.
+ if ((Mask & MRI_Mod) &&
+ getBestAAResults().pointsToConstantMemory(Loc, /*OrLocal*/ false))
+ Mask = ModRefInfo(Mask & ~MRI_Mod);
+
+ return Mask;
+}
+
+/// Synthesize \c ModRefInfo for two call sites by examining the general
+/// behavior of the call site and any specific information for its arguments.
+///
+/// This essentially delegates across the alias analysis interface to collect
+/// information which may be enough to (conservatively) fulfill the query.
+template <typename DerivedT>
+ModRefInfo AAResultBase<DerivedT>::getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
+ // If CS1 or CS2 are readnone, they don't interact.
+ auto CS1B = getBestAAResults().getModRefBehavior(CS1);
+ if (CS1B == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
+
+ auto CS2B = getBestAAResults().getModRefBehavior(CS2);
+ if (CS2B == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
+
+ // If they both only read from memory, there is no dependence.
+ if (AAResults::onlyReadsMemory(CS1B) && AAResults::onlyReadsMemory(CS2B))
+ return MRI_NoModRef;
+
+ ModRefInfo Mask = MRI_ModRef;
+
+ // If CS1 only reads memory, the only dependence on CS2 can be
+ // from CS1 reading memory written by CS2.
+ if (AAResults::onlyReadsMemory(CS1B))
+ Mask = ModRefInfo(Mask & MRI_Ref);
+
+  // If CS2 only accesses memory through arguments, accumulate the mod/ref
+ // information from CS1's references to the memory referenced by
+ // CS2's arguments.
+ if (AAResults::onlyAccessesArgPointees(CS2B)) {
+ ModRefInfo R = MRI_NoModRef;
+ if (AAResults::doesAccessArgPointees(CS2B)) {
+ for (ImmutableCallSite::arg_iterator I = CS2.arg_begin(),
+ E = CS2.arg_end();
+ I != E; ++I) {
+ const Value *Arg = *I;
+ if (!Arg->getType()->isPointerTy())
+ continue;
+ unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
+ auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);
+
+ // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
+ // of CS1 on that location is the inverse.
+ ModRefInfo ArgMask =
+ getBestAAResults().getArgModRefInfo(CS2, CS2ArgIdx);
+ if (ArgMask == MRI_Mod)
+ ArgMask = MRI_ModRef;
+ else if (ArgMask == MRI_Ref)
+ ArgMask = MRI_Mod;
+
+ ArgMask = ModRefInfo(ArgMask &
+ getBestAAResults().getModRefInfo(CS1, CS2ArgLoc));
+
+ R = ModRefInfo((R | ArgMask) & Mask);
+ if (R == Mask)
+ break;
+ }
+ }
+ return R;
+ }
+
+ // If CS1 only accesses memory through arguments, check if CS2 references
+ // any of the memory referenced by CS1's arguments. If not, return NoModRef.
+ if (AAResults::onlyAccessesArgPointees(CS1B)) {
+ ModRefInfo R = MRI_NoModRef;
+ if (AAResults::doesAccessArgPointees(CS1B)) {
+ for (ImmutableCallSite::arg_iterator I = CS1.arg_begin(),
+ E = CS1.arg_end();
+ I != E; ++I) {
+ const Value *Arg = *I;
+ if (!Arg->getType()->isPointerTy())
+ continue;
+ unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
+ auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);
+
+ // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
+ // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
+ // might Ref, then we care only about a Mod by CS2.
+ ModRefInfo ArgMask = getBestAAResults().getArgModRefInfo(CS1, CS1ArgIdx);
+ ModRefInfo ArgR = getBestAAResults().getModRefInfo(CS2, CS1ArgLoc);
+ if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
+ (ArgR & MRI_ModRef) != MRI_NoModRef) ||
+ ((ArgMask & MRI_Ref) != MRI_NoModRef &&
+ (ArgR & MRI_Mod) != MRI_NoModRef))
+ R = ModRefInfo((R | ArgMask) & Mask);
+
+ if (R == Mask)
+ break;
+ }
+ }
+ return R;
+ }
+
+ return Mask;
+}
+
+/// isNoAliasCall - Return true if this pointer is returned by a noalias
+/// function.
+bool isNoAliasCall(const Value *V);
+
+/// isNoAliasArgument - Return true if this is an argument with the noalias
+/// attribute.
+bool isNoAliasArgument(const Value *V);
+
+/// isIdentifiedObject - Return true if this pointer refers to a distinct and
+/// identifiable object. This returns true for:
+/// Global Variables and Functions (but not Global Aliases)
+/// Allocas
+/// ByVal and NoAlias Arguments
+/// NoAlias returns (e.g. calls to malloc)
+///
+bool isIdentifiedObject(const Value *V);
+
+/// isIdentifiedFunctionLocal - Return true if V is unambiguously identified
+/// at the function level. Different IdentifiedFunctionLocals can't alias.
+/// Further, an IdentifiedFunctionLocal cannot alias with any function
+/// arguments other than itself, which is not necessarily true for
+/// IdentifiedObjects.
+bool isIdentifiedFunctionLocal(const Value *V);
+
+/// A manager for alias analyses.
+///
+/// This class can have analyses registered with it and when run, it will run
+/// all of them and aggregate their results into single AA results interface
+/// that dispatches across all of the alias analysis results available.
+///
+/// Note that the order in which analyses are registered is very significant.
+/// That is the order in which the results will be aggregated and queried.
+///
+/// This manager effectively wraps the AnalysisManager for registering alias
+/// analyses. When you register your alias analysis with this manager, it will
+/// ensure the analysis itself is registered with its AnalysisManager.
+class AAManager {
+public:
+ typedef AAResults Result;
+
+  // This type has value semantics. We have to spell these out because MSVC
+ // won't synthesize them.
+ AAManager() {}
+ AAManager(AAManager &&Arg)
+ : FunctionResultGetters(std::move(Arg.FunctionResultGetters)) {}
+ AAManager(const AAManager &Arg)
+ : FunctionResultGetters(Arg.FunctionResultGetters) {}
+ AAManager &operator=(AAManager &&RHS) {
+ FunctionResultGetters = std::move(RHS.FunctionResultGetters);
+ return *this;
+ }
+ AAManager &operator=(const AAManager &RHS) {
+ FunctionResultGetters = RHS.FunctionResultGetters;
+ return *this;
+ }
+
+ /// Register a specific AA result.
+ template <typename AnalysisT> void registerFunctionAnalysis() {
+ FunctionResultGetters.push_back(&getFunctionAAResultImpl<AnalysisT>);
+ }
+
+ Result run(Function &F, AnalysisManager<Function> &AM) {
+ Result R;
+ for (auto &Getter : FunctionResultGetters)
+ (*Getter)(F, AM, R);
+ return R;
+ }
+
+private:
+ SmallVector<void (*)(Function &F, AnalysisManager<Function> &AM,
+ AAResults &AAResults),
+ 4> FunctionResultGetters;
+
+ template <typename AnalysisT>
+ static void getFunctionAAResultImpl(Function &F,
+ AnalysisManager<Function> &AM,
+ AAResults &AAResults) {
+ AAResults.addAAResult(AM.template getResult<AnalysisT>(F));
+ }
+};
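
A registration sketch (BasicAA stands in for some function analysis already
registered with the AnalysisManager; F and AM are assumed):

    AAManager AAM;
    AAM.registerFunctionAnalysis<BasicAA>();
    // Later, given a Function F and its AnalysisManager AM:
    AAResults AA = AAM.run(F, AM);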
+
+/// A wrapper pass to provide the legacy pass manager access to a suitably
+/// prepared AAResults object.
+class AAResultsWrapperPass : public FunctionPass {
+ std::unique_ptr<AAResults> AAR;
+
+public:
+ static char ID;
+
+ AAResultsWrapperPass();
+
+ AAResults &getAAResults() { return *AAR; }
+ const AAResults &getAAResults() const { return *AAR; }
+
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+FunctionPass *createAAResultsWrapperPass();
+
+/// A wrapper pass around a callback which can be used to populate the
+/// AAResults in the AAResultsWrapperPass from an external AA.
+///
+/// The callback provided here will be used each time we prepare an AAResults
+/// object, and will receive a reference to the function wrapper pass, the
+/// function, and the AAResults object to populate. This should be used when
+/// setting up a custom pass pipeline to inject a hook into the AA results.
+ImmutablePass *createExternalAAWrapperPass(
+ std::function<void(Pass &, Function &, AAResults &)> Callback);
+
+/// A helper for the legacy pass manager to create a \c AAResults
+/// object populated to the best of our ability for a particular function when
+/// inside of a \c ModulePass or a \c CallGraphSCCPass.
+AAResults createLegacyPMAAResults(Pass &P, Function &F, BasicAAResult &BAR);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
new file mode 100644
index 0000000..37fd69b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/AliasSetTracker.h
@@ -0,0 +1,449 @@
+//===- llvm/Analysis/AliasSetTracker.h - Build Alias Sets -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines two classes: AliasSetTracker and AliasSet. These
+// interfaces are used to classify a collection of pointer references into a
+// maximal number of disjoint sets. Each AliasSet object constructed by the
+// AliasSetTracker object refers to memory disjoint from the other sets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ALIASSETTRACKER_H
+#define LLVM_ANALYSIS_ALIASSETTRACKER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include <vector>
+
+namespace llvm {
+
+class LoadInst;
+class StoreInst;
+class VAArgInst;
+class AliasSetTracker;
+class AliasSet;
+
+class AliasSet : public ilist_node<AliasSet> {
+ friend class AliasSetTracker;
+
+ class PointerRec {
+ Value *Val; // The pointer this record corresponds to.
+ PointerRec **PrevInList, *NextInList;
+ AliasSet *AS;
+ uint64_t Size;
+ AAMDNodes AAInfo;
+
+ public:
+ PointerRec(Value *V)
+ : Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
+ AAInfo(DenseMapInfo<AAMDNodes>::getEmptyKey()) {}
+
+ Value *getValue() const { return Val; }
+
+ PointerRec *getNext() const { return NextInList; }
+ bool hasAliasSet() const { return AS != nullptr; }
+
+ PointerRec** setPrevInList(PointerRec **PIL) {
+ PrevInList = PIL;
+ return &NextInList;
+ }
+
+ void updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
+ if (NewSize > Size) Size = NewSize;
+
+ if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey())
+        // We don't have an AAInfo yet. Set it to NewAAInfo.
+ AAInfo = NewAAInfo;
+ else if (AAInfo != NewAAInfo)
+ // NewAAInfo conflicts with AAInfo.
+ AAInfo = DenseMapInfo<AAMDNodes>::getTombstoneKey();
+ }
+
+ uint64_t getSize() const { return Size; }
+
+ /// getAAInfo - Return the AAInfo, or null if there is no
+ /// information or conflicting information.
+ AAMDNodes getAAInfo() const {
+ // If we have missing or conflicting AAInfo, return null.
+ if (AAInfo == DenseMapInfo<AAMDNodes>::getEmptyKey() ||
+ AAInfo == DenseMapInfo<AAMDNodes>::getTombstoneKey())
+ return AAMDNodes();
+ return AAInfo;
+ }
+
+ AliasSet *getAliasSet(AliasSetTracker &AST) {
+ assert(AS && "No AliasSet yet!");
+ if (AS->Forward) {
+ AliasSet *OldAS = AS;
+ AS = OldAS->getForwardedTarget(AST);
+ AS->addRef();
+ OldAS->dropRef(AST);
+ }
+ return AS;
+ }
+
+ void setAliasSet(AliasSet *as) {
+ assert(!AS && "Already have an alias set!");
+ AS = as;
+ }
+
+ void eraseFromList() {
+ if (NextInList) NextInList->PrevInList = PrevInList;
+ *PrevInList = NextInList;
+ if (AS->PtrListEnd == &NextInList) {
+ AS->PtrListEnd = PrevInList;
+ assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
+ }
+ delete this;
+ }
+ };
+
+ PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes.
+ AliasSet *Forward; // Forwarding pointer.
+
+ // All instructions without a specific address in this alias set.
+ std::vector<AssertingVH<Instruction> > UnknownInsts;
+
+ // RefCount - Number of nodes pointing to this AliasSet plus the number of
+ // AliasSets forwarding to it.
+ unsigned RefCount : 28;
+
+ /// The kinds of access this alias set models.
+ ///
+ /// We keep track of whether this alias set merely refers to the locations of
+ /// memory (and not any particular access), whether it modifies or references
+ /// the memory, or whether it does both. The lattice goes from "NoAccess" to
+ /// either RefAccess or ModAccess, then to ModRefAccess as necessary.
+ enum AccessLattice {
+ NoAccess = 0,
+ RefAccess = 1,
+ ModAccess = 2,
+ ModRefAccess = RefAccess | ModAccess
+ };
+ unsigned Access : 2;
+
+ /// The kind of alias relationship between pointers of the set.
+ ///
+ /// These represent conservatively correct alias results between any members
+ /// of the set. We represent these independently of the values of alias
+ /// results in order to pack it into a single bit. Lattice goes from
+ /// MustAlias to MayAlias.
+ enum AliasLattice {
+ SetMustAlias = 0, SetMayAlias = 1
+ };
+ unsigned Alias : 1;
+
+ // Volatile - True if this alias set contains volatile loads or stores.
+ bool Volatile : 1;
+
+ void addRef() { ++RefCount; }
+ void dropRef(AliasSetTracker &AST) {
+ assert(RefCount >= 1 && "Invalid reference count detected!");
+ if (--RefCount == 0)
+ removeFromTracker(AST);
+ }
+
+ Instruction *getUnknownInst(unsigned i) const {
+ assert(i < UnknownInsts.size());
+ return UnknownInsts[i];
+ }
+
+public:
+ /// Accessors...
+ bool isRef() const { return Access & RefAccess; }
+ bool isMod() const { return Access & ModAccess; }
+ bool isMustAlias() const { return Alias == SetMustAlias; }
+ bool isMayAlias() const { return Alias == SetMayAlias; }
+
+ // isVolatile - Return true if this alias set contains volatile loads or
+ // stores.
+ bool isVolatile() const { return Volatile; }
+
+ /// isForwardingAliasSet - Return true if this alias set should be ignored as
+ /// part of the AliasSetTracker object.
+ bool isForwardingAliasSet() const { return Forward; }
+
+ /// mergeSetIn - Merge the specified alias set into this alias set...
+ ///
+ void mergeSetIn(AliasSet &AS, AliasSetTracker &AST);
+
+  // Alias Set iteration - Allow access to all of the pointers which are part
+  // of this alias set...
+ class iterator;
+ iterator begin() const { return iterator(PtrList); }
+ iterator end() const { return iterator(); }
+ bool empty() const { return PtrList == nullptr; }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// Define an iterator for alias sets... this is just a forward iterator.
+ class iterator : public std::iterator<std::forward_iterator_tag,
+ PointerRec, ptrdiff_t> {
+ PointerRec *CurNode;
+
+ public:
+ explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
+
+ bool operator==(const iterator& x) const {
+ return CurNode == x.CurNode;
+ }
+ bool operator!=(const iterator& x) const { return !operator==(x); }
+
+ value_type &operator*() const {
+ assert(CurNode && "Dereferencing AliasSet.end()!");
+ return *CurNode;
+ }
+ value_type *operator->() const { return &operator*(); }
+
+ Value *getPointer() const { return CurNode->getValue(); }
+ uint64_t getSize() const { return CurNode->getSize(); }
+ AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
+
+ iterator& operator++() { // Preincrement
+ assert(CurNode && "Advancing past AliasSet.end()!");
+ CurNode = CurNode->getNext();
+ return *this;
+ }
+ iterator operator++(int) { // Postincrement
+ iterator tmp = *this; ++*this; return tmp;
+ }
+ };
+
+private:
+ // Can only be created by AliasSetTracker. Also, ilist creates one
+ // to serve as a sentinel.
+ friend struct ilist_sentinel_traits<AliasSet>;
+ AliasSet()
+ : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
+ Access(NoAccess), Alias(SetMustAlias), Volatile(false) {
+ }
+
+ AliasSet(const AliasSet &AS) = delete;
+ void operator=(const AliasSet &AS) = delete;
+
+ PointerRec *getSomePointer() const {
+ return PtrList;
+ }
+
+ /// getForwardedTarget - Return the real alias set this represents. If this
+ /// has been merged with another set and is forwarding, return the ultimate
+ /// destination set. This also implements the union-find collapsing as well.
+ AliasSet *getForwardedTarget(AliasSetTracker &AST) {
+ if (!Forward) return this;
+
+ AliasSet *Dest = Forward->getForwardedTarget(AST);
+ if (Dest != Forward) {
+ Dest->addRef();
+ Forward->dropRef(AST);
+ Forward = Dest;
+ }
+ return Dest;
+ }
+
+ void removeFromTracker(AliasSetTracker &AST);
+
+ void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
+ const AAMDNodes &AAInfo,
+ bool KnownMustAlias = false);
+ void addUnknownInst(Instruction *I, AliasAnalysis &AA);
+ void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
+ bool WasEmpty = UnknownInsts.empty();
+ for (size_t i = 0, e = UnknownInsts.size(); i != e; ++i)
+ if (UnknownInsts[i] == I) {
+ UnknownInsts[i] = UnknownInsts.back();
+ UnknownInsts.pop_back();
+ --i; --e; // Revisit the moved entry.
+ }
+ if (!WasEmpty && UnknownInsts.empty())
+ dropRef(AST);
+ }
+ void setVolatile() { Volatile = true; }
+
+public:
+ /// aliasesPointer - Return true if the specified pointer "may" (or must)
+ /// alias one of the members in the set.
+ ///
+ bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
+ AliasAnalysis &AA) const;
+ bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const AliasSet &AS) {
+ AS.print(OS);
+ return OS;
+}
+
+class AliasSetTracker {
+  /// ASTCallbackVH - A CallbackVH to arrange for AliasSetTracker to be
+ /// notified whenever a Value is deleted.
+ class ASTCallbackVH final : public CallbackVH {
+ AliasSetTracker *AST;
+ void deleted() override;
+ void allUsesReplacedWith(Value *) override;
+
+ public:
+ ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
+ ASTCallbackVH &operator=(Value *V);
+ };
+  /// ASTCallbackVHDenseMapInfo - Traits that tell DenseMap how to compare
+  /// and hash the value handle.
+ struct ASTCallbackVHDenseMapInfo : public DenseMapInfo<Value *> {};
+
+ AliasAnalysis &AA;
+ ilist<AliasSet> AliasSets;
+
+ typedef DenseMap<ASTCallbackVH, AliasSet::PointerRec*,
+ ASTCallbackVHDenseMapInfo>
+ PointerMapType;
+
+ // Map from pointers to their node
+ PointerMapType PointerMap;
+
+public:
+ /// AliasSetTracker ctor - Create an empty collection of AliasSets, and use
+ /// the specified alias analysis object to disambiguate load and store
+ /// addresses.
+ explicit AliasSetTracker(AliasAnalysis &aa) : AA(aa) {}
+ ~AliasSetTracker() { clear(); }
+
+ /// add methods - These methods are used to add different types of
+ /// instructions to the alias sets. Adding a new instruction can result in
+ /// one of three actions happening:
+ ///
+ /// 1. If the instruction doesn't alias any other sets, create a new set.
+  ///  2. If the instruction aliases exactly one set, add it to the set.
+ /// 3. If the instruction aliases multiple sets, merge the sets, and add
+ /// the instruction to the result.
+ ///
+ /// These methods return true if inserting the instruction resulted in the
+ /// addition of a new alias set (i.e., the pointer did not alias anything).
+ ///
+ bool add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
+ bool add(LoadInst *LI);
+ bool add(StoreInst *SI);
+ bool add(VAArgInst *VAAI);
+ bool add(Instruction *I); // Dispatch to one of the other add methods...
+ void add(BasicBlock &BB); // Add all instructions in basic block
+ void add(const AliasSetTracker &AST); // Add alias relations from another AST
+ bool addUnknown(Instruction *I);
+
+ /// remove methods - These methods are used to remove all entries that might
+ /// be aliased by the specified instruction. These methods return true if any
+ /// alias sets were eliminated.
+ // Remove a location
+ bool remove(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo);
+ bool remove(LoadInst *LI);
+ bool remove(StoreInst *SI);
+ bool remove(VAArgInst *VAAI);
+ bool remove(Instruction *I);
+ void remove(AliasSet &AS);
+ bool removeUnknown(Instruction *I);
+
+ void clear();
+
+ /// getAliasSets - Return the alias sets that are active.
+ ///
+ const ilist<AliasSet> &getAliasSets() const { return AliasSets; }
+
+ /// getAliasSetForPointer - Return the alias set that the specified pointer
+ /// lives in. If the New argument is non-null, this method sets the value to
+ /// true if a new alias set is created to contain the pointer (because the
+ /// pointer didn't alias anything).
+ AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
+ const AAMDNodes &AAInfo,
+ bool *New = nullptr);
+
+ /// getAliasSetForPointerIfExists - Return the alias set containing the
+ /// location specified if one exists, otherwise return null.
+ AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
+ const AAMDNodes &AAInfo) {
+ return findAliasSetForPointer(P, Size, AAInfo);
+ }
+
+ /// containsPointer - Return true if the specified location is represented by
+ /// this alias set, false otherwise. This does not modify the AST object or
+ /// alias sets.
+ bool containsPointer(const Value *P, uint64_t Size,
+ const AAMDNodes &AAInfo) const;
+
+ /// Return true if the specified instruction "may" (or must) alias one of the
+ /// members in any of the sets.
+ bool containsUnknown(const Instruction *I) const;
+
+ /// getAliasAnalysis - Return the underlying alias analysis object used by
+ /// this tracker.
+ AliasAnalysis &getAliasAnalysis() const { return AA; }
+
+ /// deleteValue method - This method is used to remove a pointer value from
+ /// the AliasSetTracker entirely. It should be used when an instruction is
+  /// deleted from the program to update the AST. If you don't use this, you
+  /// will have dangling pointers to deleted instructions.
+ ///
+ void deleteValue(Value *PtrVal);
+
+ /// copyValue - This method should be used whenever a preexisting value in the
+ /// program is copied or cloned, introducing a new value. Note that it is ok
+ /// for clients that use this method to introduce the same value multiple
+ /// times: if the tracker already knows about a value, it will ignore the
+ /// request.
+ ///
+ void copyValue(Value *From, Value *To);
+
+ typedef ilist<AliasSet>::iterator iterator;
+ typedef ilist<AliasSet>::const_iterator const_iterator;
+
+ const_iterator begin() const { return AliasSets.begin(); }
+ const_iterator end() const { return AliasSets.end(); }
+
+ iterator begin() { return AliasSets.begin(); }
+ iterator end() { return AliasSets.end(); }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+private:
+ friend class AliasSet;
+ void removeAliasSet(AliasSet *AS);
+
+ // getEntryFor - Just like operator[] on the map, except that it creates an
+ // entry for the pointer if it doesn't already exist.
+ AliasSet::PointerRec &getEntryFor(Value *V) {
+ AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
+ if (!Entry)
+ Entry = new AliasSet::PointerRec(V);
+ return *Entry;
+ }
+
+ AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
+ AliasSet::AccessLattice E,
+ bool &NewSet) {
+ NewSet = false;
+ AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo, &NewSet);
+ AS.Access |= E;
+ return AS;
+ }
+ AliasSet *findAliasSetForPointer(const Value *Ptr, uint64_t Size,
+ const AAMDNodes &AAInfo);
+
+ AliasSet *findAliasSetForUnknownInst(Instruction *Inst);
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
+ AST.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/AssumptionCache.h b/contrib/llvm/include/llvm/Analysis/AssumptionCache.h
new file mode 100644
index 0000000..b903f96
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/AssumptionCache.h
@@ -0,0 +1,183 @@
+//===- llvm/Analysis/AssumptionCache.h - Track @llvm.assume ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a pass that keeps track of @llvm.assume intrinsics in
+// the functions of a module (allowing assumptions within any function to be
+// found cheaply by other parts of the optimizer).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ASSUMPTIONCACHE_H
+#define LLVM_ANALYSIS_ASSUMPTIONCACHE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+
+// FIXME: Replace this brittle forward declaration with the include of the new
+// PassManager.h when doing so doesn't break the PassManagerBuilder.
+template <typename IRUnitT> class AnalysisManager;
+class PreservedAnalyses;
+
+/// \brief A cache of @llvm.assume calls within a function.
+///
+/// This cache provides fast lookup of assumptions within a function by caching
+/// them and amortizing the cost of scanning for them across all queries. The
+/// cache is also conservatively self-updating so that it will never return
+/// incorrect results about a function even as the function is being mutated.
+/// However, flushing the cache and rebuilding it (or explicitly updating it)
+/// may allow it to discover new assumptions.
+class AssumptionCache {
+ /// \brief The function for which this cache is handling assumptions.
+ ///
+ /// We track this to lazily populate our assumptions.
+ Function &F;
+
+ /// \brief Vector of weak value handles to calls of the @llvm.assume
+ /// intrinsic.
+ SmallVector<WeakVH, 4> AssumeHandles;
+
+ /// \brief Flag tracking whether we have scanned the function yet.
+ ///
+ /// We want to be as lazy about this as possible, and so we scan the function
+ /// at the last moment.
+ bool Scanned;
+
+ /// \brief Scan the function for assumptions and add them to the cache.
+ void scanFunction();
+
+public:
+ /// \brief Construct an AssumptionCache from a function by scanning all of
+ /// its instructions.
+ AssumptionCache(Function &F) : F(F), Scanned(false) {}
+
+ /// \brief Add an @llvm.assume intrinsic to this function's cache.
+ ///
+ /// The call passed in must be an instruction within this function and must
+ /// not already be in the cache.
+ void registerAssumption(CallInst *CI);
+
+ /// \brief Clear the cache of @llvm.assume intrinsics for a function.
+ ///
+ /// It will be re-scanned the next time it is requested.
+ void clear() {
+ AssumeHandles.clear();
+ Scanned = false;
+ }
+
+ /// \brief Access the list of assumption handles currently tracked for this
+ /// function.
+ ///
+ /// Note that these produce weak handles that may be null. The caller must
+ /// handle that case.
+ /// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
+ /// when we can write that to filter out the null values. Then caller code
+ /// will become simpler.
+ MutableArrayRef<WeakVH> assumptions() {
+ if (!Scanned)
+ scanFunction();
+ return AssumeHandles;
+ }
+};
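+
+// Illustrative client loop (a sketch, not part of the original header):
+// because assumptions() hands back weak handles that may have gone null,
+// callers typically filter as they iterate:
+//   for (auto &VH : AC.assumptions())
+//     if (auto *CI = cast_or_null<CallInst>(VH))
+//       ; // use the @llvm.assume call CI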
+
+/// \brief A function analysis which provides an \c AssumptionCache.
+///
+/// This analysis is intended for use with the new pass manager and will vend
+/// assumption caches for a given function.
+class AssumptionAnalysis {
+ static char PassID;
+
+public:
+ typedef AssumptionCache Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Provide a name for the analysis for debugging and logging.
+ static StringRef name() { return "AssumptionAnalysis"; }
+
+ AssumptionAnalysis() {}
+ AssumptionAnalysis(const AssumptionAnalysis &Arg) {}
+ AssumptionAnalysis(AssumptionAnalysis &&Arg) {}
+ AssumptionAnalysis &operator=(const AssumptionAnalysis &RHS) { return *this; }
+ AssumptionAnalysis &operator=(AssumptionAnalysis &&RHS) { return *this; }
+
+ AssumptionCache run(Function &F) { return AssumptionCache(F); }
+};
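+
+// Illustrative new-PM query (a sketch; the analysis-manager protocol is
+// assumed here, not declared in this header):
+//   AssumptionCache &AC = AM->getResult<AssumptionAnalysis>(F);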
+
+/// \brief Printer pass for the \c AssumptionAnalysis results.
+class AssumptionPrinterPass {
+ raw_ostream &OS;
+
+public:
+ explicit AssumptionPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+ static StringRef name() { return "AssumptionPrinterPass"; }
+};
+
+/// \brief An immutable pass that tracks lazily created \c AssumptionCache
+/// objects.
+///
+/// This is essentially a workaround for the legacy pass manager's weaknesses:
+/// it associates each assumption cache with a Function and clears the cache
+/// if the function is deleted. The nature of the AssumptionCache is that it
+/// is not invalidated by any changes to the function body, so this is
+/// sufficient to be conservatively correct.
+class AssumptionCacheTracker : public ImmutablePass {
+ /// A callback value handle applied to function objects, which we use to
+ /// delete our cache of intrinsics for a function when it is deleted.
+ class FunctionCallbackVH final : public CallbackVH {
+ AssumptionCacheTracker *ACT;
+ void deleted() override;
+
+ public:
+ typedef DenseMapInfo<Value *> DMI;
+
+ FunctionCallbackVH(Value *V, AssumptionCacheTracker *ACT = nullptr)
+ : CallbackVH(V), ACT(ACT) {}
+ };
+
+ friend FunctionCallbackVH;
+
+ typedef DenseMap<FunctionCallbackVH, std::unique_ptr<AssumptionCache>,
+ FunctionCallbackVH::DMI> FunctionCallsMap;
+ FunctionCallsMap AssumptionCaches;
+
+public:
+ /// \brief Get the cached assumptions for a function.
+ ///
+ /// If no assumptions are cached, this will scan the function. Otherwise, the
+ /// existing cache will be returned.
+ AssumptionCache &getAssumptionCache(Function &F);
+
+ AssumptionCacheTracker();
+ ~AssumptionCacheTracker() override;
+
+ void releaseMemory() override { AssumptionCaches.shrink_and_clear(); }
+
+ void verifyAnalysis() const override;
+ bool doFinalization(Module &) override {
+ verifyAnalysis();
+ return false;
+ }
+
+ static char ID; // Pass identification, replacement for typeid
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
new file mode 100644
index 0000000..181a932
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/BasicAliasAnalysis.h
@@ -0,0 +1,223 @@
+//===- BasicAliasAnalysis.h - Stateless, local Alias Analysis ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for LLVM's primary stateless and local alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BASICALIASANALYSIS_H
+#define LLVM_ANALYSIS_BASICALIASANALYSIS_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class AssumptionCache;
+class DominatorTree;
+class LoopInfo;
+
+/// This is the AA result object for the basic, local, and stateless alias
+/// analysis. It implements the AA query interface in an entirely stateless
+/// manner. As one consequence, it is never invalidated. While it does retain
+/// some storage, that is used as an optimization and not to preserve
+/// information from query to query.
+class BasicAAResult : public AAResultBase<BasicAAResult> {
+ friend AAResultBase<BasicAAResult>;
+
+ const DataLayout &DL;
+ AssumptionCache &AC;
+ DominatorTree *DT;
+ LoopInfo *LI;
+
+public:
+ BasicAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI,
+ AssumptionCache &AC, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr)
+ : AAResultBase(TLI), DL(DL), AC(AC), DT(DT), LI(LI) {}
+
+ BasicAAResult(const BasicAAResult &Arg)
+ : AAResultBase(Arg), DL(Arg.DL), AC(Arg.AC), DT(Arg.DT), LI(Arg.LI) {}
+ BasicAAResult(BasicAAResult &&Arg)
+ : AAResultBase(std::move(Arg)), DL(Arg.DL), AC(Arg.AC), DT(Arg.DT),
+ LI(Arg.LI) {}
+
+ /// Handle invalidation events from the new pass manager.
+ ///
+ /// By definition, this result is stateless and so remains valid.
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+ /// Chases pointers to determine whether the location can only point to
+ /// constant memory (such as a constant global).
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+
+ /// Get the location associated with a pointer argument of a callsite.
+ ModRefInfo getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx);
+
+ /// Returns the behavior when calling the given call site.
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+ /// Returns the behavior when calling the given function. For use when the
+ /// call site is not known.
+ FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+private:
+ // A linear transformation of a Value; this class represents ZExt(SExt(V,
+ // SExtBits), ZExtBits) * Scale + Offset.
+ struct VariableGEPIndex {
+
+ // An opaque Value - we can't decompose this further.
+ const Value *V;
+
+ // We need to track which extensions we've done, because we consider the
+ // same Value with different extensions as different variables in a GEP's
+ // linear expression; e.g., if V == -1, then sext(V) != zext(V).
+ unsigned ZExtBits;
+ unsigned SExtBits;
+
+ int64_t Scale;
+
+ bool operator==(const VariableGEPIndex &Other) const {
+ return V == Other.V && ZExtBits == Other.ZExtBits &&
+ SExtBits == Other.SExtBits && Scale == Other.Scale;
+ }
+
+ bool operator!=(const VariableGEPIndex &Other) const {
+ return !operator==(Other);
+ }
+ };
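+
+ // Worked instance of the formula above (illustrative): an index of the form
+ // (zext (sext %x)) * 2 would be recorded as V = %x, with the extension
+ // amounts in SExtBits/ZExtBits and Scale = 2; the same %x seen under
+ // different extensions is deliberately tracked as a different variable.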
+
+ /// Track alias queries to guard against recursion.
+ typedef std::pair<MemoryLocation, MemoryLocation> LocPair;
+ typedef SmallDenseMap<LocPair, AliasResult, 8> AliasCacheTy;
+ AliasCacheTy AliasCache;
+
+ /// Tracks phi nodes we have visited.
+ ///
+ /// When interpreting "Value" pointer equality as value equality, we need to
+ /// make sure that the "Value" is not part of a cycle. Otherwise, two uses
+ /// could come from different "iterations" of a cycle and see different
+ /// values for the same "Value" pointer.
+ ///
+ /// The following example shows the problem:
+ /// %p = phi(%alloca1, %addr2)
+ /// %l = load %ptr
+ ///   %addr1 = gep %alloca2, 0, %l
+ /// %addr2 = gep %alloca2, 0, (%l + 1)
+ /// alias(%p, %addr1) -> MayAlias !
+ /// store %l, ...
+ SmallPtrSet<const BasicBlock *, 8> VisitedPhiBBs;
+
+ /// Tracks values visited by pointsToConstantMemory.
+ SmallPtrSet<const Value *, 16> Visited;
+
+ static const Value *
+ GetLinearExpression(const Value *V, APInt &Scale, APInt &Offset,
+ unsigned &ZExtBits, unsigned &SExtBits,
+ const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
+ DominatorTree *DT, bool &NSW, bool &NUW);
+
+ static const Value *
+ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
+ SmallVectorImpl<VariableGEPIndex> &VarIndices,
+ bool &MaxLookupReached, const DataLayout &DL,
+ AssumptionCache *AC, DominatorTree *DT);
+ /// \brief A Heuristic for aliasGEP that searches for a constant offset
+ /// between the variables.
+ ///
+ /// GetLinearExpression has some limitations, as generally zext(%x + 1)
+ /// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
+ /// will therefore conservatively refuse to decompose these expressions.
+ /// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
+ /// the addition overflows.
+ bool
+ constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
+ uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
+ AssumptionCache *AC, DominatorTree *DT);
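+
+ // Concrete instance of the caveat above (illustrative): for an i8 value
+ // %x == 255, zext((%x + 1)) to i16 is 0 because the i8 add wraps, while
+ // zext(%x) + zext(1) is 256; the decomposition is therefore only safe when
+ // the inner arithmetic cannot overflow.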
+
+ bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
+
+ void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
+ const SmallVectorImpl<VariableGEPIndex> &Src);
+
+ AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
+ const AAMDNodes &V1AAInfo, const Value *V2,
+ uint64_t V2Size, const AAMDNodes &V2AAInfo,
+ const Value *UnderlyingV1, const Value *UnderlyingV2);
+
+ AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
+ const AAMDNodes &PNAAInfo, const Value *V2,
+ uint64_t V2Size, const AAMDNodes &V2AAInfo);
+
+ AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
+ const AAMDNodes &SIAAInfo, const Value *V2,
+ uint64_t V2Size, const AAMDNodes &V2AAInfo);
+
+ AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
+ const Value *V2, uint64_t V2Size, AAMDNodes V2AATag);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class BasicAA {
+public:
+ typedef BasicAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ BasicAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "BasicAliasAnalysis"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the BasicAAResult object.
+class BasicAAWrapperPass : public FunctionPass {
+ std::unique_ptr<BasicAAResult> Result;
+
+ virtual void anchor();
+
+public:
+ static char ID;
+
+ BasicAAWrapperPass();
+
+ BasicAAResult &getResult() { return *Result; }
+ const BasicAAResult &getResult() const { return *Result; }
+
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+FunctionPass *createBasicAAWrapperPass();
+
+/// A helper for the legacy pass manager to create a \c BasicAAResult object
+/// populated to the best of our ability for a particular function when inside
+/// of a \c ModulePass or a \c CallGraphSCCPass.
+BasicAAResult createLegacyPMBasicAAResult(Pass &P, Function &F);
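+
+// Illustrative legacy-PM use (a sketch; MemoryLocation's constructor is
+// assumed from the alias-analysis headers rather than declared here):
+//   BasicAAResult BAR = createLegacyPMBasicAAResult(*this, F);
+//   AliasResult AR = BAR.alias(MemoryLocation(P, PSize),
+//                              MemoryLocation(Q, QSize));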
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
new file mode 100644
index 0000000..6f2a2b5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfo.h
@@ -0,0 +1,90 @@
+//===- BlockFrequencyInfo.h - Block Frequency Analysis ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
+#define LLVM_ANALYSIS_BLOCKFREQUENCYINFO_H
+
+#include "llvm/Pass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include <climits>
+
+namespace llvm {
+
+class BranchProbabilityInfo;
+class LoopInfo;
+template <class BlockT> class BlockFrequencyInfoImpl;
+
+/// BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to
+/// estimate IR basic block frequencies.
+class BlockFrequencyInfo {
+ typedef BlockFrequencyInfoImpl<BasicBlock> ImplType;
+ std::unique_ptr<ImplType> BFI;
+
+public:
+ BlockFrequencyInfo();
+ BlockFrequencyInfo(const Function &F, const BranchProbabilityInfo &BPI,
+ const LoopInfo &LI);
+
+ const Function *getFunction() const;
+ void view() const;
+
+ /// getBlockFreq - Return block frequency. Return 0 if we don't have the
+ /// information. Note that the initial (entry) frequency is equal to
+ /// ENTRY_FREQ, so the value is only meaningful relative to other block
+ /// frequencies, not on its own. We do this to avoid using floating point.
+ BlockFrequency getBlockFreq(const BasicBlock *BB) const;
+
+ // Set the frequency of the given basic block.
+ void setBlockFreq(const BasicBlock *BB, uint64_t Freq);
+
+ /// calculate - compute block frequency info for the given function.
+ void calculate(const Function &F, const BranchProbabilityInfo &BPI,
+ const LoopInfo &LI);
+
+ // Print the block frequency Freq to OS using the current function's entry
+ // frequency to convert Freq into a relative decimal form.
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
+
+ // Convenience method that attempts to look up the frequency associated with
+ // BB and print it to OS.
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BasicBlock *BB) const;
+
+ uint64_t getEntryFreq() const;
+ void releaseMemory();
+ void print(raw_ostream &OS) const;
+};
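+
+// Illustrative query pattern (a sketch; assumes BPI and LI were computed for
+// F beforehand). Frequencies are only meaningful relative to one another,
+// e.g. relative to the entry frequency:
+//   BlockFrequencyInfo BFI(F, BPI, LI);
+//   bool Hot = BFI.getBlockFreq(BB) >= BlockFrequency(BFI.getEntryFreq());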
+
+/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
+class BlockFrequencyInfoWrapperPass : public FunctionPass {
+ BlockFrequencyInfo BFI;
+
+public:
+ static char ID;
+
+ BlockFrequencyInfoWrapperPass();
+ ~BlockFrequencyInfoWrapperPass() override;
+
+ BlockFrequencyInfo &getBFI() { return BFI; }
+ const BlockFrequencyInfo &getBFI() const { return BFI; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void print(raw_ostream &OS, const Module *M) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
new file mode 100644
index 0000000..387e9a8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/BlockFrequencyInfoImpl.h
@@ -0,0 +1,1231 @@
+//==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared implementation of BlockFrequency for IR and Machine Instructions.
+// See the documentation below for BlockFrequencyInfoImpl for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+#define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/BlockFrequency.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ScaledNumber.h"
+#include "llvm/Support/raw_ostream.h"
+#include <deque>
+#include <list>
+#include <string>
+#include <vector>
+
+#define DEBUG_TYPE "block-freq"
+
+namespace llvm {
+
+class BasicBlock;
+class BranchProbabilityInfo;
+class Function;
+class Loop;
+class LoopInfo;
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+class MachineFunction;
+class MachineLoop;
+class MachineLoopInfo;
+
+namespace bfi_detail {
+
+struct IrreducibleGraph;
+
+// This is part of a workaround for a GCC 4.7 crash on lambdas.
+template <class BT> struct BlockEdgesAdder;
+
+/// \brief Mass of a block.
+///
+/// This class implements a sort of fixed-point fraction always between 0.0 and
+/// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
+///
+/// Masses can be added and subtracted. Simple saturation arithmetic is used,
+/// so arithmetic operations never overflow or underflow.
+///
+/// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
+/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
+/// quite, maximum precision).
+///
+/// Masses can be scaled by \a BranchProbability at maximum precision.
+class BlockMass {
+ uint64_t Mass;
+
+public:
+ BlockMass() : Mass(0) {}
+ explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
+
+ static BlockMass getEmpty() { return BlockMass(); }
+ static BlockMass getFull() { return BlockMass(UINT64_MAX); }
+
+ uint64_t getMass() const { return Mass; }
+
+ bool isFull() const { return Mass == UINT64_MAX; }
+ bool isEmpty() const { return !Mass; }
+
+ bool operator!() const { return isEmpty(); }
+
+ /// \brief Add another mass.
+ ///
+ /// Adds another mass, saturating at \a isFull() rather than overflowing.
+ BlockMass &operator+=(BlockMass X) {
+ uint64_t Sum = Mass + X.Mass;
+ Mass = Sum < Mass ? UINT64_MAX : Sum;
+ return *this;
+ }
+
+ /// \brief Subtract another mass.
+ ///
+ /// Subtracts another mass, saturating at \a isEmpty() rather than
+ /// underflowing.
+ BlockMass &operator-=(BlockMass X) {
+ uint64_t Diff = Mass - X.Mass;
+ Mass = Diff > Mass ? 0 : Diff;
+ return *this;
+ }
+
+ BlockMass &operator*=(BranchProbability P) {
+ Mass = P.scale(Mass);
+ return *this;
+ }
+
+ bool operator==(BlockMass X) const { return Mass == X.Mass; }
+ bool operator!=(BlockMass X) const { return Mass != X.Mass; }
+ bool operator<=(BlockMass X) const { return Mass <= X.Mass; }
+ bool operator>=(BlockMass X) const { return Mass >= X.Mass; }
+ bool operator<(BlockMass X) const { return Mass < X.Mass; }
+ bool operator>(BlockMass X) const { return Mass > X.Mass; }
+
+ /// \brief Convert to scaled number.
+ ///
+ /// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
+ /// gives slightly above 0.0.
+ ScaledNumber<uint64_t> toScaled() const;
+
+ void dump() const;
+ raw_ostream &print(raw_ostream &OS) const;
+};
+
+inline BlockMass operator+(BlockMass L, BlockMass R) {
+ return BlockMass(L) += R;
+}
+inline BlockMass operator-(BlockMass L, BlockMass R) {
+ return BlockMass(L) -= R;
+}
+inline BlockMass operator*(BlockMass L, BranchProbability R) {
+ return BlockMass(L) *= R;
+}
+inline BlockMass operator*(BranchProbability L, BlockMass R) {
+ return BlockMass(R) *= L;
+}
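+
+// Worked example of the saturating arithmetic above (illustrative only):
+//   BlockMass Half(UINT64_MAX / 2 + 1);
+//   (Half + Half).isFull();                  // true: addition saturates
+//   (Half - BlockMass::getFull()).isEmpty(); // true: subtraction saturates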
+
+inline raw_ostream &operator<<(raw_ostream &OS, BlockMass X) {
+ return X.print(OS);
+}
+
+} // end namespace bfi_detail
+
+template <> struct isPodLike<bfi_detail::BlockMass> {
+ static const bool value = true;
+};
+
+/// \brief Base class for BlockFrequencyInfoImpl
+///
+/// BlockFrequencyInfoImplBase has supporting data structures and some
+/// algorithms for BlockFrequencyInfoImpl. Only algorithms that depend on
+/// the block type (or that call such algorithms) are skipped here.
+///
+/// Nevertheless, the majority of the overall algorithm documentation lives
+/// with BlockFrequencyInfoImpl. See there for details.
+class BlockFrequencyInfoImplBase {
+public:
+ typedef ScaledNumber<uint64_t> Scaled64;
+ typedef bfi_detail::BlockMass BlockMass;
+
+ /// \brief Representative of a block.
+ ///
+ /// This is a simple wrapper around an index into the reverse-post-order
+ /// traversal of the blocks.
+ ///
+ /// Unlike a block pointer, its order has meaning (location in the
+ /// topological sort) and its class is the same regardless of block type.
+ struct BlockNode {
+ typedef uint32_t IndexType;
+ IndexType Index;
+
+ bool operator==(const BlockNode &X) const { return Index == X.Index; }
+ bool operator!=(const BlockNode &X) const { return Index != X.Index; }
+ bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
+ bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
+ bool operator<(const BlockNode &X) const { return Index < X.Index; }
+ bool operator>(const BlockNode &X) const { return Index > X.Index; }
+
+ BlockNode() : Index(UINT32_MAX) {}
+ BlockNode(IndexType Index) : Index(Index) {}
+
+ bool isValid() const { return Index <= getMaxIndex(); }
+ static size_t getMaxIndex() { return UINT32_MAX - 1; }
+ };
+
+ /// \brief Stats about a block itself.
+ struct FrequencyData {
+ Scaled64 Scaled;
+ uint64_t Integer;
+ };
+
+ /// \brief Data about a loop.
+ ///
+ /// Contains the data necessary to represent a loop as a pseudo-node once it's
+ /// packaged.
+ struct LoopData {
+ typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
+ typedef SmallVector<BlockNode, 4> NodeList;
+ typedef SmallVector<BlockMass, 1> HeaderMassList;
+ LoopData *Parent; ///< The parent loop.
+ bool IsPackaged; ///< Whether this has been packaged.
+ uint32_t NumHeaders; ///< Number of headers.
+ ExitMap Exits; ///< Successor edges (and weights).
+ NodeList Nodes; ///< Header and the members of the loop.
+ HeaderMassList BackedgeMass; ///< Mass returned to each loop header.
+ BlockMass Mass;
+ Scaled64 Scale;
+
+ LoopData(LoopData *Parent, const BlockNode &Header)
+ : Parent(Parent), IsPackaged(false), NumHeaders(1), Nodes(1, Header),
+ BackedgeMass(1) {}
+ template <class It1, class It2>
+ LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
+ It2 LastOther)
+ : Parent(Parent), IsPackaged(false), Nodes(FirstHeader, LastHeader) {
+ NumHeaders = Nodes.size();
+ Nodes.insert(Nodes.end(), FirstOther, LastOther);
+ BackedgeMass.resize(NumHeaders);
+ }
+ bool isHeader(const BlockNode &Node) const {
+ if (isIrreducible())
+ return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
+ Node);
+ return Node == Nodes[0];
+ }
+ BlockNode getHeader() const { return Nodes[0]; }
+ bool isIrreducible() const { return NumHeaders > 1; }
+
+ HeaderMassList::difference_type getHeaderIndex(const BlockNode &B) {
+ assert(isHeader(B) && "this is only valid on loop header blocks");
+ if (isIrreducible())
+ return std::lower_bound(Nodes.begin(), Nodes.begin() + NumHeaders, B) -
+ Nodes.begin();
+ return 0;
+ }
+
+ NodeList::const_iterator members_begin() const {
+ return Nodes.begin() + NumHeaders;
+ }
+ NodeList::const_iterator members_end() const { return Nodes.end(); }
+ iterator_range<NodeList::const_iterator> members() const {
+ return make_range(members_begin(), members_end());
+ }
+ };
+
+ /// \brief Index of loop information.
+ struct WorkingData {
+ BlockNode Node; ///< This node.
+ LoopData *Loop; ///< The loop this block is inside.
+ BlockMass Mass; ///< Mass distribution from the entry block.
+
+ WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}
+
+ bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
+ bool isDoubleLoopHeader() const {
+ return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
+ Loop->Parent->isHeader(Node);
+ }
+
+ LoopData *getContainingLoop() const {
+ if (!isLoopHeader())
+ return Loop;
+ if (!isDoubleLoopHeader())
+ return Loop->Parent;
+ return Loop->Parent->Parent;
+ }
+
+ /// \brief Resolve a node to its representative.
+ ///
+ /// Get the node currently representing Node, which could be a containing
+ /// loop.
+ ///
+ /// This function should only be called when distributing mass. As long as
+ /// there are no irreducible edges to Node, then it will have complexity
+ /// O(1) in this context.
+ ///
+ /// In general, the complexity is O(L), where L is the number of loop
+ /// headers Node has been packaged into. Since this method is called in
+ /// the context of distributing mass, L will be the number of loop headers
+ /// an early exit edge jumps out of.
+ BlockNode getResolvedNode() const {
+ auto L = getPackagedLoop();
+ return L ? L->getHeader() : Node;
+ }
+ LoopData *getPackagedLoop() const {
+ if (!Loop || !Loop->IsPackaged)
+ return nullptr;
+ auto L = Loop;
+ while (L->Parent && L->Parent->IsPackaged)
+ L = L->Parent;
+ return L;
+ }
+
+ /// \brief Get the appropriate mass for a node.
+ ///
+ /// Get appropriate mass for Node. If Node is a loop-header (whose loop
+ /// has been packaged), returns the mass of its pseudo-node. If it's a
+ /// node inside a packaged loop, it returns the loop's mass.
+ BlockMass &getMass() {
+ if (!isAPackage())
+ return Mass;
+ if (!isADoublePackage())
+ return Loop->Mass;
+ return Loop->Parent->Mass;
+ }
+
+ /// \brief Has ContainingLoop been packaged up?
+ bool isPackaged() const { return getResolvedNode() != Node; }
+ /// \brief Has Loop been packaged up?
+ bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
+ /// \brief Has Loop been packaged up twice?
+ bool isADoublePackage() const {
+ return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
+ }
+ };
+
+ /// \brief Unscaled probability weight.
+ ///
+ /// Probability weight for an edge in the graph (including the
+ /// successor/target node).
+ ///
+ /// All edges in the original function are 32-bit. However, exit edges from
+ /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
+ /// space in general.
+ ///
+ /// In addition to the raw weight amount, Weight stores the type of the edge
+ /// in the current context (i.e., the context of the loop being processed).
+ /// Is this a local edge within the loop, an exit from the loop, or a
+ /// backedge to the loop header?
+ struct Weight {
+ enum DistType { Local, Exit, Backedge };
+ DistType Type;
+ BlockNode TargetNode;
+ uint64_t Amount;
+ Weight() : Type(Local), Amount(0) {}
+ Weight(DistType Type, BlockNode TargetNode, uint64_t Amount)
+ : Type(Type), TargetNode(TargetNode), Amount(Amount) {}
+ };
+
+ /// \brief Distribution of unscaled probability weight.
+ ///
+ /// Distribution of unscaled probability weight to a set of successors.
+ ///
+ /// This class collates the successor edge weights for later processing.
+ ///
+ /// \a DidOverflow indicates whether \a Total did overflow while adding to
+ /// the distribution. It should never overflow twice.
+ struct Distribution {
+ typedef SmallVector<Weight, 4> WeightList;
+ WeightList Weights; ///< Individual successor weights.
+ uint64_t Total; ///< Sum of all weights.
+ bool DidOverflow; ///< Whether \a Total did overflow.
+
+ Distribution() : Total(0), DidOverflow(false) {}
+ void addLocal(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Local);
+ }
+ void addExit(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Exit);
+ }
+ void addBackedge(const BlockNode &Node, uint64_t Amount) {
+ add(Node, Amount, Weight::Backedge);
+ }
+
+ /// \brief Normalize the distribution.
+ ///
+ /// Combines multiple edges to the same \a Weight::TargetNode and scales
+ /// down so that \a Total fits into 32-bits.
+ ///
+ /// This is linear in the size of \a Weights. For the vast majority of
+ /// cases, adjacent edge weights are combined by sorting WeightList and
+ /// combining adjacent weights. However, for very large edge lists an
+ /// auxiliary hash table is used.
+ void normalize();
+
+ private:
+ void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
+ };
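+
+ // Illustrative use while distributing mass (a sketch): per-successor
+ // weights are collected first and normalized once at the end:
+ //   Distribution Dist;
+ //   Dist.addLocal(SuccNode, Weight1);
+ //   Dist.addBackedge(HeaderNode, Weight2);
+ //   Dist.normalize(); // coalesce duplicate targets, fit Total in 32 bits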
+
+ /// \brief Data about each block. This is used downstream.
+ std::vector<FrequencyData> Freqs;
+
+ /// \brief Loop data: see initializeLoops().
+ std::vector<WorkingData> Working;
+
+ /// \brief Indexed information about loops.
+ std::list<LoopData> Loops;
+
+ /// \brief Add all edges out of a packaged loop to the distribution.
+ ///
+ /// Adds all edges out of \c Loop to \c Dist. Calls addToDist() to add each
+ /// successor edge.
+ ///
+ /// \return \c true unless there's an irreducible backedge.
+ bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
+ Distribution &Dist);
+
+ /// \brief Add an edge to the distribution.
+ ///
+ /// Adds an edge to Succ to Dist. If \c OuterLoop is non-null, then whether
+ /// the edge is local/exit/backedge is in the context of \c OuterLoop.
+ /// Otherwise, every edge should be a local edge (since all the loops are
+ /// packaged up).
+ ///
+ /// \return \c true unless aborted due to an irreducible backedge.
+ bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
+ const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
+
+ LoopData &getLoopPackage(const BlockNode &Head) {
+ assert(Head.Index < Working.size());
+ assert(Working[Head.Index].isLoopHeader());
+ return *Working[Head.Index].Loop;
+ }
+
+ /// \brief Analyze irreducible SCCs.
+ ///
+ /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
+ /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
+ /// Insert them into \a Loops before \c Insert.
+ ///
+ /// \return the \c LoopData nodes representing the irreducible SCCs.
+ iterator_range<std::list<LoopData>::iterator>
+ analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
+ std::list<LoopData>::iterator Insert);
+
+ /// \brief Update a loop after packaging irreducible SCCs inside of it.
+ ///
+ /// Update \c OuterLoop. Before finding irreducible control flow, it was
+ /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
+ /// LoopData::BackedgeMass need to be reset. Also, nodes that were packaged
+ /// up need to be removed from \a OuterLoop::Nodes.
+ void updateLoopWithIrreducible(LoopData &OuterLoop);
+
+ /// \brief Distribute mass according to a distribution.
+ ///
+ /// Distributes the mass in Source according to Dist. If \c OuterLoop is
+ /// non-null, backedges and exits are stored in its entry in \a Loops.
+ ///
+ /// Mass is distributed in parallel from two copies of the source mass.
+ void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
+ Distribution &Dist);
+
+ /// \brief Compute the loop scale for a loop.
+ void computeLoopScale(LoopData &Loop);
+
+ /// Adjust the mass of all headers in an irreducible loop.
+ ///
+ /// Initially, an irreducible loop is assumed to distribute its mass
+ /// equally among its headers. This can lead to wrong frequency estimates,
+ /// since some headers may be executed more frequently than others.
+ ///
+ /// This adjusts header mass distribution so it matches the weights of
+ /// the backedges going into each of the loop headers.
+ void adjustLoopHeaderMass(LoopData &Loop);
+
+ /// \brief Package up a loop.
+ void packageLoop(LoopData &Loop);
+
+ /// \brief Unwrap loops.
+ void unwrapLoops();
+
+ /// \brief Finalize frequency metrics.
+ ///
+ /// Calculates final frequencies and cleans up no-longer-needed data
+ /// structures.
+ void finalizeMetrics();
+
+ /// \brief Clear all memory.
+ void clear();
+
+ virtual std::string getBlockName(const BlockNode &Node) const;
+ std::string getLoopName(const LoopData &Loop) const;
+
+ virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
+ void dump() const { print(dbgs()); }
+
+ Scaled64 getFloatingBlockFreq(const BlockNode &Node) const;
+
+ BlockFrequency getBlockFreq(const BlockNode &Node) const;
+
+ void setBlockFreq(const BlockNode &Node, uint64_t Freq);
+
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
+ raw_ostream &printBlockFreq(raw_ostream &OS,
+ const BlockFrequency &Freq) const;
+
+ uint64_t getEntryFreq() const {
+ assert(!Freqs.empty());
+ return Freqs[0].Integer;
+ }
+ /// \brief Virtual destructor.
+ ///
+ /// Need a virtual destructor to mask the compiler warning about
+ /// getBlockName().
+ virtual ~BlockFrequencyInfoImplBase() {}
+};
+
+namespace bfi_detail {
+template <class BlockT> struct TypeMap {};
+template <> struct TypeMap<BasicBlock> {
+ typedef BasicBlock BlockT;
+ typedef Function FunctionT;
+ typedef BranchProbabilityInfo BranchProbabilityInfoT;
+ typedef Loop LoopT;
+ typedef LoopInfo LoopInfoT;
+};
+template <> struct TypeMap<MachineBasicBlock> {
+ typedef MachineBasicBlock BlockT;
+ typedef MachineFunction FunctionT;
+ typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
+ typedef MachineLoop LoopT;
+ typedef MachineLoopInfo LoopInfoT;
+};
+
+/// \brief Get the name of a MachineBasicBlock.
+///
+/// Get the name of a MachineBasicBlock. It's templated so that including from
+/// CodeGen is unnecessary (that would be a layering issue).
+///
+/// This is used mainly for debug output. The name is similar to
+/// MachineBasicBlock::getFullName(), but skips the name of the function.
+template <class BlockT> std::string getBlockName(const BlockT *BB) {
+ assert(BB && "Unexpected nullptr");
+ auto MachineName = "BB" + Twine(BB->getNumber());
+ if (BB->getBasicBlock())
+ return (MachineName + "[" + BB->getName() + "]").str();
+ return MachineName.str();
+}
+/// \brief Get the name of a BasicBlock.
+template <> inline std::string getBlockName(const BasicBlock *BB) {
+ assert(BB && "Unexpected nullptr");
+ return BB->getName().str();
+}
+
+/// \brief Graph of irreducible control flow.
+///
+/// This graph is used for determining the SCCs in a loop (or top-level
+/// function) that has irreducible control flow.
+///
+/// During the block frequency algorithm, the local graphs are defined in a
+/// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
+/// graphs for most edges, but getting others from \a LoopData::ExitMap. The
+/// latter only has successor information.
+///
+/// \a IrreducibleGraph makes this graph explicit. It's in a form that can use
+/// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
+/// and it explicitly lists predecessors and successors. The initialization
+/// that relies on \c MachineBasicBlock is defined in the header.
+struct IrreducibleGraph {
+ typedef BlockFrequencyInfoImplBase BFIBase;
+
+ BFIBase &BFI;
+
+ typedef BFIBase::BlockNode BlockNode;
+ struct IrrNode {
+ BlockNode Node;
+ unsigned NumIn;
+ std::deque<const IrrNode *> Edges;
+ IrrNode(const BlockNode &Node) : Node(Node), NumIn(0) {}
+
+ typedef std::deque<const IrrNode *>::const_iterator iterator;
+ iterator pred_begin() const { return Edges.begin(); }
+ iterator succ_begin() const { return Edges.begin() + NumIn; }
+ iterator pred_end() const { return succ_begin(); }
+ iterator succ_end() const { return Edges.end(); }
+ };
+ BlockNode Start;
+ const IrrNode *StartIrr;
+ std::vector<IrrNode> Nodes;
+ SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
+
+ /// \brief Construct an explicit graph containing irreducible control flow.
+ ///
+ /// Construct an explicit graph of the control flow in \c OuterLoop (or the
+ /// top-level function, if \c OuterLoop is \c nullptr). Uses \c
+ /// addBlockEdges to add block successors that have not been packaged into
+ /// loops.
+ ///
+ /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
+ /// user of this.
+ template <class BlockEdgesAdder>
+ IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges)
+ : BFI(BFI), StartIrr(nullptr) {
+ initialize(OuterLoop, addBlockEdges);
+ }
+
+ template <class BlockEdgesAdder>
+ void initialize(const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges);
+ void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
+ void addNodesInFunction();
+ void addNode(const BlockNode &Node) {
+ Nodes.emplace_back(Node);
+ BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
+ }
+ void indexNodes();
+ template <class BlockEdgesAdder>
+ void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges);
+ void addEdge(IrrNode &Irr, const BlockNode &Succ,
+ const BFIBase::LoopData *OuterLoop);
+};
+template <class BlockEdgesAdder>
+void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges) {
+ if (OuterLoop) {
+ addNodesInLoop(*OuterLoop);
+ for (auto N : OuterLoop->Nodes)
+ addEdges(N, OuterLoop, addBlockEdges);
+ } else {
+ addNodesInFunction();
+ for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
+ addEdges(Index, OuterLoop, addBlockEdges);
+ }
+ StartIrr = Lookup[Start.Index];
+}
+template <class BlockEdgesAdder>
+void IrreducibleGraph::addEdges(const BlockNode &Node,
+ const BFIBase::LoopData *OuterLoop,
+ BlockEdgesAdder addBlockEdges) {
+ auto L = Lookup.find(Node.Index);
+ if (L == Lookup.end())
+ return;
+ IrrNode &Irr = *L->second;
+ const auto &Working = BFI.Working[Node.Index];
+
+ if (Working.isAPackage())
+ for (const auto &I : Working.Loop->Exits)
+ addEdge(Irr, I.first, OuterLoop);
+ else
+ addBlockEdges(*this, Irr, OuterLoop);
+}
+} // end namespace bfi_detail
+
+/// \brief Shared implementation for block frequency analysis.
+///
+/// This is a shared implementation of BlockFrequencyInfo and
+/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
+/// blocks.
+///
+/// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
+/// which is called the header. A given loop, L, can have sub-loops, which are
+/// loops within the subgraph of L that exclude its header. (A "trivial" SCC
+/// consists of a single block that does not have a self-edge.)
+///
+/// In addition to loops, this algorithm has limited support for irreducible
+/// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
+/// discovered on the fly, and modelled as loops with multiple headers.
+///
+/// The headers of an irreducible sub-SCC consist of its entry blocks and all
+/// nodes that are targets of a backedge within it (excluding backedges within
+/// true sub-loops). Block frequency calculations act as if a block is
+/// inserted that intercepts all the edges to the headers. All backedges and
+/// entries point to this block. Its successors are the headers, which split
+/// the frequency evenly.
+///
+/// This algorithm leverages BlockMass and ScaledNumber to maintain precision,
+/// separates mass distribution from loop scaling, and dithers to eliminate
+/// probability mass loss.
+///
+/// The implementation is split between BlockFrequencyInfoImpl, which knows the
+/// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
+/// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
+/// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
+/// reverse-post order. This gives two advantages: it's easy to compare the
+/// relative ordering of two nodes, and maps keyed on BlockT can be represented
+/// by vectors.
+///
+/// This algorithm is O(V+E), unless there is irreducible control flow, in
+/// which case it's O(V*E) in the worst case.
+///
+/// These are the main stages:
+///
+/// 0. Reverse post-order traversal (\a initializeRPOT()).
+///
+/// Run a single post-order traversal and save it (in reverse) in RPOT.
+/// All other stages make use of this ordering. Save a lookup from BlockT
+/// to BlockNode (the index into RPOT) in Nodes.
+///
+/// 1. Loop initialization (\a initializeLoops()).
+///
+/// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
+/// the algorithm. In particular, store the immediate members of each loop
+/// in reverse post-order.
+///
+/// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
+///
+/// For each loop (bottom-up), distribute mass through the DAG resulting
+/// from ignoring backedges and treating sub-loops as a single pseudo-node.
+/// Track the backedge mass distributed to the loop header, and use it to
+/// calculate the loop scale (number of loop iterations). Immediate
+/// members that represent sub-loops will already have been visited and
+/// packaged into a pseudo-node.
+///
+/// Distributing mass in a loop is a reverse-post-order traversal through
+/// the loop. Start by assigning full mass to the Loop header. For each
+/// node in the loop:
+///
+/// - Fetch and categorize the weight distribution for its successors.
+/// If this is a packaged-subloop, the weight distribution is stored
+/// in \a LoopData::Exits. Otherwise, fetch it from
+/// BranchProbabilityInfo.
+///
+/// - Each successor is categorized as \a Weight::Local, a local edge
+/// within the current loop, \a Weight::Backedge, a backedge to the
+/// loop header, or \a Weight::Exit, any successor outside the loop.
+/// The weight, the successor, and its category are stored in \a
+/// Distribution. There can be multiple edges to each successor.
+///
+/// - If there's a backedge to a non-header, there's an irreducible SCC.
+/// The usual flow is temporarily aborted. \a
+/// computeIrreducibleMass() finds the irreducible SCCs within the
+/// loop, packages them up, and restarts the flow.
+///
+/// - Normalize the distribution: scale weights down so that their sum
+/// is 32-bits, and coalesce multiple edges to the same node.
+///
+/// - Distribute the mass accordingly, dithering to minimize mass loss,
+/// as described in \a distributeMass().
+///
+/// In the case of irreducible loops, instead of a single loop header,
+/// there will be several. The computation of backedge masses is similar
+/// but instead of having a single backedge mass, there will be one
+/// backedge per loop header. In these cases, each backedge will carry
+/// a mass proportional to the edge weights along the corresponding
+/// path.
+///
+/// At the end of propagation, the full mass assigned to the loop will be
+/// distributed among the loop headers proportionally according to the
+/// mass flowing through their backedges.
+///
+/// Finally, calculate the loop scale from the accumulated backedge mass.
+///
+/// 3. Distribute mass in the function (\a computeMassInFunction()).
+///
+/// Finally, distribute mass through the DAG resulting from packaging all
+/// loops in the function. This uses the same algorithm as distributing
+/// mass in a loop, except that there are no exit or backedge edges.
+///
+/// 4. Unpackage loops (\a unwrapLoops()).
+///
+/// Initialize each block's frequency to a floating point representation of
+/// its mass.
+///
+/// Visit loops top-down, scaling the frequencies of its immediate members
+/// by the loop's pseudo-node's frequency.
+///
+/// 5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
+///
+/// Using the min and max frequencies as a guide, translate floating point
+/// frequencies to an appropriate range in uint64_t.
+///
+/// It has some known flaws.
+///
+/// - The model of irreducible control flow is a rough approximation.
+///
+/// Modelling irreducible control flow exactly involves setting up and
+/// solving a group of infinite geometric series. Such precision is
+/// unlikely to be worthwhile, since most of our algorithms give up on
+/// irreducible control flow anyway.
+///
+/// Nevertheless, we might find that we need to get closer. Here's a sort
+/// of TODO list for the model with diminishing returns, to be completed as
+/// necessary.
+///
+/// - The headers for the \a LoopData representing an irreducible SCC
+/// include non-entry blocks. When these extra blocks exist, they
+/// indicate a self-contained irreducible sub-SCC. We could treat them
+/// as sub-loops, rather than arbitrarily shoving the problematic
+/// blocks into the headers of the main irreducible SCC.
+///
+/// - Entry frequencies are assumed to be evenly split between the
+/// headers of a given irreducible SCC, which is the only option if we
+/// need to compute mass in the SCC before its parent loop. Instead,
+/// we could partially compute mass in the parent loop, and stop when
+/// we get to the SCC. Here, we have the correct ratio of entry
+/// masses, which we can use to adjust their relative frequencies.
+/// Compute mass in the SCC, and then continue propagation in the
+/// parent.
+///
+/// - We can propagate mass iteratively through the SCC, for some fixed
+/// number of iterations. Each iteration starts by assigning the entry
+/// blocks their backedge mass from the prior iteration. The final
+/// mass for each block (and each exit, and the total backedge mass
+/// used for computing loop scale) is the sum of all iterations.
+/// (Running this until fixed point would "solve" the geometric
+/// series by simulation.)
+template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
+ typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
+ typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
+ typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
+ BranchProbabilityInfoT;
+ typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
+ typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
+
+ // This is part of a workaround for a GCC 4.7 crash on lambdas.
+ friend struct bfi_detail::BlockEdgesAdder<BT>;
+
+ typedef GraphTraits<const BlockT *> Successor;
+ typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
+
+ const BranchProbabilityInfoT *BPI;
+ const LoopInfoT *LI;
+ const FunctionT *F;
+
+ // All blocks in reverse postorder.
+ std::vector<const BlockT *> RPOT;
+ DenseMap<const BlockT *, BlockNode> Nodes;
+
+ typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
+
+ rpot_iterator rpot_begin() const { return RPOT.begin(); }
+ rpot_iterator rpot_end() const { return RPOT.end(); }
+
+ size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
+
+ BlockNode getNode(const rpot_iterator &I) const {
+ return BlockNode(getIndex(I));
+ }
+ BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
+
+ const BlockT *getBlock(const BlockNode &Node) const {
+ assert(Node.Index < RPOT.size());
+ return RPOT[Node.Index];
+ }
+
+ /// \brief Run (and save) a post-order traversal.
+ ///
+ /// Saves a reverse post-order traversal of all the nodes in \a F.
+ void initializeRPOT();
+
+ /// \brief Initialize loop data.
+ ///
+ /// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
+ /// each block to the deepest loop it's in, but we need the inverse. For each
+ /// loop, we store in reverse post-order its "immediate" members, defined as
+ /// the header, the headers of immediate sub-loops, and all other blocks in
+ /// the loop that are not in sub-loops.
+ void initializeLoops();
+
+ /// \brief Propagate to a block's successors.
+ ///
+ /// In the context of distributing mass through \c OuterLoop, divide the mass
+ /// currently assigned to \c Node between its successors.
+ ///
+ /// \return \c true unless there's an irreducible backedge.
+ bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
+
+ /// \brief Compute mass in a particular loop.
+ ///
+ /// Assign mass to \c Loop's header, and then for each block in \c Loop in
+ /// reverse post-order, distribute mass to its successors. Only visits nodes
+ /// that have not been packaged into sub-loops.
+ ///
+ /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
+ /// \return \c true unless there's an irreducible backedge.
+ bool computeMassInLoop(LoopData &Loop);
+
+ /// \brief Try to compute mass in the top-level function.
+ ///
+ /// Assign mass to the entry block, and then for each block in reverse
+ /// post-order, distribute mass to its successors. Skips nodes that have
+ /// been packaged into loops.
+ ///
+ /// \pre \a computeMassInLoops() has been called.
+ /// \return \c true unless there's an irreducible backedge.
+ bool tryToComputeMassInFunction();
+
+ /// \brief Compute mass in (and package up) irreducible SCCs.
+ ///
+ /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
+ /// of \c Insert), and call \a computeMassInLoop() on each of them.
+ ///
+ /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
+ ///
+ /// \pre \a computeMassInLoop() has been called for each subloop of \c
+ /// OuterLoop.
+ /// \pre \c Insert points at the last loop successfully processed by \a
+ /// computeMassInLoop().
+ /// \pre \c OuterLoop has irreducible SCCs.
+ void computeIrreducibleMass(LoopData *OuterLoop,
+ std::list<LoopData>::iterator Insert);
+
+ /// \brief Compute mass in all loops.
+ ///
+ /// For each loop bottom-up, call \a computeMassInLoop().
+ ///
+ /// \a computeMassInLoop() aborts (and returns \c false) on loops that
+ /// contain irreducible sub-SCCs. Use \a computeIrreducibleMass() and then
+ /// re-enter \a computeMassInLoop().
+ ///
+ /// \post \a computeMassInLoop() has returned \c true for every loop.
+ void computeMassInLoops();
+
+ /// \brief Compute mass in the top-level function.
+ ///
+ /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
+ /// compute mass in the top-level function.
+ ///
+ /// \post \a tryToComputeMassInFunction() has returned \c true.
+ void computeMassInFunction();
+
+ std::string getBlockName(const BlockNode &Node) const override {
+ return bfi_detail::getBlockName(getBlock(Node));
+ }
+
+public:
+ const FunctionT *getFunction() const { return F; }
+
+ void calculate(const FunctionT &F, const BranchProbabilityInfoT &BPI,
+ const LoopInfoT &LI);
+ BlockFrequencyInfoImpl() : BPI(nullptr), LI(nullptr), F(nullptr) {}
+
+ using BlockFrequencyInfoImplBase::getEntryFreq;
+ BlockFrequency getBlockFreq(const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
+ }
+ void setBlockFreq(const BlockT *BB, uint64_t Freq);
+ Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
+ }
+
+ /// \brief Print the frequencies for the current function.
+ ///
+ /// Prints the frequencies for the blocks in the current function.
+ ///
+ /// Blocks are printed in the natural iteration order of the function, rather
+ /// than reverse post-order. This provides two advantages: writing -analyze
+ /// tests is easier (since blocks come out in source order), and even
+ /// unreachable blocks are printed.
+ ///
+ /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
+ /// we need to override it here.
+ raw_ostream &print(raw_ostream &OS) const override;
+ using BlockFrequencyInfoImplBase::dump;
+
+ using BlockFrequencyInfoImplBase::printBlockFreq;
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
+ return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
+ }
+};
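+
+// Illustrative driver (a sketch; assumes BPI and LI have already been
+// computed for F):
+//   BlockFrequencyInfoImpl<BasicBlock> BFI;
+//   BFI.calculate(F, BPI, LI);
+//   for (const BasicBlock &BB : F)
+//     BFI.printBlockFreq(dbgs(), &BB) << "\n";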
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::calculate(const FunctionT &F,
+ const BranchProbabilityInfoT &BPI,
+ const LoopInfoT &LI) {
+ // Save the parameters.
+ this->BPI = &BPI;
+ this->LI = &LI;
+ this->F = &F;
+
+ // Clean up left-over data structures.
+ BlockFrequencyInfoImplBase::clear();
+ RPOT.clear();
+ Nodes.clear();
+
+ // Initialize.
+ DEBUG(dbgs() << "\nblock-frequency: " << F.getName() << "\n================="
+ << std::string(F.getName().size(), '=') << "\n");
+ initializeRPOT();
+ initializeLoops();
+
+ // Visit loops in post-order to find the local mass distribution, and then do
+ // the full function.
+ computeMassInLoops();
+ computeMassInFunction();
+ unwrapLoops();
+ finalizeMetrics();
+}
+
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::setBlockFreq(const BlockT *BB, uint64_t Freq) {
+ if (Nodes.count(BB))
+ BlockFrequencyInfoImplBase::setBlockFreq(getNode(BB), Freq);
+ else {
+ // If BB is a newly added block after BFI is done, we need to create a new
+ // BlockNode for it assigned with a new index. The index can be determined
+ // by the size of Freqs.
+ BlockNode NewNode(Freqs.size());
+ Nodes[BB] = NewNode;
+ Freqs.emplace_back();
+ BlockFrequencyInfoImplBase::setBlockFreq(NewNode, Freq);
+ }
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
+ const BlockT *Entry = &F->front();
+ RPOT.reserve(F->size());
+ std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
+ std::reverse(RPOT.begin(), RPOT.end());
+
+ assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
+ "More nodes in function than Block Frequency Info supports");
+
+ DEBUG(dbgs() << "reverse-post-order-traversal\n");
+ for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
+ BlockNode Node = getNode(I);
+ DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
+ Nodes[*I] = Node;
+ }
+
+ Working.reserve(RPOT.size());
+ for (size_t Index = 0; Index < RPOT.size(); ++Index)
+ Working.emplace_back(Index);
+ Freqs.resize(RPOT.size());
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
+ DEBUG(dbgs() << "loop-detection\n");
+ if (LI->empty())
+ return;
+
+ // Visit loops top down and assign them an index.
+ std::deque<std::pair<const LoopT *, LoopData *>> Q;
+ for (const LoopT *L : *LI)
+ Q.emplace_back(L, nullptr);
+ while (!Q.empty()) {
+ const LoopT *Loop = Q.front().first;
+ LoopData *Parent = Q.front().second;
+ Q.pop_front();
+
+ BlockNode Header = getNode(Loop->getHeader());
+ assert(Header.isValid());
+
+ Loops.emplace_back(Parent, Header);
+ Working[Header.Index].Loop = &Loops.back();
+ DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
+
+ for (const LoopT *L : *Loop)
+ Q.emplace_back(L, &Loops.back());
+ }
+
+ // Visit nodes in reverse post-order and add them to their deepest containing
+ // loop.
+ for (size_t Index = 0; Index < RPOT.size(); ++Index) {
+ // Loop headers have already been mostly mapped.
+ if (Working[Index].isLoopHeader()) {
+ LoopData *ContainingLoop = Working[Index].getContainingLoop();
+ if (ContainingLoop)
+ ContainingLoop->Nodes.push_back(Index);
+ continue;
+ }
+
+ const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
+ if (!Loop)
+ continue;
+
+ // Add this node to its containing loop's member list.
+ BlockNode Header = getNode(Loop->getHeader());
+ assert(Header.isValid());
+ const auto &HeaderData = Working[Header.Index];
+ assert(HeaderData.isLoopHeader());
+
+ Working[Index].Loop = HeaderData.Loop;
+ HeaderData.Loop->Nodes.push_back(Index);
+ DEBUG(dbgs() << " - loop = " << getBlockName(Header)
+ << ": member = " << getBlockName(Index) << "\n");
+ }
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
+ // Visit the deepest loops first, and the top-level loops last.
+ for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
+ if (computeMassInLoop(*L))
+ continue;
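+ // computeIrreducibleMass() inserts the new loops it discovers at L.base().
+ // std::list iterators survive insertion, but the element this
+ // reverse_iterator designates would silently become one of the inserted
+ // loops, so hop through Next to keep designating the same loop.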
+ auto Next = std::next(L);
+ computeIrreducibleMass(&*L, L.base());
+ L = std::prev(Next);
+ if (computeMassInLoop(*L))
+ continue;
+ llvm_unreachable("unhandled irreducible control flow");
+ }
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
+ // Compute mass in loop.
+ DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
+
+ if (Loop.isIrreducible()) {
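+ // An irreducible loop has several headers; seed them by splitting the full
+ // mass evenly: header H receives 1/(NumHeaders - H) of the remaining mass,
+ // which works out to 1/NumHeaders apiece.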
+ BlockMass Remaining = BlockMass::getFull();
+ for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
+ auto &Mass = Working[Loop.Nodes[H].Index].getMass();
+ Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H);
+ Remaining -= Mass;
+ }
+ for (const BlockNode &M : Loop.Nodes)
+ if (!propagateMassToSuccessors(&Loop, M))
+ llvm_unreachable("unhandled irreducible control flow");
+
+ adjustLoopHeaderMass(Loop);
+ } else {
+ Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
+ if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
+ llvm_unreachable("irreducible control flow to loop header!?");
+ for (const BlockNode &M : Loop.members())
+ if (!propagateMassToSuccessors(&Loop, M))
+ // Irreducible backedge.
+ return false;
+ }
+
+ computeLoopScale(Loop);
+ packageLoop(Loop);
+ return true;
+}
+
+template <class BT>
+bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
+ // Compute mass in function.
+ DEBUG(dbgs() << "compute-mass-in-function\n");
+ assert(!Working.empty() && "no blocks in function");
+ assert(!Working[0].isLoopHeader() && "entry block is a loop header");
+
+ Working[0].getMass() = BlockMass::getFull();
+ for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
+ // Check for nodes that have been packaged.
+ BlockNode Node = getNode(I);
+ if (Working[Node.Index].isPackaged())
+ continue;
+
+ if (!propagateMassToSuccessors(nullptr, Node))
+ return false;
+ }
+ return true;
+}
+
+template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
+ if (tryToComputeMassInFunction())
+ return;
+ computeIrreducibleMass(nullptr, Loops.begin());
+ if (tryToComputeMassInFunction())
+ return;
+ llvm_unreachable("unhandled irreducible control flow");
+}
+
+/// \note This should be a lambda, but that crashes GCC 4.7.
+namespace bfi_detail {
+template <class BT> struct BlockEdgesAdder {
+ typedef BT BlockT;
+ typedef BlockFrequencyInfoImplBase::LoopData LoopData;
+ typedef GraphTraits<const BlockT *> Successor;
+
+ const BlockFrequencyInfoImpl<BT> &BFI;
+ explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
+ : BFI(BFI) {}
+ void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
+ const LoopData *OuterLoop) {
+ const BlockT *BB = BFI.RPOT[Irr.Node.Index];
+ for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
+ I != E; ++I)
+ G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
+ }
+};
+}
+template <class BT>
+void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
+ LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
+ DEBUG(dbgs() << "analyze-irreducible-in-";
+ if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
+ else dbgs() << "function\n");
+
+ using namespace bfi_detail;
+ // Ideally, addBlockEdges() would be declared here as a lambda, but that
+ // crashes GCC 4.7.
+ BlockEdgesAdder<BT> addBlockEdges(*this);
+ IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
+
+ for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
+ computeMassInLoop(L);
+
+ if (!OuterLoop)
+ return;
+ updateLoopWithIrreducible(*OuterLoop);
+}
+
+namespace {
+// A helper function that converts a branch probability into a weight.
+inline uint32_t getWeightFromBranchProb(const BranchProbability Prob) {
+ return Prob.getNumerator();
+}
+} // namespace
+
+template <class BT>
+bool
+BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
+ const BlockNode &Node) {
+ DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
+ // Calculate probability for successors.
+ Distribution Dist;
+ if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
+ assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
+ if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
+ // Irreducible backedge.
+ return false;
+ } else {
+ const BlockT *BB = getBlock(Node);
+ for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
+ SI != SE; ++SI)
+ if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
+ getWeightFromBranchProb(BPI->getEdgeProbability(BB, SI))))
+ // Irreducible backedge.
+ return false;
+ }
+
+ // Distribute mass to successors, saving exit and backedge data in the
+ // loop header.
+ distributeMass(Node, OuterLoop, Dist);
+ return true;
+}
+
+template <class BT>
+raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
+ if (!F)
+ return OS;
+ OS << "block-frequency-info: " << F->getName() << "\n";
+ for (const BlockT &BB : *F) {
+ OS << " - " << bfi_detail::getBlockName(&BB) << ": float = ";
+ getFloatingBlockFreq(&BB).print(OS, 5)
+ << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
+ }
+
+ // Add an extra newline for readability.
+ OS << "\n";
+ return OS;
+}
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
new file mode 100644
index 0000000..cfdf218
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/BranchProbabilityInfo.h
@@ -0,0 +1,160 @@
+//===--- BranchProbabilityInfo.h - Branch Probability Analysis --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to evaluate branch probabilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
+#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+
+namespace llvm {
+class LoopInfo;
+class raw_ostream;
+
+/// \brief Analysis providing branch probability information.
+///
+/// This is a function analysis which provides information on the relative
+/// probabilities of each "edge" in the function's CFG where such an edge is
+/// defined by a pair (PredBlock and an index in the successors). The
+/// probability of an edge from one block is always relative to the
+/// probabilities of other edges from the block. The probabilities of all edges
+/// from a block sum to exactly one (100%).
+/// We use a pair (PredBlock and an index in the successors) to uniquely
+/// identify an edge, since we can have multiple edges from Src to Dst.
+/// As an example, we can have a switch which jumps to Dst with value 0 and
+/// value 10.
+class BranchProbabilityInfo {
+public:
+ BranchProbabilityInfo() {}
+ BranchProbabilityInfo(Function &F, const LoopInfo &LI) { calculate(F, LI); }
+
+ void releaseMemory();
+
+ void print(raw_ostream &OS) const;
+
+ /// \brief Get an edge's probability, relative to other out-edges of the Src.
+ ///
+ /// This routine provides access to the fractional probability between zero
+ /// (0%) and one (100%) of this edge executing, relative to other edges
+ /// leaving the 'Src' block. The returned probability is never zero, and can
+ /// only be one if the source block has only one successor.
+ BranchProbability getEdgeProbability(const BasicBlock *Src,
+ unsigned IndexInSuccessors) const;
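+
+ // For instance (sketch; 'BB' ends in a conditional branch and this BPI has
+ // been calculated for the enclosing function):
+ //
+ //   BranchProbability Taken = BPI.getEdgeProbability(BB, 0u);
+ //   BranchProbability NotTaken = BPI.getEdgeProbability(BB, 1u);
+ //   // The two probabilities sum to BranchProbability::getOne().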
+
+ /// \brief Get the probability of going from Src to Dst.
+ ///
+ /// It returns the sum of all probabilities for edges from Src to Dst.
+ BranchProbability getEdgeProbability(const BasicBlock *Src,
+ const BasicBlock *Dst) const;
+
+ BranchProbability getEdgeProbability(const BasicBlock *Src,
+ succ_const_iterator Dst) const;
+
+ /// \brief Test if an edge is hot relative to other out-edges of the Src.
+ ///
+ /// Check whether this edge out of the source block is 'hot'. We define hot
+ /// as having a relative probability >= 80%.
+ bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
+
+ /// \brief Retrieve the hot successor of a block if one exists.
+ ///
+ /// Given a basic block, look through its successors and if one exists for
+ /// which \see isEdgeHot would return true, return that successor block.
+ BasicBlock *getHotSucc(BasicBlock *BB) const;
+
+ /// \brief Print an edge's probability.
+ ///
+ /// Retrieves an edge's probability similarly to \see getEdgeProbability, but
+ /// then prints that probability to the provided stream. That stream is then
+ /// returned.
+ raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
+ const BasicBlock *Dst) const;
+
+ /// \brief Set the raw edge probability for the given edge.
+ ///
+ /// This allows a pass to explicitly set the edge probability for an edge. It
+ /// can be used when updating the CFG to update and preserve the branch
+ /// probability information. Read the implementation of how these edge
+ /// probabilities are calculated carefully before using!
+ void setEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors,
+ BranchProbability Prob);
+
+ static BranchProbability getBranchProbStackProtector(bool IsLikely) {
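+    // (2^20 - 1) / 2^20: overwhelmingly likely, but deliberately short of
+    // certainty so that the complement used for the unlikely branch is
+    // still non-zero.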
+ static const BranchProbability LikelyProb((1u << 20) - 1, 1u << 20);
+ return IsLikely ? LikelyProb : LikelyProb.getCompl();
+ }
+
+ void calculate(Function &F, const LoopInfo& LI);
+
+private:
+ // Since we allow duplicate edges from one basic block to another, we use
+ // a pair (PredBlock and an index in the successors) to specify an edge.
+ typedef std::pair<const BasicBlock *, unsigned> Edge;
+
+ // Default weight value. Used when we don't have information about the edge.
+ // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+ // the successors have a weight yet. But it doesn't make sense when providing
+ // weight to an edge that may have siblings with non-zero weights. This can
+// be handled in various ways, but it's probably fine for an edge with unknown
+ // weight to just "inherit" the non-zero weight of an adjacent successor.
+ static const uint32_t DEFAULT_WEIGHT = 16;
+
+ DenseMap<Edge, BranchProbability> Probs;
+
+ /// \brief Track the last function we run over for printing.
+ Function *LastF;
+
+ /// \brief Track the set of blocks post-dominated by an unreachable-terminated
+ /// block.
+ SmallPtrSet<BasicBlock *, 16> PostDominatedByUnreachable;
+
+ /// \brief Track the set of blocks that always lead to a cold call.
+ SmallPtrSet<BasicBlock *, 16> PostDominatedByColdCall;
+
+ bool calcUnreachableHeuristics(BasicBlock *BB);
+ bool calcMetadataWeights(BasicBlock *BB);
+ bool calcColdCallHeuristics(BasicBlock *BB);
+ bool calcPointerHeuristics(BasicBlock *BB);
+ bool calcLoopBranchHeuristics(BasicBlock *BB, const LoopInfo &LI);
+ bool calcZeroHeuristics(BasicBlock *BB);
+ bool calcFloatingPointHeuristics(BasicBlock *BB);
+ bool calcInvokeHeuristics(BasicBlock *BB);
+};
+
+/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
+class BranchProbabilityInfoWrapperPass : public FunctionPass {
+ BranchProbabilityInfo BPI;
+
+public:
+ static char ID;
+
+ BranchProbabilityInfoWrapperPass() : FunctionPass(ID) {
+ initializeBranchProbabilityInfoWrapperPassPass(
+ *PassRegistry::getPassRegistry());
+ }
+
+ BranchProbabilityInfo &getBPI() { return BPI; }
+ const BranchProbabilityInfo &getBPI() const { return BPI; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CFG.h b/contrib/llvm/include/llvm/Analysis/CFG.h
new file mode 100644
index 0000000..35165f4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CFG.h
@@ -0,0 +1,94 @@
+//===-- Analysis/CFG.h - BasicBlock Analyses --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions performs analyses on basic blocks, and instructions
+// contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFG_H
+#define LLVM_ANALYSIS_CFG_H
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+
+namespace llvm {
+
+class BasicBlock;
+class DominatorTree;
+class Function;
+class Instruction;
+class LoopInfo;
+class TerminatorInst;
+
+/// Analyze the specified function to find all of the loop backedges in the
+/// function and return them. This is a relatively cheap (compared to
+/// computing dominators and loop info) analysis.
+///
+/// The output is added to Result, as pairs of <from,to> edge info.
+void FindFunctionBackedges(
+ const Function &F,
+ SmallVectorImpl<std::pair<const BasicBlock *, const BasicBlock *> > &
+ Result);
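+
+// For example (sketch; 'F' is any Function):
+//
+//   SmallVector<std::pair<const BasicBlock *, const BasicBlock *>, 8> Edges;
+//   FindFunctionBackedges(F, Edges);
+//   for (const auto &Edge : Edges)
+//     errs() << Edge.first->getName() << " -> "
+//            << Edge.second->getName() << "\n";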
+
+/// Search for the specified successor of basic block BB and return its position
+/// in the terminator instruction's list of successors. It is an error to call
+/// this with a block that is not a successor.
+unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
+
+/// Return true if the specified edge is a critical edge. Critical edges are
+/// edges from a block with multiple successors to a block with multiple
+/// predecessors.
+///
+bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
+ bool AllowIdenticalEdges = false);
+
+/// \brief Determine whether instruction 'To' is reachable from 'From',
+/// returning true if uncertain.
+///
+/// Determine whether there is a path from From to To within a single function.
+/// Returns false only if we can prove that once 'From' has been executed then
+/// 'To' can not be executed. Conservatively returns true.
+///
+/// This function is linear with respect to the number of blocks in the CFG,
+/// walking down successors from From to reach To, with a fixed threshold.
+/// Using DT or LI allows us to answer more quickly. LI reduces the cost of
+/// an entire loop of any number of blocks to be the same as the cost of a
+/// single block. DT reduces the cost by allowing the search to terminate when
+/// we find a block that dominates the block containing 'To'. DT is most useful
+/// on branchy code but not loops, and LI is most useful on code with loops but
+/// does not help on branchy code outside loops.
+bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
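+
+// A sketch of typical use ('A' and 'B' are Instructions in one function; DT
+// and LI are optional and only make the query cheaper):
+//
+//   if (!isPotentiallyReachable(A, B, &DT, &LI))
+//     ;  // proven: 'B' can never execute once 'A' has executed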
+
+/// \brief Determine whether block 'To' is reachable from 'From', returning
+/// true if uncertain.
+///
+/// Determine whether there is a path from From to To within a single function.
+/// Returns false only if we can prove that once 'From' has been reached then
+/// 'To' can not be executed. Conservatively returns true.
+bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
+
+/// \brief Determine whether there is at least one path from a block in
+/// 'Worklist' to 'StopBB', returning true if uncertain.
+///
+/// Determine whether there is a path from at least one block in Worklist to
+/// StopBB within a single function. Returns false only if we can prove that
+/// once any block in 'Worklist' has been reached then 'StopBB' can not be
+/// executed. Conservatively returns true.
+bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
+ BasicBlock *StopBB,
+ const DominatorTree *DT = nullptr,
+ const LoopInfo *LI = nullptr);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CFGPrinter.h b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
new file mode 100644
index 0000000..0357648
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CFGPrinter.h
@@ -0,0 +1,130 @@
+//===-- CFGPrinter.h - CFG printer external interface -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the CFG printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFGPRINTER_H
+#define LLVM_ANALYSIS_CFGPRINTER_H
+
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/GraphWriter.h"
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getGraphName(const Function *F) {
+ return "CFG for '" + F->getName().str() + "' function";
+ }
+
+ static std::string getSimpleNodeLabel(const BasicBlock *Node,
+ const Function *) {
+ if (!Node->getName().empty())
+ return Node->getName().str();
+
+ std::string Str;
+ raw_string_ostream OS(Str);
+
+ Node->printAsOperand(OS, false);
+ return OS.str();
+ }
+
+ static std::string getCompleteNodeLabel(const BasicBlock *Node,
+ const Function *) {
+ enum { MaxColumns = 80 };
+ std::string Str;
+ raw_string_ostream OS(Str);
+
+ if (Node->getName().empty()) {
+ Node->printAsOperand(OS, false);
+ OS << ":";
+ }
+
+ OS << *Node;
+ std::string OutStr = OS.str();
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ unsigned ColNum = 0;
+ unsigned LastSpace = 0;
+ for (unsigned i = 0; i != OutStr.length(); ++i) {
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ ColNum = 0;
+ LastSpace = 0;
+ } else if (OutStr[i] == ';') { // Delete comments!
+ unsigned Idx = OutStr.find('\n', i+1); // Find end of line
+ OutStr.erase(OutStr.begin()+i, OutStr.begin()+Idx);
+ --i;
+ } else if (ColNum == MaxColumns) { // Wrap lines.
+ // Wrap very long names even though we can't find a space.
+ if (!LastSpace)
+ LastSpace = i;
+ OutStr.insert(LastSpace, "\\l...");
+ ColNum = i - LastSpace;
+ LastSpace = 0;
+ i += 3; // The loop will advance 'i' again.
+ }
+ else
+ ++ColNum;
+ if (OutStr[i] == ' ')
+ LastSpace = i;
+ }
+ return OutStr;
+ }
+
+ std::string getNodeLabel(const BasicBlock *Node,
+ const Function *Graph) {
+ if (isSimple())
+ return getSimpleNodeLabel(Node, Graph);
+ else
+ return getCompleteNodeLabel(Node, Graph);
+ }
+
+ static std::string getEdgeSourceLabel(const BasicBlock *Node,
+ succ_const_iterator I) {
+ // Label source of conditional branches with "T" or "F"
+ if (const BranchInst *BI = dyn_cast<BranchInst>(Node->getTerminator()))
+ if (BI->isConditional())
+ return (I == succ_begin(Node)) ? "T" : "F";
+
+ // Label source of switch edges with the associated value.
+ if (const SwitchInst *SI = dyn_cast<SwitchInst>(Node->getTerminator())) {
+ unsigned SuccNo = I.getSuccessorIndex();
+
+ if (SuccNo == 0) return "def";
+
+ std::string Str;
+ raw_string_ostream OS(Str);
+ SwitchInst::ConstCaseIt Case =
+ SwitchInst::ConstCaseIt::fromSuccessorIndex(SI, SuccNo);
+ OS << Case.getCaseValue()->getValue();
+ return OS.str();
+ }
+ return "";
+ }
+};
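+
+// With this specialization in scope, a function's CFG can be rendered through
+// the generic GraphWriter machinery, e.g. (sketch; 'OS' is a raw_ostream):
+//
+//   WriteGraph(OS, (const Function *)&F, /*ShortNames=*/false,
+//              "CFG for '" + F.getName().str() + "' function");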
+} // End llvm namespace
+
+namespace llvm {
+ class FunctionPass;
+ FunctionPass *createCFGPrinterPass ();
+ FunctionPass *createCFGOnlyPrinterPass ();
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CFLAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/CFLAliasAnalysis.h
new file mode 100644
index 0000000..7473a45
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CFLAliasAnalysis.h
@@ -0,0 +1,158 @@
+//===- CFLAliasAnalysis.h - CFL-Based Alias Analysis Interface ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for LLVM's CFL-based alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CFLALIASANALYSIS_H
+#define LLVM_ANALYSIS_CFLALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <forward_list>
+
+namespace llvm {
+
+class CFLAAResult : public AAResultBase<CFLAAResult> {
+ friend AAResultBase<CFLAAResult>;
+
+ struct FunctionInfo;
+
+public:
+ explicit CFLAAResult(const TargetLibraryInfo &TLI);
+ CFLAAResult(CFLAAResult &&Arg);
+
+ /// Handle invalidation events from the new pass manager.
+ ///
+ /// By definition, this result is stateless and so remains valid.
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+ /// \brief Inserts the given Function into the cache.
+ void scan(Function *Fn);
+
+ void evict(Function *Fn);
+
+ /// \brief Ensures that the given function is available in the cache.
+ /// Returns the appropriate entry from the cache.
+ const Optional<FunctionInfo> &ensureCached(Function *Fn);
+
+ AliasResult query(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB) {
+ if (LocA.Ptr == LocB.Ptr) {
+ if (LocA.Size == LocB.Size) {
+ return MustAlias;
+ } else {
+ return PartialAlias;
+ }
+ }
+
+ // Comparisons between global variables and other constants should be
+ // handled by BasicAA.
+ // TODO: ConstantExpr handling -- CFLAA may report NoAlias when comparing
+ // a GlobalValue and ConstantExpr, but every query needs to have at least
+ // one Value tied to a Function, and neither GlobalValues nor ConstantExprs
+ // are.
+ if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr)) {
+ return AAResultBase::alias(LocA, LocB);
+ }
+
+ AliasResult QueryResult = query(LocA, LocB);
+ if (QueryResult == MayAlias)
+ return AAResultBase::alias(LocA, LocB);
+
+ return QueryResult;
+ }
+
+private:
+ struct FunctionHandle final : public CallbackVH {
+ FunctionHandle(Function *Fn, CFLAAResult *Result)
+ : CallbackVH(Fn), Result(Result) {
+ assert(Fn != nullptr);
+ assert(Result != nullptr);
+ }
+
+ void deleted() override { removeSelfFromCache(); }
+ void allUsesReplacedWith(Value *) override { removeSelfFromCache(); }
+
+ private:
+ CFLAAResult *Result;
+
+ void removeSelfFromCache() {
+ assert(Result != nullptr);
+ auto *Val = getValPtr();
+ Result->evict(cast<Function>(Val));
+ setValPtr(nullptr);
+ }
+ };
+
+ /// \brief Cached mapping of Functions to their StratifiedSets.
+ /// If a function's sets are currently being built, it is marked
+ /// in the cache as an Optional without a value. This way, if we
+ /// have any kind of recursion, it is discernible from a function
+ /// that simply has empty sets.
+ DenseMap<Function *, Optional<FunctionInfo>> Cache;
+ std::forward_list<FunctionHandle> Handles;
+
+ FunctionInfo buildSetsFrom(Function *F);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+///
+/// FIXME: We really should refactor CFL to use the analysis more heavily, and
+/// in particular to leverage invalidation to trigger re-computation of sets.
+class CFLAA {
+public:
+ typedef CFLAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ CFLAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "CFLAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the CFLAAResult object.
+class CFLAAWrapperPass : public ImmutablePass {
+ std::unique_ptr<CFLAAResult> Result;
+
+public:
+ static char ID;
+
+ CFLAAWrapperPass();
+
+ CFLAAResult &getResult() { return *Result; }
+ const CFLAAResult &getResult() const { return *Result; }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createCFLAAWrapperPass - This pass implements a set-based approach to
+// alias analysis.
+//
+ImmutablePass *createCFLAAWrapperPass();
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h b/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h
new file mode 100644
index 0000000..e7635eb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CGSCCPassManager.h
@@ -0,0 +1,490 @@
+//===- CGSCCPassManager.h - Call graph pass management ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides classes for managing passes over SCCs of the call
+/// graph. These passes form an important component of LLVM's interprocedural
+/// optimizations. Because they operate on the SCCs of the call graph, and they
+/// traverse the graph in post order, they can effectively do pair-wise
+/// interprocedural optimizations for all call edges in the program. At each
+/// call site edge, the callee has already been optimized as much as is
+/// possible. This in turn allows very accurate analysis of it for IPO.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CGSCCPASSMANAGER_H
+#define LLVM_ANALYSIS_CGSCCPASSMANAGER_H
+
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief The CGSCC pass manager.
+///
+/// See the documentation for the PassManager template for details. It runs
+/// a sequence of SCC passes over each SCC that the manager is run over. This
+/// typedef serves as a convenient way to refer to this construct.
+typedef PassManager<LazyCallGraph::SCC> CGSCCPassManager;
+
+/// \brief The CGSCC analysis manager.
+///
+/// See the documentation for the AnalysisManager template for detailed
+/// documentation. This typedef serves as a convenient way to refer to this
+/// construct in the adaptors and proxies used to integrate this into the larger
+/// pass manager infrastructure.
+typedef AnalysisManager<LazyCallGraph::SCC> CGSCCAnalysisManager;
+
+/// \brief A module analysis which acts as a proxy for a CGSCC analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the module analysis
+/// manager and module pass manager to a CGSCC analysis manager. You should
+/// never use a CGSCC analysis manager from within (transitively) a module
+/// pass manager unless your parent module pass has received a proxy result
+/// object for it.
+class CGSCCAnalysisManagerModuleProxy {
+public:
+ class Result {
+ public:
+ explicit Result(CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : CGAM(Arg.CGAM) {}
+ Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c CGSCCAnalysisManager.
+ CGSCCAnalysisManager &getManager() { return *CGAM; }
+
+ /// \brief Handler for invalidation of the module.
+ ///
+ /// If this analysis itself is preserved, then we assume that the call
+ /// graph of the module hasn't changed and thus we don't need to invalidate
+ /// *all* cached data associated with a \c SCC* in the \c
+ /// CGSCCAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c CGSCCAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(Module &M, const PreservedAnalyses &PA);
+
+ private:
+ CGSCCAnalysisManager *CGAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "CGSCCAnalysisManagerModuleProxy"; }
+
+ explicit CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManager &CGAM)
+ : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCAnalysisManagerModuleProxy(const CGSCCAnalysisManagerModuleProxy &Arg)
+ : CGAM(Arg.CGAM) {}
+ CGSCCAnalysisManagerModuleProxy(CGSCCAnalysisManagerModuleProxy &&Arg)
+ : CGAM(std::move(Arg.CGAM)) {}
+ CGSCCAnalysisManagerModuleProxy &
+ operator=(CGSCCAnalysisManagerModuleProxy RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the module analysis cache so that we can proxy
+ /// invalidation to the CGSCC analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the CGSCC analysis manager prior to
+ /// this analysis being requested.
+ Result run(Module &M);
+
+private:
+ static char PassID;
+
+ CGSCCAnalysisManager *CGAM;
+};
+
+/// \brief A CGSCC analysis which acts as a proxy for a module analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent module analysis manager to
+/// CGSCC passes. Only the const interface of the module analysis manager is
+/// provided to indicate that once inside of a CGSCC analysis pass you
+/// cannot request a module analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalysis set.
+class ModuleAnalysisManagerCGSCCProxy {
+public:
+ /// \brief Result proxy object for \c ModuleAnalysisManagerCGSCCProxy.
+ class Result {
+ public:
+ explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : MAM(Arg.MAM) {}
+ Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ const ModuleAnalysisManager &getManager() const { return *MAM; }
+
+ /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ bool invalidate(LazyCallGraph::SCC &) { return false; }
+
+ private:
+ const ModuleAnalysisManager *MAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "ModuleAnalysisManagerCGSCCProxy"; }
+
+ ModuleAnalysisManagerCGSCCProxy(const ModuleAnalysisManager &MAM)
+ : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleAnalysisManagerCGSCCProxy(const ModuleAnalysisManagerCGSCCProxy &Arg)
+ : MAM(Arg.MAM) {}
+ ModuleAnalysisManagerCGSCCProxy(ModuleAnalysisManagerCGSCCProxy &&Arg)
+ : MAM(std::move(Arg.MAM)) {}
+ ModuleAnalysisManagerCGSCCProxy &
+ operator=(ModuleAnalysisManagerCGSCCProxy RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ /// Nothing to see here; it just forwards the \c MAM reference into the
+ /// result.
+ Result run(LazyCallGraph::SCC &) { return Result(*MAM); }
+
+private:
+ static char PassID;
+
+ const ModuleAnalysisManager *MAM;
+};
+
+/// \brief The core module pass which does a post-order walk of the SCCs and
+/// runs a CGSCC pass over each one.
+///
+/// Designed to allow composition of a CGSCCPass(Manager) and
+/// a ModulePassManager. Note that this pass must be run with a module analysis
+/// manager as it uses the LazyCallGraph analysis. It will also run the
+/// \c CGSCCAnalysisManagerModuleProxy analysis prior to running the CGSCC
+/// pass over the module to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename CGSCCPassT> class ModuleToPostOrderCGSCCPassAdaptor {
+public:
+ explicit ModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleToPostOrderCGSCCPassAdaptor(
+ const ModuleToPostOrderCGSCCPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ ModuleToPostOrderCGSCCPassAdaptor(ModuleToPostOrderCGSCCPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+ friend void swap(ModuleToPostOrderCGSCCPassAdaptor &LHS,
+ ModuleToPostOrderCGSCCPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ ModuleToPostOrderCGSCCPassAdaptor &
+ operator=(ModuleToPostOrderCGSCCPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the CGSCC pass across every SCC in the module.
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM) {
+ assert(AM && "We need analyses to compute the call graph!");
+
+ // Setup the CGSCC analysis manager from its proxy.
+ CGSCCAnalysisManager &CGAM =
+ AM->getResult<CGSCCAnalysisManagerModuleProxy>(M).getManager();
+
+ // Get the call graph for this module.
+ LazyCallGraph &CG = AM->getResult<LazyCallGraphAnalysis>(M);
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (LazyCallGraph::SCC &C : CG.postorder_sccs()) {
+ PreservedAnalyses PassPA = Pass.run(C, &CGAM);
+
+ // We know that the CGSCC pass couldn't have invalidated any other
+ // SCC's analyses (that's the contract of a CGSCC pass), so
+ // directly handle the CGSCC analysis manager's invalidation here. We
+ // also update the preserved set of analyses to reflect that invalidated
+ // analyses are now safe to preserve.
+ // FIXME: This isn't quite correct. We need to handle the case where the
+ // pass updated the CG, particularly some child of the current SCC, and
+ // invalidate its analyses.
+ PassPA = CGAM.invalidate(C, std::move(PassPA));
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of CGSCC analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the CGSCC analysis manager
+ // incrementally above.
+ PA.preserve<CGSCCAnalysisManagerModuleProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "ModuleToPostOrderCGSCCPassAdaptor"; }
+
+private:
+ CGSCCPassT Pass;
+};
+
+/// \brief A function to deduce a CGSCC pass type and wrap it in the
+/// templated adaptor.
+template <typename CGSCCPassT>
+ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
+createModuleToPostOrderCGSCCPassAdaptor(CGSCCPassT Pass) {
+ return ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>(std::move(Pass));
+}
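+
+// Sketch of composing a pipeline with this adaptor ('SomeCGSCCPass' is an
+// illustrative placeholder):
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(SomeCGSCCPass());
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));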
+
+/// \brief A CGSCC analysis which acts as a proxy for a function analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the CGSCC analysis
+/// manager and CGSCC pass manager to a function analysis manager. You should
+/// never use a function analysis manager from within (transitively) a CGSCC
+/// pass manager unless your parent CGSCC pass has received a proxy result
+/// object for it.
+class FunctionAnalysisManagerCGSCCProxy {
+public:
+ class Result {
+ public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : FAM(Arg.FAM) {}
+ Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c FunctionAnalysisManager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ /// \brief Handler for invalidation of the SCC.
+ ///
+ /// If this analysis itself is preserved, then we assume that the set of \c
+ /// Function objects in the \c SCC hasn't changed and thus we don't need
+ /// to invalidate *all* cached data associated with a \c Function* in the \c
+ /// FunctionAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c FunctionAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA);
+
+ private:
+ FunctionAnalysisManager *FAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "FunctionAnalysisManagerCGSCCProxy"; }
+
+ explicit FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManager &FAM)
+ : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionAnalysisManagerCGSCCProxy(
+ const FunctionAnalysisManagerCGSCCProxy &Arg)
+ : FAM(Arg.FAM) {}
+ FunctionAnalysisManagerCGSCCProxy(FunctionAnalysisManagerCGSCCProxy &&Arg)
+ : FAM(std::move(Arg.FAM)) {}
+ FunctionAnalysisManagerCGSCCProxy &
+ operator=(FunctionAnalysisManagerCGSCCProxy RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the CGSCC analysis cache so that we can proxy
+ /// invalidation to the function analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the function analysis manager prior to
+ /// this analysis being requested.
+ Result run(LazyCallGraph::SCC &C);
+
+private:
+ static char PassID;
+
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief A function analysis which acts as a proxy for a CGSCC analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent CGSCC analysis manager to
+/// function passes. Only the const interface of the CGSCC analysis manager is
+/// provided to indicate that once inside of a function analysis pass you
+/// cannot request a CGSCC analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalysis set.
+class CGSCCAnalysisManagerFunctionProxy {
+public:
+ /// \brief Result proxy object for \c CGSCCAnalysisManagerFunctionProxy.
+ class Result {
+ public:
+ explicit Result(const CGSCCAnalysisManager &CGAM) : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : CGAM(Arg.CGAM) {}
+ Result(Result &&Arg) : CGAM(std::move(Arg.CGAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ const CGSCCAnalysisManager &getManager() const { return *CGAM; }
+
+ /// \brief Handle invalidation by ignoring it; this pass is immutable.
+ bool invalidate(Function &) { return false; }
+
+ private:
+ const CGSCCAnalysisManager *CGAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "CGSCCAnalysisManagerFunctionProxy"; }
+
+ CGSCCAnalysisManagerFunctionProxy(const CGSCCAnalysisManager &CGAM)
+ : CGAM(&CGAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCAnalysisManagerFunctionProxy(
+ const CGSCCAnalysisManagerFunctionProxy &Arg)
+ : CGAM(Arg.CGAM) {}
+ CGSCCAnalysisManagerFunctionProxy(CGSCCAnalysisManagerFunctionProxy &&Arg)
+ : CGAM(std::move(Arg.CGAM)) {}
+ CGSCCAnalysisManagerFunctionProxy &
+ operator=(CGSCCAnalysisManagerFunctionProxy RHS) {
+ std::swap(CGAM, RHS.CGAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ /// Nothing to see here; it just forwards the \c CGAM reference into the
+ /// result.
+ Result run(Function &) { return Result(*CGAM); }
+
+private:
+ static char PassID;
+
+ const CGSCCAnalysisManager *CGAM;
+};
+
+/// \brief Adaptor that maps from a SCC to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
+/// to a \c CGSCCAnalysisManager it will run the
+/// \c FunctionAnalysisManagerCGSCCProxy analysis prior to running the function
+/// pass over the SCC to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+template <typename FunctionPassT> class CGSCCToFunctionPassAdaptor {
+public:
+ explicit CGSCCToFunctionPassAdaptor(FunctionPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ CGSCCToFunctionPassAdaptor(const CGSCCToFunctionPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ CGSCCToFunctionPassAdaptor(CGSCCToFunctionPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+ friend void swap(CGSCCToFunctionPassAdaptor &LHS,
+ CGSCCToFunctionPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ CGSCCToFunctionPassAdaptor &operator=(CGSCCToFunctionPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the function pass across every function in the SCC.
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager *AM) {
+ FunctionAnalysisManager *FAM = nullptr;
+ if (AM)
+ // Setup the function analysis manager from its proxy.
+ FAM = &AM->getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (LazyCallGraph::Node *N : C) {
+ PreservedAnalyses PassPA = Pass.run(N->getFunction(), FAM);
+
+ // We know that the function pass couldn't have invalidated any other
+ // function's analyses (that's the contract of a function pass), so
+ // directly handle the function analysis manager's invalidation here.
+ // Also, update the preserved analyses to reflect that once invalidated
+ // these can again be preserved.
+ if (FAM)
+ PassPA = FAM->invalidate(N->getFunction(), std::move(PassPA));
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of function analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the function analysis manager
+ // incrementally above.
+ // FIXME: We need to update the call graph here to account for any deleted
+ // edges!
+ PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "CGSCCToFunctionPassAdaptor"; }
+
+private:
+ FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+CGSCCToFunctionPassAdaptor<FunctionPassT>
+createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
+ return CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
+}
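+
+// E.g. (sketch; 'SomeFunctionPass' is an illustrative placeholder):
+//
+//   CGSCCPassManager CGPM;
+//   CGPM.addPass(createCGSCCToFunctionPassAdaptor(SomeFunctionPass()));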
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/CallGraph.h b/contrib/llvm/include/llvm/Analysis/CallGraph.h
new file mode 100644
index 0000000..5562e9b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CallGraph.h
@@ -0,0 +1,495 @@
+//===- CallGraph.h - Build a Module's call graph ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides interfaces used to build and manipulate a call graph,
+/// which is a very useful tool for interprocedural optimization.
+///
+/// Every function in a module is represented as a node in the call graph. The
+/// callgraph node keeps track of which functions are called by the function
+/// corresponding to the node.
+///
+/// A call graph may contain nodes whose corresponding function is null. These
+/// 'external' nodes are used to represent control flow that is
+/// not represented (or analyzable) in the module. In particular, this
+/// analysis builds one external node such that:
+/// 1. All functions in the module without internal linkage will have edges
+/// from this external node, indicating that they could be called by
+/// functions outside of the module.
+/// 2. All functions whose address is used for something more than a direct
+/// call, for example being stored into a memory location, will also have
+/// an edge from this external node. Since they may be called by an
+/// unknown caller later, they must be tracked as such.
+///
+/// There is a second external node added for calls that leave this module.
+/// Functions have a call edge to the external node iff:
+/// 1. The function is external, reflecting the fact that it could call
+/// anything without internal linkage or that has its address taken.
+/// 2. The function contains an indirect function call.
+///
+/// As an extension in the future, there may be multiple nodes with a null
+/// function. These will be used when we can prove (through pointer analysis)
+/// that an indirect call site can call only a specific set of functions.
+///
+/// Because of these properties, the CallGraph captures a conservative superset
+/// of all of the caller-callee relationships, which is useful for
+/// transformations.
+///
+/// The CallGraph class also attempts to figure out what the root of the
+/// CallGraph is, which it currently does by looking for a function named
+/// 'main'. If no function named 'main' is found, the external node is used as
+/// the entry node, reflecting the fact that any function without internal
+/// linkage could be called into (which is common for libraries).
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLGRAPH_H
+#define LLVM_ANALYSIS_CALLGRAPH_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <map>
+
+namespace llvm {
+
+class Function;
+class Module;
+class CallGraphNode;
+
+/// \brief The basic data container for the call graph of a \c Module of IR.
+///
+/// This class exposes the interface to the call graph for a module of IR.
+///
+/// The core call graph itself can also be updated to reflect changes to the IR.
+class CallGraph {
+ Module &M;
+
+ typedef std::map<const Function *, std::unique_ptr<CallGraphNode>>
+ FunctionMapTy;
+
+ /// \brief A map from \c Function* to \c CallGraphNode*.
+ FunctionMapTy FunctionMap;
+
+ /// \brief Root is the root of the call graph, or the external node if a 'main'
+ /// function couldn't be found.
+ CallGraphNode *Root;
+
+ /// \brief This node has edges to all external functions and those internal
+ /// functions that have their address taken.
+ CallGraphNode *ExternalCallingNode;
+
+ /// \brief This node has edges to it from all functions making indirect calls
+ /// or calling an external function.
+ std::unique_ptr<CallGraphNode> CallsExternalNode;
+
+ /// \brief Replace the function represented by this node by another.
+ ///
+ /// This does not rescan the body of the function, so it is suitable when
+ /// splicing the body of one function to another while also updating all
+ /// callers from the old function to the new.
+ void spliceFunction(const Function *From, const Function *To);
+
+ /// \brief Add a function to the call graph, and link the node to all of the
+ /// functions that it calls.
+ void addToCallGraph(Function *F);
+
+public:
+ explicit CallGraph(Module &M);
+ CallGraph(CallGraph &&Arg);
+ ~CallGraph();
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ typedef FunctionMapTy::iterator iterator;
+ typedef FunctionMapTy::const_iterator const_iterator;
+
+ /// \brief Returns the module the call graph corresponds to.
+ Module &getModule() const { return M; }
+
+ inline iterator begin() { return FunctionMap.begin(); }
+ inline iterator end() { return FunctionMap.end(); }
+ inline const_iterator begin() const { return FunctionMap.begin(); }
+ inline const_iterator end() const { return FunctionMap.end(); }
+
+ /// \brief Returns the call graph node for the provided function.
+ inline const CallGraphNode *operator[](const Function *F) const {
+ const_iterator I = FunctionMap.find(F);
+ assert(I != FunctionMap.end() && "Function not in callgraph!");
+ return I->second.get();
+ }
+
+ /// \brief Returns the call graph node for the provided function.
+ inline CallGraphNode *operator[](const Function *F) {
+ const_iterator I = FunctionMap.find(F);
+ assert(I != FunctionMap.end() && "Function not in callgraph!");
+ return I->second.get();
+ }
+
+ /// \brief Returns the \c CallGraphNode which is used to represent
+ /// undetermined calls into the callgraph.
+ CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
+
+ CallGraphNode *getCallsExternalNode() const {
+ return CallsExternalNode.get();
+ }
+
+ //===---------------------------------------------------------------------
+ // Functions to keep a call graph up to date with a function that has been
+ // modified.
+ //
+
+ /// \brief Unlink the function from this module, returning it.
+ ///
+ /// Because this removes the function from the module, the call graph node is
+ /// destroyed. This is only valid if the function does not call any other
+ /// functions (i.e., there are no edges in its CGN). The easiest way to do
+ /// this is to dropAllReferences before calling this.
+ Function *removeFunctionFromModule(CallGraphNode *CGN);
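+
+  // A sketch of the intended deletion sequence for a dead function 'DeadF'
+  // ('CG' is this CallGraph; names are illustrative):
+  //
+  //   CallGraphNode *CGN = CG[DeadF];
+  //   CGN->removeAllCalledFunctions(); // drop outgoing edges first
+  //   DeadF->dropAllReferences();
+  //   delete CG.removeFunctionFromModule(CGN);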
+
+ /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+ /// \c F if one does not already exist.
+ CallGraphNode *getOrInsertFunction(const Function *F);
+};
+
+/// \brief A node in the call graph for a module.
+///
+/// Typically represents a function in the call graph. There are also special
+/// "null" nodes used to represent theoretical entries in the call graph.
+class CallGraphNode {
+public:
+ /// \brief A pair of the calling instruction (a call or invoke)
+ /// and the call graph node being called.
+ typedef std::pair<WeakVH, CallGraphNode *> CallRecord;
+
+public:
+ typedef std::vector<CallRecord> CalledFunctionsVector;
+
+ /// \brief Creates a node for the specified function.
+ inline CallGraphNode(Function *F) : F(F), NumReferences(0) {}
+
+ ~CallGraphNode() {
+ assert(NumReferences == 0 && "Node deleted while references remain");
+ }
+
+ typedef std::vector<CallRecord>::iterator iterator;
+ typedef std::vector<CallRecord>::const_iterator const_iterator;
+
+ /// \brief Returns the function that this call graph node represents.
+ Function *getFunction() const { return F; }
+
+ inline iterator begin() { return CalledFunctions.begin(); }
+ inline iterator end() { return CalledFunctions.end(); }
+ inline const_iterator begin() const { return CalledFunctions.begin(); }
+ inline const_iterator end() const { return CalledFunctions.end(); }
+ inline bool empty() const { return CalledFunctions.empty(); }
+ inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
+
+ /// \brief Returns the number of other CallGraphNodes in this CallGraph that
+ /// reference this node in their callee list.
+ unsigned getNumReferences() const { return NumReferences; }
+
+ /// \brief Returns the i'th called function.
+ CallGraphNode *operator[](unsigned i) const {
+ assert(i < CalledFunctions.size() && "Invalid index");
+ return CalledFunctions[i].second;
+ }
+
+ /// \brief Print out this call graph node.
+ void dump() const;
+ void print(raw_ostream &OS) const;
+
+ //===---------------------------------------------------------------------
+ // Methods to keep a call graph up to date with a function that has been
+ // modified
+ //
+
+ /// \brief Removes all edges from this CallGraphNode to any functions it
+ /// calls.
+ void removeAllCalledFunctions() {
+ while (!CalledFunctions.empty()) {
+ CalledFunctions.back().second->DropRef();
+ CalledFunctions.pop_back();
+ }
+ }
+
+ /// \brief Moves all the callee information from N to this node.
+ void stealCalledFunctionsFrom(CallGraphNode *N) {
+ assert(CalledFunctions.empty() &&
+ "Cannot steal callsite information if I already have some");
+ std::swap(CalledFunctions, N->CalledFunctions);
+ }
+
+ /// \brief Adds a function to the list of functions called by this one.
+ void addCalledFunction(CallSite CS, CallGraphNode *M) {
+ assert(!CS.getInstruction() || !CS.getCalledFunction() ||
+ !CS.getCalledFunction()->isIntrinsic() ||
+ !Intrinsic::isLeaf(CS.getCalledFunction()->getIntrinsicID()));
+ CalledFunctions.emplace_back(CS.getInstruction(), M);
+ M->AddRef();
+ }
+
+ void removeCallEdge(iterator I) {
+ I->second->DropRef();
+ *I = CalledFunctions.back();
+ CalledFunctions.pop_back();
+ }
+
+ /// \brief Removes the edge in the node for the specified call site.
+ ///
+ /// Note that this method takes linear time, so it should be used sparingly.
+ void removeCallEdgeFor(CallSite CS);
+
+ /// \brief Removes all call edges from this node to the specified callee
+ /// function.
+ ///
+ /// This takes more time to execute than removeCallEdgeFor, so it should not
+ /// be used unless necessary.
+ void removeAnyCallEdgeTo(CallGraphNode *Callee);
+
+ /// \brief Removes one edge associated with a null callsite from this node to
+ /// the specified callee function.
+ void removeOneAbstractEdgeTo(CallGraphNode *Callee);
+
+ /// \brief Replaces the edge in the node for the specified call site with a
+ /// new one.
+ ///
+ /// Note that this method takes linear time, so it should be used sparingly.
+ void replaceCallEdge(CallSite CS, CallSite NewCS, CallGraphNode *NewNode);
+
+private:
+ friend class CallGraph;
+
+ AssertingVH<Function> F;
+
+ std::vector<CallRecord> CalledFunctions;
+
+ /// \brief The number of times that this CallGraphNode occurs in the
+ /// CalledFunctions array of this or other CallGraphNodes.
+ unsigned NumReferences;
+
+ CallGraphNode(const CallGraphNode &) = delete;
+ void operator=(const CallGraphNode &) = delete;
+
+ void DropRef() { --NumReferences; }
+ void AddRef() { ++NumReferences; }
+
+ /// \brief A special function that should only be used by the CallGraph class.
+ void allReferencesDropped() { NumReferences = 0; }
+};
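
A minimal sketch (not part of the header above) of how the CallRecord pairs are typically walked; the helper name printCallees is ours, and the output stream is assumed to be supplied by the caller:

    #include "llvm/Analysis/CallGraph.h"
    #include "llvm/Support/raw_ostream.h"

    static void printCallees(const llvm::CallGraphNode &CGN,
                             llvm::raw_ostream &OS) {
      for (const auto &Record : CGN) {               // Record is a CallRecord
        llvm::CallGraphNode *Callee = Record.second; // the node being called
        if (llvm::Function *F = Callee->getFunction())
          OS << "  calls " << F->getName() << "\n";
        else
          OS << "  calls <external node>\n";         // a special "null" node
      }
    }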
+
+/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
+///
+/// This class implements the concept of an analysis pass used by the \c
+/// ModuleAnalysisManager to run an analysis over a module and cache the
+/// resulting data.
+class CallGraphAnalysis {
+public:
+ /// \brief A formulaic typedef to inform clients of the result type.
+ typedef CallGraph Result;
+
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Compute the \c CallGraph for the module \c M.
+ ///
+ /// The real work here is done in the \c CallGraph constructor.
+ CallGraph run(Module *M) { return CallGraph(*M); }
+
+private:
+ static char PassID;
+};
+
+/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
+/// build it.
+///
+/// This class exposes both the interface to the call graph container and the
+/// module pass which runs over a module of IR and produces the call graph. The
+/// call graph interface is entirely a wrapper around a \c CallGraph object
+/// which is stored internally for each module.
+class CallGraphWrapperPass : public ModulePass {
+ std::unique_ptr<CallGraph> G;
+
+public:
+ static char ID; // Class identification, replacement for typeinfo
+
+ CallGraphWrapperPass();
+ ~CallGraphWrapperPass() override;
+
+ /// \brief The internal \c CallGraph around which the rest of this interface
+ /// is wrapped.
+ const CallGraph &getCallGraph() const { return *G; }
+ CallGraph &getCallGraph() { return *G; }
+
+ typedef CallGraph::iterator iterator;
+ typedef CallGraph::const_iterator const_iterator;
+
+ /// \brief Returns the module the call graph corresponds to.
+ Module &getModule() const { return G->getModule(); }
+
+ inline iterator begin() { return G->begin(); }
+ inline iterator end() { return G->end(); }
+ inline const_iterator begin() const { return G->begin(); }
+ inline const_iterator end() const { return G->end(); }
+
+ /// \brief Returns the call graph node for the provided function.
+ inline const CallGraphNode *operator[](const Function *F) const {
+ return (*G)[F];
+ }
+
+ /// \brief Returns the call graph node for the provided function.
+ inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
+
+ /// \brief Returns the \c CallGraphNode which is used to represent
+ /// undetermined calls into the callgraph.
+ CallGraphNode *getExternalCallingNode() const {
+ return G->getExternalCallingNode();
+ }
+
+ CallGraphNode *getCallsExternalNode() const {
+ return G->getCallsExternalNode();
+ }
+
+ //===---------------------------------------------------------------------
+ // Functions to keep a call graph up to date with a function that has been
+ // modified.
+ //
+
+ /// \brief Unlink the function from this module, returning it.
+ ///
+ /// Because this removes the function from the module, the call graph node is
+ /// destroyed. This is only valid if the function does not call any other
+ /// functions (i.e., there are no edges in its CGN). The easiest way to do
+ /// this is to dropAllReferences before calling this.
+ Function *removeFunctionFromModule(CallGraphNode *CGN) {
+ return G->removeFunctionFromModule(CGN);
+ }
+
+ /// \brief Similar to operator[], but this will insert a new CallGraphNode for
+ /// \c F if one does not already exist.
+ CallGraphNode *getOrInsertFunction(const Function *F) {
+ return G->getOrInsertFunction(F);
+ }
+
+ //===---------------------------------------------------------------------
+ // Implementation of the ModulePass interface needed here.
+ //
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnModule(Module &M) override;
+ void releaseMemory() override;
+
+ void print(raw_ostream &o, const Module *) const override;
+ void dump() const;
+};
+
+//===----------------------------------------------------------------------===//
+// GraphTraits specializations for call graphs so that they can be treated as
+// graphs by the generic graph algorithms.
+//
+
+// Provide graph traits for traversing call graphs using standard graph
+// traversals.
+template <> struct GraphTraits<CallGraphNode *> {
+ typedef CallGraphNode NodeType;
+
+ typedef CallGraphNode::CallRecord CGNPairTy;
+ typedef std::pointer_to_unary_function<CGNPairTy, CallGraphNode *>
+ CGNDerefFun;
+
+ static NodeType *getEntryNode(CallGraphNode *CGN) { return CGN; }
+
+ typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType;
+
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ }
+
+ static CallGraphNode *CGNDeref(CGNPairTy P) { return P.second; }
+};
+
+template <> struct GraphTraits<const CallGraphNode *> {
+ typedef const CallGraphNode NodeType;
+
+ typedef CallGraphNode::CallRecord CGNPairTy;
+ typedef std::pointer_to_unary_function<CGNPairTy, const CallGraphNode *>
+ CGNDerefFun;
+
+ static NodeType *getEntryNode(const CallGraphNode *CGN) { return CGN; }
+
+ typedef mapped_iterator<NodeType::const_iterator, CGNDerefFun>
+ ChildIteratorType;
+
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ }
+
+ static const CallGraphNode *CGNDeref(CGNPairTy P) { return P.second; }
+};
+
+template <>
+struct GraphTraits<CallGraph *> : public GraphTraits<CallGraphNode *> {
+ static NodeType *getEntryNode(CallGraph *CGN) {
+ return CGN->getExternalCallingNode(); // Start at the external node!
+ }
+ typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
+ PairTy;
+ typedef std::pointer_to_unary_function<const PairTy &, CallGraphNode &>
+ DerefFun;
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<CallGraph::iterator, DerefFun> nodes_iterator;
+ static nodes_iterator nodes_begin(CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end(CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+
+ static CallGraphNode &CGdereference(const PairTy &P) { return *P.second; }
+};
+
+template <>
+struct GraphTraits<const CallGraph *> : public GraphTraits<
+ const CallGraphNode *> {
+ static NodeType *getEntryNode(const CallGraph *CGN) {
+ return CGN->getExternalCallingNode(); // Start at the external node!
+ }
+ typedef std::pair<const Function *const, std::unique_ptr<CallGraphNode>>
+ PairTy;
+ typedef std::pointer_to_unary_function<const PairTy &, const CallGraphNode &>
+ DerefFun;
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<CallGraph::const_iterator, DerefFun> nodes_iterator;
+ static nodes_iterator nodes_begin(const CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end(const CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+
+ static const CallGraphNode &CGdereference(const PairTy &P) {
+ return *P.second;
+ }
+};
+
+} // End llvm namespace
+
+#endif
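
The GraphTraits specializations above are what let the generic graph algorithms walk the call graph; for instance, scc_iterator (from llvm/ADT/SCCIterator.h) can visit strongly connected components bottom-up. A minimal sketch, assuming a CallGraph built elsewhere:

    #include "llvm/ADT/SCCIterator.h"
    #include "llvm/Analysis/CallGraph.h"

    static void visitSCCsBottomUp(llvm::CallGraph &CG) {
      // scc_begin starts from getEntryNode() (the external calling node) and
      // yields SCCs in post-order, i.e. callees before their callers.
      for (auto I = llvm::scc_begin(&CG); !I.isAtEnd(); ++I)
        for (llvm::CallGraphNode *Node : *I)
          if (llvm::Function *F = Node->getFunction())
            (void)F; // process each function of the SCC here
    }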
diff --git a/contrib/llvm/include/llvm/Analysis/CallGraphSCCPass.h b/contrib/llvm/include/llvm/Analysis/CallGraphSCCPass.h
new file mode 100644
index 0000000..9c7f7bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CallGraphSCCPass.h
@@ -0,0 +1,108 @@
+//===- CallGraphSCCPass.h - Pass that operates BU on call graph -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallGraphSCCPass class, which is used for passes that
+// are implemented as bottom-up traversals on the call graph. Because there may
+// be cycles in the call graph, passes of this type operate on the call-graph in
+// SCC order: that is, they process functions bottom-up, except for recursive
+// functions, which they process all at once.
+//
+// These passes are inherently interprocedural, and are required to keep the
+// call graph up-to-date if they do anything which could modify it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
+#define LLVM_ANALYSIS_CALLGRAPHSCCPASS_H
+
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class CallGraphNode;
+class CallGraph;
+class PMStack;
+class CallGraphSCC;
+
+class CallGraphSCCPass : public Pass {
+public:
+ explicit CallGraphSCCPass(char &pid) : Pass(PT_CallGraphSCC, pid) {}
+
+ /// createPrinterPass - Get a pass that prints the Module
+ /// corresponding to a CallGraph.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// doInitialization - This method is called before the SCCs of the program
+ /// have been processed, allowing the pass to do initialization as necessary.
+ virtual bool doInitialization(CallGraph &CG) {
+ return false;
+ }
+
+ /// runOnSCC - This method should be implemented by the subclass to perform
+ /// whatever action is necessary for the specified SCC. Note that
+ /// non-recursive (or only self-recursive) functions will have an SCC size of
+ /// 1, while recursive portions of the call graph will have SCC size > 1.
+ ///
+ /// SCC passes that add or delete functions to the SCC are required to update
+ /// the SCC list, otherwise stale pointers may be dereferenced.
+ ///
+ virtual bool runOnSCC(CallGraphSCC &SCC) = 0;
+
+ /// doFinalization - This method is called after the SCCs of the program have
+ /// been processed, allowing the pass to do final cleanup as necessary.
+ virtual bool doFinalization(CallGraph &CG) {
+ return false;
+ }
+
+ /// Assign pass manager to manage this pass
+ void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
+
+ /// Return what kind of Pass Manager can manage this pass.
+ PassManagerType getPotentialPassManagerType() const override {
+ return PMT_CallGraphPassManager;
+ }
+
+ /// getAnalysisUsage - For this class, we declare that we require and preserve
+ /// the call graph. If the derived class implements this method, it should
+ /// always explicitly call the implementation here.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+};
+
+/// CallGraphSCC - This is a single SCC that a CallGraphSCCPass is run on.
+class CallGraphSCC {
+ void *Context; // The CGPassManager object that is vending this.
+ std::vector<CallGraphNode*> Nodes;
+
+public:
+ CallGraphSCC(void *context) : Context(context) {}
+
+ void initialize(CallGraphNode *const *I, CallGraphNode *const *E) {
+ Nodes.assign(I, E);
+ }
+
+ bool isSingular() const { return Nodes.size() == 1; }
+ unsigned size() const { return Nodes.size(); }
+
+ /// ReplaceNode - This informs the SCC and the pass manager that the specified
+ /// Old node has been deleted, and New is to be used in its place.
+ void ReplaceNode(CallGraphNode *Old, CallGraphNode *New);
+
+ typedef std::vector<CallGraphNode *>::const_iterator iterator;
+ iterator begin() const { return Nodes.begin(); }
+ iterator end() const { return Nodes.end(); }
+};
+
+} // End llvm namespace
+
+#endif
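
A minimal sketch (not part of the header above) of a CallGraphSCCPass subclass; the pass itself is hypothetical and only counts SCCs, with the usual legacy-pass ID boilerplate:

    #include "llvm/Analysis/CallGraphSCCPass.h"

    namespace {
    struct SCCCounter : public llvm::CallGraphSCCPass {
      static char ID; // pass identification (hypothetical pass)
      unsigned NumSCCs;
      SCCCounter() : CallGraphSCCPass(ID), NumSCCs(0) {}

      // Called once per SCC, bottom-up; returning false signals that the
      // call graph was not modified.
      bool runOnSCC(llvm::CallGraphSCC &SCC) override {
        ++NumSCCs;
        return false;
      }
    };
    char SCCCounter::ID = 0;
    } // end anonymous namespace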
diff --git a/contrib/llvm/include/llvm/Analysis/CallPrinter.h b/contrib/llvm/include/llvm/Analysis/CallPrinter.h
new file mode 100644
index 0000000..5f5d160
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CallPrinter.h
@@ -0,0 +1,27 @@
+//===-- CallPrinter.h - Call graph printer external interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the call graph printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CALLPRINTER_H
+#define LLVM_ANALYSIS_CALLPRINTER_H
+
+namespace llvm {
+
+ class ModulePass;
+
+ ModulePass *createCallGraphViewerPass();
+ ModulePass *createCallGraphPrinterPass();
+
+} // end namespace llvm
+
+#endif
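
A minimal sketch of wiring one of these factory functions into a legacy pass manager; the Module is assumed to have been loaded elsewhere:

    #include "llvm/Analysis/CallPrinter.h"
    #include "llvm/IR/LegacyPassManager.h"

    static void printCallGraphDot(llvm::Module &M) {
      llvm::legacy::PassManager PM;
      PM.add(llvm::createCallGraphPrinterPass()); // emits a DOT file of the graph
      PM.run(M);
    }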
diff --git a/contrib/llvm/include/llvm/Analysis/CaptureTracking.h b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
new file mode 100644
index 0000000..8d2c095
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CaptureTracking.h
@@ -0,0 +1,81 @@
+//===----- llvm/Analysis/CaptureTracking.h - Pointer capture ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains routines that help determine which pointers are captured.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CAPTURETRACKING_H
+#define LLVM_ANALYSIS_CAPTURETRACKING_H
+
+namespace llvm {
+
+ class Value;
+ class Use;
+ class Instruction;
+ class DominatorTree;
+ class OrderedBasicBlock;
+
+ /// PointerMayBeCaptured - Return true if this pointer value may be captured
+ /// by the enclosing function (which is required to exist). This routine can
+ /// be expensive, so consider caching the results. The boolean ReturnCaptures
+ /// specifies whether returning the value (or part of it) from the function
+ /// counts as capturing it or not. The boolean StoreCaptures specifies
+ /// whether storing the value (or part of it) into memory anywhere
+ /// automatically counts as capturing it or not.
+ bool PointerMayBeCaptured(const Value *V,
+ bool ReturnCaptures,
+ bool StoreCaptures);
+
+ /// PointerMayBeCapturedBefore - Return true if this pointer value may be
+ /// captured by the enclosing function (which is required to exist). If a
+ /// DominatorTree is provided, only captures which happen before the given
+ /// instruction are considered. This routine can be expensive, so consider
+ /// caching the results. The boolean ReturnCaptures specifies whether
+ /// returning the value (or part of it) from the function counts as capturing
+ /// it or not. The boolean StoreCaptures specifies whether storing the value
+ /// (or part of it) into memory anywhere automatically counts as capturing it
+ /// or not. Captures by the provided instruction are considered if the
+ /// final parameter is true. An ordered basic block \p OBB can be used
+ /// to speed up capture-tracker queries.
+ bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
+ bool StoreCaptures, const Instruction *I,
+ DominatorTree *DT, bool IncludeI = false,
+ OrderedBasicBlock *OBB = nullptr);
+
+ /// This callback is used in conjunction with PointerMayBeCaptured. In
+ /// addition to the interface here, you'll need to provide your own getters
+ /// to see whether anything was captured.
+ struct CaptureTracker {
+ virtual ~CaptureTracker();
+
+ /// tooManyUses - The depth of traversal has breached a limit. There may be
+ /// capturing instructions that will not be passed into captured().
+ virtual void tooManyUses() = 0;
+
+ /// shouldExplore - This is the use of a value derived from the pointer.
+ /// To prune the search (i.e., assume that none of its users could possibly
+ /// capture) return false. To search it, return true.
+ ///
+ /// U->getUser() is always an Instruction.
+ virtual bool shouldExplore(const Use *U);
+
+ /// captured - Information about the pointer was captured by the user of
+ /// use U. Return true to stop the traversal or false to continue looking
+ /// for more capturing instructions.
+ virtual bool captured(const Use *U) = 0;
+ };
+
+ /// PointerMayBeCaptured - Visit the value and the values derived from it and
+ /// find values which appear to be capturing the pointer value. This feeds
+ /// results into and is controlled by the CaptureTracker object.
+ void PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker);
+} // end namespace llvm
+
+#endif
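
A minimal sketch (not part of the header above) of a CaptureTracker implementation; the Captured flag is exactly the kind of getter the interface comment says you provide yourself:

    #include "llvm/Analysis/CaptureTracking.h"

    struct SimpleCaptureTracker : public llvm::CaptureTracker {
      bool Captured = false;

      // Be conservative if the traversal gives up early.
      void tooManyUses() override { Captured = true; }

      // Record the capture and stop the traversal (returning true stops it).
      bool captured(const llvm::Use *U) override {
        Captured = true;
        return true;
      }
    };

    // Usage sketch:
    //   SimpleCaptureTracker CT;
    //   llvm::PointerMayBeCaptured(V, &CT);
    //   if (CT.Captured) { /* the pointer may escape */ }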
diff --git a/contrib/llvm/include/llvm/Analysis/CodeMetrics.h b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
new file mode 100644
index 0000000..2f59691
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/CodeMetrics.h
@@ -0,0 +1,107 @@
+//===- CodeMetrics.h - Code cost measurements -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various weight measurements for code, helping
+// the Inliner and other passes decide whether to duplicate its contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CODEMETRICS_H
+#define LLVM_ANALYSIS_CODEMETRICS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/CallSite.h"
+
+namespace llvm {
+class AssumptionCache;
+class BasicBlock;
+class Loop;
+class Function;
+class Instruction;
+class DataLayout;
+class TargetTransformInfo;
+class Value;
+
+/// \brief Check whether a call will lower to something small.
+///
+/// This test checks whether this callsite will lower to something
+/// significantly cheaper than a traditional call, often a single
+/// instruction. Note that if isInstructionFree(CS.getInstruction()) returns
+/// true, so will this function.
+bool callIsSmall(ImmutableCallSite CS);
+
+/// \brief Utility to calculate the size and a few similar metrics for a set
+/// of basic blocks.
+struct CodeMetrics {
+ /// \brief True if this function contains a call to setjmp or other functions
+ /// with attribute "returns twice" without having the attribute itself.
+ bool exposesReturnsTwice;
+
+ /// \brief True if this function calls itself.
+ bool isRecursive;
+
+ /// \brief True if this function cannot be duplicated.
+ ///
+ /// True if this function contains one or more indirect branches, or it
+ /// contains one or more 'noduplicate' instructions.
+ bool notDuplicatable;
+
+ /// \brief True if this function calls alloca (in the C sense).
+ bool usesDynamicAlloca;
+
+ /// \brief Number of instructions in the analyzed blocks.
+ unsigned NumInsts;
+
+ /// \brief Number of analyzed blocks.
+ unsigned NumBlocks;
+
+ /// \brief Keeps track of basic block code size estimates.
+ DenseMap<const BasicBlock *, unsigned> NumBBInsts;
+
+ /// \brief Keep track of the number of calls to 'big' functions.
+ unsigned NumCalls;
+
+ /// \brief The number of calls to internal functions with a single caller.
+ ///
+ /// These are likely targets for future inlining, likely exposed by
+ /// interleaved devirtualization.
+ unsigned NumInlineCandidates;
+
+ /// \brief How many instructions produce vector values.
+ ///
+ /// The inliner is more aggressive with inlining vector kernels.
+ unsigned NumVectorInsts;
+
+ /// \brief How many 'ret' instructions the blocks contain.
+ unsigned NumRets;
+
+ CodeMetrics()
+ : exposesReturnsTwice(false), isRecursive(false), notDuplicatable(false),
+ usesDynamicAlloca(false), NumInsts(0), NumBlocks(0), NumCalls(0),
+ NumInlineCandidates(0), NumVectorInsts(0), NumRets(0) {}
+
+ /// \brief Add information about a block to the current state.
+ void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
+ SmallPtrSetImpl<const Value*> &EphValues);
+
+ /// \brief Collect a loop's ephemeral values (those used only by an assume
+ /// or similar intrinsics in the loop).
+ static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
+ SmallPtrSetImpl<const Value *> &EphValues);
+
+ /// \brief Collect a function's ephemeral values (those used only by an
+ /// assume or similar intrinsics in the function).
+ static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
+ SmallPtrSetImpl<const Value *> &EphValues);
+};
+
+}
+
+#endif
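
A minimal sketch of the intended use: accumulate the per-block metrics over a whole function. The TargetTransformInfo is assumed to come from the usual analysis, and the ephemeral-value set is left empty here rather than populated via collectEphemeralValues:

    #include "llvm/Analysis/CodeMetrics.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Function.h"

    static unsigned estimateFunctionSize(const llvm::Function &F,
                                         const llvm::TargetTransformInfo &TTI) {
      llvm::SmallPtrSet<const llvm::Value *, 32> EphValues; // none collected here
      llvm::CodeMetrics Metrics;
      for (const llvm::BasicBlock &BB : F)
        Metrics.analyzeBasicBlock(&BB, TTI, EphValues);
      return Metrics.NumInsts; // or inspect NumCalls, notDuplicatable, etc.
    }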
diff --git a/contrib/llvm/include/llvm/Analysis/ConstantFolding.h b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
new file mode 100644
index 0000000..e8185b3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -0,0 +1,113 @@
+//===-- ConstantFolding.h - Fold instructions into constants ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares routines for folding instructions into constants when all
+// operands are constants, for example "sub i32 1, 0" -> "1".
+//
+// Also, to supplement the basic VMCore ConstantExpr simplifications,
+// this file declares some additional folding routines that can make use of
+// DataLayout information. These functions cannot go in VMCore due to library
+// dependency issues.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
+#define LLVM_ANALYSIS_CONSTANTFOLDING_H
+
+namespace llvm {
+ class Constant;
+ class ConstantExpr;
+ class Instruction;
+ class DataLayout;
+ class TargetLibraryInfo;
+ class Function;
+ class Type;
+ template<typename T>
+ class ArrayRef;
+
+/// ConstantFoldInstruction - Try to constant fold the specified instruction.
+/// If successful, the constant result is returned, if not, null is returned.
+/// Note that this fails if not all of the operands are constant. Otherwise,
+/// this function can only fail when attempting to fold instructions like loads
+/// and stores, which have no constant expression form.
+ Constant *ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldConstantExpression - Attempt to fold the constant expression
+/// using the specified DataLayout. If successful, the constant result is
+/// returned; if not, null is returned.
+ Constant *
+ ConstantFoldConstantExpression(const ConstantExpr *CE, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
+/// specified operands. If successful, the constant result is returned, if not,
+/// null is returned. Note that this function can fail when attempting to
+/// fold instructions like loads and stores, which have no constant expression
+/// form.
+///
+ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
+ ArrayRef<Constant *> Ops,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
+/// instruction (icmp/fcmp) with the specified operands. If it fails, it
+/// returns a constant expression of the specified operands.
+///
+ Constant *
+ ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
+ Constant *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
+/// instruction with the specified operands and indices. The constant result is
+/// returned if successful; if not, null is returned.
+Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> Idxs);
+
+/// \brief Attempt to constant fold an extractvalue instruction with the
+/// specified operands and indices. The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
+ ArrayRef<unsigned> Idxs);
+
+/// \brief Attempt to constant fold an extractelement instruction with the
+/// specified operands and indices. The constant result is returned if
+/// successful; if not, null is returned.
+Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
+
+/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
+/// produce if it is constant and determinable. If this is not determinable,
+/// return null.
+Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout &DL);
+
+/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
+/// getelementptr constantexpr, return the constant value being addressed by the
+/// constant expression, or null if something is funny and we can't decide.
+Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE);
+
+/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
+/// indices (with an *implied* zero pointer index that is not in the list),
+/// return the constant value being addressed by a virtual load, or null if
+/// something is funny and we can't decide.
+Constant *ConstantFoldLoadThroughGEPIndices(Constant *C,
+ ArrayRef<Constant*> Indices);
+
+/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
+/// the specified function.
+bool canConstantFoldCallTo(const Function *F);
+
+/// ConstantFoldCall - Attempt to constant fold a call to the specified function
+/// with the specified arguments, returning null if unsuccessful.
+Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
+ const TargetLibraryInfo *TLI = nullptr);
+}
+
+#endif
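
A minimal sketch of the classic fold-and-replace loop built on ConstantFoldInstruction; worklist management and revisiting of newly foldable users are omitted:

    #include "llvm/Analysis/ConstantFolding.h"
    #include "llvm/IR/Constant.h"
    #include "llvm/IR/Function.h"

    static bool foldConstantsIn(llvm::Function &F, const llvm::DataLayout &DL) {
      bool Changed = false;
      for (llvm::BasicBlock &BB : F)
        for (auto It = BB.begin(); It != BB.end();) {
          llvm::Instruction *I = &*It++; // advance first; I may be erased below
          if (llvm::Constant *C = llvm::ConstantFoldInstruction(I, DL)) {
            I->replaceAllUsesWith(C);    // fold succeeded; use the constant
            I->eraseFromParent();
            Changed = true;
          }
        }
      return Changed;
    }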
diff --git a/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
new file mode 100644
index 0000000..ca50ee2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DOTGraphTraitsPass.h
@@ -0,0 +1,189 @@
+//===-- DOTGraphTraitsPass.h - Print/View dotty graphs-----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Templates to create dotty viewer and printer passes for GraphTraits graphs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
+#define LLVM_ANALYSIS_DOTGRAPHTRAITSPASS_H
+
+#include "llvm/Analysis/CFGPrinter.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+
+/// \brief Default traits class for extracting a graph from an analysis pass.
+///
+/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
+template <typename AnalysisT, typename GraphT = AnalysisT *>
+struct DefaultAnalysisGraphTraits {
+ static GraphT getGraph(AnalysisT *A) { return A; }
+};
+
+template <
+ typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+ typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
+class DOTGraphTraitsViewer : public FunctionPass {
+public:
+ DOTGraphTraitsViewer(StringRef GraphName, char &ID)
+ : FunctionPass(ID), Name(GraphName) {}
+
+ /// @brief Return true if this function should be processed.
+ ///
+ /// An implementation of this class may override this function to indicate that
+ /// only certain functions should be viewed.
+ ///
+ /// @param Analysis The current analysis result for this function.
+ virtual bool processFunction(Function &F, AnalysisT &Analysis) {
+ return true;
+ }
+
+ bool runOnFunction(Function &F) override {
+ auto &Analysis = getAnalysis<AnalysisT>();
+
+ if (!processFunction(F, Analysis))
+ return false;
+
+ GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
+ std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
+ std::string Title = GraphName + " for '" + F.getName().str() + "' function";
+
+ ViewGraph(Graph, Name, IsSimple, Title);
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ AU.addRequired<AnalysisT>();
+ }
+
+private:
+ std::string Name;
+};
+
+template <
+ typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+ typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
+class DOTGraphTraitsPrinter : public FunctionPass {
+public:
+ DOTGraphTraitsPrinter(StringRef GraphName, char &ID)
+ : FunctionPass(ID), Name(GraphName) {}
+
+ /// @brief Return true if this function should be processed.
+ ///
+ /// An implementation of this class may override this function to indicate that
+ /// only certain functions should be printed.
+ ///
+ /// @param Analysis The current analysis result for this function.
+ virtual bool processFunction(Function &F, AnalysisT &Analysis) {
+ return true;
+ }
+
+ bool runOnFunction(Function &F) override {
+ auto &Analysis = getAnalysis<AnalysisT>();
+
+ if (!processFunction(F, Analysis))
+ return false;
+
+ GraphT Graph = AnalysisGraphTraitsT::getGraph(&Analysis);
+ std::string Filename = Name + "." + F.getName().str() + ".dot";
+ std::error_code EC;
+
+ errs() << "Writing '" << Filename << "'...";
+
+ raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+ std::string GraphName = DOTGraphTraits<GraphT>::getGraphName(Graph);
+ std::string Title = GraphName + " for '" + F.getName().str() + "' function";
+
+ if (!EC)
+ WriteGraph(File, Graph, IsSimple, Title);
+ else
+ errs() << " error opening file for writing!";
+ errs() << "\n";
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ AU.addRequired<AnalysisT>();
+ }
+
+private:
+ std::string Name;
+};
+
+template <
+ typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+ typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
+class DOTGraphTraitsModuleViewer : public ModulePass {
+public:
+ DOTGraphTraitsModuleViewer(StringRef GraphName, char &ID)
+ : ModulePass(ID), Name(GraphName) {}
+
+ bool runOnModule(Module &M) override {
+ GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
+ std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
+
+ ViewGraph(Graph, Name, IsSimple, Title);
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ AU.addRequired<AnalysisT>();
+ }
+
+private:
+ std::string Name;
+};
+
+template <
+ typename AnalysisT, bool IsSimple, typename GraphT = AnalysisT *,
+ typename AnalysisGraphTraitsT = DefaultAnalysisGraphTraits<AnalysisT> >
+class DOTGraphTraitsModulePrinter : public ModulePass {
+public:
+ DOTGraphTraitsModulePrinter(StringRef GraphName, char &ID)
+ : ModulePass(ID), Name(GraphName) {}
+
+ bool runOnModule(Module &M) override {
+ GraphT Graph = AnalysisGraphTraitsT::getGraph(&getAnalysis<AnalysisT>());
+ std::string Filename = Name + ".dot";
+ std::error_code EC;
+
+ errs() << "Writing '" << Filename << "'...";
+
+ raw_fd_ostream File(Filename, EC, sys::fs::F_Text);
+ std::string Title = DOTGraphTraits<GraphT>::getGraphName(Graph);
+
+ if (!EC)
+ WriteGraph(File, Graph, IsSimple, Title);
+ else
+ errs() << " error opening file for writing!";
+ errs() << "\n";
+
+ return false;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ AU.addRequired<AnalysisT>();
+ }
+
+private:
+ std::string Name;
+};
+
+} // end namespace llvm
+
+#endif
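
A minimal sketch of instantiating the printer template; MyAnalysis is a hypothetical FunctionPass assumed to already have GraphTraits and DOTGraphTraits specializations:

    #include "llvm/Analysis/DOTGraphTraitsPass.h"

    namespace {
    struct MyAnalysisDotPrinter
        : public llvm::DOTGraphTraitsPrinter<MyAnalysis, /*IsSimple=*/false> {
      static char ID; // pass identification (hypothetical pass)
      MyAnalysisDotPrinter()
          : llvm::DOTGraphTraitsPrinter<MyAnalysis, false>("myanalysis", ID) {}
    };
    char MyAnalysisDotPrinter::ID = 0;
    } // running this pass writes "myanalysis.<function>.dot" per function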
diff --git a/contrib/llvm/include/llvm/Analysis/DemandedBits.h b/contrib/llvm/include/llvm/Analysis/DemandedBits.h
new file mode 100644
index 0000000..42932bf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DemandedBits.h
@@ -0,0 +1,75 @@
+//===-- llvm/Analysis/DemandedBits.h - Determine demanded bits --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass implements a demanded bits analysis. A demanded bit is one that
+// contributes to a result; bits that are not demanded can be either zero or
+// one without affecting control or data flow. For example in this sequence:
+//
+// %1 = add i32 %x, %y
+// %2 = trunc i32 %1 to i16
+//
+// Only the lowest 16 bits of %1 are demanded; the rest are removed by the
+// trunc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DEMANDED_BITS_H
+#define LLVM_ANALYSIS_DEMANDED_BITS_H
+
+#include "llvm/Pass.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+namespace llvm {
+
+class FunctionPass;
+class Function;
+class Instruction;
+class DominatorTree;
+class AssumptionCache;
+
+struct DemandedBits : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ DemandedBits();
+
+ bool runOnFunction(Function& F) override;
+ void getAnalysisUsage(AnalysisUsage& AU) const override;
+ void print(raw_ostream &OS, const Module *M) const override;
+
+ /// Return the bits demanded from instruction I.
+ APInt getDemandedBits(Instruction *I);
+
+ /// Return true if, during analysis, I could not be reached.
+ bool isInstructionDead(Instruction *I);
+
+private:
+ void performAnalysis();
+ void determineLiveOperandBits(const Instruction *UserI,
+ const Instruction *I, unsigned OperandNo,
+ const APInt &AOut, APInt &AB,
+ APInt &KnownZero, APInt &KnownOne,
+ APInt &KnownZero2, APInt &KnownOne2);
+
+ AssumptionCache *AC;
+ DominatorTree *DT;
+ Function *F;
+ bool Analyzed;
+
+ // The set of visited instructions (non-integer-typed only).
+ SmallPtrSet<Instruction*, 128> Visited;
+ DenseMap<Instruction *, APInt> AliveBits;
+};
+
+/// Create a demanded bits analysis pass.
+FunctionPass *createDemandedBitsPass();
+
+} // End llvm namespace
+
+#endif
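
A minimal sketch of a client query, e.g. deciding whether an instruction's high bits are dead so it could be narrowed; DB is assumed to be the result of running this pass:

    #include "llvm/Analysis/DemandedBits.h"

    static bool onlyLowBitsDemanded(llvm::DemandedBits &DB,
                                    llvm::Instruction *I, unsigned LowBits) {
      llvm::APInt Demanded = DB.getDemandedBits(I);
      // True when no bit at or above position LowBits contributes to a result.
      return Demanded.getActiveBits() <= LowBits;
    }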
diff --git a/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
new file mode 100644
index 0000000..5290552
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DependenceAnalysis.h
@@ -0,0 +1,941 @@
+//===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DependenceAnalysis is an LLVM pass that analyses dependences between memory
+// accesses. Currently, it is an implementation of the approach described in
+//
+// Practical Dependence Testing
+// Goff, Kennedy, Tseng
+// PLDI 1991
+//
+// There's a single entry point that analyzes the dependence between a pair
+// of memory references in a function, returning either NULL, for no dependence,
+// or a more-or-less detailed description of the dependence between them.
+//
+// This pass exists to support the DependenceGraph pass. There are two separate
+// passes because there's a useful separation of concerns. A dependence exists
+// if two conditions are met:
+//
+// 1) Two instructions reference the same memory location, and
+// 2) There is a flow of control leading from one instruction to the other.
+//
+// DependenceAnalysis attacks the first condition; DependenceGraph will attack
+// the second (it's not yet ready).
+//
+// Please note that this is work in progress and the interface is subject to
+// change.
+//
+// Plausible changes:
+// Return a set of more precise dependences instead of just one dependence
+// summarizing all.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H
+
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+ class Loop;
+ class LoopInfo;
+ class ScalarEvolution;
+ class SCEV;
+ class SCEVConstant;
+ class raw_ostream;
+
+ /// Dependence - This class represents a dependence between two memory
+ /// references in a function. It contains minimal information and
+ /// is used in the very common situation where the compiler is unable to
+ /// determine anything beyond the existence of a dependence; that is, it
+ /// represents a confused dependence (see also FullDependence). In most
+ /// cases (for output, flow, and anti dependences), the dependence implies
+ /// an ordering, where the source must precede the destination; in contrast,
+ /// input dependences are unordered.
+ ///
+ /// When a dependence graph is built, each Dependence will be a member of
+ /// the set of predecessor edges for its destination instruction and a set
+ /// of successor edges for its source instruction. These sets are represented
+ /// as singly-linked lists, with the "next" fields stored in the dependence
+ /// itself.
+ class Dependence {
+ protected:
+ Dependence(const Dependence &) = default;
+
+ // FIXME: When we move to MSVC 2015 as the base compiler for Visual Studio
+ // support, uncomment this line to allow a defaulted move constructor for
+ // Dependence. Currently, FullDependence relies on the copy constructor, but
+ // that is acceptable given the triviality of the class.
+ // Dependence(Dependence &&) = default;
+
+ public:
+ Dependence(Instruction *Source,
+ Instruction *Destination) :
+ Src(Source),
+ Dst(Destination),
+ NextPredecessor(nullptr),
+ NextSuccessor(nullptr) {}
+ virtual ~Dependence() {}
+
+ /// Dependence::DVEntry - Each level in the distance/direction vector
+ /// has a direction (or perhaps a union of several directions), and
+ /// perhaps a distance.
+ struct DVEntry {
+ enum { NONE = 0,
+ LT = 1,
+ EQ = 2,
+ LE = 3,
+ GT = 4,
+ NE = 5,
+ GE = 6,
+ ALL = 7 };
+ unsigned char Direction : 3; // Init to ALL, then refine.
+ bool Scalar : 1; // Init to true.
+ bool PeelFirst : 1; // Peeling the first iteration will break dependence.
+ bool PeelLast : 1; // Peeling the last iteration will break the dependence.
+ bool Splitable : 1; // Splitting the loop will break dependence.
+ const SCEV *Distance; // NULL implies no distance available.
+ DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
+ PeelLast(false), Splitable(false), Distance(nullptr) { }
+ };
+
+ /// getSrc - Returns the source instruction for this dependence.
+ ///
+ Instruction *getSrc() const { return Src; }
+
+ /// getDst - Returns the destination instruction for this dependence.
+ ///
+ Instruction *getDst() const { return Dst; }
+
+ /// isInput - Returns true if this is an input dependence.
+ ///
+ bool isInput() const;
+
+ /// isOutput - Returns true if this is an output dependence.
+ ///
+ bool isOutput() const;
+
+ /// isFlow - Returns true if this is a flow (aka true) dependence.
+ ///
+ bool isFlow() const;
+
+ /// isAnti - Returns true if this is an anti dependence.
+ ///
+ bool isAnti() const;
+
+ /// isOrdered - Returns true if dependence is Output, Flow, or Anti
+ ///
+ bool isOrdered() const { return isOutput() || isFlow() || isAnti(); }
+
+ /// isUnordered - Returns true if dependence is Input
+ ///
+ bool isUnordered() const { return isInput(); }
+
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ virtual bool isLoopIndependent() const { return true; }
+
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case
+ /// assumptions).
+ virtual bool isConfused() const { return true; }
+
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ virtual bool isConsistent() const { return false; }
+
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ virtual unsigned getLevels() const { return 0; }
+
+ /// getDirection - Returns the direction associated with a particular
+ /// level.
+ virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; }
+
+ /// getDistance - Returns the distance (or NULL) associated with a
+ /// particular level.
+ virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
+
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelFirst(unsigned Level) const { return false; }
+
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ virtual bool isPeelLast(unsigned Level) const { return false; }
+
+ /// isSplitable - Returns true if splitting this loop will break
+ /// the dependence.
+ virtual bool isSplitable(unsigned Level) const { return false; }
+
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mentions the induction
+ /// variable associated with the loop at this level.
+ virtual bool isScalar(unsigned Level) const;
+
+ /// getNextPredecessor - Returns the value of the NextPredecessor
+ /// field.
+ const Dependence *getNextPredecessor() const { return NextPredecessor; }
+
+ /// getNextSuccessor - Returns the value of the NextSuccessor
+ /// field.
+ const Dependence *getNextSuccessor() const { return NextSuccessor; }
+
+ /// setNextPredecessor - Sets the value of the NextPredecessor
+ /// field.
+ void setNextPredecessor(const Dependence *pred) { NextPredecessor = pred; }
+
+ /// setNextSuccessor - Sets the value of the NextSuccessor
+ /// field.
+ void setNextSuccessor(const Dependence *succ) { NextSuccessor = succ; }
+
+ /// dump - For debugging purposes, dumps a dependence to OS.
+ ///
+ void dump(raw_ostream &OS) const;
+
+ private:
+ Instruction *Src, *Dst;
+ const Dependence *NextPredecessor, *NextSuccessor;
+ friend class DependenceAnalysis;
+ };
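
A minimal sketch of how a client interprets a Dependence produced by the analysis entry point declared further down (DependenceAnalysis::depends); the two instructions are assumed to belong to the same function:

    #include "llvm/Analysis/DependenceAnalysis.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>

    static void describeDependence(llvm::DependenceAnalysis &DA,
                                   llvm::Instruction *Src,
                                   llvm::Instruction *Dst,
                                   llvm::raw_ostream &OS) {
      std::unique_ptr<llvm::Dependence> D =
          DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true);
      if (!D) {
        OS << "no dependence\n"; // NULL means independence was proven
        return;
      }
      if (D->isConfused())
        OS << "confused: worst-case assumptions apply\n";
      for (unsigned Level = 1; Level <= D->getLevels(); ++Level)
        if (D->getDirection(Level) & llvm::Dependence::DVEntry::LT)
          OS << "level " << Level << " may carry a '<' direction\n";
    }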
+
+ /// FullDependence - This class represents a dependence between two memory
+ /// references in a function. It contains detailed information about the
+ /// dependence (direction vectors, etc.) and is used when the compiler is
+ /// able to accurately analyze the interaction of the references; that is,
+ /// it is not a confused dependence (see Dependence). In most cases
+ /// (for output, flow, and anti dependences), the dependence implies an
+ /// ordering, where the source must precede the destination; in contrast,
+ /// input dependences are unordered.
+ class FullDependence final : public Dependence {
+ public:
+ FullDependence(Instruction *Src, Instruction *Dst, bool LoopIndependent,
+ unsigned Levels);
+
+ FullDependence(FullDependence &&RHS)
+ : Dependence(std::move(RHS)), Levels(RHS.Levels),
+ LoopIndependent(RHS.LoopIndependent), Consistent(RHS.Consistent),
+ DV(std::move(RHS.DV)) {}
+
+ /// isLoopIndependent - Returns true if this is a loop-independent
+ /// dependence.
+ bool isLoopIndependent() const override { return LoopIndependent; }
+
+ /// isConfused - Returns true if this dependence is confused
+ /// (the compiler understands nothing and makes worst-case
+ /// assumptions).
+ bool isConfused() const override { return false; }
+
+ /// isConsistent - Returns true if this dependence is consistent
+ /// (occurs every time the source and destination are executed).
+ bool isConsistent() const override { return Consistent; }
+
+ /// getLevels - Returns the number of common loops surrounding the
+ /// source and destination of the dependence.
+ unsigned getLevels() const override { return Levels; }
+
+ /// getDirection - Returns the direction associated with a particular
+ /// level.
+ unsigned getDirection(unsigned Level) const override;
+
+ /// getDistance - Returns the distance (or NULL) associated with a
+ /// particular level.
+ const SCEV *getDistance(unsigned Level) const override;
+
+ /// isPeelFirst - Returns true if peeling the first iteration from
+ /// this loop will break this dependence.
+ bool isPeelFirst(unsigned Level) const override;
+
+ /// isPeelLast - Returns true if peeling the last iteration from
+ /// this loop will break this dependence.
+ bool isPeelLast(unsigned Level) const override;
+
+ /// isSplitable - Returns true if splitting the loop will break
+ /// the dependence.
+ bool isSplitable(unsigned Level) const override;
+
+ /// isScalar - Returns true if a particular level is scalar; that is,
+ /// if no subscript in the source or destination mentions the induction
+ /// variable associated with the loop at this level.
+ bool isScalar(unsigned Level) const override;
+
+ private:
+ unsigned short Levels;
+ bool LoopIndependent;
+ bool Consistent; // Init to true, then refine.
+ std::unique_ptr<DVEntry[]> DV;
+ friend class DependenceAnalysis;
+ };
+
+ /// DependenceAnalysis - This class is the main dependence-analysis driver.
+ ///
+ class DependenceAnalysis : public FunctionPass {
+ void operator=(const DependenceAnalysis &) = delete;
+ DependenceAnalysis(const DependenceAnalysis &) = delete;
+
+ public:
+ /// depends - Tests for a dependence between the Src and Dst instructions.
+ /// Returns NULL if no dependence; otherwise, returns a Dependence (or a
+ /// FullDependence) with as much information as can be gleaned.
+ /// The flag PossiblyLoopIndependent should be set by the caller
+ /// if it appears that control flow can reach from Src to Dst
+ /// without traversing a loop back edge.
+ std::unique_ptr<Dependence> depends(Instruction *Src,
+ Instruction *Dst,
+ bool PossiblyLoopIndependent);
+
+ /// getSplitIteration - Given a dependence that's splittable at some
+ /// particular level, return the iteration that should be used to split
+ /// the loop.
+ ///
+ /// Generally, the dependence analyzer will be used to build
+ /// a dependence graph for a function (basically a map from instructions
+ /// to dependences). Looking for cycles in the graph shows us loops
+ /// that cannot be trivially vectorized/parallelized.
+ ///
+ /// We can try to improve the situation by examining all the dependences
+ /// that make up the cycle, looking for ones we can break.
+ /// Sometimes, peeling the first or last iteration of a loop will break
+ /// dependences, and there are flags for those possibilities.
+ /// Sometimes, splitting a loop at some other iteration will do the trick,
+ /// and we've got a flag for that case. Rather than waste the space to
+ /// record the exact iteration (since we rarely know), we provide
+ /// a method that calculates the iteration. It's a drag that it must work
+ /// from scratch, but wonderful in that it's possible.
+ ///
+ /// Here's an example:
+ ///
+ /// for (i = 0; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// There's a loop-carried flow dependence from the store to the load,
+ /// found by the weak-crossing SIV test. The dependence will have a flag,
+ /// indicating that the dependence can be broken by splitting the loop.
+ /// Calling getSplitIteration will return 5.
+ /// Splitting the loop breaks the dependence, like so:
+ ///
+ /// for (i = 0; i <= 5; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ /// for (i = 6; i < 10; i++)
+ /// A[i] = ...
+ /// ... = A[11 - i]
+ ///
+ /// breaks the dependence and allows us to vectorize/parallelize
+ /// both loops.
+ const SCEV *getSplitIteration(const Dependence &Dep, unsigned Level);
+
+ private:
+ AliasAnalysis *AA;
+ ScalarEvolution *SE;
+ LoopInfo *LI;
+ Function *F;
+
+ /// Subscript - This private struct represents a pair of subscripts from
+ /// a pair of potentially multi-dimensional array references. We use a
+ /// vector of them to guide subscript partitioning.
+ struct Subscript {
+ const SCEV *Src;
+ const SCEV *Dst;
+ enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification;
+ SmallBitVector Loops;
+ SmallBitVector GroupLoops;
+ SmallBitVector Group;
+ };
+
+ struct CoefficientInfo {
+ const SCEV *Coeff;
+ const SCEV *PosPart;
+ const SCEV *NegPart;
+ const SCEV *Iterations;
+ };
+
+ struct BoundInfo {
+ const SCEV *Iterations;
+ const SCEV *Upper[8];
+ const SCEV *Lower[8];
+ unsigned char Direction;
+ unsigned char DirSet;
+ };
+
+ /// Constraint - This private class represents a constraint, as defined
+ /// in the paper
+ ///
+ /// Practical Dependence Testing
+ /// Goff, Kennedy, Tseng
+ /// PLDI 1991
+ ///
+ /// There are 5 kinds of constraint, in a hierarchy.
+ /// 1) Any - indicates no constraint, any dependence is possible.
+ /// 2) Line - A line ax + by = c, where a, b, and c are parameters,
+ /// representing the dependence equation.
+ /// 3) Distance - The value d of the dependence distance;
+ /// 4) Point - A point <x, y> representing the dependence from
+ /// iteration x to iteration y.
+ /// 5) Empty - No dependence is possible.
+ class Constraint {
+ private:
+ enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind;
+ ScalarEvolution *SE;
+ const SCEV *A;
+ const SCEV *B;
+ const SCEV *C;
+ const Loop *AssociatedLoop;
+
+ public:
+ /// isEmpty - Return true if the constraint is of kind Empty.
+ bool isEmpty() const { return Kind == Empty; }
+
+ /// isPoint - Return true if the constraint is of kind Point.
+ bool isPoint() const { return Kind == Point; }
+
+ /// isDistance - Return true if the constraint is of kind Distance.
+ bool isDistance() const { return Kind == Distance; }
+
+ /// isLine - Return true if the constraint is of kind Line.
+ /// Since Distance's can also be represented as Lines, we also return
+ /// true if the constraint is of kind Distance.
+ bool isLine() const { return Kind == Line || Kind == Distance; }
+
+ /// isAny - Return true if the constraint is of kind Any.
+ bool isAny() const { return Kind == Any; }
+
+ /// getX - If constraint is a point <X, Y>, returns X.
+ /// Otherwise assert.
+ const SCEV *getX() const;
+
+ /// getY - If constraint is a point <X, Y>, returns Y.
+ /// Otherwise assert.
+ const SCEV *getY() const;
+
+ /// getA - If constraint is a line AX + BY = C, returns A.
+ /// Otherwise assert.
+ const SCEV *getA() const;
+
+ /// getB - If constraint is a line AX + BY = C, returns B.
+ /// Otherwise assert.
+ const SCEV *getB() const;
+
+ /// getC - If constraint is a line AX + BY = C, returns C.
+ /// Otherwise assert.
+ const SCEV *getC() const;
+
+ /// getD - If constraint is a distance, returns D.
+ /// Otherwise assert.
+ const SCEV *getD() const;
+
+ /// getAssociatedLoop - Returns the loop associated with this constraint.
+ const Loop *getAssociatedLoop() const;
+
+ /// setPoint - Change a constraint to Point.
+ void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop);
+
+ /// setLine - Change a constraint to Line.
+ void setLine(const SCEV *A, const SCEV *B,
+ const SCEV *C, const Loop *CurrentLoop);
+
+ /// setDistance - Change a constraint to Distance.
+ void setDistance(const SCEV *D, const Loop *CurrentLoop);
+
+ /// setEmpty - Change a constraint to Empty.
+ void setEmpty();
+
+ /// setAny - Change a constraint to Any.
+ void setAny(ScalarEvolution *SE);
+
+ /// dump - For debugging purposes. Dumps the constraint
+ /// out to OS.
+ void dump(raw_ostream &OS) const;
+ };
+
+ /// establishNestingLevels - Examines the loop nesting of the Src and Dst
+ /// instructions and establishes their shared loops. Sets the variables
+ /// CommonLevels, SrcLevels, and MaxLevels.
+ /// The source and destination instructions needn't be contained in the same
+ /// loop. The routine establishNestingLevels finds the level of the most deeply
+ /// nested loop that contains them both, CommonLevels. An instruction that's
+ /// not contained in a loop is at level = 0. MaxLevels is equal to the level
+ /// of the source plus the level of the destination, minus CommonLevels.
+ /// This lets us allocate vectors MaxLevels in length, with room for every
+ /// distinct loop referenced in both the source and destination subscripts.
+ /// The variable SrcLevels is the nesting depth of the source instruction.
+ /// It's used to help calculate distinct loops referenced by the destination.
+ /// Here's the map from loops to levels:
+ /// 0 - unused
+ /// 1 - outermost common loop
+ /// ... - other common loops
+ /// CommonLevels - innermost common loop
+ /// ... - loops containing Src but not Dst
+ /// SrcLevels - innermost loop containing Src but not Dst
+ /// ... - loops containing Dst but not Src
+ /// MaxLevels - innermost loop containing Dst but not Src
+ /// Consider the following code fragment:
+ /// for (a = ...) {
+ /// for (b = ...) {
+ /// for (c = ...) {
+ /// for (d = ...) {
+ /// A[] = ...;
+ /// }
+ /// }
+ /// for (e = ...) {
+ /// for (f = ...) {
+ /// for (g = ...) {
+ /// ... = A[];
+ /// }
+ /// }
+ /// }
+ /// }
+ /// }
+ /// If we're looking at the possibility of a dependence between the store
+ /// to A (the Src) and the load from A (the Dst), we'll note that they
+ /// have 2 loops in common, so CommonLevels will equal 2 and the direction
+ /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7.
+ /// A map from loop names to level indices would look like
+ /// a - 1
+ /// b - 2 = CommonLevels
+ /// c - 3
+ /// d - 4 = SrcLevels
+ /// e - 5
+ /// f - 6
+ /// g - 7 = MaxLevels
+ void establishNestingLevels(const Instruction *Src,
+ const Instruction *Dst);
+
+ unsigned CommonLevels, SrcLevels, MaxLevels;
+
+ /// mapSrcLoop - Given one of the loops containing the source, return
+ /// its level index in our numbering scheme.
+ unsigned mapSrcLoop(const Loop *SrcLoop) const;
+
+ /// mapDstLoop - Given one of the loops containing the destination,
+ /// return its level index in our numbering scheme.
+ unsigned mapDstLoop(const Loop *DstLoop) const;
+
+ /// isLoopInvariant - Returns true if Expression is loop invariant
+ /// in LoopNest.
+ bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const;
+
+ /// Makes sure all subscript pairs share the same integer type by
+ /// sign-extending as necessary.
+ /// Sign-extending a subscript is safe because getelementptr assumes the
+ /// array subscripts are signed.
+ void unifySubscriptType(ArrayRef<Subscript *> Pairs);
+
+ /// removeMatchingExtensions - Examines a subscript pair.
+ /// If the source and destination are identically sign (or zero)
+ /// extended, it strips off the extension in an effort to
+ /// simplify the actual analysis.
+ void removeMatchingExtensions(Subscript *Pair);
+
+ /// collectCommonLoops - Finds the set of loops from the LoopNest that
+ /// have a level <= CommonLevels and are referred to by the SCEV Expression.
+ void collectCommonLoops(const SCEV *Expression,
+ const Loop *LoopNest,
+ SmallBitVector &Loops) const;
+
+ /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's
+ /// linear. Collect the set of loops mentioned by Src.
+ bool checkSrcSubscript(const SCEV *Src,
+ const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's
+ /// linear. Collect the set of loops mentioned by Dst.
+ bool checkDstSubscript(const SCEV *Dst,
+ const Loop *LoopNest,
+ SmallBitVector &Loops);
+
+ /// isKnownPredicate - Compare X and Y using the predicate Pred.
+ /// Basically a wrapper for SCEV::isKnownPredicate,
+ /// but tries harder, especially in the presence of sign and zero
+ /// extensions and symbolics.
+ bool isKnownPredicate(ICmpInst::Predicate Pred,
+ const SCEV *X,
+ const SCEV *Y) const;
+
+ /// collectUpperBound - All subscripts are the same type (on my machine,
+ /// an i64). The loop bound may be a smaller type. collectUpperBound
+ /// finds the bound, if available, and zero extends it to the type T.
+ /// (I zero extend since the bound should always be >= 0.)
+ /// If no upper bound is available, return NULL.
+ const SCEV *collectUpperBound(const Loop *l, Type *T) const;
+
+ /// collectConstantUpperBound - Calls collectUpperBound(), then
+ /// attempts to cast it to SCEVConstant. If the cast fails,
+ /// returns NULL.
+ const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const;
+
+ /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs)
+ /// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear.
+ /// Collects the associated loops in a set.
+ Subscript::ClassificationKind classifyPair(const SCEV *Src,
+ const Loop *SrcLoopNest,
+ const SCEV *Dst,
+ const Loop *DstLoopNest,
+ SmallBitVector &Loops);
+
+ /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testZIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j], where
+ /// i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction vector entry and, when possible,
+ /// the distance vector entry.
+ /// If the dependence isn't proven to exist,
+ /// marks the Result as inconsistent.
+ bool testSIV(const SCEV *Src,
+ const SCEV *Dst,
+ unsigned &Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*j]
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// With minor algebra, this test can also be used for things like
+ /// [c1 + a1*i + a2*j][c2].
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Marks the Result as inconsistent.
+ bool testRDIV(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
+
+ /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence.
+ /// Returns true if dependence disproved.
+ /// Can sometimes refine direction vectors.
+ bool testMIV(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const;
+
+ /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst)
+ /// for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction and distance.
+ bool strongSIVtest(const SCEV *Coeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
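[Editor's note: a quick worked instance of the strong SIV equation, added here for
illustration; the loop is hypothetical. In

    for (i = 0; i < N; i++)
      A[i + 2] = A[i];

the store subscript is i + 2 (c1 = 2) and the load subscript is i (c2 = 0), with the
shared coefficient a = 1. Equal addresses require c1 + a*i_s = c2 + a*i_d, giving a
constant distance i_d - i_s = (c1 - c2)/a = 2 and direction <. If a did not evenly
divide c1 - c2, or the distance exceeded the loop's trip count, strongSIVtest would
disprove the dependence outright.]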
+
+ /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2 - a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// Marks the dependence as splittable.
+ bool weakCrossingSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint,
+ const SCEV *&SplitIter) const;
+
+ /// exactSIVtest - Tests the exact SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a1*i] and [c2 + a2*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a1 and a2 are constant.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ bool exactSIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1] and [c2 + a*i],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroDstSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
+ bool weakZeroSrcSIVtest(const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair
+ /// (Src and Dst) for dependence.
+ /// Things of the form [c1 + a*i] and [c2],
+ /// where i is an induction variable, c1 and c2 are loop invariant,
+ /// and a is a constant. See also weakZeroSrcSIVtest.
+ /// Returns true if any possible dependence is disproved.
+ /// If there might be a dependence, returns false.
+ /// Sets appropriate direction entry.
+ /// Set consistent to false.
+ /// If loop peeling will break the dependence, mark appropriately.
+ bool weakZeroDstSIVtest(const SCEV *SrcCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *CurrentLoop,
+ unsigned Level,
+ FullDependence &Result,
+ Constraint &NewConstraint) const;
+
+ /// exactRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that symbolicRDIVtest doesn't,
+ /// and vice versa.
+ bool exactRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop,
+ FullDependence &Result) const;
+
+ /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence.
+ /// Things of the form [c1 + a*i] and [c2 + b*j],
+ /// where i and j are induction variables, c1 and c2 are loop invariant,
+ /// and a and b are constants.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Works in some cases that exactRDIVtest doesn't,
+ /// and vice versa. Can also be used as a backup for
+ /// ordinary SIV tests.
+ bool symbolicRDIVtest(const SCEV *SrcCoeff,
+ const SCEV *DstCoeff,
+ const SCEV *SrcConst,
+ const SCEV *DstConst,
+ const Loop *SrcLoop,
+ const Loop *DstLoop) const;
+
+ /// gcdMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Can sometimes disprove the equal direction for 1 or more loops.
+ /// Can handle some symbolics that even the SIV tests don't get,
+ /// so we use it as a backup for everything.
+ bool gcdMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ FullDependence &Result) const;
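[Editor's note: a concrete instance of the GCD reasoning, not taken from the header.
For subscripts 2*i (Src) and 2*j + 1 (Dst), a dependence needs an integer solution of
2*i - 2*j = 1. Since gcd(2, 2) = 2 does not divide 1, no solution exists and gcdMIVtest
disproves the dependence regardless of loop bounds; when the gcd does divide the
constant difference, the test proves nothing and other tests must refine the result.]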
+
+ /// banerjeeMIVtest - Tests an MIV subscript pair for dependence.
+ /// Returns true if any possible dependence is disproved.
+ /// Marks the result as inconsistent.
+ /// Computes directions.
+ bool banerjeeMIVtest(const SCEV *Src,
+ const SCEV *Dst,
+ const SmallBitVector &Loops,
+ FullDependence &Result) const;
+
+ /// collectCoeffInfo - Walks through the subscript,
+ /// collecting each coefficient, the associated loop bounds,
+ /// and recording its positive and negative parts for later use.
+ CoefficientInfo *collectCoeffInfo(const SCEV *Subscript,
+ bool SrcFlag,
+ const SCEV *&Constant) const;
+
+ /// getPositivePart - X^+ = max(X, 0).
+ ///
+ const SCEV *getPositivePart(const SCEV *X) const;
+
+ /// getNegativePart - X^- = min(X, 0).
+ ///
+ const SCEV *getNegativePart(const SCEV *X) const;
+
+ /// getLowerBound - Looks through all the bounds info and
+ /// computes the lower bound given the current direction settings
+ /// at each level.
+ const SCEV *getLowerBound(BoundInfo *Bound) const;
+
+ /// getUpperBound - Looks through all the bounds info and
+ /// computes the upper bound given the current direction settings
+ /// at each level.
+ const SCEV *getUpperBound(BoundInfo *Bound) const;
+
+ /// exploreDirections - Hierarchically expands the direction vector
+ /// search space, combining the directions of discovered dependences
+ /// in the DirSet field of Bound. Returns the number of distinct
+ /// dependences discovered. If the dependence is disproved,
+ /// it will return 0.
+ unsigned exploreDirections(unsigned Level,
+ CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ const SmallBitVector &Loops,
+ unsigned &DepthExpanded,
+ const SCEV *Delta) const;
+
+ /// testBounds - Returns true iff the current bounds are plausible.
+ bool testBounds(unsigned char DirKind,
+ unsigned Level,
+ BoundInfo *Bound,
+ const SCEV *Delta) const;
+
+ /// findBoundsALL - Computes the upper and lower bounds for level K
+ /// using the * direction. Records them in Bound.
+ void findBoundsALL(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsLT - Computes the upper and lower bounds for level K
+ /// using the < direction. Records them in Bound.
+ void findBoundsLT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsGT - Computes the upper and lower bounds for level K
+ /// using the > direction. Records them in Bound.
+ void findBoundsGT(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// findBoundsEQ - Computes the upper and lower bounds for level K
+ /// using the = direction. Records them in Bound.
+ void findBoundsEQ(CoefficientInfo *A,
+ CoefficientInfo *B,
+ BoundInfo *Bound,
+ unsigned K) const;
+
+ /// intersectConstraints - Updates X with the intersection
+ /// of the Constraints X and Y. Returns true if X has changed.
+ bool intersectConstraints(Constraint *X,
+ const Constraint *Y);
+
+ /// propagate - Review the constraints, looking for opportunities
+ /// to simplify a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagate(const SCEV *&Src,
+ const SCEV *&Dst,
+ SmallBitVector &Loops,
+ SmallVectorImpl<Constraint> &Constraints,
+ bool &Consistent);
+
+ /// propagateDistance - Attempt to propagate a distance
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateDistance(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent);
+
+ /// propagatePoint - Attempt to propagate a point
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ bool propagatePoint(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint);
+
+ /// propagateLine - Attempt to propagate a line
+ /// constraint into a subscript pair (Src and Dst).
+ /// Return true if some simplification occurs.
+ /// If the simplification isn't exact (that is, if it is conservative
+ /// in terms of dependence), set consistent to false.
+ bool propagateLine(const SCEV *&Src,
+ const SCEV *&Dst,
+ Constraint &CurConstraint,
+ bool &Consistent);
+
+ /// findCoefficient - Given a linear SCEV,
+ /// return the coefficient corresponding to specified loop.
+ /// If there isn't one, return the SCEV constant 0.
+ /// For example, given a*i + b*j + c*k, returning the coefficient
+ /// corresponding to the j loop would yield b.
+ const SCEV *findCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const;
+
+ /// zeroCoefficient - Given a linear SCEV,
+ /// return the SCEV given by zeroing out the coefficient
+ /// corresponding to the specified loop.
+ /// For example, given a*i + b*j + c*k, zeroing the coefficient
+ /// corresponding to the j loop would yield a*i + c*k.
+ const SCEV *zeroCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop) const;
+
+ /// addToCoefficient - Given a linear SCEV Expr,
+ /// return the SCEV given by adding some Value to the
+ /// coefficient corresponding to the specified TargetLoop.
+ /// For example, given a*i + b*j + c*k, adding 1 to the coefficient
+ /// corresponding to the j loop would yield a*i + (b+1)*j + c*k.
+ const SCEV *addToCoefficient(const SCEV *Expr,
+ const Loop *TargetLoop,
+ const SCEV *Value) const;
+
+ /// updateDirection - Update direction vector entry
+ /// based on the current constraint.
+ void updateDirection(Dependence::DVEntry &Level,
+ const Constraint &CurConstraint) const;
+
+ bool tryDelinearize(Instruction *Src, Instruction *Dst,
+ SmallVectorImpl<Subscript> &Pair);
+
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ DependenceAnalysis() : FunctionPass(ID) {
+ initializeDependenceAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &) const override;
+ void print(raw_ostream &, const Module * = nullptr) const override;
+ }; // class DependenceAnalysis
+
+ /// createDependenceAnalysisPass - This creates an instance of the
+ /// DependenceAnalysis pass.
+ FunctionPass *createDependenceAnalysisPass();
+
+} // namespace llvm
+
+#endif
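[Editor's note: a minimal client sketch for this pass. It assumes the depends() entry
point and the Dependence/DVEntry direction flags declared earlier in this header,
outside the excerpt above, so treat the exact signatures as illustrative.]

    #include "llvm/Analysis/DependenceAnalysis.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Print the direction vector for one (store, load) pair, one entry
    // per common loop level.
    static void printDirections(DependenceAnalysis &DA, Instruction *Src,
                                Instruction *Dst) {
      std::unique_ptr<Dependence> D =
          DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true);
      if (!D)
        return; // Every possible dependence was disproved.
      for (unsigned Level = 1, E = D->getLevels(); Level <= E; ++Level) {
        unsigned Dir = D->getDirection(Level);
        errs() << ((Dir & Dependence::DVEntry::LT) ? "<" : "")
               << ((Dir & Dependence::DVEntry::EQ) ? "=" : "")
               << ((Dir & Dependence::DVEntry::GT) ? ">" : "") << " ";
      }
      errs() << "\n";
    }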
diff --git a/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h
new file mode 100644
index 0000000..aa2de57
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DivergenceAnalysis.h
@@ -0,0 +1,53 @@
+//===- llvm/Analysis/DivergenceAnalysis.h - Divergence Analysis -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The divergence analysis is an LLVM pass which can be used to find out
+// if a branch instruction in a GPU program is divergent or not. It can help
+// branch optimizations such as jump threading and loop unswitching to make
+// better decisions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DIVERGENCEANALYSIS_H
+#define LLVM_ANALYSIS_DIVERGENCEANALYSIS_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class Value;
+class DivergenceAnalysis : public FunctionPass {
+public:
+ static char ID;
+
+ DivergenceAnalysis() : FunctionPass(ID) {
+ initializeDivergenceAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnFunction(Function &F) override;
+
+ // Print all divergent branches in the function.
+ void print(raw_ostream &OS, const Module *) const override;
+
+ // Returns true if V is divergent.
+ bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
+
+ // Returns true if V is uniform/non-divergent.
+ bool isUniform(const Value *V) const { return !isDivergent(V); }
+
+private:
+ // Stores all divergent values.
+ DenseSet<const Value *> DivergentValues;
+};
+} // End llvm namespace
+
+#endif // LLVM_ANALYSIS_DIVERGENCEANALYSIS_H
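[Editor's note: a sketch of how a client pass might consume this analysis. The
consuming function is hypothetical; getAnalysis/addRequired are the standard legacy
pass-manager API.]

    #include "llvm/Analysis/DivergenceAnalysis.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Inside a FunctionPass whose getAnalysisUsage() did
    // AU.addRequired<DivergenceAnalysis>():
    static bool inspectBranches(Function &F, DivergenceAnalysis &DA) {
      for (BasicBlock &BB : F) {
        auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
        if (BI && BI->isConditional() && DA.isDivergent(BI->getCondition())) {
          // Threads in a warp may take different sides of this branch;
          // jump threading / loop unswitching should be conservative here.
        }
      }
      return false;
    }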
diff --git a/contrib/llvm/include/llvm/Analysis/DomPrinter.h b/contrib/llvm/include/llvm/Analysis/DomPrinter.h
new file mode 100644
index 0000000..0ed2899
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DomPrinter.h
@@ -0,0 +1,30 @@
+//===-- DomPrinter.h - Dom printer external interface ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the dominance tree printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMPRINTER_H
+#define LLVM_ANALYSIS_DOMPRINTER_H
+
+namespace llvm {
+ class FunctionPass;
+ FunctionPass *createDomPrinterPass();
+ FunctionPass *createDomOnlyPrinterPass();
+ FunctionPass *createDomViewerPass();
+ FunctionPass *createDomOnlyViewerPass();
+ FunctionPass *createPostDomPrinterPass();
+ FunctionPass *createPostDomOnlyPrinterPass();
+ FunctionPass *createPostDomViewerPass();
+ FunctionPass *createPostDomOnlyViewerPass();
+} // End llvm namespace
+
+#endif
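[Editor's note: these factories are normally reached through opt (e.g. -dot-dom,
-dot-postdom); below is a direct legacy pass-manager sketch. The output file names
follow the printer's usual <graph>.<function>.dot convention, as I understand it.]

    #include "llvm/Analysis/DomPrinter.h"
    #include "llvm/IR/LegacyPassManager.h"
    using namespace llvm;

    static void emitDomGraphs(Module &M) {
      legacy::PassManager PM;
      PM.add(createDomPrinterPass());         // dominator tree, with block bodies
      PM.add(createPostDomOnlyPrinterPass()); // post-dominator tree, labels only
      PM.run(M);
    }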
diff --git a/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
new file mode 100644
index 0000000..fb73005
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DominanceFrontier.h
@@ -0,0 +1,210 @@
+//===- llvm/Analysis/DominanceFrontier.h - Dominator Frontiers --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominanceFrontier class, which calculates and holds the
+// dominance frontier for a function.
+//
+// This should be considered deprecated, don't add any more uses of this data
+// structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
+
+#include "llvm/IR/Dominators.h"
+#include <map>
+#include <set>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// DominanceFrontierBase - Common base class for computing forward and inverse
+/// dominance frontiers for a function.
+///
+template <class BlockT>
+class DominanceFrontierBase {
+public:
+ typedef std::set<BlockT *> DomSetType; // Dom set for a bb
+ typedef std::map<BlockT *, DomSetType> DomSetMapType; // Dom set map
+
+protected:
+ typedef GraphTraits<BlockT *> BlockTraits;
+
+ DomSetMapType Frontiers;
+ std::vector<BlockT *> Roots;
+ const bool IsPostDominators;
+
+public:
+ DominanceFrontierBase(bool isPostDom) : IsPostDominators(isPostDom) {}
+
+ /// getRoots - Return the root blocks of the current CFG. This may include
+ /// multiple blocks if we are computing post dominators. For forward
+ /// dominators, this will always be a single block (the entry node).
+ ///
+ inline const std::vector<BlockT *> &getRoots() const {
+ return Roots;
+ }
+
+ BlockT *getRoot() const {
+ assert(Roots.size() == 1 && "Should always have entry node!");
+ return Roots[0];
+ }
+
+ /// isPostDominator - Returns true if the analysis is based on postdominators.
+ ///
+ bool isPostDominator() const {
+ return IsPostDominators;
+ }
+
+ void releaseMemory() {
+ Frontiers.clear();
+ }
+
+ // Accessor interface:
+ typedef typename DomSetMapType::iterator iterator;
+ typedef typename DomSetMapType::const_iterator const_iterator;
+ iterator begin() { return Frontiers.begin(); }
+ const_iterator begin() const { return Frontiers.begin(); }
+ iterator end() { return Frontiers.end(); }
+ const_iterator end() const { return Frontiers.end(); }
+ iterator find(BlockT *B) { return Frontiers.find(B); }
+ const_iterator find(BlockT *B) const { return Frontiers.find(B); }
+
+ iterator addBasicBlock(BlockT *BB, const DomSetType &frontier) {
+ assert(find(BB) == end() && "Block already in DominanceFrontier!");
+ return Frontiers.insert(std::make_pair(BB, frontier)).first;
+ }
+
+ /// removeBlock - Remove basic block BB's frontier.
+ void removeBlock(BlockT *BB);
+
+ void addToFrontier(iterator I, BlockT *Node);
+
+ void removeFromFrontier(iterator I, BlockT *Node);
+
+ /// compareDomSet - Return false if two domsets match. Otherwise
+ /// return true.
+ bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const;
+
+ /// compare - Return true if the other dominance frontier base matches
+ /// this dominance frontier base. Otherwise return false.
+ bool compare(DominanceFrontierBase<BlockT> &Other) const;
+
+ /// print - Convert to human readable form
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// dump - Dump the dominance frontier to dbgs().
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const;
+#endif
+};
+
+//===-------------------------------------
+/// ForwardDominanceFrontierBase - Concrete subclass of DominanceFrontierBase
+/// that is used to compute forward dominance frontiers.
+///
+template <class BlockT>
+class ForwardDominanceFrontierBase : public DominanceFrontierBase<BlockT> {
+private:
+ typedef GraphTraits<BlockT *> BlockTraits;
+
+public:
+ typedef DominatorTreeBase<BlockT> DomTreeT;
+ typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
+ typedef typename DominanceFrontierBase<BlockT>::DomSetType DomSetType;
+
+ ForwardDominanceFrontierBase() : DominanceFrontierBase<BlockT>(false) {}
+
+ void analyze(DomTreeT &DT) {
+ this->Roots = DT.getRoots();
+ assert(this->Roots.size() == 1 &&
+ "Only one entry block for forward domfronts!");
+ calculate(DT, DT[this->Roots[0]]);
+ }
+
+ const DomSetType &calculate(const DomTreeT &DT, const DomTreeNodeT *Node);
+};
+
+class DominanceFrontier : public FunctionPass {
+ ForwardDominanceFrontierBase<BasicBlock> Base;
+
+public:
+ typedef DominatorTreeBase<BasicBlock> DomTreeT;
+ typedef DomTreeNodeBase<BasicBlock> DomTreeNodeT;
+ typedef DominanceFrontierBase<BasicBlock>::DomSetType DomSetType;
+ typedef DominanceFrontierBase<BasicBlock>::iterator iterator;
+ typedef DominanceFrontierBase<BasicBlock>::const_iterator const_iterator;
+
+ static char ID; // Pass ID, replacement for typeid
+
+ DominanceFrontier();
+
+ ForwardDominanceFrontierBase<BasicBlock> &getBase() { return Base; }
+
+ inline const std::vector<BasicBlock *> &getRoots() const {
+ return Base.getRoots();
+ }
+
+ BasicBlock *getRoot() const { return Base.getRoot(); }
+
+ bool isPostDominator() const { return Base.isPostDominator(); }
+
+ iterator begin() { return Base.begin(); }
+
+ const_iterator begin() const { return Base.begin(); }
+
+ iterator end() { return Base.end(); }
+
+ const_iterator end() const { return Base.end(); }
+
+ iterator find(BasicBlock *B) { return Base.find(B); }
+
+ const_iterator find(BasicBlock *B) const { return Base.find(B); }
+
+ iterator addBasicBlock(BasicBlock *BB, const DomSetType &frontier) {
+ return Base.addBasicBlock(BB, frontier);
+ }
+
+ void removeBlock(BasicBlock *BB) { return Base.removeBlock(BB); }
+
+ void addToFrontier(iterator I, BasicBlock *Node) {
+ return Base.addToFrontier(I, Node);
+ }
+
+ void removeFromFrontier(iterator I, BasicBlock *Node) {
+ return Base.removeFromFrontier(I, Node);
+ }
+
+ bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+ return Base.compareDomSet(DS1, DS2);
+ }
+
+ bool compare(DominanceFrontierBase<BasicBlock> &Other) const {
+ return Base.compare(Other);
+ }
+
+ void releaseMemory() override;
+
+ bool runOnFunction(Function &) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ void print(raw_ostream &OS, const Module * = nullptr) const override;
+
+ void dump() const;
+};
+
+extern template class DominanceFrontierBase<BasicBlock>;
+extern template class ForwardDominanceFrontierBase<BasicBlock>;
+
+} // End llvm namespace
+
+#endif
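[Editor's note: a consumer sketch for the legacy pass; the client function is
hypothetical, while find()/end() are the accessors declared above.]

    #include "llvm/Analysis/DominanceFrontier.h"
    using namespace llvm;

    // Inside a FunctionPass that did AU.addRequired<DominanceFrontier>():
    static void listFrontier(DominanceFrontier &DF, BasicBlock *BB) {
      DominanceFrontier::iterator It = DF.find(BB);
      if (It == DF.end())
        return;
      for (BasicBlock *FrontierBB : It->second) {
        // BB dominates a predecessor of FrontierBB but does not strictly
        // dominate FrontierBB itself -- the classic PHI placement points.
        (void)FrontierBB;
      }
    }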
diff --git a/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h b/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h
new file mode 100644
index 0000000..629ae38
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/DominanceFrontierImpl.h
@@ -0,0 +1,226 @@
+//===- llvm/Analysis/DominanceFrontierImpl.h - Dominator Frontiers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the generic implementation of the DominanceFrontier class, which
+// calculates and holds the dominance frontier for a function.
+//
+// This should be considered deprecated, don't add any more uses of this data
+// structure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
+#define LLVM_ANALYSIS_DOMINANCEFRONTIERIMPL_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/GenericDomTree.h"
+
+namespace llvm {
+
+template <class BlockT>
+class DFCalculateWorkObject {
+public:
+ typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
+
+ DFCalculateWorkObject(BlockT *B, BlockT *P, const DomTreeNodeT *N,
+ const DomTreeNodeT *PN)
+ : currentBB(B), parentBB(P), Node(N), parentNode(PN) {}
+ BlockT *currentBB;
+ BlockT *parentBB;
+ const DomTreeNodeT *Node;
+ const DomTreeNodeT *parentNode;
+};
+
+template <class BlockT>
+void DominanceFrontierBase<BlockT>::removeBlock(BlockT *BB) {
+ assert(find(BB) != end() && "Block is not in DominanceFrontier!");
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ I->second.erase(BB);
+ Frontiers.erase(BB);
+}
+
+template <class BlockT>
+void DominanceFrontierBase<BlockT>::addToFrontier(iterator I,
+ BlockT *Node) {
+ assert(I != end() && "BB is not in DominanceFrontier!");
+ I->second.insert(Node);
+}
+
+template <class BlockT>
+void DominanceFrontierBase<BlockT>::removeFromFrontier(iterator I,
+ BlockT *Node) {
+ assert(I != end() && "BB is not in DominanceFrontier!");
+ assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
+ I->second.erase(Node);
+}
+
+template <class BlockT>
+bool DominanceFrontierBase<BlockT>::compareDomSet(DomSetType &DS1,
+ const DomSetType &DS2) const {
+ std::set<BlockT *> tmpSet;
+ for (BlockT *BB : DS2)
+ tmpSet.insert(BB);
+
+ for (typename DomSetType::const_iterator I = DS1.begin(), E = DS1.end();
+ I != E;) {
+ BlockT *Node = *I++;
+
+ if (tmpSet.erase(Node) == 0)
+ // Node is in DS1 but not in DS2.
+ return true;
+ }
+
+ if (!tmpSet.empty()) {
+ // There are nodes that are in DS2 but not in DS1.
+ return true;
+ }
+
+ // DS1 and DS2 match.
+ return false;
+}
+
+template <class BlockT>
+bool DominanceFrontierBase<BlockT>::compare(
+ DominanceFrontierBase<BlockT> &Other) const {
+ DomSetMapType tmpFrontiers;
+ for (typename DomSetMapType::const_iterator I = Other.begin(),
+ E = Other.end();
+ I != E; ++I)
+ tmpFrontiers.insert(std::make_pair(I->first, I->second));
+
+ for (typename DomSetMapType::iterator I = tmpFrontiers.begin(),
+ E = tmpFrontiers.end();
+ I != E;) {
+ BlockT *Node = I->first;
+ const_iterator DFI = find(Node);
+ if (DFI == end())
+ return true;
+
+ if (compareDomSet(I->second, DFI->second))
+ return true;
+
+ ++I;
+ tmpFrontiers.erase(Node);
+ }
+
+ if (!tmpFrontiers.empty())
+ return true;
+
+ return false;
+}
+
+template <class BlockT>
+void DominanceFrontierBase<BlockT>::print(raw_ostream &OS) const {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ OS << " DomFrontier for BB ";
+ if (I->first)
+ I->first->printAsOperand(OS, false);
+ else
+ OS << " <<exit node>>";
+ OS << " is:\t";
+
+ const std::set<BlockT *> &BBs = I->second;
+
+ for (const BlockT *BB : BBs) {
+ OS << ' ';
+ if (BB)
+ BB->printAsOperand(OS, false);
+ else
+ OS << "<<exit node>>";
+ }
+ OS << '\n';
+ }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class BlockT>
+void DominanceFrontierBase<BlockT>::dump() const {
+ print(dbgs());
+}
+#endif
+
+template <class BlockT>
+const typename ForwardDominanceFrontierBase<BlockT>::DomSetType &
+ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
+ const DomTreeNodeT *Node) {
+ BlockT *BB = Node->getBlock();
+ DomSetType *Result = nullptr;
+
+ std::vector<DFCalculateWorkObject<BlockT>> workList;
+ SmallPtrSet<BlockT *, 32> visited;
+
+ workList.push_back(DFCalculateWorkObject<BlockT>(BB, nullptr, Node, nullptr));
+ do {
+ DFCalculateWorkObject<BlockT> *currentW = &workList.back();
+ assert(currentW && "Missing work object.");
+
+ BlockT *currentBB = currentW->currentBB;
+ BlockT *parentBB = currentW->parentBB;
+ const DomTreeNodeT *currentNode = currentW->Node;
+ const DomTreeNodeT *parentNode = currentW->parentNode;
+ assert(currentBB && "Invalid work object. Missing current Basic Block");
+ assert(currentNode && "Invalid work object. Missing current Node");
+ DomSetType &S = this->Frontiers[currentBB];
+
+ // Visit each block only once.
+ if (visited.insert(currentBB).second) {
+ // Loop over CFG successors to calculate DFlocal[currentNode]
+ for (auto SI = BlockTraits::child_begin(currentBB),
+ SE = BlockTraits::child_end(currentBB);
+ SI != SE; ++SI) {
+ // Does Node immediately dominate this successor?
+ if (DT[*SI]->getIDom() != currentNode)
+ S.insert(*SI);
+ }
+ }
+
+ // At this point, S is DFlocal. Now we union in DFup's of our children...
+ // Loop through and visit the nodes that Node immediately dominates (Node's
+ // children in the IDomTree)
+ bool visitChild = false;
+ for (typename DomTreeNodeT::const_iterator NI = currentNode->begin(),
+ NE = currentNode->end();
+ NI != NE; ++NI) {
+ DomTreeNodeT *IDominee = *NI;
+ BlockT *childBB = IDominee->getBlock();
+ if (visited.count(childBB) == 0) {
+ workList.push_back(DFCalculateWorkObject<BlockT>(
+ childBB, currentBB, IDominee, currentNode));
+ visitChild = true;
+ }
+ }
+
+ // If no unvisited child was pushed, this block's set is complete, so
+ // merge it into the parent's frontier and pop it from the workList.
+ if (!visitChild) {
+ if (!parentBB) {
+ Result = &S;
+ break;
+ }
+
+ typename DomSetType::const_iterator CDFI = S.begin(), CDFE = S.end();
+ DomSetType &parentSet = this->Frontiers[parentBB];
+ for (; CDFI != CDFE; ++CDFI) {
+ if (!DT.properlyDominates(parentNode, DT[*CDFI]))
+ parentSet.insert(*CDFI);
+ }
+ workList.pop_back();
+ }
+
+ } while (!workList.empty());
+
+ return *Result;
+}
+
+} // End llvm namespace
+
+#endif
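[Editor's note: for intuition about what calculate() produces, a standard textbook
example, not from this file. In the diamond CFG A -> {B, C}, B -> D, C -> D, block B
dominates a predecessor of D (namely B itself) but does not strictly dominate D, so
DF(B) = {D}; symmetrically DF(C) = {D}; and since the entry A strictly dominates every
block, DF(A) is empty. The worklist above computes exactly these sets, propagating up
from each child the frontier blocks its parent does not properly dominate.]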
diff --git a/contrib/llvm/include/llvm/Analysis/EHPersonalities.h b/contrib/llvm/include/llvm/Analysis/EHPersonalities.h
new file mode 100644
index 0000000..59e9672
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/EHPersonalities.h
@@ -0,0 +1,94 @@
+//===- EHPersonalities.h - Compute EH-related information -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_EHPERSONALITIES_H
+#define LLVM_ANALYSIS_EHPERSONALITIES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class BasicBlock;
+class Function;
+class Value;
+
+enum class EHPersonality {
+ Unknown,
+ GNU_Ada,
+ GNU_C,
+ GNU_CXX,
+ GNU_ObjC,
+ MSVC_X86SEH,
+ MSVC_Win64SEH,
+ MSVC_CXX,
+ CoreCLR
+};
+
+/// \brief See if the given exception handling personality function is one
+/// that we understand. If so, return a description of it; otherwise return
+/// Unknown.
+EHPersonality classifyEHPersonality(const Value *Pers);
+
+/// \brief Returns true if this personality function catches asynchronous
+/// exceptions.
+inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
+ // The two SEH personality functions can catch async exceptions. We assume
+ // unknown personalities don't catch async exceptions.
+ switch (Pers) {
+ case EHPersonality::MSVC_X86SEH:
+ case EHPersonality::MSVC_Win64SEH:
+ return true;
+ default:
+ return false;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+/// \brief Returns true if this is a personality function that invokes
+/// handler funclets (which must return to it).
+inline bool isFuncletEHPersonality(EHPersonality Pers) {
+ switch (Pers) {
+ case EHPersonality::MSVC_CXX:
+ case EHPersonality::MSVC_X86SEH:
+ case EHPersonality::MSVC_Win64SEH:
+ case EHPersonality::CoreCLR:
+ return true;
+ default:
+ return false;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+/// \brief Return true if this personality may be safely removed if there
+/// are no invoke instructions remaining in the current function.
+inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
+ switch (Pers) {
+ case EHPersonality::Unknown:
+ return false;
+ // All known personalities currently have this behavior
+ default:
+ return true;
+ }
+ llvm_unreachable("invalid enum");
+}
+
+bool canSimplifyInvokeNoUnwind(const Function *F);
+
+typedef TinyPtrVector<BasicBlock *> ColorVector;
+
+/// \brief If an EH funclet personality is in use (see isFuncletEHPersonality),
+/// this will recompute which blocks are in which funclet. It is possible that
+/// some blocks are in multiple funclets. Consider this analysis to be
+/// expensive.
+DenseMap<BasicBlock *, ColorVector> colorEHFunclets(Function &F);
+
+} // end namespace llvm
+
+#endif
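[Editor's note: a short sketch of the intended call pattern. hasPersonalityFn and
getPersonalityFn are the usual llvm::Function accessors; the surrounding function is
hypothetical.]

    #include "llvm/Analysis/EHPersonalities.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    static void prepareEH(Function &F) {
      EHPersonality Pers = F.hasPersonalityFn()
                               ? classifyEHPersonality(F.getPersonalityFn())
                               : EHPersonality::Unknown;
      if (isFuncletEHPersonality(Pers)) {
        // Funclet-based EH: blocks may belong to one or more funclets, and
        // the coloring is expensive, so compute it once and cache it.
        DenseMap<BasicBlock *, ColorVector> BlockColors = colorEHFunclets(F);
        (void)BlockColors;
      }
    }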
diff --git a/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h b/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h
new file mode 100644
index 0000000..bcd102e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/GlobalsModRef.h
@@ -0,0 +1,160 @@
+//===- GlobalsModRef.h - Simple Mod/Ref AA for Globals ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a simple mod/ref and alias analysis over globals.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_GLOBALSMODREF_H
+#define LLVM_ANALYSIS_GLOBALSMODREF_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include <list>
+
+namespace llvm {
+
+/// An alias analysis result set for globals.
+///
+/// This focuses on handling aliasing properties of globals and interprocedural
+/// function call mod/ref information.
+class GlobalsAAResult : public AAResultBase<GlobalsAAResult> {
+ friend AAResultBase<GlobalsAAResult>;
+
+ class FunctionInfo;
+
+ const DataLayout &DL;
+
+ /// The globals that do not have their addresses taken.
+ SmallPtrSet<const GlobalValue *, 8> NonAddressTakenGlobals;
+
+ /// IndirectGlobals - The memory pointed to by this global is known to be
+ /// 'owned' by the global.
+ SmallPtrSet<const GlobalValue *, 8> IndirectGlobals;
+
+ /// AllocsForIndirectGlobals - If an instruction allocates memory for an
+ /// indirect global, this map indicates which one.
+ DenseMap<const Value *, const GlobalValue *> AllocsForIndirectGlobals;
+
+ /// For each function, keep track of what globals are modified or read.
+ DenseMap<const Function *, FunctionInfo> FunctionInfos;
+
+ /// A map of functions to SCC. The SCCs are described by a simple integer
+ /// ID that is only useful for comparing for equality (are two functions
+ /// in the same SCC or not?)
+ DenseMap<const Function *, unsigned> FunctionToSCCMap;
+
+ /// Handle to clear this analysis on deletion of values.
+ struct DeletionCallbackHandle final : CallbackVH {
+ GlobalsAAResult *GAR;
+ std::list<DeletionCallbackHandle>::iterator I;
+
+ DeletionCallbackHandle(GlobalsAAResult &GAR, Value *V)
+ : CallbackVH(V), GAR(&GAR) {}
+
+ void deleted() override;
+ };
+
+ /// List of callbacks for globals being tracked by this analysis. Note that
+ /// these objects are quite large, but we only anticipate having one per
+ /// global tracked by this analysis. There are numerous optimizations we
+ /// could perform to the memory utilization here if this becomes a problem.
+ std::list<DeletionCallbackHandle> Handles;
+
+ explicit GlobalsAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI);
+
+public:
+ GlobalsAAResult(GlobalsAAResult &&Arg);
+
+ static GlobalsAAResult analyzeModule(Module &M, const TargetLibraryInfo &TLI,
+ CallGraph &CG);
+
+ //------------------------------------------------
+ // Implement the AliasAnalysis API
+ //
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+ using AAResultBase::getModRefInfo;
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+
+ /// getModRefBehavior - Return the most generic mod/ref behavior of the
+ /// specified function, independent of any particular call site.
+ FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+ /// getModRefBehavior - Return the behavior of the specified function if
+ /// called from the specified call site. The call site may be null in which
+ /// case the most generic behavior of this function should be returned.
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+
+private:
+ FunctionInfo *getFunctionInfo(const Function *F);
+
+ void AnalyzeGlobals(Module &M);
+ void AnalyzeCallGraph(CallGraph &CG, Module &M);
+ bool AnalyzeUsesOfPointer(Value *V,
+ SmallPtrSetImpl<Function *> *Readers = nullptr,
+ SmallPtrSetImpl<Function *> *Writers = nullptr,
+ GlobalValue *OkayStoreDest = nullptr);
+ bool AnalyzeIndirectGlobalMemory(GlobalVariable *GV);
+ void CollectSCCMembership(CallGraph &CG);
+
+ bool isNonEscapingGlobalNoAlias(const GlobalValue *GV, const Value *V);
+ ModRefInfo getModRefInfoForArgument(ImmutableCallSite CS,
+ const GlobalValue *GV);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class GlobalsAA {
+public:
+ typedef GlobalsAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ GlobalsAAResult run(Module &M, AnalysisManager<Module> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "GlobalsAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the GlobalsAAResult object.
+class GlobalsAAWrapperPass : public ModulePass {
+ std::unique_ptr<GlobalsAAResult> Result;
+
+public:
+ static char ID;
+
+ GlobalsAAWrapperPass();
+
+ GlobalsAAResult &getResult() { return *Result; }
+ const GlobalsAAResult &getResult() const { return *Result; }
+
+ bool runOnModule(Module &M) override;
+ bool doFinalization(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createGlobalsAAWrapperPass - This pass provides alias and mod/ref info for
+// global values that do not have their addresses taken.
+//
+ModulePass *createGlobalsAAWrapperPass();
+}
+
+#endif
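[Editor's note: a consumer sketch. The caller is hypothetical and would obtain the
result inside runOnModule() via getAnalysis<GlobalsAAWrapperPass>().getResult(), after
AU.addRequired<GlobalsAAWrapperPass>(); MemoryLocation and AliasResult come from
AliasAnalysis.h, already included above.]

    #include "llvm/Analysis/GlobalsModRef.h"
    using namespace llvm;

    static bool provablyDisjoint(GlobalsAAResult &GAA, Value *PtrA,
                                 Value *PtrB) {
      // NoAlias answers are precise for globals whose address is never taken.
      return GAA.alias(MemoryLocation(PtrA), MemoryLocation(PtrB)) == NoAlias;
    }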
diff --git a/contrib/llvm/include/llvm/Analysis/IVUsers.h b/contrib/llvm/include/llvm/Analysis/IVUsers.h
new file mode 100644
index 0000000..37d0149
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IVUsers.h
@@ -0,0 +1,188 @@
+//===- llvm/Analysis/IVUsers.h - Induction Variable Users -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements bookkeeping for "interesting" users of expressions
+// computed from induction variables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IVUSERS_H
+#define LLVM_ANALYSIS_IVUSERS_H
+
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/IR/ValueHandle.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Instruction;
+class Value;
+class ScalarEvolution;
+class SCEV;
+class IVUsers;
+class DataLayout;
+
+/// IVStrideUse - Keep track of one use of a strided induction variable.
+/// The Expr member keeps track of the expression, User is the actual user
+/// instruction of the operand, and 'OperandValToReplace' is the operand of
+/// the User that is the use.
+class IVStrideUse final : public CallbackVH, public ilist_node<IVStrideUse> {
+ friend class IVUsers;
+public:
+ IVStrideUse(IVUsers *P, Instruction* U, Value *O)
+ : CallbackVH(U), Parent(P), OperandValToReplace(O) {
+ }
+
+ /// getUser - Return the user instruction for this use.
+ Instruction *getUser() const {
+ return cast<Instruction>(getValPtr());
+ }
+
+ /// setUser - Assign a new user instruction for this use.
+ void setUser(Instruction *NewUser) {
+ setValPtr(NewUser);
+ }
+
+ /// getOperandValToReplace - Return the Value of the operand in the user
+ /// instruction that this IVStrideUse is representing.
+ Value *getOperandValToReplace() const {
+ return OperandValToReplace;
+ }
+
+ /// setOperandValToReplace - Assign a new Value as the operand value
+ /// to replace.
+ void setOperandValToReplace(Value *Op) {
+ OperandValToReplace = Op;
+ }
+
+ /// getPostIncLoops - Return the set of loops for which the expression has
+ /// been adjusted to use post-inc mode.
+ const PostIncLoopSet &getPostIncLoops() const {
+ return PostIncLoops;
+ }
+
+ /// transformToPostInc - Transform the expression to post-inc form for the
+ /// given loop.
+ void transformToPostInc(const Loop *L);
+
+private:
+ /// Parent - a pointer to the IVUsers that owns this IVStrideUse.
+ IVUsers *Parent;
+
+ /// OperandValToReplace - The Value of the operand in the user instruction
+ /// that this IVStrideUse is representing.
+ WeakVH OperandValToReplace;
+
+ /// PostIncLoops - The set of loops for which Expr has been adjusted to
+ /// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
+ PostIncLoopSet PostIncLoops;
+
+ /// Deleted - Implementation of CallbackVH virtual function to
+ /// receive notification when the User is deleted.
+ void deleted() override;
+};
+
+template<> struct ilist_traits<IVStrideUse>
+ : public ilist_default_traits<IVStrideUse> {
+ // createSentinel is used to get hold of a node that marks the end of
+ // the list...
+ // The sentinel is relative to this instance, so we use a non-static
+ // method.
+ IVStrideUse *createSentinel() const {
+ // since i(p)lists always publicly derive from the corresponding
+ // traits, placing a data member in this class will augment i(p)list.
+ // But since the NodeTy is expected to publicly derive from
+ // ilist_node<NodeTy>, there is a legal viable downcast from it
+ // to NodeTy. We use this trick to superpose i(p)list with a "ghostly"
+ // NodeTy, which becomes the sentinel. Dereferencing the sentinel is
+ // forbidden (save the ilist_node<NodeTy>) so no one will ever notice
+ // the superposition.
+ return static_cast<IVStrideUse*>(&Sentinel);
+ }
+ static void destroySentinel(IVStrideUse*) {}
+
+ IVStrideUse *provideInitialHead() const { return createSentinel(); }
+ IVStrideUse *ensureHead(IVStrideUse*) const { return createSentinel(); }
+ static void noteHead(IVStrideUse*, IVStrideUse*) {}
+
+private:
+ mutable ilist_node<IVStrideUse> Sentinel;
+};
+
+class IVUsers : public LoopPass {
+ friend class IVStrideUse;
+ Loop *L;
+ AssumptionCache *AC;
+ LoopInfo *LI;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+ SmallPtrSet<Instruction*, 16> Processed;
+
+ /// IVUses - A list of all tracked IV uses of induction variable expressions
+ /// we are interested in.
+ ilist<IVStrideUse> IVUses;
+
+ // Ephemeral values used by @llvm.assume in this function.
+ SmallPtrSet<const Value *, 32> EphValues;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override;
+
+ void releaseMemory() override;
+
+public:
+ static char ID; // Pass ID, replacement for typeid
+ IVUsers();
+
+ Loop *getLoop() const { return L; }
+
+ /// AddUsersIfInteresting - Inspect the specified Instruction. If it is a
+ /// reducible SCEV, recursively add its users to the IVUsesByStride set and
+ /// return true. Otherwise, return false.
+ bool AddUsersIfInteresting(Instruction *I);
+
+ IVStrideUse &AddUser(Instruction *User, Value *Operand);
+
+ /// getReplacementExpr - Return a SCEV expression which computes the
+ /// value of the OperandValToReplace of the given IVStrideUse.
+ const SCEV *getReplacementExpr(const IVStrideUse &IU) const;
+
+ /// getExpr - Return the expression for the use.
+ const SCEV *getExpr(const IVStrideUse &IU) const;
+
+ const SCEV *getStride(const IVStrideUse &IU, const Loop *L) const;
+
+ typedef ilist<IVStrideUse>::iterator iterator;
+ typedef ilist<IVStrideUse>::const_iterator const_iterator;
+ iterator begin() { return IVUses.begin(); }
+ iterator end() { return IVUses.end(); }
+ const_iterator begin() const { return IVUses.begin(); }
+ const_iterator end() const { return IVUses.end(); }
+ bool empty() const { return IVUses.empty(); }
+
+ bool isIVUserOrOperand(Instruction *Inst) const {
+ return Processed.count(Inst);
+ }
+
+ void print(raw_ostream &OS, const Module* = nullptr) const override;
+
+ /// dump - This method is used for debugging.
+ void dump() const;
+protected:
+ bool AddUsersImpl(Instruction *I, SmallPtrSetImpl<Loop*> &SimpleLoopNests);
+};
+
+Pass *createIVUsersPass();
+
+}
+
+#endif
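[Editor's note: a loop-pass sketch over the tracked use list; the client is
hypothetical, while the iterator typedefs and getExpr() are declared above.]

    #include "llvm/Analysis/IVUsers.h"
    using namespace llvm;

    // Inside runOnLoop(), after AU.addRequired<IVUsers>():
    static void walkIVUsers(IVUsers &IU) {
      for (IVUsers::iterator I = IU.begin(), E = IU.end(); I != E; ++I) {
        Instruction *User = I->getUser();   // the "interesting" user
        const SCEV *Expr = IU.getExpr(*I);  // its expression over the IV
        (void)User;
        (void)Expr;
      }
    }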
diff --git a/contrib/llvm/include/llvm/Analysis/InlineCost.h b/contrib/llvm/include/llvm/Analysis/InlineCost.h
new file mode 100644
index 0000000..35f991c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/InlineCost.h
@@ -0,0 +1,128 @@
+//===- InlineCost.h - Cost analysis for inliner -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements heuristics for inlining decisions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INLINECOST_H
+#define LLVM_ANALYSIS_INLINECOST_H
+
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include <cassert>
+#include <climits>
+
+namespace llvm {
+class AssumptionCacheTracker;
+class CallSite;
+class DataLayout;
+class Function;
+class TargetTransformInfo;
+
+namespace InlineConstants {
+ // Various magic constants used to adjust heuristics.
+ const int InstrCost = 5;
+ const int IndirectCallThreshold = 100;
+ const int CallPenalty = 25;
+ const int LastCallToStaticBonus = -15000;
+ const int ColdccPenalty = 2000;
+ const int NoreturnPenalty = 10000;
+ /// Do not inline functions which allocate this many bytes on the stack
+ /// when the caller is recursive.
+ const unsigned TotalAllocaSizeRecursiveCaller = 1024;
+}
+
+/// \brief Represents the cost of inlining a function.
+///
+/// This supports special values for functions which should "always" or
+/// "never" be inlined. Otherwise, the cost represents a unitless amount;
+/// smaller values increase the likelihood of the function being inlined.
+///
+/// Objects of this type also provide the adjusted threshold for inlining
+/// based on the information available for a particular callsite. They can be
+/// directly tested to determine if inlining should occur given the cost and
+/// threshold for this cost metric.
+class InlineCost {
+ enum SentinelValues {
+ AlwaysInlineCost = INT_MIN,
+ NeverInlineCost = INT_MAX
+ };
+
+ /// \brief The estimated cost of inlining this callsite.
+ const int Cost;
+
+ /// \brief The adjusted threshold against which this cost was computed.
+ const int Threshold;
+
+ // Trivial constructor, interesting logic in the factory functions below.
+ InlineCost(int Cost, int Threshold) : Cost(Cost), Threshold(Threshold) {}
+
+public:
+ static InlineCost get(int Cost, int Threshold) {
+ assert(Cost > AlwaysInlineCost && "Cost crosses sentinel value");
+ assert(Cost < NeverInlineCost && "Cost crosses sentinel value");
+ return InlineCost(Cost, Threshold);
+ }
+ static InlineCost getAlways() {
+ return InlineCost(AlwaysInlineCost, 0);
+ }
+ static InlineCost getNever() {
+ return InlineCost(NeverInlineCost, 0);
+ }
+
+ /// \brief Test whether the inline cost is low enough for inlining.
+ explicit operator bool() const {
+ return Cost < Threshold;
+ }
+
+ bool isAlways() const { return Cost == AlwaysInlineCost; }
+ bool isNever() const { return Cost == NeverInlineCost; }
+ bool isVariable() const { return !isAlways() && !isNever(); }
+
+ /// \brief Get the inline cost estimate.
+ /// It is an error to call this on an "always" or "never" InlineCost.
+ int getCost() const {
+ assert(isVariable() && "Invalid access of InlineCost");
+ return Cost;
+ }
+
+ /// \brief Get the cost delta from the threshold for inlining.
+ /// Only valid if the cost is of the variable kind. Returns a negative
+ /// value if the cost is too high to inline.
+ int getCostDelta() const { return Threshold - getCost(); }
+};
+
+/// \brief Get an InlineCost object representing the cost of inlining this
+/// callsite.
+///
+/// Note that threshold is passed into this function. Only costs below the
+/// threshold are computed with any accuracy. The threshold can be used to
+/// bound the computation necessary to determine whether the cost is
+/// sufficiently low to warrant inlining.
+///
+/// Also note that calling this function *dynamically* computes the cost of
+/// inlining the callsite. It is an expensive, heavyweight call.
+InlineCost getInlineCost(CallSite CS, int Threshold,
+ TargetTransformInfo &CalleeTTI,
+ AssumptionCacheTracker *ACT);
+
+/// \brief Get an InlineCost with the callee explicitly specified.
+/// This allows you to calculate the cost of inlining a function via a
+/// pointer. This behaves exactly as the version with no explicit callee
+/// parameter in all other respects.
+InlineCost getInlineCost(CallSite CS, Function *Callee, int Threshold,
+ TargetTransformInfo &CalleeTTI,
+ AssumptionCacheTracker *ACT);
+
+/// \brief Minimal filter to detect invalid constructs for inlining.
+bool isInlineViable(Function &Callee);
+}
+
+#endif
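[Editor's note: a decision sketch. The 225 threshold is the historical
-inline-threshold default, used here only as an example.]

    #include "llvm/Analysis/InlineCost.h"
    #include "llvm/IR/CallSite.h"
    using namespace llvm;

    static bool shouldInline(CallSite CS, TargetTransformInfo &CalleeTTI,
                             AssumptionCacheTracker *ACT) {
      InlineCost IC = getInlineCost(CS, /*Threshold=*/225, CalleeTTI, ACT);
      if (IC.isAlways())
        return true;   // e.g. an alwaysinline callee
      if (IC.isNever())
        return false;  // e.g. noinline, or not viable to inline
      // Variable cost: the bool conversion tests Cost < Threshold, and
      // getCostDelta() reports how much headroom remains.
      return bool(IC);
    }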
diff --git a/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
new file mode 100644
index 0000000..ed313da
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/InstructionSimplify.h
@@ -0,0 +1,358 @@
+//===-- InstructionSimplify.h - Fold instrs into simpler forms --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares routines for folding instructions into simpler forms
+// that do not require creating new instructions. This does constant folding
+// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
+// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
+// ("and i32 %x, %x" -> "%x"). If the simplification is also an instruction
+// then it dominates the original instruction.
+//
+// These routines implicitly resolve undef uses. The easiest way to be safe when
+// using these routines to obtain simplified values for existing instructions is
+// to always replace all uses of the instructions with the resulting simplified
+// values. This will prevent other code from seeing the same undef uses and
+// resolving them to different values.
+//
+// These routines are designed to tolerate moderately incomplete IR, such as
+// instructions that are not connected to basic blocks yet. However, they do
+// require that all the IR that they encounter be valid. In particular, they
+// require that all non-constant values be defined in the same function, and the
+// same call context of that function (and not split between caller and callee
+// contexts of a directly recursive call, for example).
+//
+//===----------------------------------------------------------------------===//
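[Editor's note: a sketch of the replace-all-uses discipline the note above asks for;
it assumes the SimplifyInstruction entry point declared later in this header.]

    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Simplify I and replace *all* of its uses, so every consumer sees the
    // same resolution of any undef operands, then drop the dead instruction.
    static void simplifyAndReplace(Instruction *I, const DataLayout &DL) {
      if (Value *V = SimplifyInstruction(I, DL)) {
        I->replaceAllUsesWith(V);
        I->eraseFromParent();
      }
    }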
+
+#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+ template<typename T>
+ class ArrayRef;
+ class AssumptionCache;
+ class DominatorTree;
+ class Instruction;
+ class DataLayout;
+ class FastMathFlags;
+ class TargetLibraryInfo;
+ class Type;
+ class Value;
+
+ /// SimplifyAddInst - Given operands for an Add, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifySubInst - Given operands for a Sub, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// Given operands for an FAdd, see if we can fold the result. If not, this
+ /// returns null.
+ Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// Given operands for an FSub, see if we can fold the result. If not, this
+ /// returns null.
+ Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// Given operands for an FMul, see if we can fold the result. If not, this
+ /// returns null.
+ Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyMulInst - Given operands for a Mul, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifySDivInst - Given operands for an SDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyUDivInst - Given operands for a UDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyFDivInst - Given operands for an FDiv, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifySRemInst - Given operands for an SRem, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyURemInst - Given operands for a URem, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyFRemInst - Given operands for an FRem, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyShlInst - Given operands for a Shl, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyLShrInst - Given operands for an LShr, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyAShrInst - Given operands for an AShr, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyAndInst - Given operands for an And, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyOrInst - Given operands for an Or, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyXorInst - Given operands for a Xor, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ FastMathFlags FMF, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
+ /// the result. If not, this returns null.
+ Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
+ /// can fold the result. If not, this returns null.
+ Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// \brief Given operands for an ExtractValueInst, see if we can fold the
+ /// result. If not, this returns null.
+ Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// \brief Given operands for an ExtractElementInst, see if we can fold the
+ /// result. If not, this returns null.
+ Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyTruncInst - Given operands for a TruncInst, see if we can fold
+ /// the result. If not, this returns null.
+ Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ //=== Helper functions for higher up the class hierarchy.
+
+ /// SimplifyCmpInst - Given operands for a CmpInst, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
+ /// fold the result. If not, this returns null.
+ Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyFPBinOp - Given operands for a BinaryOperator, see if we can
+ /// fold the result. If not, this returns null.
+ /// In contrast to SimplifyBinOp, this tries to use FastMathFlags when
+ /// folding the result. If FastMathFlags are not needed, it simply falls
+ /// back to SimplifyBinOp.
+ Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const FastMathFlags &FMF, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// \brief Given a function and iterators over arguments, see if we can fold
+ /// the result.
+ ///
+ /// If this call could not be simplified, returns null.
+ Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
+ User::op_iterator ArgEnd, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// \brief Given a function and set of arguments, see if we can fold the
+ /// result.
+ ///
+ /// If this call could not be simplified, returns null.
+ Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr);
+
+ /// SimplifyInstruction - See if we can compute a simplified version of this
+ /// instruction. If not, this returns null.
+ Value *SimplifyInstruction(Instruction *I, const DataLayout &DL,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr);
+
+ /// \brief Replace all uses of 'I' with 'SimpleV' and simplify the uses
+ /// recursively.
+ ///
+ /// This first performs a normal RAUW of I with SimpleV. It then recursively
+ /// attempts to simplify those users updated by the operation. The 'I'
+ /// instruction must not be equal to the simplified value 'SimpleV'.
+ ///
+ /// The function returns true if any simplifications were performed.
+ bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr);
+
+ /// \brief Recursively attempt to simplify an instruction.
+ ///
+ /// This routine uses SimplifyInstruction to simplify 'I', and if successful
+ /// replaces uses of 'I' with the simplified value. It then recurses on each
+ /// of the users impacted. It returns true if any simplifications were
+ /// performed.
+ bool recursivelySimplifyInstruction(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr);
+} // end namespace llvm
+
+#endif
+
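All of the Simplify* entry points declared above follow one pattern: hand in the operands plus whatever analyses happen to be available (DataLayout, TLI, DT, AC), and get back either an existing simpler Value or null. A minimal usage sketch against this API; the function name and the worklist strategy are illustrative, not part of the header:

// Illustrative sketch (not part of the header): fold away instructions
// that SimplifyInstruction can already prove redundant.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool foldTrivialInstructions(Function &F) {
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Collect instructions first; replaceAndRecursivelySimplify may rewrite
  // users while we iterate.
  SmallVector<Instruction *, 32> Worklist;
  for (Instruction &I : instructions(F))
    Worklist.push_back(&I);

  bool Changed = false;
  for (Instruction *I : Worklist)
    if (Value *V = SimplifyInstruction(I, DL)) {
      // RAUW I with V, then try to simplify the affected users as well.
      // Instructions made dead here are left for a later DCE pass.
      replaceAndRecursivelySimplify(I, V);
      Changed = true;
    }
  return Changed;
}
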
diff --git a/contrib/llvm/include/llvm/Analysis/Interval.h b/contrib/llvm/include/llvm/Analysis/Interval.h
new file mode 100644
index 0000000..01eba3f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Interval.h
@@ -0,0 +1,150 @@
+//===- llvm/Analysis/Interval.h - Interval Class Declaration ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Interval class, which
+// represents a set of CFG nodes and is a portion of an interval partition.
+//
+// Intervals have some interesting and useful properties, including the
+// following:
+// 1. The header node of an interval dominates all of the elements of the
+// interval
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVAL_H
+#define LLVM_ANALYSIS_INTERVAL_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class raw_ostream;
+
+//===----------------------------------------------------------------------===//
+//
+/// Interval Class - An Interval is a set of nodes defined such that every node
+/// in the interval has all of its predecessors in the interval (except for the
+/// header)
+///
+class Interval {
+ /// HeaderNode - The header BasicBlock, which dominates all BasicBlocks in this
+ /// interval. Also, any loops in this interval must go through the HeaderNode.
+ ///
+ BasicBlock *HeaderNode;
+public:
+ typedef std::vector<BasicBlock*>::iterator succ_iterator;
+ typedef std::vector<BasicBlock*>::iterator pred_iterator;
+ typedef std::vector<BasicBlock*>::iterator node_iterator;
+
+ inline Interval(BasicBlock *Header) : HeaderNode(Header) {
+ Nodes.push_back(Header);
+ }
+
+ inline BasicBlock *getHeaderNode() const { return HeaderNode; }
+
+ /// Nodes - The basic blocks in this interval.
+ ///
+ std::vector<BasicBlock*> Nodes;
+
+ /// Successors - List of BasicBlocks that are reachable directly from nodes in
+ /// this interval, but are not in the interval themselves.
+ /// These nodes necessarily must be header nodes for other intervals.
+ ///
+ std::vector<BasicBlock*> Successors;
+
+ /// Predecessors - List of BasicBlocks that have this Interval's header block
+ /// as one of their successors.
+ ///
+ std::vector<BasicBlock*> Predecessors;
+
+ /// contains - Find out if a basic block is in this interval
+ inline bool contains(BasicBlock *BB) const {
+ for (unsigned i = 0; i < Nodes.size(); ++i)
+ if (Nodes[i] == BB) return true;
+ return false;
+ // I don't want the dependency on <algorithm>
+ //return find(Nodes.begin(), Nodes.end(), BB) != Nodes.end();
+ }
+
+ /// isSuccessor - find out if a basic block is a successor of this Interval
+ inline bool isSuccessor(BasicBlock *BB) const {
+ for (unsigned i = 0; i < Successors.size(); ++i)
+ if (Successors[i] == BB) return true;
+ return false;
+ // I don't want the dependency on <algorithm>
+ //return find(Successors.begin(), Successors.end(), BB) != Successors.end();
+ }
+
+ /// Equality operator. It is only valid to compare two intervals from the
+ /// same partition; because of this, all we have to check is the header node
+ /// for equality.
+ ///
+ inline bool operator==(const Interval &I) const {
+ return HeaderNode == I.HeaderNode;
+ }
+
+ /// isLoop - Find out if there is a back edge in this interval...
+ bool isLoop() const;
+
+ /// print - Show contents in human readable format...
+ void print(raw_ostream &O) const;
+};
+
+/// succ_begin/succ_end - define methods so that Intervals may be used
+/// just like BasicBlocks can with the succ_* functions, and *::succ_iterator.
+///
+inline Interval::succ_iterator succ_begin(Interval *I) {
+ return I->Successors.begin();
+}
+inline Interval::succ_iterator succ_end(Interval *I) {
+ return I->Successors.end();
+}
+
+/// pred_begin/pred_end - define methods so that Intervals may be used
+/// just like BasicBlocks can with the pred_* functions, and *::pred_iterator.
+///
+inline Interval::pred_iterator pred_begin(Interval *I) {
+ return I->Predecessors.begin();
+}
+inline Interval::pred_iterator pred_end(Interval *I) {
+ return I->Predecessors.end();
+}
+
+template <> struct GraphTraits<Interval*> {
+ typedef Interval NodeType;
+ typedef Interval::succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(Interval *I) { return I; }
+
+ /// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return succ_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return succ_end(N);
+ }
+};
+
+template <> struct GraphTraits<Inverse<Interval*> > {
+ typedef Interval NodeType;
+ typedef Interval::pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<Interval *> G) { return G.Graph; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return pred_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return pred_end(N);
+ }
+};
+
+} // End llvm namespace
+
+#endif
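
Because Interval exposes the membership queries above along with succ_*/pred_* free functions and GraphTraits specializations, callers can treat it much like a BasicBlock. A small illustrative helper (hypothetical name, not in the header) using those queries:

// Illustrative sketch: classify how a basic block relates to an interval.
#include "llvm/Analysis/Interval.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void describeBlock(const Interval &I, BasicBlock *BB) {
  if (I.contains(BB))
    errs() << "block is a member of this interval\n";
  else if (I.isSuccessor(BB))
    errs() << "block heads another interval reachable from this one\n";
  else
    errs() << "block is unrelated to this interval\n";
}
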
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalIterator.h b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
new file mode 100644
index 0000000..655ce2d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IntervalIterator.h
@@ -0,0 +1,268 @@
+//===- IntervalIterator.h - Interval Iterator Declaration -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an iterator that enumerates the intervals in a control flow
+// graph of some sort. This iterator is parametric, allowing iteration over the
+// following types of graphs:
+//
+// 1. A Function* object, composed of BasicBlock nodes.
+// 2. An IntervalPartition& object, composed of Interval nodes.
+//
+// This iterator is defined to walk the control flow graph, returning intervals
+// in depth first order. These intervals are completely filled in except for
+// the predecessor fields (the successor information is filled in however).
+//
+// By default, the intervals created by this iterator are deleted after they
+// are no longer of any use to the iterator. This behavior can be changed by
+// passing a false value into the intervals_begin() function. This causes the
+// IOwnMem member to be set, and the intervals to not be deleted.
+//
+// It is only safe to use this if all of the intervals are deleted by the caller
+// and all of the intervals are processed. However, the user of the iterator is
+// not allowed to modify or delete the intervals until after the iterator has
+// been used completely. The IntervalPartition class uses this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVALITERATOR_H
+#define LLVM_ANALYSIS_INTERVALITERATOR_H
+
+#include "llvm/Analysis/IntervalPartition.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include <algorithm>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+// getNodeHeader - Given a source graph node and the source graph, return the
+// BasicBlock that is the header node. This is the opposite of
+// getSourceGraphNode.
+//
+inline BasicBlock *getNodeHeader(BasicBlock *BB) { return BB; }
+inline BasicBlock *getNodeHeader(Interval *I) { return I->getHeaderNode(); }
+
+// getSourceGraphNode - Given a BasicBlock and the source graph, return the
+// source graph node that corresponds to the BasicBlock. This is the opposite
+// of getNodeHeader.
+//
+inline BasicBlock *getSourceGraphNode(Function *, BasicBlock *BB) {
+ return BB;
+}
+inline Interval *getSourceGraphNode(IntervalPartition *IP, BasicBlock *BB) {
+ return IP->getBlockInterval(BB);
+}
+
+// addNodeToInterval - This method exists to assist the generic ProcessNode
+// with the task of adding a node to the new interval, depending on the
+// type of the source node. In the case of a CFG source graph (BasicBlock
+// case), the BasicBlock itself is added to the interval.
+//
+inline void addNodeToInterval(Interval *Int, BasicBlock *BB) {
+ Int->Nodes.push_back(BB);
+}
+
+// addNodeToInterval - This method exists to assist the generic ProcessNode
+// with the task of adding a node to the new interval, depending on the
+// type of the source node. In the case of a CFG source graph (BasicBlock
+// case), the BasicBlock itself is added to the interval. In the case of
+// an IntervalPartition source graph (Interval case), all of the member
+// BasicBlocks are added to the interval.
+//
+inline void addNodeToInterval(Interval *Int, Interval *I) {
+ // Add all of the nodes in I as new nodes in Int.
+ Int->Nodes.insert(Int->Nodes.end(), I->Nodes.begin(), I->Nodes.end());
+}
+
+template<class NodeTy, class OrigContainer_t, class GT = GraphTraits<NodeTy*>,
+ class IGT = GraphTraits<Inverse<NodeTy*> > >
+class IntervalIterator {
+ std::vector<std::pair<Interval*, typename Interval::succ_iterator> > IntStack;
+ std::set<BasicBlock*> Visited;
+ OrigContainer_t *OrigContainer;
+ bool IOwnMem; // If True, delete intervals when done with them
+ // See file header for conditions of use
+public:
+ typedef std::forward_iterator_tag iterator_category;
+
+ IntervalIterator() {} // End iterator, empty stack
+ IntervalIterator(Function *M, bool OwnMemory) : IOwnMem(OwnMemory) {
+ OrigContainer = M;
+ if (!ProcessInterval(&M->front())) {
+ llvm_unreachable("ProcessInterval should never fail for first interval!");
+ }
+ }
+
+ IntervalIterator(IntervalIterator &&x)
+ : IntStack(std::move(x.IntStack)), Visited(std::move(x.Visited)),
+ OrigContainer(x.OrigContainer), IOwnMem(x.IOwnMem) {
+ x.IOwnMem = false;
+ }
+
+ IntervalIterator(IntervalPartition &IP, bool OwnMemory) : IOwnMem(OwnMemory) {
+ OrigContainer = &IP;
+ if (!ProcessInterval(IP.getRootInterval())) {
+ llvm_unreachable("ProcessInterval should never fail for first interval!");
+ }
+ }
+
+ ~IntervalIterator() {
+ if (IOwnMem)
+ while (!IntStack.empty()) {
+ delete operator*();
+ IntStack.pop_back();
+ }
+ }
+
+ bool operator==(const IntervalIterator &x) const {
+ return IntStack == x.IntStack;
+ }
+ bool operator!=(const IntervalIterator &x) const { return !(*this == x); }
+
+ const Interval *operator*() const { return IntStack.back().first; }
+ Interval *operator*() { return IntStack.back().first; }
+ const Interval *operator->() const { return operator*(); }
+ Interval *operator->() { return operator*(); }
+
+ IntervalIterator &operator++() { // Preincrement
+ assert(!IntStack.empty() && "Attempting to use interval iterator at end!");
+ do {
+ // All of the intervals on the stack have been visited. Try visiting
+ // their successors now.
+ Interval::succ_iterator &SuccIt = IntStack.back().second,
+ EndIt = succ_end(IntStack.back().first);
+ while (SuccIt != EndIt) { // Loop over all interval succs
+ bool Done = ProcessInterval(getSourceGraphNode(OrigContainer, *SuccIt));
+ ++SuccIt; // Increment iterator
+ if (Done) return *this; // Found a new interval! Use it!
+ }
+
+ // Free interval memory... if necessary
+ if (IOwnMem) delete IntStack.back().first;
+
+ // We ran out of successors for this interval... pop off the stack
+ IntStack.pop_back();
+ } while (!IntStack.empty());
+
+ return *this;
+ }
+ IntervalIterator operator++(int) { // Postincrement
+ IntervalIterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+private:
+ // ProcessInterval - This method is used during the construction of the
+ // interval graph. It walks through the source graph, recursively creating
+ // an interval per invocation until the entire graph is covered. This uses
+ // the ProcessNode method to add all of the nodes to the interval.
+ //
+ // This method is templated because it may operate on two different source
+ // graphs: a basic block graph, or a preexisting interval graph.
+ //
+ bool ProcessInterval(NodeTy *Node) {
+ BasicBlock *Header = getNodeHeader(Node);
+ if (!Visited.insert(Header).second)
+ return false;
+
+ Interval *Int = new Interval(Header);
+
+ // Check all of our successors to see if they are in the interval...
+ for (typename GT::ChildIteratorType I = GT::child_begin(Node),
+ E = GT::child_end(Node); I != E; ++I)
+ ProcessNode(Int, getSourceGraphNode(OrigContainer, *I));
+
+ IntStack.push_back(std::make_pair(Int, succ_begin(Int)));
+ return true;
+ }
+
+ // ProcessNode - This method is called by ProcessInterval to add nodes to the
+ // interval being constructed, and it is also called recursively as it walks
+ // the source graph. A node is added to the current interval only if all of
+ // its predecessors are already in the interval. This also takes care of
+ // keeping
+ // the successor set of an interval up to date.
+ //
+ // This method is templated because it may operate on two different source
+ // graphs: a basic block graph, or a preexisting interval graph.
+ //
+ void ProcessNode(Interval *Int, NodeTy *Node) {
+ assert(Int && "Null interval == bad!");
+ assert(Node && "Null Node == bad!");
+
+ BasicBlock *NodeHeader = getNodeHeader(Node);
+
+ if (Visited.count(NodeHeader)) { // Node already been visited?
+ if (Int->contains(NodeHeader)) { // Already in this interval...
+ return;
+ } else { // In other interval, add as successor
+ if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
+ Int->Successors.push_back(NodeHeader);
+ }
+ } else { // Otherwise, not in interval yet
+ for (typename IGT::ChildIteratorType I = IGT::child_begin(Node),
+ E = IGT::child_end(Node); I != E; ++I) {
+ if (!Int->contains(*I)) { // If pred not in interval, we can't be
+ if (!Int->isSuccessor(NodeHeader)) // Add only if not already in set
+ Int->Successors.push_back(NodeHeader);
+ return; // See you later
+ }
+ }
+
+ // If we get here, then all of the predecessors of BB are in the interval
+ // already. In this case, we must add BB to the interval!
+ addNodeToInterval(Int, Node);
+ Visited.insert(NodeHeader); // The node has now been visited!
+
+ if (Int->isSuccessor(NodeHeader)) {
+ // If we were in the successor list from before... remove from succ list
+ Int->Successors.erase(std::remove(Int->Successors.begin(),
+ Int->Successors.end(), NodeHeader),
+ Int->Successors.end());
+ }
+
+ // Now that we have discovered that Node is in the interval, perhaps some
+ // of its successors are as well?
+ for (typename GT::ChildIteratorType It = GT::child_begin(Node),
+ End = GT::child_end(Node); It != End; ++It)
+ ProcessNode(Int, getSourceGraphNode(OrigContainer, *It));
+ }
+ }
+};
+
+typedef IntervalIterator<BasicBlock, Function> function_interval_iterator;
+typedef IntervalIterator<Interval, IntervalPartition>
+ interval_part_interval_iterator;
+
+inline function_interval_iterator intervals_begin(Function *F,
+ bool DeleteInts = true) {
+ return function_interval_iterator(F, DeleteInts);
+}
+inline function_interval_iterator intervals_end(Function *) {
+ return function_interval_iterator();
+}
+
+inline interval_part_interval_iterator
+ intervals_begin(IntervalPartition &IP, bool DeleteIntervals = true) {
+ return interval_part_interval_iterator(IP, DeleteIntervals);
+}
+
+inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) {
+ return interval_part_interval_iterator();
+}
+
+} // End llvm namespace
+
+#endif
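
A short sketch of driving the iterator through the intervals_begin/intervals_end convenience functions declared above; printIntervals is an illustrative name. With the default DeleteInts=true, the iterator owns each Interval and frees it once the walk has moved past it:

// Illustrative sketch: print each first-order interval of a function.
#include "llvm/Analysis/IntervalIterator.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printIntervals(Function &F) {
  for (function_interval_iterator I = intervals_begin(&F),
                                  E = intervals_end(&F);
       I != E; ++I)
    I->print(errs());
}
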
diff --git a/contrib/llvm/include/llvm/Analysis/IntervalPartition.h b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
new file mode 100644
index 0000000..274be2b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IntervalPartition.h
@@ -0,0 +1,111 @@
+//===- IntervalPartition.h - Interval partition Calculation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the IntervalPartition class, which
+// calculates and represents the interval partition of a function, or a
+// preexisting interval partition.
+//
+// In this way, the interval partition may be used to reduce a flow graph down
+// to its degenerate single node interval partition (unless it is irreducible).
+//
+// TODO: The IntervalPartition class should take a bool parameter that tells
+// whether it should add the "tails" of an interval to an interval itself or if
+// they should be represented as distinct intervals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_INTERVALPARTITION_H
+#define LLVM_ANALYSIS_INTERVALPARTITION_H
+
+#include "llvm/Analysis/Interval.h"
+#include "llvm/Pass.h"
+#include <map>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+//
+// IntervalPartition - This class builds and holds an "interval partition" for
+// a function. This partition divides the control flow graph into a set of
+// maximal intervals, as defined with the properties above. Intuitively, an
+// interval is a (possibly nonexistent) loop with a "tail" of non-looping
+// nodes following it.
+//
+class IntervalPartition : public FunctionPass {
+ typedef std::map<BasicBlock*, Interval*> IntervalMapTy;
+ IntervalMapTy IntervalMap;
+
+ typedef std::vector<Interval*> IntervalListTy;
+ Interval *RootInterval;
+ std::vector<Interval*> Intervals;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ IntervalPartition() : FunctionPass(ID), RootInterval(nullptr) {
+ initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
+ }
+
+ // run - Calculate the interval partition for this function
+ bool runOnFunction(Function &F) override;
+
+ // IntervalPartition ctor - Build a reduced interval partition from an
+ // existing interval graph. This takes an additional boolean parameter to
+ // distinguish it from a copy constructor. Always pass in false for now.
+ //
+ IntervalPartition(IntervalPartition &I, bool);
+
+ // print - Show contents in human readable format...
+ void print(raw_ostream &O, const Module* = nullptr) const override;
+
+ // getRootInterval() - Return the root interval that contains the starting
+ // block of the function.
+ inline Interval *getRootInterval() { return RootInterval; }
+
+ // isDegeneratePartition() - Returns true if the interval partition contains
+ // a single interval, and thus cannot be simplified anymore.
+ bool isDegeneratePartition() { return Intervals.size() == 1; }
+
+ // TODO: isIrreducible - look for triangle graph.
+
+ // getBlockInterval - Return the interval that a basic block exists in.
+ inline Interval *getBlockInterval(BasicBlock *BB) {
+ IntervalMapTy::iterator I = IntervalMap.find(BB);
+ return I != IntervalMap.end() ? I->second : nullptr;
+ }
+
+ // getAnalysisUsage - Implement the Pass API
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ // Interface to Intervals vector...
+ const std::vector<Interval*> &getIntervals() const { return Intervals; }
+
+ // releaseMemory - Reset state back to before function was analyzed
+ void releaseMemory() override;
+
+private:
+ // addIntervalToPartition - Add an interval to the internal list of intervals,
+ // and then add mappings from all of the basic blocks in the interval to the
+ // interval itself (in the IntervalMap).
+ //
+ void addIntervalToPartition(Interval *I);
+
+ // updatePredecessors - Interval generation only sets the successor fields of
+ // the interval data structures. After interval generation is complete,
+ // run through all of the intervals and propagate successor info as
+ // predecessor info.
+ //
+ void updatePredecessors(Interval *Int);
+};
+
+} // End llvm namespace
+
+#endif
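
The reduction constructor above enables the classic reducibility test: keep building reduced partitions until the partition is degenerate or stops shrinking. A hypothetical sketch, which constructs the pass object directly and leaks the intermediate intervals for brevity (real code would call releaseMemory()):

// Illustrative sketch: test whether a CFG's partition reduces to a
// single interval.
#include "llvm/Analysis/IntervalPartition.h"

using namespace llvm;

static bool isReducible(IntervalPartition &IP) {
  if (IP.isDegeneratePartition())
    return true; // collapsed to a single interval
  // Build the next, reduced partition; the bool argument merely
  // disambiguates this constructor from a copy constructor.
  IntervalPartition Reduced(IP, false);
  if (Reduced.getIntervals().size() == IP.getIntervals().size())
    return false; // no further reduction possible: irreducible
  return isReducible(Reduced);
}
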
diff --git a/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h b/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h
new file mode 100644
index 0000000..a1ded25
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/IteratedDominanceFrontier.h
@@ -0,0 +1,96 @@
+//===- IteratedDominanceFrontier.h - Calculate IDF --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \brief Compute iterated dominance frontiers using a linear time algorithm.
+///
+/// The algorithm used here is based on:
+///
+/// Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
+/// In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
+/// Programming Languages
+/// POPL '95. ACM, New York, NY, 62-73.
+///
+/// It has been modified to not explicitly use the DJ graph data structure and
+/// to directly compute pruned SSA using per-variable liveness information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_IDF_H
+#define LLVM_ANALYSIS_IDF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class BasicBlock;
+template <class T> class DomTreeNodeBase;
+typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
+template <class T> class DominatorTreeBase;
+
+/// \brief Determine the iterated dominance frontier, given a set of defining
+/// blocks, and optionally, a set of live-in blocks.
+///
+/// In turn, the results can be used to place phi nodes.
+///
+/// This algorithm is a linear time computation of Iterated Dominance Frontiers,
+/// pruned using the live-in set.
+/// By default, liveness is not used to prune the IDF computation.
+class IDFCalculator {
+
+public:
+ IDFCalculator(DominatorTreeBase<BasicBlock> &DT) : DT(DT), useLiveIn(false) {}
+
+ /// \brief Give the IDF calculator the set of blocks in which the value is
+ /// defined. This is equivalent to the set of starting blocks it should be
+ /// calculating the IDF for (though later gets pruned based on liveness).
+ ///
+ /// Note: This set *must* live for the entire lifetime of the IDF calculator.
+ void setDefiningBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
+ DefBlocks = &Blocks;
+ }
+
+ /// \brief Give the IDF calculator the set of blocks in which the value is
+ /// live on entry to the block. This is used to prune the IDF calculation to
+ /// not include blocks where any phi insertion would be dead.
+ ///
+ /// Note: This set *must* live for the entire lifetime of the IDF calculator.
+ void setLiveInBlocks(const SmallPtrSetImpl<BasicBlock *> &Blocks) {
+ LiveInBlocks = &Blocks;
+ useLiveIn = true;
+ }
+
+ /// \brief Reset the live-in block set to be empty, and tell the IDF
+ /// calculator to not use liveness anymore.
+ void resetLiveInBlocks() {
+ LiveInBlocks = nullptr;
+ useLiveIn = false;
+ }
+
+ /// \brief Calculate iterated dominance frontiers
+ ///
+ /// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
+ /// the file-level comment. It performs DF->IDF pruning using the live-in
+ /// set, to avoid computing the IDF for blocks where an inserted PHI node
+ /// would be dead.
+ void calculate(SmallVectorImpl<BasicBlock *> &IDFBlocks);
+
+private:
+ DominatorTreeBase<BasicBlock> &DT;
+ bool useLiveIn;
+ DenseMap<DomTreeNode *, unsigned> DomLevels;
+ const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
+ const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
+ SmallVector<BasicBlock *, 32> PHIBlocks;
+};
+} // End llvm namespace
+
+#endif
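
A sketch of the intended calling sequence for IDFCalculator, assuming the caller holds a standard DominatorTree (which derives from DominatorTreeBase<BasicBlock> in this era of the API); the helper name is illustrative:

// Illustrative sketch: compute the blocks that need a phi for a variable
// defined in DefBlocks and live into LiveIn. Both sets must outlive the
// calculator, per the notes above.
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/IR/Dominators.h"

using namespace llvm;

static void findPHIBlocks(DominatorTree &DT,
                          const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                          const SmallPtrSetImpl<BasicBlock *> &LiveIn,
                          SmallVectorImpl<BasicBlock *> &PHIBlocks) {
  IDFCalculator IDF(DT);
  IDF.setDefiningBlocks(DefBlocks);
  IDF.setLiveInBlocks(LiveIn); // prune blocks where a phi would be dead
  IDF.calculate(PHIBlocks);    // IDF of DefBlocks, restricted by LiveIn
}
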
diff --git a/contrib/llvm/include/llvm/Analysis/LazyCallGraph.h b/contrib/llvm/include/llvm/Analysis/LazyCallGraph.h
new file mode 100644
index 0000000..ef3d5e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LazyCallGraph.h
@@ -0,0 +1,569 @@
+//===- LazyCallGraph.h - Analysis of a Module's call graph ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Implements a lazy call graph analysis and related passes for the new pass
+/// manager.
+///
+/// NB: This is *not* a traditional call graph! It is a graph which models both
+/// the current calls and potential calls. As a consequence there are many
+/// edges in this call graph that do not correspond to a 'call' or 'invoke'
+/// instruction.
+///
+/// The primary use case of this graph analysis is to facilitate iterating
+/// across the functions of a module in ways that ensure all callees are
+/// visited prior to a caller (given any SCC constraints), or vice versa. As
+/// such it is particularly well suited to organizing CGSCC optimizations such
+/// as inlining, outlining, argument promotion, etc. That is its primary use
+/// case and motivates the design. It may not be appropriate for other
+/// purposes. The use graph of functions or some other conservative analysis of
+/// call instructions may be interesting for optimizations and subsequent
+/// analyses which don't work in the context of an overly specified
+/// potential-call-edge graph.
+///
+/// To understand the specific rules and nature of this call graph analysis,
+/// see the documentation of the \c LazyCallGraph below.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYCALLGRAPH_H
+#define LLVM_ANALYSIS_LAZYCALLGRAPH_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Support/Allocator.h"
+#include <iterator>
+
+namespace llvm {
+class PreservedAnalyses;
+class raw_ostream;
+
+/// A lazily constructed view of the call graph of a module.
+///
+/// With the edges of this graph, the motivating constraint that we are
+/// attempting to maintain is that function-local optimization, CGSCC-local
+/// optimizations, and optimizations transforming a pair of functions connected
+/// by an edge in the graph, do not invalidate a bottom-up traversal of the SCC
+/// DAG. That is, no optimizations will delete, remove, or add an edge such
+/// that functions already visited in a bottom-up order of the SCC DAG are no
+/// longer valid to have visited, or such that functions not yet visited in
+/// a bottom-up order of the SCC DAG are not required to have already been
+/// visited.
+///
+/// Within this constraint, the desire is to minimize the merge points of the
+/// SCC DAG. The greater the fanout of the SCC DAG and the fewer merge points
+/// in the SCC DAG, the more independence there is in optimizing within it.
+/// There is a strong desire to enable parallelization of optimizations over
+/// the call graph, and both limited fanout and merge points will (artificially
+/// in some cases) limit the scaling of such an effort.
+///
+/// To this end, the graph represents both direct and any potential resolution to
+/// an indirect call edge. Another way to think about it is that it represents
+/// both the direct call edges and any direct call edges that might be formed
+/// through static optimizations. Specifically, it considers taking the address
+/// of a function to be an edge in the call graph because this might be
+/// forwarded to become a direct call by some subsequent function-local
+/// optimization. The result is that the graph closely follows the use-def
+/// edges for functions. Walking "up" the graph can be done by looking at all
+/// of the uses of a function.
+///
+/// The roots of the call graph are the external functions and functions
+/// escaped into global variables. Those functions can be called from outside
+/// of the module or via unknowable means in the IR -- we may not be able to
+/// form even a potential call edge from a function body which may dynamically
+/// load the function and call it.
+///
+/// This analysis still requires updates to remain valid after optimizations
+/// which could potentially change the set of potential callees. The
+/// constraints it operates under only make the traversal order remain valid.
+///
+/// The entire analysis must be re-computed if full interprocedural
+/// optimizations run at any point. For example, globalopt completely
+/// invalidates the information in this analysis.
+///
+/// FIXME: This class is named LazyCallGraph in a lame attempt to distinguish
+/// it from the existing CallGraph. At some point, it is expected that this
+/// will be the only call graph and it will be renamed accordingly.
+class LazyCallGraph {
+public:
+ class Node;
+ class SCC;
+ typedef SmallVector<PointerUnion<Function *, Node *>, 4> NodeVectorT;
+ typedef SmallVectorImpl<PointerUnion<Function *, Node *>> NodeVectorImplT;
+
+ /// A lazy iterator used for both the entry nodes and child nodes.
+ ///
+ /// When this iterator is dereferenced, if not yet available, a function will
+ /// be scanned for "calls" or uses of functions and its child information
+ /// will be constructed. All of these results are accumulated and cached in
+ /// the graph.
+ class iterator
+ : public iterator_adaptor_base<iterator, NodeVectorImplT::iterator,
+ std::forward_iterator_tag, Node> {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+
+ LazyCallGraph *G;
+ NodeVectorImplT::iterator E;
+
+ // Build the iterator for a specific position in a node list.
+ iterator(LazyCallGraph &G, NodeVectorImplT::iterator NI,
+ NodeVectorImplT::iterator E)
+ : iterator_adaptor_base(NI), G(&G), E(E) {
+ while (I != E && I->isNull())
+ ++I;
+ }
+
+ public:
+ iterator() {}
+
+ using iterator_adaptor_base::operator++;
+ iterator &operator++() {
+ do {
+ ++I;
+ } while (I != E && I->isNull());
+ return *this;
+ }
+
+ reference operator*() const {
+ if (I->is<Node *>())
+ return *I->get<Node *>();
+
+ Function *F = I->get<Function *>();
+ Node &ChildN = G->get(*F);
+ *I = &ChildN;
+ return ChildN;
+ }
+ };
+
+ /// A node in the call graph.
+ ///
+ /// This represents a single node. Its primary roles are to cache the list of
+ /// callees, de-duplicate and provide fast testing of whether a function is
+ /// a callee, and facilitate iteration of child nodes in the graph.
+ class Node {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::SCC;
+
+ LazyCallGraph *G;
+ Function &F;
+
+ // We provide for the DFS numbering and Tarjan walk lowlink numbers to be
+ // stored directly within the node.
+ int DFSNumber;
+ int LowLink;
+
+ mutable NodeVectorT Callees;
+ DenseMap<Function *, size_t> CalleeIndexMap;
+
+ /// Basic constructor implements the scanning of F into Callees and
+ /// CalleeIndexMap.
+ Node(LazyCallGraph &G, Function &F);
+
+ /// Internal helper to insert a callee.
+ void insertEdgeInternal(Function &Callee);
+
+ /// Internal helper to insert a callee.
+ void insertEdgeInternal(Node &CalleeN);
+
+ /// Internal helper to remove a callee from this node.
+ void removeEdgeInternal(Function &Callee);
+
+ public:
+ typedef LazyCallGraph::iterator iterator;
+
+ Function &getFunction() const { return F; }
+
+ iterator begin() const {
+ return iterator(*G, Callees.begin(), Callees.end());
+ }
+ iterator end() const { return iterator(*G, Callees.end(), Callees.end()); }
+
+ /// Equality is defined as address equality.
+ bool operator==(const Node &N) const { return this == &N; }
+ bool operator!=(const Node &N) const { return !operator==(N); }
+ };
+
+ /// An SCC of the call graph.
+ ///
+ /// This represents a Strongly Connected Component of the call graph as
+ /// a collection of call graph nodes. While the order of nodes in the SCC is
+ /// stable, it is not in any particular order.
+ class SCC {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+
+ LazyCallGraph *G;
+ SmallPtrSet<SCC *, 1> ParentSCCs;
+ SmallVector<Node *, 1> Nodes;
+
+ SCC(LazyCallGraph &G) : G(&G) {}
+
+ void insert(Node &N);
+
+ void
+ internalDFS(SmallVectorImpl<std::pair<Node *, Node::iterator>> &DFSStack,
+ SmallVectorImpl<Node *> &PendingSCCStack, Node *N,
+ SmallVectorImpl<SCC *> &ResultSCCs);
+
+ public:
+ typedef SmallVectorImpl<Node *>::const_iterator iterator;
+ typedef pointee_iterator<SmallPtrSet<SCC *, 1>::const_iterator>
+ parent_iterator;
+
+ iterator begin() const { return Nodes.begin(); }
+ iterator end() const { return Nodes.end(); }
+
+ parent_iterator parent_begin() const { return ParentSCCs.begin(); }
+ parent_iterator parent_end() const { return ParentSCCs.end(); }
+
+ iterator_range<parent_iterator> parents() const {
+ return make_range(parent_begin(), parent_end());
+ }
+
+ /// Test if this SCC is a parent of \a C.
+ bool isParentOf(const SCC &C) const { return C.isChildOf(*this); }
+
+ /// Test if this SCC is an ancestor of \a C.
+ bool isAncestorOf(const SCC &C) const { return C.isDescendantOf(*this); }
+
+ /// Test if this SCC is a child of \a C.
+ bool isChildOf(const SCC &C) const {
+ return ParentSCCs.count(const_cast<SCC *>(&C));
+ }
+
+ /// Test if this SCC is a descendant of \a C.
+ bool isDescendantOf(const SCC &C) const;
+
+ /// Short name useful for debugging or logging.
+ ///
+ /// We use the name of the first function in the SCC to name the SCC for
+ /// the purposes of debugging and logging.
+ StringRef getName() const { return (*begin())->getFunction().getName(); }
+
+ ///@{
+ /// \name Mutation API
+ ///
+ /// These methods provide the core API for updating the call graph in the
+ /// presence of (potentially still in-flight) DFS-found SCCs.
+ ///
+ /// Note that these methods sometimes have complex runtimes, so be careful
+ /// how you call them.
+
+ /// Insert an edge from one node in this SCC to another in this SCC.
+ ///
+ /// By the definition of an SCC, this does not change the nature or make-up
+ /// of any SCCs.
+ void insertIntraSCCEdge(Node &CallerN, Node &CalleeN);
+
+ /// Insert an edge whose tail is in this SCC and head is in some child SCC.
+ ///
+ /// There must be an existing path from the caller to the callee. This
+ /// operation is inexpensive and does not change the set of SCCs in the
+ /// graph.
+ void insertOutgoingEdge(Node &CallerN, Node &CalleeN);
+
+ /// Insert an edge whose tail is in a descendant SCC and head is in this
+ /// SCC.
+ ///
+ /// There must be an existing path from the callee to the caller in this
+ /// case. NB! This has the potential to be a very expensive function. It
+ /// inherently forms a cycle in the prior SCC DAG and we have to merge SCCs
+ /// to resolve that cycle. But finding all of the SCCs which participate in
+ /// the cycle can in the worst case require traversing every SCC in the
+ /// graph. Every attempt is made to avoid that, but passes must still
+ /// exercise caution calling this routine repeatedly.
+ ///
+ /// FIXME: We could possibly optimize this quite a bit for cases where the
+ /// caller and callee are very nearby in the graph. See comments in the
+ /// implementation for details, but that use case might impact users.
+ SmallVector<SCC *, 1> insertIncomingEdge(Node &CallerN, Node &CalleeN);
+
+ /// Remove an edge whose source is in this SCC and target is *not*.
+ ///
+ /// This removes an inter-SCC edge. All inter-SCC edges originating from
+ /// this SCC have been fully explored by any in-flight DFS SCC formation,
+ /// so this is always safe to call once you have the source SCC.
+ ///
+ /// This operation does not change the set of SCCs or the members of the
+ /// SCCs and so is very inexpensive. It may change the connectivity graph
+ /// of the SCCs though, so be careful calling this while iterating over
+ /// them.
+ void removeInterSCCEdge(Node &CallerN, Node &CalleeN);
+
+ /// Remove an edge which is entirely within this SCC.
+ ///
+ /// Both the \a Caller and the \a Callee must be within this SCC. Removing
+ /// such an edge may break cycles that form this SCC and thus this
+ /// operation may change the SCC graph significantly. In particular, this
+ /// operation will re-form new SCCs based on the remaining connectivity of
+ /// the graph. The following invariants are guaranteed to hold after
+ /// calling this method:
+ ///
+ /// 1) This SCC is still an SCC in the graph.
+ /// 2) This SCC will be the parent of any new SCCs. Thus, this SCC is
+ /// preserved as the root of any new SCC directed graph formed.
+ /// 3) No SCC other than this SCC has its member set changed (this is
+ /// inherent in the definition of removing such an edge).
+ /// 4) All of the parent links of the SCC graph will be updated to reflect
+ /// the new SCC structure.
+ /// 5) All SCCs formed out of this SCC, excluding this SCC, will be
+ /// returned in a vector.
+ /// 6) The order of the SCCs in the vector will be a valid postorder
+ /// traversal of the new SCCs.
+ ///
+ /// These invariants are very important to ensure that we can build
+ /// optimization pipelines on top of the CGSCC pass manager which
+ /// intelligently update the SCC graph without invalidating other parts of
+ /// the SCC graph.
+ ///
+ /// The runtime complexity of this method is, in the worst case, O(V+E)
+ /// where V is the number of nodes in this SCC and E is the number of edges
+ /// leaving the nodes in this SCC. Note that E includes both edges within
+ /// this SCC and edges from this SCC to child SCCs. Some effort has been
+ /// made to minimize the overhead of common cases such as self-edges and
+ /// edge removals which result in a spanning tree with no more cycles.
+ SmallVector<SCC *, 1> removeIntraSCCEdge(Node &CallerN, Node &CalleeN);
+
+ ///@}
+ };
+
+ /// A post-order depth-first SCC iterator over the call graph.
+ ///
+ /// This iterator triggers the Tarjan DFS-based formation of the SCC DAG for
+ /// the call graph, walking it lazily in depth-first post-order. That is, it
+ /// always visits SCCs for a callee prior to visiting the SCC for a caller
+ /// (when they are in different SCCs).
+ class postorder_scc_iterator
+ : public iterator_facade_base<postorder_scc_iterator,
+ std::forward_iterator_tag, SCC> {
+ friend class LazyCallGraph;
+ friend class LazyCallGraph::Node;
+
+ /// Nonce type to select the constructor for the end iterator.
+ struct IsAtEndT {};
+
+ LazyCallGraph *G;
+ SCC *C;
+
+ // Build the begin iterator for a node.
+ postorder_scc_iterator(LazyCallGraph &G) : G(&G) {
+ C = G.getNextSCCInPostOrder();
+ }
+
+ // Build the end iterator for a node. This is selected purely by overload.
+ postorder_scc_iterator(LazyCallGraph &G, IsAtEndT /*Nonce*/)
+ : G(&G), C(nullptr) {}
+
+ public:
+ bool operator==(const postorder_scc_iterator &Arg) const {
+ return G == Arg.G && C == Arg.C;
+ }
+
+ reference operator*() const { return *C; }
+
+ using iterator_facade_base::operator++;
+ postorder_scc_iterator &operator++() {
+ C = G->getNextSCCInPostOrder();
+ return *this;
+ }
+ };
+
+ /// Construct a graph for the given module.
+ ///
+ /// This sets up the graph and computes all of the entry points of the graph.
+ /// No function definitions are scanned until their nodes in the graph are
+ /// requested during traversal.
+ LazyCallGraph(Module &M);
+
+ LazyCallGraph(LazyCallGraph &&G);
+ LazyCallGraph &operator=(LazyCallGraph &&RHS);
+
+ iterator begin() {
+ return iterator(*this, EntryNodes.begin(), EntryNodes.end());
+ }
+ iterator end() { return iterator(*this, EntryNodes.end(), EntryNodes.end()); }
+
+ postorder_scc_iterator postorder_scc_begin() {
+ return postorder_scc_iterator(*this);
+ }
+ postorder_scc_iterator postorder_scc_end() {
+ return postorder_scc_iterator(*this, postorder_scc_iterator::IsAtEndT());
+ }
+
+ iterator_range<postorder_scc_iterator> postorder_sccs() {
+ return make_range(postorder_scc_begin(), postorder_scc_end());
+ }
+
+ /// Lookup a function in the graph which has already been scanned and added.
+ Node *lookup(const Function &F) const { return NodeMap.lookup(&F); }
+
+ /// Lookup a function's SCC in the graph.
+ ///
+ /// \returns null if the function hasn't been assigned an SCC via the SCC
+ /// iterator walk.
+ SCC *lookupSCC(Node &N) const { return SCCMap.lookup(&N); }
+
+ /// Get a graph node for a given function, scanning it to populate the graph
+ /// data as necessary.
+ Node &get(Function &F) {
+ Node *&N = NodeMap[&F];
+ if (N)
+ return *N;
+
+ return insertInto(F, N);
+ }
+
+ ///@{
+ /// \name Pre-SCC Mutation API
+ ///
+ /// These methods are only valid to call prior to forming any SCCs for this
+ /// call graph. They can be used to update the core node-graph during
+ /// a node-based inorder traversal that precedes any SCC-based traversal.
+ ///
+ /// Once you begin manipulating a call graph's SCCs, you must perform all
+ /// mutation of the graph via the SCC methods.
+
+ /// Update the call graph after inserting a new edge.
+ void insertEdge(Node &Caller, Function &Callee);
+
+ /// Update the call graph after inserting a new edge.
+ void insertEdge(Function &Caller, Function &Callee) {
+ return insertEdge(get(Caller), Callee);
+ }
+
+ /// Update the call graph after deleting an edge.
+ void removeEdge(Node &Caller, Function &Callee);
+
+ /// Update the call graph after deleting an edge.
+ void removeEdge(Function &Caller, Function &Callee) {
+ return removeEdge(get(Caller), Callee);
+ }
+
+ ///@}
+
+private:
+ /// Allocator that holds all the call graph nodes.
+ SpecificBumpPtrAllocator<Node> BPA;
+
+ /// Maps function->node for fast lookup.
+ DenseMap<const Function *, Node *> NodeMap;
+
+ /// The entry nodes to the graph.
+ ///
+ /// These nodes are reachable through "external" means. Put another way, they
+ /// escape at the module scope.
+ NodeVectorT EntryNodes;
+
+ /// Map of the entry nodes in the graph to their indices in \c EntryNodes.
+ DenseMap<Function *, size_t> EntryIndexMap;
+
+ /// Allocator that holds all the call graph SCCs.
+ SpecificBumpPtrAllocator<SCC> SCCBPA;
+
+ /// Maps Function -> SCC for fast lookup.
+ DenseMap<Node *, SCC *> SCCMap;
+
+ /// The leaf SCCs of the graph.
+ ///
+ /// These are all of the SCCs which have no children.
+ SmallVector<SCC *, 4> LeafSCCs;
+
+ /// Stack of nodes in the DFS walk.
+ SmallVector<std::pair<Node *, iterator>, 4> DFSStack;
+
+ /// Set of entry nodes not-yet-processed into SCCs.
+ SmallVector<Function *, 4> SCCEntryNodes;
+
+ /// Stack of nodes the DFS has walked but not yet put into a SCC.
+ SmallVector<Node *, 4> PendingSCCStack;
+
+ /// Counter for the next DFS number to assign.
+ int NextDFSNumber;
+
+ /// Helper to insert a new function, with an already looked-up entry in
+ /// the NodeMap.
+ Node &insertInto(Function &F, Node *&MappedN);
+
+ /// Helper to update pointers back to the graph object during moves.
+ void updateGraphPtrs();
+
+ /// Helper to form a new SCC out of the top of a DFSStack-like
+ /// structure.
+ SCC *formSCC(Node *RootN, SmallVectorImpl<Node *> &NodeStack);
+
+ /// Retrieve the next node in the post-order SCC walk of the call graph.
+ SCC *getNextSCCInPostOrder();
+};
+
+// Provide GraphTraits specializations for call graphs.
+template <> struct GraphTraits<LazyCallGraph::Node *> {
+ typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+};
+template <> struct GraphTraits<LazyCallGraph *> {
+ typedef LazyCallGraph::Node NodeType;
+ typedef LazyCallGraph::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static ChildIteratorType child_begin(NodeType *N) { return N->begin(); }
+ static ChildIteratorType child_end(NodeType *N) { return N->end(); }
+};
+
+/// An analysis pass which computes the call graph for a module.
+class LazyCallGraphAnalysis {
+public:
+ /// Inform generic clients of the result type.
+ typedef LazyCallGraph Result;
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "Lazy CallGraph Analysis"; }
+
+ /// Compute the \c LazyCallGraph for the module \c M.
+ ///
+ /// This just builds the set of entry points to the call graph. The rest is
+ /// built lazily as it is walked.
+ LazyCallGraph run(Module &M) { return LazyCallGraph(M); }
+
+private:
+ static char PassID;
+};
+
+/// A pass which prints the call graph to a \c raw_ostream.
+///
+/// This is primarily useful for testing the analysis.
+class LazyCallGraphPrinterPass {
+ raw_ostream &OS;
+
+public:
+ explicit LazyCallGraphPrinterPass(raw_ostream &OS);
+
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM);
+
+ static StringRef name() { return "LazyCallGraphPrinterPass"; }
+};
+
+} // End llvm namespace
+
+#endif
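
A sketch of the motivating bottom-up traversal, under the API declared above: SCCs are visited in post-order, so callees are seen before their callers, and function bodies are only scanned as the walk first reaches their nodes. printSCCsBottomUp is an illustrative name:

// Illustrative sketch: visit every SCC bottom-up and print its members.
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printSCCsBottomUp(Module &M) {
  LazyCallGraph CG(M);
  for (LazyCallGraph::SCC &C : CG.postorder_sccs()) {
    errs() << "SCC: " << C.getName() << "\n";
    for (LazyCallGraph::Node *N : C)
      errs() << "  " << N->getFunction().getName() << "\n";
  }
}
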
diff --git a/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
new file mode 100644
index 0000000..4200206
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LazyValueInfo.h
@@ -0,0 +1,90 @@
+//===- LazyValueInfo.h - Value constraint analysis --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for lazy computation of value constraint
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LAZYVALUEINFO_H
+#define LLVM_ANALYSIS_LAZYVALUEINFO_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+ class AssumptionCache;
+ class Constant;
+ class DataLayout;
+ class DominatorTree;
+ class Instruction;
+ class TargetLibraryInfo;
+ class Value;
+
+/// This pass computes, caches, and vends lazy value constraint information.
+class LazyValueInfo : public FunctionPass {
+ AssumptionCache *AC;
+ class TargetLibraryInfo *TLI;
+ DominatorTree *DT;
+ void *PImpl;
+ LazyValueInfo(const LazyValueInfo&) = delete;
+ void operator=(const LazyValueInfo&) = delete;
+public:
+ static char ID;
+ LazyValueInfo() : FunctionPass(ID), PImpl(nullptr) {
+ initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
+ }
+ ~LazyValueInfo() override { assert(!PImpl && "releaseMemory not called"); }
+
+ /// This is used to return true/false/dunno results.
+ enum Tristate {
+ Unknown = -1, False = 0, True = 1
+ };
+
+ // Public query interface.
+
+ /// Determine whether the specified value comparison with a constant is known
+ /// to be true or false on the specified CFG edge.
+ /// Pred is a CmpInst predicate.
+ Tristate getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
+ BasicBlock *FromBB, BasicBlock *ToBB,
+ Instruction *CxtI = nullptr);
+
+ /// Determine whether the specified value comparison with a constant is known
+ /// to be true or false at the specified instruction
+ /// (from an assume intrinsic). Pred is a CmpInst predicate.
+ Tristate getPredicateAt(unsigned Pred, Value *V, Constant *C,
+ Instruction *CxtI);
+
+ /// Determine whether the specified value is known to be a
+ /// constant at the end of the specified block. Return null if not.
+ Constant *getConstant(Value *V, BasicBlock *BB, Instruction *CxtI = nullptr);
+
+ /// Determine whether the specified value is known to be a
+ /// constant on the specified edge. Return null if not.
+ Constant *getConstantOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
+ Instruction *CxtI = nullptr);
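+
+ // A usage sketch (hypothetical client code; V, C, FromBB and ToBB are
+ // assumed to be valid IR objects and LVI a constructed pass instance):
+ //
+ //   LazyValueInfo::Tristate R =
+ //       LVI.getPredicateOnEdge(CmpInst::ICMP_EQ, V, C, FromBB, ToBB);
+ //   if (R == LazyValueInfo::True)
+ //     ...; // V == C is known to hold on the FromBB->ToBB edge.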
+
+ /// Inform the analysis cache that we have threaded an edge from
+ /// PredBB to OldSucc to be from PredBB to NewSucc instead.
+ void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc, BasicBlock *NewSucc);
+
+ /// Inform the analysis cache that we have erased a block.
+ void eraseBlock(BasicBlock *BB);
+
+ // Implementation boilerplate.
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+ bool runOnFunction(Function &F) override;
+};
+
+} // end namespace llvm
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/Analysis/Lint.h b/contrib/llvm/include/llvm/Analysis/Lint.h
new file mode 100644
index 0000000..7c88b13
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Lint.h
@@ -0,0 +1,49 @@
+//===-- llvm/Analysis/Lint.h - LLVM IR Lint ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines lint interfaces that can be used for some sanity checking
+// of input to the system, and for checking that transformations
+// haven't done something bad. In contrast to the Verifier, the Lint checker
+// checks for undefined behavior or constructions with likely unintended
+// behavior.
+//
+// To see what specifically is checked, look at Lint.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LINT_H
+#define LLVM_ANALYSIS_LINT_H
+
+namespace llvm {
+
+class FunctionPass;
+class Module;
+class Function;
+
+/// @brief Create a lint pass.
+///
+/// Check a module or function.
+FunctionPass *createLintPass();
+
+/// @brief Check a module.
+///
+/// This should only be used for debugging, because it plays games with
+/// PassManagers and stuff.
+void lintModule(
+ const Module &M ///< The module to be checked
+);
+
+/// @brief Check a function.
+void lintFunction(
+ const Function &F ///< The function to be checked
+);
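+
+// A usage sketch (hypothetical; F is assumed to be a valid Function):
+//
+//   lintFunction(F); // Reports any suspicious constructs found in F.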
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/Loads.h b/contrib/llvm/include/llvm/Analysis/Loads.h
new file mode 100644
index 0000000..939663b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Loads.h
@@ -0,0 +1,62 @@
+//===- Loads.h - Local load analysis --------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares simple local analyses for load instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOADS_H
+#define LLVM_ANALYSIS_LOADS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+class DataLayout;
+class MDNode;
+
+/// isSafeToLoadUnconditionally - Return true if we know that executing a load
+/// from this value cannot trap. If it is not obviously safe to load from the
+/// specified pointer, we do a quick local scan of the basic block containing
+/// ScanFrom, to determine if the address is already accessed.
+bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
+ unsigned Align);
+
+/// DefMaxInstsToScan - the default maximum number of instructions
+/// to scan in the block, used by FindAvailableLoadedValue().
+extern cl::opt<unsigned> DefMaxInstsToScan;
+
+/// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
+/// the instruction before ScanFrom) checking to see if we have the value at
+/// the memory address *Ptr locally available within a small number of
+/// instructions. If the value is available, return it.
+///
+/// If not, return the iterator for the last validated instruction that the
+/// value would be live through. If we scanned the entire block and didn't
+/// find something that invalidates *Ptr or provides it, ScanFrom would be
+/// left at begin() and this returns null.
+///
+/// MaxInstsToScan specifies the maximum instructions to scan in the block.
+/// If it is set to 0, it will scan the whole block. You can also optionally
+/// specify an alias analysis implementation, which makes this more precise.
+///
+/// If AATags is non-null and a load or store is found, the AA tags from the
+/// load or store are recorded there. If there are no AA tags or if no access
+/// is found, it is left unmodified.
+Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
+ BasicBlock::iterator &ScanFrom,
+ unsigned MaxInstsToScan = DefMaxInstsToScan,
+ AliasAnalysis *AA = nullptr,
+ AAMDNodes *AATags = nullptr);
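+
+// A usage sketch (hypothetical client code; L is assumed to be a LoadInst*).
+// The scan walks backwards from the instruction before ScanFrom:
+//
+//   BasicBlock::iterator ScanFrom = L->getIterator();
+//   if (Value *V = FindAvailableLoadedValue(L->getPointerOperand(),
+//                                           L->getParent(), ScanFrom))
+//     ...; // V already holds the loaded value; L may be redundant.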
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
new file mode 100644
index 0000000..871d35e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -0,0 +1,721 @@
+//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the loop memory dependence framework that
+// was originally developed for the Loop Vectorizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
+#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
+
+#include "llvm/ADT/EquivalenceClasses.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class Value;
+class DataLayout;
+class ScalarEvolution;
+class Loop;
+class SCEV;
+class SCEVUnionPredicate;
+class LoopAccessInfo;
+
+/// Optimization analysis message produced during vectorization. Messages inform
+/// the user why vectorization did not occur.
+class LoopAccessReport {
+ std::string Message;
+ const Instruction *Instr;
+
+protected:
+ LoopAccessReport(const Twine &Message, const Instruction *I)
+ : Message(Message.str()), Instr(I) {}
+
+public:
+ LoopAccessReport(const Instruction *I = nullptr) : Instr(I) {}
+
+ template <typename A> LoopAccessReport &operator<<(const A &Value) {
+ raw_string_ostream Out(Message);
+ Out << Value;
+ return *this;
+ }
+
+ const Instruction *getInstr() const { return Instr; }
+
+ std::string &str() { return Message; }
+ const std::string &str() const { return Message; }
+ operator Twine() { return Message; }
+
+ /// \brief Emit an analysis note for \p PassName with the debug location from
+ /// the instruction in \p Message if available. Otherwise use the location of
+ /// \p TheLoop.
+ static void emitAnalysis(const LoopAccessReport &Message,
+ const Function *TheFunction,
+ const Loop *TheLoop,
+ const char *PassName);
+};
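+
+// A usage sketch (hypothetical; I is assumed to be the instruction the
+// message refers to). Text is accumulated via operator<<:
+//
+//   LoopAccessReport R(I);
+//   R << "cannot identify array bounds";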
+
+/// \brief Collection of parameters shared between the Loop Vectorizer and the
+/// Loop Access Analysis.
+struct VectorizerParams {
+ /// \brief Maximum SIMD width.
+ static const unsigned MaxVectorWidth;
+
+ /// \brief VF as overridden by the user.
+ static unsigned VectorizationFactor;
+ /// \brief Interleave factor as overridden by the user.
+ static unsigned VectorizationInterleave;
+ /// \brief True if force-vector-interleave was specified by the user.
+ static bool isInterleaveForced();
+
+ /// \brief When performing memory disambiguation checks at runtime do not
+ /// make more than this number of comparisons.
+ static unsigned RuntimeMemoryCheckThreshold;
+};
+
+/// \brief Checks memory dependences among accesses to the same underlying
+/// object to determine whether vectorization is legal or not (and at
+/// which vectorization factor).
+///
+/// Note: This class will compute a conservative dependence for accesses to
+/// different underlying pointers. Clients, such as the loop vectorizer, will
+/// sometimes deal with these potential dependencies by emitting runtime checks.
+///
+/// We use the ScalarEvolution framework to symbolically evaluate pairs of
+/// access functions. Since we currently don't restructure the loop we can rely
+/// on the program order of memory accesses to determine their safety.
+/// At the moment we will only deem accesses as safe for:
+/// * A negative constant distance assuming program order.
+///
+/// Safe: tmp = a[i + 1]; OR a[i + 1] = x;
+/// a[i] = tmp; y = a[i];
+///
+/// The latter case is safe because later checks guarantee that there can't
+/// be a cycle through a phi node (that is, we check that "x" and "y" are not
+/// the same variable: a header phi can only be an induction or a reduction, a
+/// reduction can't have a memory sink, an induction can't have a memory
+/// source). This is important and must not be violated (or we have to
+/// resort to checking for cycles through memory).
+///
+/// * A positive constant distance assuming program order that is bigger
+/// than the biggest memory access.
+///
+/// tmp = a[i] OR b[i] = x
+/// a[i+2] = tmp y = b[i+2];
+///
+/// Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
+///
+/// * Zero distances and all accesses have the same size.
+///
+class MemoryDepChecker {
+public:
+ typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
+ typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
+ /// \brief Set of potential dependent memory accesses.
+ typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
+
+ /// \brief Dependence between memory access instructions.
+ struct Dependence {
+ /// \brief The type of the dependence.
+ enum DepType {
+ // No dependence.
+ NoDep,
+ // We couldn't determine the direction or the distance.
+ Unknown,
+ // Lexically forward.
+ //
+ // FIXME: If we only have loop-independent forward dependences (e.g. a
+ // read and write of A[i]), LAA will locally deem the dependence "safe"
+ // without querying the MemoryDepChecker. Therefore we can miss
+ // enumerating loop-independent forward dependences in
+ // getDependences. Note that as soon as there are different
+ // indices used to access the same array, the MemoryDepChecker *is*
+ // queried and the dependence list is complete.
+ Forward,
+ // Forward, but if vectorized, is likely to prevent store-to-load
+ // forwarding.
+ ForwardButPreventsForwarding,
+ // Lexically backward.
+ Backward,
+ // Backward, but the distance allows a vectorization factor of
+ // MaxSafeDepDistBytes.
+ BackwardVectorizable,
+ // Same, but may prevent store-to-load forwarding.
+ BackwardVectorizableButPreventsForwarding
+ };
+
+ /// \brief String version of the types.
+ static const char *DepName[];
+
+ /// \brief Index of the source of the dependence in the InstMap vector.
+ unsigned Source;
+ /// \brief Index of the destination of the dependence in the InstMap vector.
+ unsigned Destination;
+ /// \brief The type of the dependence.
+ DepType Type;
+
+ Dependence(unsigned Source, unsigned Destination, DepType Type)
+ : Source(Source), Destination(Destination), Type(Type) {}
+
+ /// \brief Return the source instruction of the dependence.
+ Instruction *getSource(const LoopAccessInfo &LAI) const;
+ /// \brief Return the destination instruction of the dependence.
+ Instruction *getDestination(const LoopAccessInfo &LAI) const;
+
+ /// \brief Dependence types that don't prevent vectorization.
+ static bool isSafeForVectorization(DepType Type);
+
+ /// \brief Lexically forward dependence.
+ bool isForward() const;
+ /// \brief Lexically backward dependence.
+ bool isBackward() const;
+
+ /// \brief May be a lexically backward dependence type (includes Unknown).
+ bool isPossiblyBackward() const;
+
+ /// \brief Print the dependence. \p Instr is used to map the instruction
+ /// indices to instructions.
+ void print(raw_ostream &OS, unsigned Depth,
+ const SmallVectorImpl<Instruction *> &Instrs) const;
+ };
+
+ MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
+ : PSE(PSE), InnermostLoop(L), AccessIdx(0),
+ ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
+ RecordDependences(true) {}
+
+ /// \brief Register the location (instructions are given increasing numbers)
+ /// of a write access.
+ void addAccess(StoreInst *SI) {
+ Value *Ptr = SI->getPointerOperand();
+ Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
+ InstMap.push_back(SI);
+ ++AccessIdx;
+ }
+
+ /// \brief Register the location (instructions are given increasing numbers)
+ /// of a read access.
+ void addAccess(LoadInst *LI) {
+ Value *Ptr = LI->getPointerOperand();
+ Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
+ InstMap.push_back(LI);
+ ++AccessIdx;
+ }
+
+ /// \brief Check whether the dependencies between the accesses are safe.
+ ///
+ /// Only checks sets with elements in \p CheckDeps.
+ bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoSet &CheckDeps,
+ const ValueToValueMap &Strides);
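+
+ // A usage sketch (hypothetical client code; accesses are assumed to be
+ // registered in program order, and AccessSets/CheckDeps/Strides to be
+ // populated by the caller):
+ //
+ //   DepChecker.addAccess(SI); // Each store, in program order.
+ //   DepChecker.addAccess(LI); // Each load, in program order.
+ //   if (DepChecker.areDepsSafe(AccessSets, CheckDeps, Strides))
+ //     ...; // No dependence inhibits vectorization.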
+
+ /// \brief No memory dependence was encountered that would inhibit
+ /// vectorization.
+ bool isSafeForVectorization() const { return SafeForVectorization; }
+
+ /// \brief The maximum number of bytes of a vector register we can vectorize
+ /// the accesses safely with.
+ unsigned getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
+
+ /// \brief In some cases, when the dependency check fails, we can still
+ /// vectorize the loop with a dynamic array access check.
+ bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
+
+ /// \brief Returns the memory dependences. If null is returned we exceeded
+ /// the MaxDependences threshold and this information is not
+ /// available.
+ const SmallVectorImpl<Dependence> *getDependences() const {
+ return RecordDependences ? &Dependences : nullptr;
+ }
+
+ void clearDependences() { Dependences.clear(); }
+
+ /// \brief The vector of memory access instructions. The indices are used as
+ /// instruction identifiers in the Dependence class.
+ const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
+ return InstMap;
+ }
+
+ /// \brief Generate a mapping between the memory instructions and their
+ /// indices according to program order.
+ DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
+ DenseMap<Instruction *, unsigned> OrderMap;
+
+ for (unsigned I = 0; I < InstMap.size(); ++I)
+ OrderMap[InstMap[I]] = I;
+
+ return OrderMap;
+ }
+
+ /// \brief Find the set of instructions that read or write via \p Ptr.
+ SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
+ bool isWrite) const;
+
+private:
+ /// A wrapper around ScalarEvolution that adds runtime SCEV checks, applies
+ /// dynamic knowledge to simplify SCEV expressions, and converts them
+ /// to a more usable form. We need this in case assumptions about SCEV
+ /// expressions need to be made in order to avoid unknown dependences. For
+ /// example we might assume a unit stride for a pointer in order to prove
+ /// that a memory access is strided and doesn't wrap.
+ PredicatedScalarEvolution &PSE;
+ const Loop *InnermostLoop;
+
+ /// \brief Maps access locations (ptr, read/write) to program order.
+ DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
+
+ /// \brief Memory access instructions in program order.
+ SmallVector<Instruction *, 16> InstMap;
+
+ /// \brief The program order index to be used for the next instruction.
+ unsigned AccessIdx;
+
+ // We can access this many bytes in parallel safely.
+ unsigned MaxSafeDepDistBytes;
+
+ /// \brief If we see a non-constant dependence distance we can still try to
+ /// vectorize this loop with runtime checks.
+ bool ShouldRetryWithRuntimeCheck;
+
+ /// \brief No memory dependence was encountered that would inhibit
+ /// vectorization.
+ bool SafeForVectorization;
+
+ /// \brief True if Dependences reflects the dependences in the
+ /// loop. If false we exceeded MaxDependences and
+ /// Dependences is invalid.
+ bool RecordDependences;
+
+ /// \brief Memory dependences collected during the analysis. Only valid if
+ /// RecordDependences is true.
+ SmallVector<Dependence, 8> Dependences;
+
+ /// \brief Check whether there is a plausible dependence between the two
+ /// accesses.
+ ///
+ /// Access \p A must happen before \p B in program order. The two indices
+ /// identify the index into the program order map.
+ ///
+ /// This function checks whether there is a plausible dependence (or the
+ /// absence of such can't be proved) between the two accesses. If there is a
+ /// plausible dependence but the dependence distance is bigger than one
+ /// element access, it records this distance in \p MaxSafeDepDistBytes (if
+ /// this distance is smaller than any other distance encountered so far).
+ /// The returned Dependence::DepType describes the kind of dependence, if any.
+ Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
+ const MemAccessInfo &B, unsigned BIdx,
+ const ValueToValueMap &Strides);
+
+ /// \brief Check whether the data dependence could prevent store-load
+ /// forwarding.
+ bool couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize);
+};
+
+/// \brief Holds information about the memory runtime legality checks to verify
+/// that a group of pointers do not overlap.
+class RuntimePointerChecking {
+public:
+ struct PointerInfo {
+ /// Holds the pointer value that we need to check.
+ TrackingVH<Value> PointerValue;
+ /// Holds the pointer value at the beginning of the loop.
+ const SCEV *Start;
+ /// Holds the pointer value at the end of the loop.
+ const SCEV *End;
+ /// Holds whether this pointer is used for writing to memory.
+ bool IsWritePtr;
+ /// Holds the id of the set of pointers that could be dependent because of a
+ /// shared underlying object.
+ unsigned DependencySetId;
+ /// Holds the id of the disjoint alias set to which this pointer belongs.
+ unsigned AliasSetId;
+ /// SCEV for the access.
+ const SCEV *Expr;
+
+ PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
+ bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
+ const SCEV *Expr)
+ : PointerValue(PointerValue), Start(Start), End(End),
+ IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
+ AliasSetId(AliasSetId), Expr(Expr) {}
+ };
+
+ RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}
+
+ /// Reset the state of the pointer runtime information.
+ void reset() {
+ Need = false;
+ Pointers.clear();
+ Checks.clear();
+ }
+
+ /// Insert a pointer and calculate the start and end SCEVs.
+ /// We need \p PSE in order to compute the SCEV expression of the pointer
+ /// according to the assumptions that we've made during the analysis.
+ /// The method might also version the pointer stride according to \p Strides,
+ /// and add new predicates to \p PSE.
+ void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
+ unsigned ASId, const ValueToValueMap &Strides,
+ PredicatedScalarEvolution &PSE);
+
+ /// \brief No run-time memory checking is necessary.
+ bool empty() const { return Pointers.empty(); }
+
+ /// A grouping of pointers. A single memcheck is required between
+ /// two groups.
+ struct CheckingPtrGroup {
+ /// \brief Create a new pointer checking group containing a single
+ /// pointer, with index \p Index in RtCheck.
+ CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
+ : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
+ Low(RtCheck.Pointers[Index].Start) {
+ Members.push_back(Index);
+ }
+
+ /// \brief Tries to add the pointer recorded in RtCheck at index
+ /// \p Index to this pointer checking group. We can only add a pointer
+ /// to a checking group if we will still be able to get
+ /// the upper and lower bounds of the check. Returns true in case
+ /// of success, false otherwise.
+ bool addPointer(unsigned Index);
+
+ /// Constitutes the context of this pointer checking group. For each
+ /// pointer that is a member of this group we will retain the index
+ /// at which it appears in RtCheck.
+ RuntimePointerChecking &RtCheck;
+ /// The SCEV expression which represents the upper bound of all the
+ /// pointers in this group.
+ const SCEV *High;
+ /// The SCEV expression which represents the lower bound of all the
+ /// pointers in this group.
+ const SCEV *Low;
+ /// Indices of all the pointers that constitute this grouping.
+ SmallVector<unsigned, 2> Members;
+ };
+
+ /// \brief A memcheck which is made up of a pair of grouped pointers.
+ ///
+ /// These *have* to be const for now, since checks are generated from
+ /// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
+ /// function. FIXME: once check-generation is moved inside this class (after
+ /// the PtrPartition hack is removed), we could drop const.
+ typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
+ PointerCheck;
+
+ /// \brief Generate the checks and store them. This also performs the grouping
+ /// of pointers to reduce the number of memchecks necessary.
+ void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
+ bool UseDependencies);
+
+ /// \brief Returns the checks that generateChecks created.
+ const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
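+
+ // A usage sketch (hypothetical client code; pointers are assumed to have
+ // been recorded via insert() during access analysis):
+ //
+ //   RtCheck.generateChecks(DepCands, /*UseDependencies=*/true);
+ //   for (const PointerCheck &C : RtCheck.getChecks())
+ //     ...; // Emit a memcheck between the groups C.first and C.second.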
+
+ /// \brief Decide if we need to add a check between two groups of pointers,
+ /// according to needsChecking.
+ bool needsChecking(const CheckingPtrGroup &M,
+ const CheckingPtrGroup &N) const;
+
+ /// \brief Returns the number of run-time checks required according to
+ /// needsChecking.
+ unsigned getNumberOfChecks() const { return Checks.size(); }
+
+ /// \brief Print the list of run-time memory checks necessary.
+ void print(raw_ostream &OS, unsigned Depth = 0) const;
+
+ /// Print \p Checks.
+ void printChecks(raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
+ unsigned Depth = 0) const;
+
+ /// This flag indicates if we need to add the runtime check.
+ bool Need;
+
+ /// Information about the pointers that may require checking.
+ SmallVector<PointerInfo, 2> Pointers;
+
+ /// Holds a partitioning of pointers into "check groups".
+ SmallVector<CheckingPtrGroup, 2> CheckingGroups;
+
+ /// \brief Check if pointers are in the same partition
+ ///
+ /// \p PtrToPartition contains the partition number for pointers (-1 if the
+ /// pointer belongs to multiple partitions).
+ static bool
+ arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
+ unsigned PtrIdx1, unsigned PtrIdx2);
+
+ /// \brief Decide whether we need to issue a run-time check for the pointers
+ /// at indices \p I and \p J to prove their independence.
+ bool needsChecking(unsigned I, unsigned J) const;
+
+ /// \brief Return PointerInfo for pointer at index \p PtrIdx.
+ const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
+ return Pointers[PtrIdx];
+ }
+
+private:
+ /// \brief Groups pointers such that a single memcheck is required
+ /// between two different groups. This will clear the CheckingGroups vector
+ /// and re-compute it. We will only group dependencies if \p UseDependencies
+ /// is true, otherwise we will create a separate group for each pointer.
+ void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
+ bool UseDependencies);
+
+ /// Generate the checks and return them.
+ SmallVector<PointerCheck, 4>
+ generateChecks() const;
+
+ /// Holds a pointer to the ScalarEvolution analysis.
+ ScalarEvolution *SE;
+
+ /// \brief Set of run-time checks required to establish independence of
+ /// otherwise may-aliasing pointers in the loop.
+ SmallVector<PointerCheck, 4> Checks;
+};
+
+/// \brief Drive the analysis of memory accesses in the loop
+///
+/// This class is responsible for analyzing the memory accesses of a loop. It
+/// collects the accesses and then its main helper, the AccessAnalysis class,
+/// finds and categorizes the dependences in buildDependenceSets.
+///
+/// For memory dependences that can be analyzed at compile time, it determines
+/// whether the dependence is part of a cycle inhibiting vectorization. This work
+/// is delegated to the MemoryDepChecker class.
+///
+/// For memory dependences that cannot be determined at compile time, it
+/// generates run-time checks to prove independence. This is done by
+/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
+/// RuntimePointerCheck class.
+///
+/// If pointers can wrap or can't be expressed as affine AddRec expressions by
+/// ScalarEvolution, we will generate run-time checks by emitting a
+/// SCEVUnionPredicate.
+///
+/// Checks for both memory dependences and the SCEV predicates contained in the
+/// PSE must be emitted in order for the results of this analysis to be valid.
+class LoopAccessInfo {
+public:
+ LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
+ const TargetLibraryInfo *TLI, AliasAnalysis *AA,
+ DominatorTree *DT, LoopInfo *LI,
+ const ValueToValueMap &Strides);
+
+ /// Return true if we can analyze the memory accesses in the loop and there are
+ /// no memory dependence cycles.
+ bool canVectorizeMemory() const { return CanVecMem; }
+
+ const RuntimePointerChecking *getRuntimePointerChecking() const {
+ return &PtrRtChecking;
+ }
+
+ /// \brief Number of memchecks required to prove independence of otherwise
+ /// may-alias pointers.
+ unsigned getNumRuntimePointerChecks() const {
+ return PtrRtChecking.getNumberOfChecks();
+ }
+
+ /// Return true if the block BB needs to be predicated in order for the loop
+ /// to be vectorized.
+ static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
+ DominatorTree *DT);
+
+ /// Returns true if the value V is uniform within the loop.
+ bool isUniform(Value *V) const;
+
+ unsigned getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
+ unsigned getNumStores() const { return NumStores; }
+ unsigned getNumLoads() const { return NumLoads;}
+
+ /// \brief Add code that checks at runtime if the accessed arrays overlap.
+ ///
+ /// Returns a pair of instructions where the first element is the first
+ /// instruction generated, possibly as part of a sequence of instructions,
+ /// and the second element is the final comparator value, or NULL if no
+ /// check is needed.
+ std::pair<Instruction *, Instruction *>
+ addRuntimeChecks(Instruction *Loc) const;
+
+ /// \brief Generate the instructions for the checks in \p PointerChecks.
+ ///
+ /// Returns a pair of instructions where the first element is the first
+ /// instruction generated, possibly as part of a sequence of instructions,
+ /// and the second element is the final comparator value, or NULL if no
+ /// check is needed.
+ std::pair<Instruction *, Instruction *>
+ addRuntimeChecks(Instruction *Loc,
+ const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
+ &PointerChecks) const;
+
+ /// \brief The diagnostics report generated for the analysis. E.g. why we
+ /// couldn't analyze the loop.
+ const Optional<LoopAccessReport> &getReport() const { return Report; }
+
+ /// \brief The Memory Dependence Checker which can determine the
+ /// loop-independent and loop-carried dependences between memory accesses.
+ const MemoryDepChecker &getDepChecker() const { return DepChecker; }
+
+ /// \brief Return the list of instructions that use \p Ptr to read or write
+ /// memory.
+ SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
+ bool isWrite) const {
+ return DepChecker.getInstructionsForAccess(Ptr, isWrite);
+ }
+
+ /// \brief Print the information about the memory accesses in the loop.
+ void print(raw_ostream &OS, unsigned Depth = 0) const;
+
+ /// \brief Used to ensure that if the analysis was run with speculating the
+ /// value of symbolic strides, the client queries it with the same assumption.
+ /// Only used in DEBUG build but we don't want NDEBUG-dependent ABI.
+ unsigned NumSymbolicStrides;
+
+ /// \brief Checks for the existence of a store to an invariant address
+ /// inside the loop. Returns true if the loop has any store to an
+ /// invariant address, false otherwise.
+ bool hasStoreToLoopInvariantAddress() const {
+ return StoreToLoopInvariantAddress;
+ }
+
+ /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
+ /// them to a more usable form. All SCEV expressions during the analysis
+ /// should be re-written (and therefore simplified) according to PSE.
+ /// A user of LoopAccessAnalysis will need to emit the runtime checks
+ /// associated with this predicate.
+ PredicatedScalarEvolution PSE;
+
+private:
+ /// \brief Analyze the loop. Substitute symbolic strides using Strides.
+ void analyzeLoop(const ValueToValueMap &Strides);
+
+ /// \brief Check if the structure of the loop allows it to be analyzed by this
+ /// pass.
+ bool canAnalyzeLoop();
+
+ void emitAnalysis(LoopAccessReport &Message);
+
+ /// We need to check that all of the pointers in this list are disjoint
+ /// at runtime.
+ RuntimePointerChecking PtrRtChecking;
+
+ /// \brief The Memory Dependence Checker which can determine the
+ /// loop-independent and loop-carried dependences between memory accesses.
+ MemoryDepChecker DepChecker;
+
+ Loop *TheLoop;
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ LoopInfo *LI;
+
+ unsigned NumLoads;
+ unsigned NumStores;
+
+ unsigned MaxSafeDepDistBytes;
+
+ /// \brief Cache the result of analyzeLoop.
+ bool CanVecMem;
+
+ /// \brief Indicator for storing to uniform addresses.
+ /// If a loop has a write to a loop-invariant address then this is true.
+ bool StoreToLoopInvariantAddress;
+
+ /// \brief The diagnostics report generated for the analysis. E.g. why we
+ /// couldn't analyze the loop.
+ Optional<LoopAccessReport> Report;
+};
+
+Value *stripIntegerCast(Value *V);
+
+/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
+/// replaced with constant one, assuming \p Preds is true.
+///
+/// If necessary this method will version the stride of the pointer according
+/// to \p PtrToStride and therefore add a new predicate to \p Preds.
+///
+/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
+/// Ptr. \p PtrToStride provides the mapping between the pointer value and its
+/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
+const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
+ const ValueToValueMap &PtrToStride,
+ Value *Ptr, Value *OrigPtr = nullptr);
+
+/// \brief Check the stride of the pointer and ensure that it does not wrap in
+/// the address space, assuming \p Preds is true.
+///
+/// If necessary this method will version the stride of the pointer according
+/// to \p PtrToStride and therefore add a new predicate to \p Preds.
+int isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
+ const ValueToValueMap &StridesMap);
+
+/// \brief This analysis provides dependence information for the memory accesses
+/// of a loop.
+///
+/// It runs the analysis for a loop on demand. This can be initiated by
+/// querying the loop access info via LAA::getInfo. getInfo returns a
+/// LoopAccessInfo object. See this class for the specifics of what information
+/// is provided.
+class LoopAccessAnalysis : public FunctionPass {
+public:
+ static char ID;
+
+ LoopAccessAnalysis() : FunctionPass(ID) {
+ initializeLoopAccessAnalysisPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// \brief Query the result of the loop access information for the loop \p L.
+ ///
+ /// If the client speculates (and then issues run-time checks) for the values
+ /// of symbolic strides, \p Strides provides the mapping (see
+/// replaceSymbolicStrideSCEV). If there is no cached result available, run
+ /// the analysis.
+ const LoopAccessInfo &getInfo(Loop *L, const ValueToValueMap &Strides);
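+
+ // A usage sketch (hypothetical client pass; L is assumed to be a loop in
+ // the current function and Strides the collected symbolic strides):
+ //
+ //   auto &LAA = getAnalysis<LoopAccessAnalysis>();
+ //   const LoopAccessInfo &LAI = LAA.getInfo(L, Strides);
+ //   if (LAI.canVectorizeMemory())
+ //     ...; // Emit LAI's runtime checks, then vectorize.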
+
+ void releaseMemory() override {
+ // Invalidate the cache when the pass is freed.
+ LoopAccessInfoMap.clear();
+ }
+
+ /// \brief Print the result of the analysis when invoked with -analyze.
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
+
+private:
+ /// \brief The cache.
+ DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
+
+ // The used analysis passes.
+ ScalarEvolution *SE;
+ const TargetLibraryInfo *TLI;
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ LoopInfo *LI;
+};
+
+inline Instruction *MemoryDepChecker::Dependence::getSource(
+ const LoopAccessInfo &LAI) const {
+ return LAI.getDepChecker().getMemoryInstructions()[Source];
+}
+
+inline Instruction *MemoryDepChecker::Dependence::getDestination(
+ const LoopAccessInfo &LAI) const {
+ return LAI.getDepChecker().getMemoryInstructions()[Destination];
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfo.h b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
new file mode 100644
index 0000000..c219bd8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfo.h
@@ -0,0 +1,869 @@
+//===- llvm/Analysis/LoopInfo.h - Natural Loop Calculator -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoopInfo class that is used to identify natural loops
+// and determine the loop depth of various nodes of the CFG. A natural loop
+// has exactly one entry-point, which is called the header. Note that natural
+// loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function. For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+// * whether there is a preheader for the loop
+// * the number of back edges to the header
+// * whether or not a particular block branches out of the loop
+// * the successor blocks of the loop
+// * the loop depth
+// * etc...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPINFO_H
+#define LLVM_ANALYSIS_LOOPINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Pass.h"
+#include <algorithm>
+
+namespace llvm {
+
+// FIXME: Replace this brittle forward declaration with the include of the new
+// PassManager.h when doing so doesn't break the PassManagerBuilder.
+template <typename IRUnitT> class AnalysisManager;
+class PreservedAnalyses;
+
+class DominatorTree;
+class LoopInfo;
+class Loop;
+class MDNode;
+class PHINode;
+class raw_ostream;
+template<class N> class DominatorTreeBase;
+template<class N, class M> class LoopInfoBase;
+template<class N, class M> class LoopBase;
+
+//===----------------------------------------------------------------------===//
+/// LoopBase class - Instances of this class are used to represent loops that
+/// are detected in the flow graph
+///
+template<class BlockT, class LoopT>
+class LoopBase {
+ LoopT *ParentLoop;
+ // SubLoops - Loops contained entirely within this one.
+ std::vector<LoopT *> SubLoops;
+
+ // Blocks - The list of blocks in this loop. First entry is the header node.
+ std::vector<BlockT*> Blocks;
+
+ SmallPtrSet<const BlockT*, 8> DenseBlockSet;
+
+ /// Indicator that this loop has been "unlooped", so there's no loop here
+ /// anymore.
+ bool IsUnloop = false;
+
+ LoopBase(const LoopBase<BlockT, LoopT> &) = delete;
+ const LoopBase<BlockT, LoopT>&
+ operator=(const LoopBase<BlockT, LoopT> &) = delete;
+public:
+ /// Loop ctor - This creates an empty loop.
+ LoopBase() : ParentLoop(nullptr) {}
+ ~LoopBase() {
+ for (size_t i = 0, e = SubLoops.size(); i != e; ++i)
+ delete SubLoops[i];
+ }
+
+ /// getLoopDepth - Return the nesting level of this loop. An outer-most
+ /// loop has depth 1, for consistency with loop depth values used for basic
+ /// blocks, where depth 0 is used for blocks not inside any loops.
+ unsigned getLoopDepth() const {
+ unsigned D = 1;
+ for (const LoopT *CurLoop = ParentLoop; CurLoop;
+ CurLoop = CurLoop->ParentLoop)
+ ++D;
+ return D;
+ }
+ BlockT *getHeader() const { return Blocks.front(); }
+ LoopT *getParentLoop() const { return ParentLoop; }
+
+ /// setParentLoop is a raw interface for bypassing addChildLoop.
+ void setParentLoop(LoopT *L) { ParentLoop = L; }
+
+ /// contains - Return true if the specified loop is contained within
+ /// this loop.
+ ///
+ bool contains(const LoopT *L) const {
+ if (L == this) return true;
+ if (!L) return false;
+ return contains(L->getParentLoop());
+ }
+
+ /// contains - Return true if the specified basic block is in this loop.
+ ///
+ bool contains(const BlockT *BB) const {
+ return DenseBlockSet.count(BB);
+ }
+
+ /// contains - Return true if the specified instruction is in this loop.
+ ///
+ template<class InstT>
+ bool contains(const InstT *Inst) const {
+ return contains(Inst->getParent());
+ }
+
+ /// iterator/begin/end - Return the loops contained entirely within this loop.
+ ///
+ const std::vector<LoopT *> &getSubLoops() const { return SubLoops; }
+ std::vector<LoopT *> &getSubLoopsVector() { return SubLoops; }
+ typedef typename std::vector<LoopT *>::const_iterator iterator;
+ typedef typename std::vector<LoopT *>::const_reverse_iterator
+ reverse_iterator;
+ iterator begin() const { return SubLoops.begin(); }
+ iterator end() const { return SubLoops.end(); }
+ reverse_iterator rbegin() const { return SubLoops.rbegin(); }
+ reverse_iterator rend() const { return SubLoops.rend(); }
+ bool empty() const { return SubLoops.empty(); }
+
+ /// getBlocks - Get a list of the basic blocks which make up this loop.
+ ///
+ const std::vector<BlockT*> &getBlocks() const { return Blocks; }
+ typedef typename std::vector<BlockT*>::const_iterator block_iterator;
+ block_iterator block_begin() const { return Blocks.begin(); }
+ block_iterator block_end() const { return Blocks.end(); }
+ inline iterator_range<block_iterator> blocks() const {
+ return make_range(block_begin(), block_end());
+ }
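+
+ // A usage sketch (hypothetical; L is assumed to point to a loop). Member
+ // blocks and immediate sub-loops can be iterated directly:
+ //
+ //   for (BlockT *BB : L->blocks())
+ //     ...; // Every block of the loop; the header comes first.
+ //   for (LoopT *SubL : *L)
+ //     ...; // Loops nested immediately within L.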
+
+ /// getNumBlocks - Get the number of blocks in this loop in constant time.
+ unsigned getNumBlocks() const {
+ return Blocks.size();
+ }
+
+ /// Mark this loop as having been unlooped - the last backedge was removed and
+ /// we no longer have a loop.
+ void markUnlooped() { IsUnloop = true; }
+
+ /// Return true if this no longer represents a loop.
+ bool isUnloop() const { return IsUnloop; }
+
+ /// isLoopExiting - True if the terminator in the block can branch to another
+ /// block that is outside of the current loop.
+ ///
+ bool isLoopExiting(const BlockT *BB) const {
+ typedef GraphTraits<const BlockT*> BlockTraits;
+ for (typename BlockTraits::ChildIteratorType SI =
+ BlockTraits::child_begin(BB),
+ SE = BlockTraits::child_end(BB); SI != SE; ++SI) {
+ if (!contains(*SI))
+ return true;
+ }
+ return false;
+ }
+
+ /// getNumBackEdges - Calculate the number of back edges to the loop header
+ ///
+ unsigned getNumBackEdges() const {
+ unsigned NumBackEdges = 0;
+ BlockT *H = getHeader();
+
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType I =
+ InvBlockTraits::child_begin(H),
+ E = InvBlockTraits::child_end(H); I != E; ++I)
+ if (contains(*I))
+ ++NumBackEdges;
+
+ return NumBackEdges;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // APIs for simple analysis of the loop.
+ //
+ // Note that all of these methods can fail on general loops (i.e., there may not
+ // be a preheader, etc). For best success, the loop simplification and
+ // induction variable canonicalization pass should be used to normalize loops
+ // for easy analysis. These methods assume canonical loops.
+
+ /// getExitingBlocks - Return all blocks inside the loop that have successors
+ /// outside of the loop. These are the blocks _inside of the current loop_
+ /// which branch out. The returned list is always unique.
+ ///
+ void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;
+
+ /// getExitingBlock - If getExitingBlocks would return exactly one block,
+ /// return that block. Otherwise return null.
+ BlockT *getExitingBlock() const;
+
+ /// getExitBlocks - Return all of the successor blocks of this loop. These
+ /// are the blocks _outside of the current loop_ which are branched to.
+ ///
+ void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const;
+
+ /// getExitBlock - If getExitBlocks would return exactly one block,
+ /// return that block. Otherwise return null.
+ BlockT *getExitBlock() const;
+
+ /// Edge type.
+ typedef std::pair<const BlockT*, const BlockT*> Edge;
+
+ /// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
+ void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;
+
+ /// getLoopPreheader - If there is a preheader for this loop, return it. A
+ /// loop has a preheader if there is only one edge to the header of the loop
+ /// from outside of the loop. If this is the case, the block branching to the
+ /// header of the loop is the preheader node.
+ ///
+ /// This method returns null if there is no preheader for the loop.
+ ///
+ BlockT *getLoopPreheader() const;
+
+ /// getLoopPredecessor - If the given loop's header has exactly one unique
+ /// predecessor outside the loop, return it. Otherwise return null.
+ /// This is less strict than the loop "preheader" concept, which requires
+ /// the predecessor to have exactly one successor.
+ ///
+ BlockT *getLoopPredecessor() const;
+
+ /// getLoopLatch - If there is a single latch block for this loop, return it.
+ /// A latch block is a block that contains a branch back to the header.
+ BlockT *getLoopLatch() const;
+
+ /// getLoopLatches - Return all loop latch blocks of this loop. A latch block
+ /// is a block that contains a branch back to the header.
+ void getLoopLatches(SmallVectorImpl<BlockT *> &LoopLatches) const {
+ BlockT *H = getHeader();
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType I =
+ InvBlockTraits::child_begin(H),
+ E = InvBlockTraits::child_end(H); I != E; ++I)
+ if (contains(*I))
+ LoopLatches.push_back(*I);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // APIs for updating loop information after changing the CFG
+ //
+
+ /// addBasicBlockToLoop - This method is used by other analyses to update loop
+ /// information. NewBB is set to be a new member of the current loop.
+ /// Because of this, it is added as a member of all parent loops, and is added
+ /// to the specified LoopInfo object as being in the current basic block. It
+ /// is not valid to replace the loop header with this method.
+ ///
+ void addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LI);
+
+ /// replaceChildLoopWith - This is used when splitting loops up. It replaces
+ /// the OldChild entry in our children list with NewChild, and updates the
+ /// parent pointer of OldChild to be null and the NewChild to be this loop.
+ /// This updates the loop depth of the new child.
+ void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);
+
+ /// addChildLoop - Add the specified loop to be a child of this loop. This
+ /// updates the loop depth of the new child.
+ ///
+ void addChildLoop(LoopT *NewChild) {
+ assert(!NewChild->ParentLoop && "NewChild already has a parent!");
+ NewChild->ParentLoop = static_cast<LoopT *>(this);
+ SubLoops.push_back(NewChild);
+ }
+
+ /// removeChildLoop - This removes the specified child from being a subloop of
+ /// this loop. The loop is not deleted, as it will presumably be inserted
+ /// into another loop.
+ LoopT *removeChildLoop(iterator I) {
+ assert(I != SubLoops.end() && "Cannot remove end iterator!");
+ LoopT *Child = *I;
+ assert(Child->ParentLoop == this && "Child is not a child of this loop!");
+ SubLoops.erase(SubLoops.begin()+(I-begin()));
+ Child->ParentLoop = nullptr;
+ return Child;
+ }
+
+ /// addBlockEntry - This adds a basic block directly to the basic block list.
+ /// This should only be used by transformations that create new loops. Other
+ /// transformations should use addBasicBlockToLoop.
+ void addBlockEntry(BlockT *BB) {
+ Blocks.push_back(BB);
+ DenseBlockSet.insert(BB);
+ }
+
+ /// reverseBlock - interface to reverse Blocks[from, end of loop] in this loop
+ void reverseBlock(unsigned from) {
+ std::reverse(Blocks.begin() + from, Blocks.end());
+ }
+
+ /// reserveBlocks - interface to do reserve() for Blocks
+ void reserveBlocks(unsigned size) {
+ Blocks.reserve(size);
+ }
+
+ /// moveToHeader - This method is used to move BB (which must be part of this
+ /// loop) to be the loop header of the loop (the block that dominates all
+ /// others).
+ void moveToHeader(BlockT *BB) {
+ if (Blocks[0] == BB) return;
+ for (unsigned i = 0; ; ++i) {
+ assert(i != Blocks.size() && "Loop does not contain BB!");
+ if (Blocks[i] == BB) {
+ Blocks[i] = Blocks[0];
+ Blocks[0] = BB;
+ return;
+ }
+ }
+ }
+
+ /// removeBlockFromLoop - This removes the specified basic block from the
+ /// current loop, updating the Blocks as appropriate. This does not update
+ /// the mapping in the LoopInfo class.
+ void removeBlockFromLoop(BlockT *BB) {
+ auto I = std::find(Blocks.begin(), Blocks.end(), BB);
+ assert(I != Blocks.end() && "BB is not in this list!");
+ Blocks.erase(I);
+
+ DenseBlockSet.erase(BB);
+ }
+
+ /// verifyLoop - Verify loop structure
+ void verifyLoop() const;
+
+ /// verifyLoopNest - Verify loop structure of this loop and all nested loops.
+ void verifyLoopNest(DenseSet<const LoopT*> *Loops) const;
+
+ void print(raw_ostream &OS, unsigned Depth = 0) const;
+
+protected:
+ friend class LoopInfoBase<BlockT, LoopT>;
+ explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
+ Blocks.push_back(BB);
+ DenseBlockSet.insert(BB);
+ }
+};
+
+template<class BlockT, class LoopT>
+raw_ostream& operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
+ Loop.print(OS);
+ return OS;
+}
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopBase<BasicBlock, Loop>;
+
+class Loop : public LoopBase<BasicBlock, Loop> {
+public:
+ Loop() {}
+
+ /// isLoopInvariant - Return true if the specified value is loop invariant
+ ///
+ bool isLoopInvariant(const Value *V) const;
+
+ /// hasLoopInvariantOperands - Return true if all the operands of the
+ /// specified instruction are loop invariant.
+ bool hasLoopInvariantOperands(const Instruction *I) const;
+
+ /// makeLoopInvariant - If the given value is an instruction inside of the
+ /// loop and it can be hoisted, do so to make it trivially loop-invariant.
+ /// Return true if the value after any hoisting is loop invariant. This
+ /// function can be used as a slightly more aggressive replacement for
+ /// isLoopInvariant.
+ ///
+ /// If InsertPt is specified, it is the point to hoist instructions to.
+ /// If null, the terminator of the loop preheader is used.
+ ///
+ bool makeLoopInvariant(Value *V, bool &Changed,
+ Instruction *InsertPt = nullptr) const;
+
+ /// makeLoopInvariant - If the given instruction is inside of the
+ /// loop and it can be hoisted, do so to make it trivially loop-invariant.
+ /// Return true if the instruction after any hoisting is loop invariant. This
+ /// function can be used as a slightly more aggressive replacement for
+ /// isLoopInvariant.
+ ///
+ /// If InsertPt is specified, it is the point to hoist instructions to.
+ /// If null, the terminator of the loop preheader is used.
+ ///
+ bool makeLoopInvariant(Instruction *I, bool &Changed,
+ Instruction *InsertPt = nullptr) const;
+
+ /// getCanonicalInductionVariable - Check to see if the loop has a canonical
+ /// induction variable: an integer recurrence that starts at 0 and increments
+ /// by one each time through the loop. If so, return the phi node that
+ /// corresponds to it.
+ ///
+ /// The IndVarSimplify pass transforms loops to have a canonical induction
+ /// variable.
+ ///
+ PHINode *getCanonicalInductionVariable() const;
+
+ /// isLCSSAForm - Return true if the Loop is in LCSSA form
+ bool isLCSSAForm(DominatorTree &DT) const;
+
+ /// \brief Return true if this Loop and all inner subloops are in LCSSA form.
+ bool isRecursivelyLCSSAForm(DominatorTree &DT) const;
+
+ /// isLoopSimplifyForm - Return true if the Loop is in the form that
+ /// the LoopSimplify pass transforms loops to, which is sometimes called
+ /// normal form.
+ bool isLoopSimplifyForm() const;
+
+ /// isSafeToClone - Return true if the loop body is safe to clone in practice.
+ bool isSafeToClone() const;
+
+ /// Returns true if the loop is annotated parallel.
+ ///
+ /// A parallel loop can be assumed to not contain any dependencies between
+ /// iterations by the compiler. That is, any loop-carried dependency checking
+ /// can be skipped completely when parallelizing the loop on the target
+ /// machine. Thus, if the parallel loop information originates from the
+ /// programmer, e.g. via the OpenMP parallel for pragma, it is the
+ /// programmer's responsibility to ensure there are no loop-carried
+ /// dependencies. The final execution order of the instructions across
+ /// iterations is not guaranteed, thus, the end result might or might not
+ /// implement actual concurrent execution of instructions across multiple
+ /// iterations.
+ bool isAnnotatedParallel() const;
+
+ /// Return the llvm.loop loop id metadata node for this loop if it is present.
+ ///
+ /// If this loop contains the same llvm.loop metadata on each branch to the
+ /// header then the node is returned. If any latch instruction does not
+/// contain llvm.loop or if multiple latches contain different nodes then
+ /// 0 is returned.
+ MDNode *getLoopID() const;
+ /// Set the llvm.loop loop id metadata for this loop.
+ ///
+ /// The LoopID metadata node will be added to each terminator instruction in
+ /// the loop that branches to the loop header.
+ ///
+ /// The LoopID metadata node should have one or more operands and the first
+/// operand should be the node itself.
+ void setLoopID(MDNode *LoopID) const;
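+
+ // A usage sketch (hypothetical; L is assumed to be a Loop*):
+ //
+ //   if (MDNode *LoopID = L->getLoopID()) {
+ //     // First operand is the node itself; the rest describe properties.
+ //     L->setLoopID(LoopID); // Re-attach, e.g. after rewriting latches.
+ //   }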
+
+ /// hasDedicatedExits - Return true if no exit block for the loop
+ /// has a predecessor that is outside the loop.
+ bool hasDedicatedExits() const;
+
+ /// getUniqueExitBlocks - Return all unique successor blocks of this loop.
+ /// These are the blocks _outside of the current loop_ which are branched to.
+ /// This assumes that loop exits are in canonical form.
+ ///
+ void getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const;
+
+ /// getUniqueExitBlock - If getUniqueExitBlocks would return exactly one
+ /// block, return that block. Otherwise return null.
+ BasicBlock *getUniqueExitBlock() const;
+
+ void dump() const;
+
+ /// \brief Return the debug location of the start of this loop.
+ /// This looks for a BB terminating instruction with a known debug
+ /// location by looking at the preheader and header blocks. If it
+ /// cannot find a terminating instruction with location information,
+ /// it returns an unknown location.
+ DebugLoc getStartLoc() const {
+ BasicBlock *HeadBB;
+
+ // Try the pre-header first.
+ if ((HeadBB = getLoopPreheader()) != nullptr)
+ if (DebugLoc DL = HeadBB->getTerminator()->getDebugLoc())
+ return DL;
+
+ // If we have no pre-header or there are no instructions with debug
+ // info in it, try the header.
+ HeadBB = getHeader();
+ if (HeadBB)
+ return HeadBB->getTerminator()->getDebugLoc();
+
+ return DebugLoc();
+ }
+
+private:
+ friend class LoopInfoBase<BasicBlock, Loop>;
+ explicit Loop(BasicBlock *BB) : LoopBase<BasicBlock, Loop>(BB) {}
+};
+
+//===----------------------------------------------------------------------===//
+/// LoopInfo - This class builds and contains all of the top level loop
+/// structures in the specified function.
+///
+
+template<class BlockT, class LoopT>
+class LoopInfoBase {
+ // BBMap - Mapping of basic blocks to the innermost loop they occur in.
+ DenseMap<const BlockT *, LoopT *> BBMap;
+ std::vector<LoopT *> TopLevelLoops;
+ friend class LoopBase<BlockT, LoopT>;
+ friend class LoopInfo;
+
+ void operator=(const LoopInfoBase &) = delete;
+ LoopInfoBase(const LoopInfoBase &) = delete;
+public:
+ LoopInfoBase() { }
+ ~LoopInfoBase() { releaseMemory(); }
+
+ LoopInfoBase(LoopInfoBase &&Arg)
+ : BBMap(std::move(Arg.BBMap)),
+ TopLevelLoops(std::move(Arg.TopLevelLoops)) {
+ // We have to clear the argument's top-level loops as we've taken ownership.
+ Arg.TopLevelLoops.clear();
+ }
+ LoopInfoBase &operator=(LoopInfoBase &&RHS) {
+ BBMap = std::move(RHS.BBMap);
+
+ for (auto *L : TopLevelLoops)
+ delete L;
+ TopLevelLoops = std::move(RHS.TopLevelLoops);
+ RHS.TopLevelLoops.clear();
+ return *this;
+ }
+
+ void releaseMemory() {
+ BBMap.clear();
+
+ for (auto *L : TopLevelLoops)
+ delete L;
+ TopLevelLoops.clear();
+ }
+
+ /// iterator/begin/end - The interface to the top-level loops in the current
+ /// function.
+ ///
+ typedef typename std::vector<LoopT *>::const_iterator iterator;
+ typedef typename std::vector<LoopT *>::const_reverse_iterator
+ reverse_iterator;
+ iterator begin() const { return TopLevelLoops.begin(); }
+ iterator end() const { return TopLevelLoops.end(); }
+ reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
+ reverse_iterator rend() const { return TopLevelLoops.rend(); }
+ bool empty() const { return TopLevelLoops.empty(); }
+
+ /// getLoopFor - Return the innermost loop that BB lives in. If a basic
+ /// block is in no loop (for example the entry node), null is returned.
+ ///
+ LoopT *getLoopFor(const BlockT *BB) const { return BBMap.lookup(BB); }
+
+ /// operator[] - same as getLoopFor...
+ ///
+ const LoopT *operator[](const BlockT *BB) const {
+ return getLoopFor(BB);
+ }
+
+ /// getLoopDepth - Return the loop nesting level of the specified block. A
+ /// depth of 0 means the block is not inside any loop.
+ ///
+ unsigned getLoopDepth(const BlockT *BB) const {
+ const LoopT *L = getLoopFor(BB);
+ return L ? L->getLoopDepth() : 0;
+ }
+
+ /// isLoopHeader - True if the block is a loop header node.
+ bool isLoopHeader(const BlockT *BB) const {
+ const LoopT *L = getLoopFor(BB);
+ return L && L->getHeader() == BB;
+ }
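+
+ // A usage sketch (hypothetical; LI is assumed to be a populated loop info
+ // object and BB a block of the analyzed function):
+ //
+ //   if (const LoopT *L = LI.getLoopFor(BB))
+ //     ...; // L->getLoopDepth() == LI.getLoopDepth(BB).
+ //   bool IsHeader = LI.isLoopHeader(BB);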
+
+ /// removeLoop - This removes the specified top-level loop from this loop info
+ /// object. The loop is not deleted, as it will presumably be inserted into
+ /// another loop.
+ LoopT *removeLoop(iterator I) {
+ assert(I != end() && "Cannot remove end iterator!");
+ LoopT *L = *I;
+ assert(!L->getParentLoop() && "Not a top-level loop!");
+ TopLevelLoops.erase(TopLevelLoops.begin() + (I-begin()));
+ return L;
+ }
+
+ /// changeLoopFor - Change the top-level loop that contains BB to the
+ /// specified loop. This should be used by transformations that restructure
+ /// the loop hierarchy tree.
+ void changeLoopFor(BlockT *BB, LoopT *L) {
+ if (!L) {
+ BBMap.erase(BB);
+ return;
+ }
+ BBMap[BB] = L;
+ }
+
+ /// changeTopLevelLoop - Replace the specified loop in the top-level loops
+ /// list with the indicated loop.
+ void changeTopLevelLoop(LoopT *OldLoop,
+ LoopT *NewLoop) {
+ auto I = std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop);
+ assert(I != TopLevelLoops.end() && "Old loop not at top level!");
+ *I = NewLoop;
+ assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
+ "Loops already embedded into a subloop!");
+ }
+
+ /// addTopLevelLoop - This adds the specified loop to the collection of
+ /// top-level loops.
+ void addTopLevelLoop(LoopT *New) {
+ assert(!New->getParentLoop() && "Loop already in subloop!");
+ TopLevelLoops.push_back(New);
+ }
+
+ /// removeBlock - This method completely removes BB from all data structures,
+ /// including all of the Loop objects it is nested in and our mapping from
+ /// BasicBlocks to loops.
+ void removeBlock(BlockT *BB) {
+ auto I = BBMap.find(BB);
+ if (I != BBMap.end()) {
+ for (LoopT *L = I->second; L; L = L->getParentLoop())
+ L->removeBlockFromLoop(BB);
+
+ BBMap.erase(I);
+ }
+ }
+
+ // Internals
+
+ static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
+ const LoopT *ParentLoop) {
+ if (!SubLoop) return true;
+ if (SubLoop == ParentLoop) return false;
+ return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
+ }
+
+ /// Create the loop forest using a stable algorithm.
+ void analyze(const DominatorTreeBase<BlockT> &DomTree);
+
+ // Debugging
+ void print(raw_ostream &OS) const;
+
+ void verify() const;
+};
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopInfoBase<BasicBlock, Loop>;
+
+class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
+ typedef LoopInfoBase<BasicBlock, Loop> BaseT;
+
+ friend class LoopBase<BasicBlock, Loop>;
+
+ void operator=(const LoopInfo &) = delete;
+ LoopInfo(const LoopInfo &) = delete;
+public:
+ LoopInfo() {}
+ explicit LoopInfo(const DominatorTreeBase<BasicBlock> &DomTree);
+
+ LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
+ LoopInfo &operator=(LoopInfo &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ return *this;
+ }
+
+ // Most of the public interface is provided via LoopInfoBase.
+
+ /// updateUnloop - Update LoopInfo after removing the last backedge from a
+ /// loop--now the "unloop". This updates the loop forest and parent loops for
+ /// each block so that Unloop is no longer referenced, but does not actually
+ /// delete the Unloop object. Generally, the loop pass manager should manage
+ /// deleting the Unloop.
+ void updateUnloop(Loop *Unloop);
+
+ /// replacementPreservesLCSSAForm - Returns true if replacing From with To
+ /// everywhere is guaranteed to preserve LCSSA form.
+ bool replacementPreservesLCSSAForm(Instruction *From, Value *To) {
+ // Preserving LCSSA form is only problematic if the replacing value is an
+ // instruction.
+ Instruction *I = dyn_cast<Instruction>(To);
+ if (!I) return true;
+ // If both instructions are defined in the same basic block then replacement
+ // cannot break LCSSA form.
+ if (I->getParent() == From->getParent())
+ return true;
+ // If the instruction is not defined in a loop then it can safely replace
+ // anything.
+ Loop *ToLoop = getLoopFor(I->getParent());
+ if (!ToLoop) return true;
+ // If the replacing instruction is defined in the same loop as the original
+ // instruction, or in a loop that contains it as an inner loop, then using
+ // it as a replacement will not break LCSSA form.
+ return ToLoop->contains(getLoopFor(From->getParent()));
+ }
+
+ /// \brief Checks if moving a specific instruction can break LCSSA in any
+ /// loop.
+ ///
+ /// Return true if moving \p Inst to before \p NewLoc will not break LCSSA,
+ /// assuming that the function containing \p Inst and \p NewLoc is currently
+ /// in LCSSA form.
+ bool movementPreservesLCSSAForm(Instruction *Inst, Instruction *NewLoc) {
+ assert(Inst->getFunction() == NewLoc->getFunction() &&
+ "Can't reason about IPO!");
+
+ auto *OldBB = Inst->getParent();
+ auto *NewBB = NewLoc->getParent();
+
+ // Movement within the same loop does not break LCSSA (the equality check is
+ // to avoid doing a hashtable lookup in case of intra-block movement).
+ if (OldBB == NewBB)
+ return true;
+
+ auto *OldLoop = getLoopFor(OldBB);
+ auto *NewLoop = getLoopFor(NewBB);
+
+ if (OldLoop == NewLoop)
+ return true;
+
+ // Check if Outer contains Inner; with the null loop counting as the
+ // "outermost" loop.
+ auto Contains = [](const Loop *Outer, const Loop *Inner) {
+ return !Outer || Outer->contains(Inner);
+ };
+
+ // To check that the movement of Inst to before NewLoc does not break LCSSA,
+ // we need to check two sets of uses for possible LCSSA violations at
+ // NewLoc: the users of Inst, and the operands of Inst.
+
+ // If we know we're hoisting Inst out of an inner loop to an outer loop,
+ // then the uses *of* Inst don't need to be checked.
+
+ if (!Contains(NewLoop, OldLoop)) {
+ for (Use &U : Inst->uses()) {
+ auto *UI = cast<Instruction>(U.getUser());
+ auto *UBB = isa<PHINode>(UI) ? cast<PHINode>(UI)->getIncomingBlock(U)
+ : UI->getParent();
+ if (UBB != NewBB && getLoopFor(UBB) != NewLoop)
+ return false;
+ }
+ }
+
+ // If we know we're sinking Inst from an outer loop into an inner loop, then
+ // the *operands* of Inst don't need to be checked.
+
+ if (!Contains(OldLoop, NewLoop)) {
+ // See below on why we can't handle phi nodes here.
+ if (isa<PHINode>(Inst))
+ return false;
+
+ for (Use &U : Inst->operands()) {
+ auto *DefI = dyn_cast<Instruction>(U.get());
+ if (!DefI)
+ return false;
+
+ // This would need adjustment if we allow Inst to be a phi node -- the
+ // new use block won't simply be NewBB.
+
+ auto *DefBlock = DefI->getParent();
+ if (DefBlock != NewBB && getLoopFor(DefBlock) != NewLoop)
+ return false;
+ }
+ }
+
+ return true;
+ }
+};
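+
+// [Editor's sketch -- not part of the original header] A minimal illustration
+// of the query interface above, assuming the usual IR headers (Function,
+// Dominators, raw_ostream) are available; `reportLoopNesting` is a
+// hypothetical helper, not an LLVM API.
+static inline void reportLoopNesting(Function &F, raw_ostream &OS) {
+ DominatorTree DT(F); // LoopInfo is computed from the dominator tree
+ LoopInfo LI(DT);
+ for (BasicBlock &BB : F)
+ OS << BB.getName() << ": depth " << LI.getLoopDepth(&BB)
+ << (LI.isLoopHeader(&BB) ? " (header)\n" : "\n");
+}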
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const Loop*> {
+ typedef const Loop NodeType;
+ typedef LoopInfo::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const Loop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
+
+template <> struct GraphTraits<Loop*> {
+ typedef Loop NodeType;
+ typedef LoopInfo::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(Loop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
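+
+// [Editor's sketch -- not part of the original header] These specializations
+// let generic graph algorithms walk a loop nest; e.g. counting every loop in
+// a nest with the depth-first iterator from ADT/DepthFirstIterator.h
+// (`countLoopsInNest` is a hypothetical helper):
+static inline unsigned countLoopsInNest(Loop *Root) {
+ unsigned N = 0;
+ for (Loop *L : depth_first(Root)) {
+ (void)L; // each visited node is one loop, Root included
+ ++N;
+ }
+ return N;
+}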
+
+/// \brief Analysis pass that exposes the \c LoopInfo for a function.
+class LoopAnalysis {
+ static char PassID;
+
+public:
+ typedef LoopInfo Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Provide a name for the analysis for debugging and logging.
+ static StringRef name() { return "LoopAnalysis"; }
+
+ LoopInfo run(Function &F, AnalysisManager<Function> *AM);
+};
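+
+// [Editor's note -- not part of the original header] Under the (then still
+// experimental) new pass manager, a client would obtain the result roughly
+// like so, assuming the AnalysisManager::getResult interface:
+//   LoopInfo &LI = AM->getResult<LoopAnalysis>(F);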
+
+/// \brief Printer pass for the \c LoopAnalysis results.
+class LoopPrinterPass {
+ raw_ostream &OS;
+
+public:
+ explicit LoopPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+ static StringRef name() { return "LoopPrinterPass"; }
+};
+
+/// \brief The legacy pass manager's analysis pass to compute loop information.
+class LoopInfoWrapperPass : public FunctionPass {
+ LoopInfo LI;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ LoopInfoWrapperPass() : FunctionPass(ID) {
+ initializeLoopInfoWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ LoopInfo &getLoopInfo() { return LI; }
+ const LoopInfo &getLoopInfo() const { return LI; }
+
+ /// \brief Calculate the natural loop information for a given function.
+ bool runOnFunction(Function &F) override;
+
+ void verifyAnalysis() const override;
+
+ void releaseMemory() override { LI.releaseMemory(); }
+
+ void print(raw_ostream &O, const Module *M = nullptr) const override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
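+
+// [Editor's sketch -- not part of the original header] Typical legacy pass
+// manager usage: require the wrapper, then pull the LoopInfo out of it.
+// `CountTopLoops` is a hypothetical pass; its ID definition would normally
+// live in a .cpp file.
+struct CountTopLoops : public FunctionPass {
+ static char ID;
+ CountTopLoops() : FunctionPass(ID) {}
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<LoopInfoWrapperPass>();
+ AU.setPreservesAll(); // we only read the IR
+ }
+ bool runOnFunction(Function &F) override {
+ LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
+ unsigned N = 0;
+ for (Loop *L : LI) { // iterates over top-level loops only
+ (void)L;
+ ++N;
+ }
+ errs() << F.getName() << ": " << N << " top-level loop(s)\n";
+ return false; // no IR was modified
+ }
+};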
+
+/// \brief Pass for printing a loop's contents as LLVM's text IR assembly.
+class PrintLoopPass {
+ raw_ostream &OS;
+ std::string Banner;
+
+public:
+ PrintLoopPass();
+ PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
+
+ PreservedAnalyses run(Loop &L);
+ static StringRef name() { return "PrintLoopPass"; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
new file mode 100644
index 0000000..824fc7e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopInfoImpl.h
@@ -0,0 +1,540 @@
+//===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the generic implementation of LoopInfo used for both Loops and
+// MachineLoops.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPINFOIMPL_H
+#define LLVM_ANALYSIS_LOOPINFOIMPL_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Dominators.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// APIs for simple analysis of the loop. See header notes.
+
+/// getExitingBlocks - Return all blocks inside the loop that have successors
+/// outside of the loop. These are the blocks _inside of the current loop_
+/// which branch out. The returned list is always unique.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!contains(*I)) {
+ // A successor outside the current loop makes *BI an exiting block.
+ ExitingBlocks.push_back(*BI);
+ break;
+ }
+}
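+
+// [Editor's note] Terminology example: in `while (c) { if (x) break; ... }`
+// the block testing `x` is an *exiting* block (inside the loop, returned
+// here), while its out-of-loop successor is an *exit* block (returned by
+// getExitBlocks below).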
+
+/// getExitingBlock - If getExitingBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
+ SmallVector<BlockT*, 8> ExitingBlocks;
+ getExitingBlocks(ExitingBlocks);
+ if (ExitingBlocks.size() == 1)
+ return ExitingBlocks[0];
+ return nullptr;
+}
+
+/// getExitBlocks - Return all of the successor blocks of this loop. These
+/// are the blocks _outside of the current loop_ which are branched to.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!contains(*I))
+ // Not in current loop? It must be an exit block.
+ ExitBlocks.push_back(*I);
+}
+
+/// getExitBlock - If getExitBlocks would return exactly one block,
+/// return that block. Otherwise return null.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
+ SmallVector<BlockT*, 8> ExitBlocks;
+ getExitBlocks(ExitBlocks);
+ if (ExitBlocks.size() == 1)
+ return ExitBlocks[0];
+ return nullptr;
+}
+
+/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
+ for (typename BlockTraits::ChildIteratorType I =
+ BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
+ I != E; ++I)
+ if (!contains(*I))
+ // Not in current loop? It must be an exit block.
+ ExitEdges.push_back(Edge(*BI, *I));
+}
+
+/// getLoopPreheader - If there is a preheader for this loop, return it. A
+/// loop has a preheader if there is only one edge to the header of the loop
+/// from outside of the loop. If this is the case, the block branching to the
+/// header of the loop is the preheader node.
+///
+/// This method returns null if there is no preheader for the loop.
+///
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
+ // Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = getLoopPredecessor();
+ if (!Out) return nullptr;
+
+ // Make sure there is only one exit out of the preheader.
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
+ ++SI;
+ if (SI != BlockTraits::child_end(Out))
+ return nullptr; // Multiple exits from the block, must not be a preheader.
+
+ // The predecessor has exactly one successor, so it is a preheader.
+ return Out;
+}
+
+/// getLoopPredecessor - If the given loop's header has exactly one unique
+/// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+/// the predecessor to have exactly one successor.
+///
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
+ // Keep track of nodes outside the loop branching to the header...
+ BlockT *Out = nullptr;
+
+ // Loop over the predecessors of the header node...
+ BlockT *Header = getHeader();
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header),
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (!contains(N)) { // If the block is not in the loop...
+ if (Out && Out != N)
+ return nullptr; // Multiple predecessors outside the loop
+ Out = N;
+ }
+ }
+
+ // The header must have at least one predecessor from outside the loop.
+ assert(Out && "Header of loop has no predecessors from outside loop?");
+ return Out;
+}
+
+/// getLoopLatch - If there is a single latch block for this loop, return it.
+/// A latch block is a block that contains a branch back to the header.
+template<class BlockT, class LoopT>
+BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
+ BlockT *Header = getHeader();
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header);
+ typename InvBlockTraits::ChildIteratorType PE =
+ InvBlockTraits::child_end(Header);
+ BlockT *Latch = nullptr;
+ for (; PI != PE; ++PI) {
+ typename InvBlockTraits::NodeType *N = *PI;
+ if (contains(N)) {
+ if (Latch) return nullptr;
+ Latch = N;
+ }
+ }
+
+ return Latch;
+}
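+
+// [Editor's sketch -- not part of the original header] The queries above
+// compose into common shape checks. For example, loops in LoopSimplify form
+// have a preheader and a single latch (among other requirements), so this is
+// a necessary -- not sufficient -- test (`looksSimplified` is hypothetical):
+template<class BlockT, class LoopT>
+static bool looksSimplified(const LoopBase<BlockT, LoopT> &L) {
+ return L.getLoopPreheader() != nullptr && L.getLoopLatch() != nullptr;
+}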
+
+//===----------------------------------------------------------------------===//
+// APIs for updating loop information after changing the CFG
+//
+
+/// addBasicBlockToLoop - This method is used by other analyses to update loop
+/// information. NewBB is set to be a new member of the current loop.
+/// Because of this, it is added as a member of all parent loops, and is added
+/// to the specified LoopInfo object as being in the current basic block. It
+/// is not valid to replace the loop header with this method.
+///
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
+ assert((Blocks.empty() || LIB[getHeader()] == this) &&
+ "Incorrect LI specified for this loop!");
+ assert(NewBB && "Cannot add a null basic block to the loop!");
+ assert(!LIB[NewBB] && "BasicBlock already in the loop!");
+
+ LoopT *L = static_cast<LoopT *>(this);
+
+ // Add the loop mapping to the LoopInfo object...
+ LIB.BBMap[NewBB] = L;
+
+ // Add the basic block to this loop and all parent loops...
+ while (L) {
+ L->addBlockEntry(NewBB);
+ L = L->getParentLoop();
+ }
+}
+
+/// replaceChildLoopWith - This is used when splitting loops up. It replaces
+/// the OldChild entry in our children list with NewChild, and updates the
+/// parent pointer of OldChild to be null and the NewChild to be this loop.
+/// This updates the loop depth of the new child.
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::
+replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
+ assert(OldChild->ParentLoop == this && "This loop is already broken!");
+ assert(!NewChild->ParentLoop && "NewChild already has a parent!");
+ typename std::vector<LoopT *>::iterator I =
+ std::find(SubLoops.begin(), SubLoops.end(), OldChild);
+ assert(I != SubLoops.end() && "OldChild not in loop!");
+ *I = NewChild;
+ OldChild->ParentLoop = nullptr;
+ NewChild->ParentLoop = static_cast<LoopT *>(this);
+}
+
+/// verifyLoop - Verify loop structure.
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoop() const {
+#ifndef NDEBUG
+ assert(!Blocks.empty() && "Loop header is missing");
+
+ // Setup for using a depth-first iterator to visit every block in the loop.
+ SmallVector<BlockT*, 8> ExitBBs;
+ getExitBlocks(ExitBBs);
+ llvm::SmallPtrSet<BlockT*, 8> VisitSet;
+ VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
+ df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
+ BI = df_ext_begin(getHeader(), VisitSet),
+ BE = df_ext_end(getHeader(), VisitSet);
+
+ // Keep track of the number of BBs visited.
+ unsigned NumVisited = 0;
+
+ // Check the individual blocks.
+ for ( ; BI != BE; ++BI) {
+ BlockT *BB = *BI;
+ bool HasInsideLoopSuccs = false;
+ bool HasInsideLoopPreds = false;
+ SmallVector<BlockT *, 2> OutsideLoopPreds;
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ for (typename BlockTraits::ChildIteratorType SI =
+ BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
+ SI != SE; ++SI)
+ if (contains(*SI)) {
+ HasInsideLoopSuccs = true;
+ break;
+ }
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
+ PI != PE; ++PI) {
+ BlockT *N = *PI;
+ if (contains(N))
+ HasInsideLoopPreds = true;
+ else
+ OutsideLoopPreds.push_back(N);
+ }
+
+ if (BB == getHeader()) {
+ assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
+ } else if (!OutsideLoopPreds.empty()) {
+ // A non-header loop block shouldn't be reachable from outside the loop,
+ // though it is permitted if the predecessor is not itself actually
+ // reachable.
+ BlockT *EntryBB = &BB->getParent()->front();
+ for (BlockT *CB : depth_first(EntryBB))
+ for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
+ assert(CB != OutsideLoopPreds[i] &&
+ "Loop has multiple entry points!");
+ }
+ assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
+ assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
+ assert(BB != getHeader()->getParent()->begin() &&
+ "Loop contains function entry block!");
+
+ NumVisited++;
+ }
+
+ assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
+
+ // Check the subloops.
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ // Each block in each subloop should be contained within this loop.
+ for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
+ BI != BE; ++BI) {
+ assert(contains(*BI) &&
+ "Loop does not contain all the blocks of a subloop!");
+ }
+
+ // Check the parent loop pointer.
+ if (ParentLoop) {
+ assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
+ ParentLoop->end() &&
+ "Loop is not a subloop of its parent!");
+ }
+#endif
+}
+
+/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::verifyLoopNest(
+ DenseSet<const LoopT*> *Loops) const {
+ Loops->insert(static_cast<const LoopT *>(this));
+ // Verify this loop.
+ verifyLoop();
+ // Verify the subloops.
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ (*I)->verifyLoopNest(Loops);
+}
+
+template<class BlockT, class LoopT>
+void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth) const {
+ OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
+ << " containing: ";
+
+ for (unsigned i = 0; i < getBlocks().size(); ++i) {
+ if (i) OS << ",";
+ BlockT *BB = getBlocks()[i];
+ BB->printAsOperand(OS, false);
+ if (BB == getHeader()) OS << "<header>";
+ if (BB == getLoopLatch()) OS << "<latch>";
+ if (isLoopExiting(BB)) OS << "<exiting>";
+ }
+ OS << "\n";
+
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ (*I)->print(OS, Depth+2);
+}
+
+//===----------------------------------------------------------------------===//
+/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
+/// result does not depend on use list (block predecessor) order.
+///
+
+/// Discover a subloop with the specified backedges such that: all blocks within
+/// this loop are mapped to this loop or a subloop, and all subloops within this
+/// loop have their parent loop set to this loop or a subloop.
+template<class BlockT, class LoopT>
+static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
+ LoopInfoBase<BlockT, LoopT> *LI,
+ const DominatorTreeBase<BlockT> &DomTree) {
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+
+ unsigned NumBlocks = 0;
+ unsigned NumSubloops = 0;
+
+ // Perform a backward CFG traversal using a worklist.
+ std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
+ while (!ReverseCFGWorklist.empty()) {
+ BlockT *PredBB = ReverseCFGWorklist.back();
+ ReverseCFGWorklist.pop_back();
+
+ LoopT *Subloop = LI->getLoopFor(PredBB);
+ if (!Subloop) {
+ if (!DomTree.isReachableFromEntry(PredBB))
+ continue;
+
+ // This is an undiscovered block. Map it to the current loop.
+ LI->changeLoopFor(PredBB, L);
+ ++NumBlocks;
+ if (PredBB == L->getHeader())
+ continue;
+ // Push all block predecessors on the worklist.
+ ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
+ InvBlockTraits::child_begin(PredBB),
+ InvBlockTraits::child_end(PredBB));
+ }
+ else {
+ // This is a discovered block. Find its outermost discovered loop.
+ while (LoopT *Parent = Subloop->getParentLoop())
+ Subloop = Parent;
+
+ // If it is already discovered to be a subloop of this loop, continue.
+ if (Subloop == L)
+ continue;
+
+ // Discover a subloop of this loop.
+ Subloop->setParentLoop(L);
+ ++NumSubloops;
+ NumBlocks += Subloop->getBlocks().capacity();
+ PredBB = Subloop->getHeader();
+ // Continue traversal along predecessors that are not loop-back edges from
+ // within this subloop tree itself. Note that a predecessor may directly
+ // reach another subloop that is not yet discovered to be a subloop of
+ // this loop, which we must traverse.
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(PredBB),
+ PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) {
+ if (LI->getLoopFor(*PI) != Subloop)
+ ReverseCFGWorklist.push_back(*PI);
+ }
+ }
+ }
+ L->getSubLoopsVector().reserve(NumSubloops);
+ L->reserveBlocks(NumBlocks);
+}
+
+/// Populate all loop data in a stable order during a single forward DFS.
+template<class BlockT, class LoopT>
+class PopulateLoopsDFS {
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+
+ LoopInfoBase<BlockT, LoopT> *LI;
+public:
+ PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li):
+ LI(li) {}
+
+ void traverse(BlockT *EntryBlock);
+
+protected:
+ void insertIntoLoop(BlockT *Block);
+};
+
+/// Top-level driver for the forward DFS within the loop.
+template<class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
+ for (BlockT *BB : post_order(EntryBlock))
+ insertIntoLoop(BB);
+}
+
+/// Add a single Block to its ancestor loops in PostOrder. If the block is a
+/// subloop header, add the subloop to its parent in PostOrder, then reverse the
+/// Block and Subloop vectors of the now complete subloop to achieve RPO.
+template<class BlockT, class LoopT>
+void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
+ LoopT *Subloop = LI->getLoopFor(Block);
+ if (Subloop && Block == Subloop->getHeader()) {
+ // We reach this point once per subloop after processing all the blocks in
+ // the subloop.
+ if (Subloop->getParentLoop())
+ Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
+ else
+ LI->addTopLevelLoop(Subloop);
+
+ // For convenience, Blocks and Subloops are inserted in postorder. Reverse
+ // the lists, except for the loop header, which is always at the beginning.
+ Subloop->reverseBlock(1);
+ std::reverse(Subloop->getSubLoopsVector().begin(),
+ Subloop->getSubLoopsVector().end());
+
+ Subloop = Subloop->getParentLoop();
+ }
+ for (; Subloop; Subloop = Subloop->getParentLoop())
+ Subloop->addBlockEntry(Block);
+}
+
+/// analyze() discovers loops during a postorder DominatorTree traversal
+/// interleaved with backward CFG traversals within each subloop
+/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
+/// this part of the algorithm is linear in the number of CFG edges. Subloop and
+/// Block vectors are then populated during a single forward CFG traversal
+/// (PopulateLoopsDFS).
+///
+/// During the two CFG traversals each block is seen three times:
+/// 1) Discovered and mapped by a reverse CFG traversal.
+/// 2) Visited during a forward DFS CFG traversal.
+/// 3) Reverse-inserted in the loop in postorder following forward DFS.
+///
+/// The Block vectors are inclusive, so step 3 requires loop-depth number of
+/// insertions per block.
+template<class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::
+analyze(const DominatorTreeBase<BlockT> &DomTree) {
+
+ // Postorder traversal of the dominator tree.
+ const DomTreeNodeBase<BlockT> *DomRoot = DomTree.getRootNode();
+ for (auto DomNode : post_order(DomRoot)) {
+
+ BlockT *Header = DomNode->getBlock();
+ SmallVector<BlockT *, 4> Backedges;
+
+ // Check each predecessor of the potential loop header.
+ typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
+ for (typename InvBlockTraits::ChildIteratorType PI =
+ InvBlockTraits::child_begin(Header),
+ PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
+
+ BlockT *Backedge = *PI;
+
+ // If Header dominates the predecessor, this is a new loop. Collect the backedges.
+ if (DomTree.dominates(Header, Backedge)
+ && DomTree.isReachableFromEntry(Backedge)) {
+ Backedges.push_back(Backedge);
+ }
+ }
+ // Perform a backward CFG traversal to discover and map blocks in this loop.
+ if (!Backedges.empty()) {
+ LoopT *L = new LoopT(Header);
+ discoverAndMapSubloop(L, ArrayRef<BlockT*>(Backedges), this, DomTree);
+ }
+ }
+ // Perform a single forward CFG traversal to populate block and subloop
+ // vectors for all loops.
+ PopulateLoopsDFS<BlockT, LoopT> DFS(this);
+ DFS.traverse(DomRoot->getBlock());
+}
+
+// Debugging
+template<class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
+ for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
+ TopLevelLoops[i]->print(OS);
+#if 0
+ for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
+ E = BBMap.end(); I != E; ++I)
+ OS << "BB '" << I->first->getName() << "' level = "
+ << I->second->getLoopDepth() << "\n";
+#endif
+}
+
+template<class BlockT, class LoopT>
+void LoopInfoBase<BlockT, LoopT>::verify() const {
+ DenseSet<const LoopT*> Loops;
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");
+ (*I)->verifyLoopNest(&Loops);
+ }
+
+ // Verify that blocks are mapped to valid loops.
+#ifndef NDEBUG
+ for (auto &Entry : BBMap) {
+ const BlockT *BB = Entry.first;
+ LoopT *L = Entry.second;
+ assert(Loops.count(L) && "orphaned loop");
+ assert(L->contains(BB) && "orphaned block");
+ }
+#endif
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopIterator.h b/contrib/llvm/include/llvm/Analysis/LoopIterator.h
new file mode 100644
index 0000000..e3dd963
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopIterator.h
@@ -0,0 +1,181 @@
+//===--------- LoopIterator.h - Iterate over loop blocks --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines iterators to visit the basic blocks within a loop.
+//
+// These iterators currently visit blocks within subloops as well.
+// Unfortunately we have no efficient way of summarizing loop exits which would
+// allow skipping subloops during traversal.
+//
+// If you want to visit all blocks in a loop and don't need an ordered traversal,
+// use Loop::block_begin() instead.
+//
+// This is intentionally designed to work with ill-formed loops in which the
+// backedge has been deleted. The only prerequisite is that all blocks
+// contained within the loop according to the most recent LoopInfo analysis are
+// reachable from the loop header.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPITERATOR_H
+#define LLVM_ANALYSIS_LOOPITERATOR_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Analysis/LoopInfo.h"
+
+namespace llvm {
+
+class LoopBlocksTraversal;
+
+/// Store the result of a depth first search within basic blocks contained by a
+/// single loop.
+///
+/// TODO: This could be generalized for any CFG region, or the entire CFG.
+class LoopBlocksDFS {
+public:
+ /// Postorder list iterators.
+ typedef std::vector<BasicBlock*>::const_iterator POIterator;
+ typedef std::vector<BasicBlock*>::const_reverse_iterator RPOIterator;
+
+ friend class LoopBlocksTraversal;
+
+private:
+ Loop *L;
+
+ /// Map each block to its postorder number. A block is only mapped after it is
+ /// preorder visited by DFS. Its postorder number is initially zero and set
+ /// to nonzero after it is finished by postorder traversal.
+ DenseMap<BasicBlock*, unsigned> PostNumbers;
+ std::vector<BasicBlock*> PostBlocks;
+
+public:
+ LoopBlocksDFS(Loop *Container) :
+ L(Container), PostNumbers(NextPowerOf2(Container->getNumBlocks())) {
+ PostBlocks.reserve(Container->getNumBlocks());
+ }
+
+ Loop *getLoop() const { return L; }
+
+ /// Traverse the loop blocks and store the DFS result.
+ void perform(LoopInfo *LI);
+
+ /// Return true if postorder numbers are assigned to all loop blocks.
+ bool isComplete() const { return PostBlocks.size() == L->getNumBlocks(); }
+
+ /// Iterate over the cached postorder blocks.
+ POIterator beginPostorder() const {
+ assert(isComplete() && "bad loop DFS");
+ return PostBlocks.begin();
+ }
+ POIterator endPostorder() const { return PostBlocks.end(); }
+
+ /// Reverse iterate over the cached postorder blocks.
+ RPOIterator beginRPO() const {
+ assert(isComplete() && "bad loop DFS");
+ return PostBlocks.rbegin();
+ }
+ RPOIterator endRPO() const { return PostBlocks.rend(); }
+
+ /// Return true if this block has been preorder visited.
+ bool hasPreorder(BasicBlock *BB) const { return PostNumbers.count(BB); }
+
+ /// Return true if this block has a postorder number.
+ bool hasPostorder(BasicBlock *BB) const {
+ DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
+ return I != PostNumbers.end() && I->second;
+ }
+
+ /// Get a block's postorder number.
+ unsigned getPostorder(BasicBlock *BB) const {
+ DenseMap<BasicBlock*, unsigned>::const_iterator I = PostNumbers.find(BB);
+ assert(I != PostNumbers.end() && "block not visited by DFS");
+ assert(I->second && "block not finished by DFS");
+ return I->second;
+ }
+
+ /// Get a block's reverse postorder number.
+ unsigned getRPO(BasicBlock *BB) const {
+ return 1 + PostBlocks.size() - getPostorder(BB);
+ }
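+
+ // [Editor's note] Worked example: with 5 loop blocks, the block finished
+ // last by the DFS has postorder number 5, so its RPO number is
+ // 1 + 5 - 5 = 1; the first block finished gets RPO 1 + 5 - 1 = 5.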
+
+ void clear() {
+ PostNumbers.clear();
+ PostBlocks.clear();
+ }
+};
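+
+// [Editor's sketch -- not part of the original header] Typical use of the
+// cache above: run the DFS once, then walk the loop body in reverse postorder,
+// a natural order for forward dataflow (`visitLoopRPO` is hypothetical):
+static inline void visitLoopRPO(Loop *L, LoopInfo *LI) {
+ LoopBlocksDFS DFS(L);
+ DFS.perform(LI); // traverse once and cache the ordering
+ for (LoopBlocksDFS::RPOIterator I = DFS.beginRPO(), E = DFS.endRPO();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+ (void)BB; // process BB here; the loop header is visited first
+ }
+}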
+
+/// Specialize po_iterator_storage to record postorder numbers.
+template<> class po_iterator_storage<LoopBlocksTraversal, true> {
+ LoopBlocksTraversal &LBT;
+public:
+ po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
+ // These functions are defined below.
+ bool insertEdge(BasicBlock *From, BasicBlock *To);
+ void finishPostorder(BasicBlock *BB);
+};
+
+/// Traverse the blocks in a loop using a depth-first search.
+class LoopBlocksTraversal {
+public:
+ /// Graph traversal iterator.
+ typedef po_iterator<BasicBlock*, LoopBlocksTraversal, true> POTIterator;
+
+private:
+ LoopBlocksDFS &DFS;
+ LoopInfo *LI;
+
+public:
+ LoopBlocksTraversal(LoopBlocksDFS &Storage, LoopInfo *LInfo) :
+ DFS(Storage), LI(LInfo) {}
+
+ /// Postorder traversal over the graph. This only needs to be done once.
+ /// po_iterator "automatically" calls back to visitPreorder and
+ /// finishPostorder to record the DFS result.
+ POTIterator begin() {
+ assert(DFS.PostBlocks.empty() && "Need clear DFS result before traversing");
+ assert(DFS.L->getNumBlocks() && "po_iterator cannot handle an empty graph");
+ return po_ext_begin(DFS.L->getHeader(), *this);
+ }
+ POTIterator end() {
+ // po_ext_end interface requires a basic block, but ignores its value.
+ return po_ext_end(DFS.L->getHeader(), *this);
+ }
+
+ /// Called by po_iterator upon reaching a block via a CFG edge. If this block
+ /// is contained in the loop and has not been visited, then mark it preorder
+ /// visited and return true.
+ ///
+ /// TODO: If anyone is interested, we could record preorder numbers here.
+ bool visitPreorder(BasicBlock *BB) {
+ if (!DFS.L->contains(LI->getLoopFor(BB)))
+ return false;
+
+ return DFS.PostNumbers.insert(std::make_pair(BB, 0)).second;
+ }
+
+ /// Called by po_iterator each time it advances, indicating a block's
+ /// postorder.
+ void finishPostorder(BasicBlock *BB) {
+ assert(DFS.PostNumbers.count(BB) && "Loop DFS skipped preorder");
+ DFS.PostBlocks.push_back(BB);
+ DFS.PostNumbers[BB] = DFS.PostBlocks.size();
+ }
+};
+
+inline bool po_iterator_storage<LoopBlocksTraversal, true>::
+insertEdge(BasicBlock *From, BasicBlock *To) {
+ return LBT.visitPreorder(To);
+}
+
+inline void po_iterator_storage<LoopBlocksTraversal, true>::
+finishPostorder(BasicBlock *BB) {
+ LBT.finishPostorder(BB);
+}
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/LoopPass.h b/contrib/llvm/include/llvm/Analysis/LoopPass.h
new file mode 100644
index 0000000..2cf734e5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/LoopPass.h
@@ -0,0 +1,161 @@
+//===- LoopPass.h - LoopPass class ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LoopPass class. All loop optimization
+// and transformation passes are derived from LoopPass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_LOOPPASS_H
+#define LLVM_ANALYSIS_LOOPPASS_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/Pass.h"
+#include <deque>
+
+namespace llvm {
+
+class LPPassManager;
+class Function;
+class PMStack;
+
+class LoopPass : public Pass {
+public:
+ explicit LoopPass(char &pid) : Pass(PT_Loop, pid) {}
+
+ /// getPrinterPass - Get a pass to print the function corresponding
+ /// to a Loop.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+ // runOnLoop - This method should be implemented by the subclass to perform
+ // whatever action is necessary for the specified Loop.
+ virtual bool runOnLoop(Loop *L, LPPassManager &LPM) = 0;
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ // Initialization and finalization hooks.
+ virtual bool doInitialization(Loop *L, LPPassManager &LPM) {
+ return false;
+ }
+
+ // Finalization hook does not supply Loop because at this time
+ // loop nest is completely different.
+ virtual bool doFinalization() { return false; }
+
+ // Check if this pass is suitable for the current LPPassManager, if
+ // available. This pass P is not suitable for an LPPassManager if P
+ // is not preserving higher-level analysis info used by other
+ // LPPassManager passes. In such a case, pop the LPPassManager from the
+ // stack. This will force assignPassManager() to create a new
+ // LPPassManager as expected.
+ void preparePassManager(PMStack &PMS) override;
+
+ /// Assign pass manager to manage this pass
+ void assignPassManager(PMStack &PMS, PassManagerType PMT) override;
+
+ /// Return what kind of Pass Manager can manage this pass.
+ PassManagerType getPotentialPassManagerType() const override {
+ return PMT_LoopPassManager;
+ }
+
+ //===--------------------------------------------------------------------===//
+ /// SimpleAnalysis - Provides simple interface to update analysis info
+ /// maintained by various passes. Note, if required this interface can
+ /// be extracted into a separate abstract class but it would require
+ /// additional use of multiple inheritance in Pass class hierarchy, something
+ /// we are trying to avoid.
+
+ /// Each loop pass can override these simple analysis hooks to update
+ /// desired analysis information.
+ /// cloneBasicBlockAnalysis - Clone analysis info associated with basic block.
+ virtual void cloneBasicBlockAnalysis(BasicBlock *F, BasicBlock *T, Loop *L) {}
+
+ /// deleteAnalysisValue - Delete analysis info associated with value V.
+ virtual void deleteAnalysisValue(Value *V, Loop *L) {}
+
+ /// Delete analysis info associated with Loop L.
+ /// Called to notify a Pass that a loop has been deleted and any
+ /// associated analysis values can be deleted.
+ virtual void deleteAnalysisLoop(Loop *L) {}
+
+protected:
+ /// skipOptnoneFunction - Containing function has Attribute::OptimizeNone
+ /// and most transformation passes should skip it.
+ bool skipOptnoneFunction(const Loop *L) const;
+};
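+
+// [Editor's sketch -- not part of the original header] The smallest possible
+// LoopPass subclass, showing the one required hook. `NoopLoopPass` is
+// hypothetical; its ID definition would normally live in a .cpp file.
+struct NoopLoopPass : public LoopPass {
+ static char ID;
+ NoopLoopPass() : LoopPass(ID) {}
+ bool runOnLoop(Loop *L, LPPassManager &LPM) override {
+ if (skipOptnoneFunction(L))
+ return false; // honor optnone, as most transforms should
+ (void)LPM;
+ return false; // report that no IR was changed
+ }
+};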
+
+class LPPassManager : public FunctionPass, public PMDataManager {
+public:
+ static char ID;
+ explicit LPPassManager();
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool runOnFunction(Function &F) override;
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ /// LPPassManager needs LoopInfo.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+ const char *getPassName() const override {
+ return "Loop Pass Manager";
+ }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ /// Print passes managed by this manager
+ void dumpPassStructure(unsigned Offset) override;
+
+ LoopPass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ LoopPass *LP = static_cast<LoopPass *>(PassVector[N]);
+ return LP;
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_LoopPassManager;
+ }
+
+public:
+ // Add a new loop into the loop queue as a child of the given parent, or at
+ // the top level if \c ParentLoop is null.
+ Loop &addLoop(Loop *ParentLoop);
+
+ //===--------------------------------------------------------------------===//
+ /// SimpleAnalysis - Provides simple interface to update analysis info
+ /// maintained by various passes. Note, if required this interface can
+ /// be extracted into a separate abstract class but it would require
+ /// additional use of multiple inheritance in Pass class hierarchy, something
+ /// we are trying to avoid.
+
+ /// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for
+ /// all passes that implement simple analysis interface.
+ void cloneBasicBlockSimpleAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);
+
+ /// deleteSimpleAnalysisValue - Invoke deleteAnalysisValue hook for all passes
+ /// that implement simple analysis interface.
+ void deleteSimpleAnalysisValue(Value *V, Loop *L);
+
+ /// Invoke deleteAnalysisLoop hook for all passes that implement simple
+ /// analysis interface.
+ void deleteSimpleAnalysisLoop(Loop *L);
+
+private:
+ std::deque<Loop *> LQ;
+ LoopInfo *LI;
+ Loop *CurrentLoop;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
new file mode 100644
index 0000000..87fb3ef
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/MemoryBuiltins.h
@@ -0,0 +1,262 @@
+//===- llvm/Analysis/MemoryBuiltins.h- Calls to memory builtins -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions identifies calls to builtin functions that allocate
+// or free memory.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
+#define LLVM_ANALYSIS_MEMORYBUILTINS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class CallInst;
+class PointerType;
+class DataLayout;
+class TargetLibraryInfo;
+class Type;
+class Value;
+
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates or reallocates memory (either malloc, calloc, realloc, or
+/// strdup-like).
+bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a function that returns a
+/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
+bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates uninitialized memory (such as malloc).
+bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates zero-filled memory (such as calloc).
+bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory (either malloc, calloc, or strdup-like).
+bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+/// \brief Tests if a value is a call or invoke to a library function that
+/// allocates memory and never returns null (such as operator new).
+bool isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
+ bool LookThroughBitCast = false);
+
+//===----------------------------------------------------------------------===//
+// malloc Call Utility Functions.
+//
+
+/// extractMallocCall - Returns the corresponding CallInst if the instruction
+/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
+/// ignore InvokeInst here.
+const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI);
+static inline CallInst *extractMallocCall(Value *I,
+ const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(extractMallocCall((const Value*)I, TLI));
+}
+
+/// getMallocType - Returns the PointerType resulting from the malloc call.
+/// The PointerType depends on the number of bitcast uses of the malloc call:
+/// 0: PointerType is the malloc call's return type.
+/// 1: PointerType is the bitcast's result type.
+/// >1: Unique PointerType cannot be determined, return NULL.
+PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
+
+/// getMallocAllocatedType - Returns the Type allocated by malloc call.
+/// The Type depends on the number of bitcast uses of the malloc call:
+/// 0: PointerType is the malloc call's return type.
+/// 1: PointerType is the bitcast's result type.
+/// >1: Unique PointerType cannot be determined, return NULL.
+Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
+
+/// getMallocArraySize - Returns the array size of a malloc call. If the
+/// argument passed to malloc is a multiple of the size of the malloced type,
+/// then return that multiple. For non-array mallocs, the multiple is
+/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
+/// determined.
+Value *getMallocArraySize(CallInst *CI, const DataLayout &DL,
+ const TargetLibraryInfo *TLI,
+ bool LookThroughSExt = false);
+
+//===----------------------------------------------------------------------===//
+// calloc Call Utility Functions.
+//
+
+/// extractCallocCall - Returns the corresponding CallInst if the instruction
+/// is a calloc call.
+const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
+static inline CallInst *extractCallocCall(Value *I,
+ const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
+}
+
+
+//===----------------------------------------------------------------------===//
+// free Call Utility Functions.
+//
+
+/// isFreeCall - Returns non-null if the value is a call to the builtin free().
+const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI);
+
+static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
+ return const_cast<CallInst*>(isFreeCall((const Value*)I, TLI));
+}
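+
+// [Editor's sketch -- not part of the original header] Classifying a value
+// with the predicates above, given a TargetLibraryInfo (`classifyMemCall` is
+// hypothetical):
+static inline const char *classifyMemCall(const Value *V,
+ const TargetLibraryInfo *TLI) {
+ if (isMallocLikeFn(V, TLI)) return "allocates uninitialized memory";
+ if (isCallocLikeFn(V, TLI)) return "allocates zeroed memory";
+ if (isFreeCall(V, TLI)) return "frees memory";
+ return "not a recognized memory builtin";
+}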
+
+
+//===----------------------------------------------------------------------===//
+// Utility functions to compute size of objects.
+//
+
+/// \brief Compute the size of the object pointed to by Ptr. Returns true and
+/// the object size in Size if successful, and false otherwise. In this
+/// context, by object we mean the region of memory starting at Ptr to the
+/// end of the underlying object pointed to by Ptr.
+/// If RoundToAlign is true, then Size is rounded up to the alignment of
+/// allocas, byval arguments, and global variables.
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
+ const TargetLibraryInfo *TLI, bool RoundToAlign = false);
+
+typedef std::pair<APInt, APInt> SizeOffsetType;
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*
+/// statically. Fails if size or offset are not known at compile time.
+class ObjectSizeOffsetVisitor
+ : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
+
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ bool RoundToAlign;
+ unsigned IntTyBits;
+ APInt Zero;
+ SmallPtrSet<Instruction *, 8> SeenInsts;
+
+ APInt align(APInt Size, uint64_t Align);
+
+ SizeOffsetType unknown() {
+ return std::make_pair(APInt(), APInt());
+ }
+
+public:
+ ObjectSizeOffsetVisitor(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ LLVMContext &Context, bool RoundToAlign = false);
+
+ SizeOffsetType compute(Value *V);
+
+ bool knownSize(SizeOffsetType &SizeOffset) {
+ return SizeOffset.first.getBitWidth() > 1;
+ }
+
+ bool knownOffset(SizeOffsetType &SizeOffset) {
+ return SizeOffset.second.getBitWidth() > 1;
+ }
+
+ bool bothKnown(SizeOffsetType &SizeOffset) {
+ return knownSize(SizeOffset) && knownOffset(SizeOffset);
+ }
+
+ // These are "private", except they can't actually be made private. Only
+ // compute() should be used by external users.
+ SizeOffsetType visitAllocaInst(AllocaInst &I);
+ SizeOffsetType visitArgument(Argument &A);
+ SizeOffsetType visitCallSite(CallSite CS);
+ SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
+ SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
+ SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
+ SizeOffsetType visitGEPOperator(GEPOperator &GEP);
+ SizeOffsetType visitGlobalAlias(GlobalAlias &GA);
+ SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
+ SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
+ SizeOffsetType visitLoadInst(LoadInst &I);
+ SizeOffsetType visitPHINode(PHINode&);
+ SizeOffsetType visitSelectInst(SelectInst &I);
+ SizeOffsetType visitUndefValue(UndefValue&);
+ SizeOffsetType visitInstruction(Instruction &I);
+};
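+
+// [Editor's sketch -- not part of the original header] The simplest client of
+// this machinery is getObjectSize() above, e.g. a check that succeeds only
+// when the pointee's size is statically known (`fitsInObject` is
+// hypothetical):
+static inline bool fitsInObject(const Value *Ptr, uint64_t Need,
+ const DataLayout &DL, const TargetLibraryInfo *TLI) {
+ uint64_t Size;
+ if (!getObjectSize(Ptr, Size, DL, TLI))
+ return false; // size not known at compile time
+ return Need <= Size;
+}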
+
+typedef std::pair<Value*, Value*> SizeOffsetEvalType;
+
+
+/// \brief Evaluate the size and offset of an object pointed to by a Value*.
+/// May create code to compute the result at run-time.
+class ObjectSizeOffsetEvaluator
+ : public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
+
+ typedef IRBuilder<true, TargetFolder> BuilderTy;
+ typedef std::pair<WeakVH, WeakVH> WeakEvalType;
+ typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
+ typedef SmallPtrSet<const Value*, 8> PtrSetTy;
+
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ LLVMContext &Context;
+ BuilderTy Builder;
+ IntegerType *IntTy;
+ Value *Zero;
+ CacheMapTy CacheMap;
+ PtrSetTy SeenVals;
+ bool RoundToAlign;
+
+ SizeOffsetEvalType unknown() {
+ return std::make_pair(nullptr, nullptr);
+ }
+ SizeOffsetEvalType compute_(Value *V);
+
+public:
+ ObjectSizeOffsetEvaluator(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ LLVMContext &Context, bool RoundToAlign = false);
+ SizeOffsetEvalType compute(Value *V);
+
+ bool knownSize(SizeOffsetEvalType SizeOffset) {
+ return SizeOffset.first;
+ }
+
+ bool knownOffset(SizeOffsetEvalType SizeOffset) {
+ return SizeOffset.second;
+ }
+
+ bool anyKnown(SizeOffsetEvalType SizeOffset) {
+ return knownSize(SizeOffset) || knownOffset(SizeOffset);
+ }
+
+ bool bothKnown(SizeOffsetEvalType SizeOffset) {
+ return knownSize(SizeOffset) && knownOffset(SizeOffset);
+ }
+
+ // The individual instruction visitors should be treated as private.
+ SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
+ SizeOffsetEvalType visitCallSite(CallSite CS);
+ SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
+ SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
+ SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
+ SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
+ SizeOffsetEvalType visitLoadInst(LoadInst &I);
+ SizeOffsetEvalType visitPHINode(PHINode &PHI);
+ SizeOffsetEvalType visitSelectInst(SelectInst &I);
+ SizeOffsetEvalType visitInstruction(Instruction &I);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
new file mode 100644
index 0000000..daa1ba9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/MemoryDependenceAnalysis.h
@@ -0,0 +1,467 @@
+//===- llvm/Analysis/MemoryDependenceAnalysis.h - Memory Deps --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MemoryDependenceAnalysis analysis pass.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
+#define LLVM_ANALYSIS_MEMORYDEPENDENCEANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/PredIteratorCache.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+ class Function;
+ class FunctionPass;
+ class Instruction;
+ class CallSite;
+ class AssumptionCache;
+ class MemoryDependenceAnalysis;
+ class PredIteratorCache;
+ class DominatorTree;
+ class PHITransAddr;
+
+ /// MemDepResult - A memory dependence query can return one of three different
+ /// answers, described below.
+ class MemDepResult {
+ enum DepType {
+ /// Invalid - Clients of MemDep never see this.
+ Invalid = 0,
+
+ /// Clobber - This is a dependence on the specified instruction which
+ /// clobbers the desired value. The pointer member of the MemDepResult
+ /// pair holds the instruction that clobbers the memory. For example,
+ /// this occurs when we see a may-aliased store to the memory location we
+ /// care about.
+ ///
+ /// There are several cases that may be interesting here:
+ /// 1. Loads are clobbered by may-alias stores.
+ /// 2. Loads are considered clobbered by partially-aliased loads. The
+ /// client may choose to analyze deeper into these cases.
+ Clobber,
+
+ /// Def - This is a dependence on the specified instruction which
+ /// defines/produces the desired memory location. The pointer member of
+ /// the MemDepResult pair holds the instruction that defines the memory.
+ /// Cases of interest:
+ /// 1. This could be a load or store for dependence queries on
+ /// load/store. The value loaded or stored is the produced value.
+ /// Note that the pointer operand may be different from that of the
+ /// queried pointer due to must aliases and phi translation. Note
+ /// that the def may not be the same type as the query, the pointers
+ /// may just be must aliases.
+ /// 2. For loads and stores, this could be an allocation instruction. In
+ /// this case, the load is loading an undef value or a store is the
+ /// first store to (that part of) the allocation.
+ /// 3. Dependence queries on calls return Def only when they are
+ /// readonly calls or memory use intrinsics with identical callees
+ /// and no intervening clobbers. No validation is done that the
+ /// operands to the calls are the same.
+ Def,
+
+ /// Other - This marker indicates that the query has no known dependency
+ /// in the specified block. More detailed state info is encoded in the
+ /// upper part of the pair (i.e. the Instruction*)
+ Other
+ };
+ /// If DepType is "Other", the upper part of the pair
+ /// (i.e. the Instruction* part) is instead used to encode more detailed
+ /// type information as follows:
+ enum OtherType {
+ /// NonLocal - This marker indicates that the query has no dependency in
+ /// the specified block. To find out more, the client should query other
+ /// predecessor blocks.
+ NonLocal = 0x4,
+ /// NonFuncLocal - This marker indicates that the query has no
+ /// dependency in the specified function.
+ NonFuncLocal = 0x8,
+ /// Unknown - This marker indicates that the query dependency
+ /// is unknown.
+ Unknown = 0xc
+ };
+
+ typedef PointerIntPair<Instruction*, 2, DepType> PairTy;
+ PairTy Value;
+ explicit MemDepResult(PairTy V) : Value(V) {}
+
+ public:
+ MemDepResult() : Value(nullptr, Invalid) {}
+
+ /// get methods: These are static ctor methods for creating various
+ /// MemDepResult kinds.
+ static MemDepResult getDef(Instruction *Inst) {
+ assert(Inst && "Def requires inst");
+ return MemDepResult(PairTy(Inst, Def));
+ }
+ static MemDepResult getClobber(Instruction *Inst) {
+ assert(Inst && "Clobber requires inst");
+ return MemDepResult(PairTy(Inst, Clobber));
+ }
+ static MemDepResult getNonLocal() {
+ return MemDepResult(
+ PairTy(reinterpret_cast<Instruction*>(NonLocal), Other));
+ }
+ static MemDepResult getNonFuncLocal() {
+ return MemDepResult(
+ PairTy(reinterpret_cast<Instruction*>(NonFuncLocal), Other));
+ }
+ static MemDepResult getUnknown() {
+ return MemDepResult(
+ PairTy(reinterpret_cast<Instruction*>(Unknown), Other));
+ }
+
+ /// isClobber - Return true if this MemDepResult represents a query that is
+ /// an instruction clobber dependency.
+ bool isClobber() const { return Value.getInt() == Clobber; }
+
+ /// isDef - Return true if this MemDepResult represents a query that is
+ /// an instruction definition dependency.
+ bool isDef() const { return Value.getInt() == Def; }
+
+ /// isNonLocal - Return true if this MemDepResult represents a query that
+ /// is transparent to the start of the block, but where a non-local query
+ /// hasn't been done.
+ bool isNonLocal() const {
+ return Value.getInt() == Other
+ && Value.getPointer() == reinterpret_cast<Instruction*>(NonLocal);
+ }
+
+ /// isNonFuncLocal - Return true if this MemDepResult represents a query
+ /// that is transparent to the start of the function.
+ bool isNonFuncLocal() const {
+ return Value.getInt() == Other
+ && Value.getPointer() == reinterpret_cast<Instruction*>(NonFuncLocal);
+ }
+
+ /// isUnknown - Return true if this MemDepResult represents a query which
+ /// cannot and/or will not be computed.
+ bool isUnknown() const {
+ return Value.getInt() == Other
+ && Value.getPointer() == reinterpret_cast<Instruction*>(Unknown);
+ }
+
+ /// getInst() - If this is a normal dependency, return the instruction that
+ /// is depended on. Otherwise, return null.
+ Instruction *getInst() const {
+ if (Value.getInt() == Other) return nullptr;
+ return Value.getPointer();
+ }
+
+ bool operator==(const MemDepResult &M) const { return Value == M.Value; }
+ bool operator!=(const MemDepResult &M) const { return Value != M.Value; }
+ bool operator<(const MemDepResult &M) const { return Value < M.Value; }
+ bool operator>(const MemDepResult &M) const { return Value > M.Value; }
+
+ private:
+ friend class MemoryDependenceAnalysis;
+ /// Dirty - Entries with this marker occur in a LocalDeps map or
+ /// NonLocalDeps map when the instruction they previously referenced was
+ /// removed from MemDep. In either case, the entry may include an
+ /// instruction pointer. If so, the pointer is an instruction in the
+ /// block where scanning can start from, saving some work.
+ ///
+ /// In a default-constructed MemDepResult object, the type will be Dirty
+ /// and the instruction pointer will be null.
+
+    /// isDirty - Return true if this is a MemDepResult in its dirty/invalid
+    /// state.
+ bool isDirty() const { return Value.getInt() == Invalid; }
+
+ static MemDepResult getDirty(Instruction *Inst) {
+ return MemDepResult(PairTy(Inst, Invalid));
+ }
+ };
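+
+  // Illustrative sketch (not part of the original header): clients typically
+  // discriminate the result kinds roughly like this, assuming Dep came from a
+  // MemoryDependenceAnalysis query:
+  //
+  //   if (Instruction *DepInst = Dep.getInst()) {
+  //     // Def or Clobber: DepInst is the local dependency.
+  //   } else if (Dep.isNonLocal()) {
+  //     // Transparent to the block start; query predecessor blocks.
+  //   } else if (Dep.isUnknown()) {
+  //     // The dependency cannot or will not be computed.
+  //   }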
+
+ /// NonLocalDepEntry - This is an entry in the NonLocalDepInfo cache. For
+ /// each BasicBlock (the BB entry) it keeps a MemDepResult.
+ class NonLocalDepEntry {
+ BasicBlock *BB;
+ MemDepResult Result;
+
+ public:
+ NonLocalDepEntry(BasicBlock *bb, MemDepResult result)
+ : BB(bb), Result(result) {}
+
+ // This is used for searches.
+ NonLocalDepEntry(BasicBlock *bb) : BB(bb) {}
+
+ // BB is the sort key, it can't be changed.
+ BasicBlock *getBB() const { return BB; }
+
+ void setResult(const MemDepResult &R) { Result = R; }
+
+ const MemDepResult &getResult() const { return Result; }
+
+ bool operator<(const NonLocalDepEntry &RHS) const {
+ return BB < RHS.BB;
+ }
+ };
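+
+  // Illustrative sketch (an assumption, not from the original header): because
+  // BB is the sort key, the BB-only constructor can serve as a search key in a
+  // sorted cache vector:
+  //
+  //   std::vector<NonLocalDepEntry> &Cache = ...;  // sorted by getBB()
+  //   auto It = std::lower_bound(Cache.begin(), Cache.end(),
+  //                              NonLocalDepEntry(BB));
+  //   if (It != Cache.end() && It->getBB() == BB)
+  //     const MemDepResult &R = It->getResult();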
+
+ /// NonLocalDepResult - This is a result from a NonLocal dependence query.
+ /// For each BasicBlock (the BB entry) it keeps a MemDepResult and the
+ /// (potentially phi translated) address that was live in the block.
+ class NonLocalDepResult {
+ NonLocalDepEntry Entry;
+ Value *Address;
+
+ public:
+ NonLocalDepResult(BasicBlock *bb, MemDepResult result, Value *address)
+ : Entry(bb, result), Address(address) {}
+
+ // BB is the sort key, it can't be changed.
+ BasicBlock *getBB() const { return Entry.getBB(); }
+
+ void setResult(const MemDepResult &R, Value *Addr) {
+ Entry.setResult(R);
+ Address = Addr;
+ }
+
+ const MemDepResult &getResult() const { return Entry.getResult(); }
+
+ /// getAddress - Return the address of this pointer in this block. This can
+ /// be different than the address queried for the non-local result because
+ /// of phi translation. This returns null if the address was not available
+ /// in a block (i.e. because phi translation failed) or if this is a cached
+ /// result and that address was deleted.
+ ///
+ /// The address is always null for a non-local 'call' dependence.
+ Value *getAddress() const { return Address; }
+ };
+
+ /// MemoryDependenceAnalysis - This is an analysis that determines, for a
+ /// given memory operation, what preceding memory operations it depends on.
+ /// It builds on alias analysis information, and tries to provide a lazy,
+ /// caching interface to a common kind of alias information query.
+ ///
+ /// The dependency information returned is somewhat unusual, but is pragmatic.
+ /// If queried about a store or call that might modify memory, the analysis
+ /// will return the instruction[s] that may either load from that memory or
+ /// store to it. If queried with a load or call that can never modify memory,
+ /// the analysis will return calls and stores that might modify the pointer,
+ /// but generally does not return loads unless a) they are volatile, or
+ /// b) they load from *must-aliased* pointers. Returning a dependence on
+ /// must-alias'd pointers instead of all pointers interacts well with the
+ /// internal caching mechanism.
+ ///
+ class MemoryDependenceAnalysis : public FunctionPass {
+ // A map from instructions to their dependency.
+ typedef DenseMap<Instruction*, MemDepResult> LocalDepMapType;
+ LocalDepMapType LocalDeps;
+
+ public:
+ typedef std::vector<NonLocalDepEntry> NonLocalDepInfo;
+
+ private:
+ /// ValueIsLoadPair - This is a pair<Value*, bool> where the bool is true if
+ /// the dependence is a read only dependence, false if read/write.
+ typedef PointerIntPair<const Value*, 1, bool> ValueIsLoadPair;
+
+ /// BBSkipFirstBlockPair - This pair is used when caching information for a
+ /// block. If the pointer is null, the cache value is not a full query that
+ /// starts at the specified block. If non-null, the bool indicates whether
+    /// or not the contents of the block were skipped.
+ typedef PointerIntPair<BasicBlock*, 1, bool> BBSkipFirstBlockPair;
+
+ /// NonLocalPointerInfo - This record is the information kept for each
+ /// (value, is load) pair.
+ struct NonLocalPointerInfo {
+ /// Pair - The pair of the block and the skip-first-block flag.
+ BBSkipFirstBlockPair Pair;
+ /// NonLocalDeps - The results of the query for each relevant block.
+ NonLocalDepInfo NonLocalDeps;
+ /// Size - The maximum size of the dereferences of the
+ /// pointer. May be UnknownSize if the sizes are unknown.
+ uint64_t Size;
+ /// AATags - The AA tags associated with dereferences of the
+ /// pointer. The members may be null if there are no tags or
+ /// conflicting tags.
+ AAMDNodes AATags;
+
+ NonLocalPointerInfo() : Size(MemoryLocation::UnknownSize) {}
+ };
+
+ /// CachedNonLocalPointerInfo - This map stores the cached results of doing
+ /// a pointer lookup at the bottom of a block. The key of this map is the
+ /// pointer+isload bit, the value is a list of <bb->result> mappings.
+ typedef DenseMap<ValueIsLoadPair,
+ NonLocalPointerInfo> CachedNonLocalPointerInfo;
+ CachedNonLocalPointerInfo NonLocalPointerDeps;
+
+ // A map from instructions to their non-local pointer dependencies.
+ typedef DenseMap<Instruction*,
+ SmallPtrSet<ValueIsLoadPair, 4> > ReverseNonLocalPtrDepTy;
+ ReverseNonLocalPtrDepTy ReverseNonLocalPtrDeps;
+
+    /// PerInstNLInfo - This is the information we keep for each cached access
+    /// that we have for an instruction. The first member is the set of cached
+    /// results and the bool indicates whether any entry in the set is dirty.
+ typedef std::pair<NonLocalDepInfo, bool> PerInstNLInfo;
+
+ // A map from instructions to their non-local dependencies.
+ typedef DenseMap<Instruction*, PerInstNLInfo> NonLocalDepMapType;
+
+ NonLocalDepMapType NonLocalDeps;
+
+ // A reverse mapping from dependencies to the dependees. This is
+ // used when removing instructions to keep the cache coherent.
+ typedef DenseMap<Instruction*,
+ SmallPtrSet<Instruction*, 4> > ReverseDepMapType;
+ ReverseDepMapType ReverseLocalDeps;
+
+ // A reverse mapping from dependencies to the non-local dependees.
+ ReverseDepMapType ReverseNonLocalDeps;
+
+ /// Current AA implementation, just a cache.
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ AssumptionCache *AC;
+ const TargetLibraryInfo *TLI;
+ PredIteratorCache PredCache;
+
+ public:
+ MemoryDependenceAnalysis();
+ ~MemoryDependenceAnalysis() override;
+ static char ID;
+
+ /// Pass Implementation stuff. This doesn't do any analysis eagerly.
+ bool runOnFunction(Function &) override;
+
+    /// Clean up memory in between runs.
+ void releaseMemory() override;
+
+ /// getAnalysisUsage - Does not modify anything. It uses Value Numbering
+ /// and Alias Analysis.
+ ///
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// getDependency - Return the instruction on which a memory operation
+ /// depends. See the class comment for more details. It is illegal to call
+ /// this on non-memory instructions.
+ MemDepResult getDependency(Instruction *QueryInst);
+
+ /// getNonLocalCallDependency - Perform a full dependency query for the
+ /// specified call, returning the set of blocks that the value is
+ /// potentially live across. The returned set of results will include a
+ /// "NonLocal" result for all blocks where the value is live across.
+ ///
+ /// This method assumes the instruction returns a "NonLocal" dependency
+ /// within its own block.
+ ///
+ /// This returns a reference to an internal data structure that may be
+ /// invalidated on the next non-local query or when an instruction is
+ /// removed. Clients must copy this data if they want it around longer than
+ /// that.
+ const NonLocalDepInfo &getNonLocalCallDependency(CallSite QueryCS);
+
+ /// getNonLocalPointerDependency - Perform a full dependency query for an
+ /// access to the QueryInst's specified memory location, returning the set
+ /// of instructions that either define or clobber the value.
+ ///
+ /// Warning: For a volatile query instruction, the dependencies will be
+ /// accurate, and thus usable for reordering, but it is never legal to
+ /// remove the query instruction.
+ ///
+ /// This method assumes the pointer has a "NonLocal" dependency within
+ /// QueryInst's parent basic block.
+ void getNonLocalPointerDependency(Instruction *QueryInst,
+ SmallVectorImpl<NonLocalDepResult> &Result);
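+
+    // A minimal usage sketch (illustrative, not part of the original header):
+    //
+    //   SmallVector<NonLocalDepResult, 16> Deps;
+    //   MD.getNonLocalPointerDependency(Load, Deps);  // MD: this analysis
+    //   for (const NonLocalDepResult &D : Deps)
+    //     if (Instruction *DepInst = D.getResult().getInst())
+    //       ... // DepInst defines/clobbers the location in block D.getBB()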
+
+ /// removeInstruction - Remove an instruction from the dependence analysis,
+ /// updating the dependence of instructions that previously depended on it.
+ void removeInstruction(Instruction *InstToRemove);
+
+ /// invalidateCachedPointerInfo - This method is used to invalidate cached
+ /// information about the specified pointer, because it may be too
+ /// conservative in memdep. This is an optional call that can be used when
+ /// the client detects an equivalence between the pointer and some other
+    /// value and replaces the other value with Ptr. This can make Ptr
+    /// available in places where the cached information would otherwise be
+    /// overly conservative.
+ void invalidateCachedPointerInfo(Value *Ptr);
+
+ /// invalidateCachedPredecessors - Clear the PredIteratorCache info.
+ /// This needs to be done when the CFG changes, e.g., due to splitting
+ /// critical edges.
+ void invalidateCachedPredecessors();
+
+ /// \brief Return the instruction on which a memory location depends.
+ /// If isLoad is true, this routine ignores may-aliases with read-only
+ /// operations. If isLoad is false, this routine ignores may-aliases
+ /// with reads from read-only locations. If possible, pass the query
+ /// instruction as well; this function may take advantage of the metadata
+ /// annotated to the query instruction to refine the result.
+ ///
+ /// Note that this is an uncached query, and thus may be inefficient.
+ ///
+ MemDepResult getPointerDependencyFrom(const MemoryLocation &Loc,
+ bool isLoad,
+ BasicBlock::iterator ScanIt,
+ BasicBlock *BB,
+ Instruction *QueryInst = nullptr);
+
+ MemDepResult getSimplePointerDependencyFrom(const MemoryLocation &MemLoc,
+ bool isLoad,
+ BasicBlock::iterator ScanIt,
+ BasicBlock *BB,
+ Instruction *QueryInst);
+
+ /// This analysis looks for other loads and stores with invariant.group
+ /// metadata and the same pointer operand. Returns Unknown if it does not
+ /// find anything, and Def if it can be assumed that 2 instructions load or
+ /// store the same value.
+    /// FIXME: This analysis works only on a single block because of restrictions
+ /// at the call site.
+ MemDepResult getInvariantGroupPointerDependency(LoadInst *LI,
+ BasicBlock *BB);
+
+ /// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
+    /// looks at a memory location for a load (specified by MemLocBase,
+    /// MemLocOffs, and MemLocSize) and compares it against a load (LI). If
+    /// the specified load could
+ /// be safely widened to a larger integer load that is 1) still efficient,
+ /// 2) safe for the target, and 3) would provide the specified memory
+ /// location value, then this function returns the size in bytes of the
+ /// load width to use. If not, this returns zero.
+ static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
+ int64_t MemLocOffs,
+ unsigned MemLocSize,
+ const LoadInst *LI);
+
+ private:
+ MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
+ BasicBlock::iterator ScanIt,
+ BasicBlock *BB);
+ bool getNonLocalPointerDepFromBB(Instruction *QueryInst,
+ const PHITransAddr &Pointer,
+ const MemoryLocation &Loc, bool isLoad,
+ BasicBlock *BB,
+ SmallVectorImpl<NonLocalDepResult> &Result,
+ DenseMap<BasicBlock *, Value *> &Visited,
+ bool SkipFirstBlock = false);
+ MemDepResult GetNonLocalInfoForBlock(Instruction *QueryInst,
+ const MemoryLocation &Loc, bool isLoad,
+ BasicBlock *BB, NonLocalDepInfo *Cache,
+ unsigned NumSortedEntries);
+
+ void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
+
+ /// verifyRemoved - Verify that the specified instruction does not occur
+ /// in our internal data structures.
+ void verifyRemoved(Instruction *Inst) const;
+ };
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/MemoryLocation.h b/contrib/llvm/include/llvm/Analysis/MemoryLocation.h
new file mode 100644
index 0000000..426b49a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/MemoryLocation.h
@@ -0,0 +1,142 @@
+//===- MemoryLocation.h - Memory location descriptions ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides utility analysis objects describing memory locations.
+/// These are used both by the Alias Analysis infrastructure and more
+/// specialized memory analysis layers.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_MEMORYLOCATION_H
+#define LLVM_ANALYSIS_MEMORYLOCATION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Metadata.h"
+
+namespace llvm {
+
+class LoadInst;
+class StoreInst;
+class MemTransferInst;
+class MemIntrinsic;
+class TargetLibraryInfo;
+
+/// Representation for a specific memory location.
+///
+/// This abstraction can be used to represent a specific location in memory.
+/// The goal of the location is to represent enough information to describe
+/// abstract aliasing, modification, and reference behaviors of whatever
+/// value(s) are stored in memory at the particular location.
+///
+/// The primary user of this interface is LLVM's Alias Analysis, but other
+/// memory analyses such as MemoryDependence can use it as well.
+class MemoryLocation {
+public:
+ /// UnknownSize - This is a special value which can be used with the
+ /// size arguments in alias queries to indicate that the caller does not
+ /// know the sizes of the potential memory references.
+ enum : uint64_t { UnknownSize = ~UINT64_C(0) };
+
+ /// The address of the start of the location.
+ const Value *Ptr;
+
+ /// The maximum size of the location, in address-units, or
+ /// UnknownSize if the size is not known.
+ ///
+ /// Note that an unknown size does not mean the pointer aliases the entire
+ /// virtual address space, because there are restrictions on stepping out of
+ /// one object and into another. See
+ /// http://llvm.org/docs/LangRef.html#pointeraliasing
+ uint64_t Size;
+
+  /// The metadata nodes which describe the aliasing of the location (each
+ /// member is null if that kind of information is unavailable).
+ AAMDNodes AATags;
+
+  /// Return a location with information about the memory referenced by the
+  /// given instruction.
+ static MemoryLocation get(const LoadInst *LI);
+ static MemoryLocation get(const StoreInst *SI);
+ static MemoryLocation get(const VAArgInst *VI);
+ static MemoryLocation get(const AtomicCmpXchgInst *CXI);
+ static MemoryLocation get(const AtomicRMWInst *RMWI);
+ static MemoryLocation get(const Instruction *Inst) {
+ if (auto *I = dyn_cast<LoadInst>(Inst))
+ return get(I);
+ else if (auto *I = dyn_cast<StoreInst>(Inst))
+ return get(I);
+ else if (auto *I = dyn_cast<VAArgInst>(Inst))
+ return get(I);
+ else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))
+ return get(I);
+ else if (auto *I = dyn_cast<AtomicRMWInst>(Inst))
+ return get(I);
+ llvm_unreachable("unsupported memory instruction");
+ }
+
+ /// Return a location representing the source of a memory transfer.
+ static MemoryLocation getForSource(const MemTransferInst *MTI);
+
+ /// Return a location representing the destination of a memory set or
+ /// transfer.
+ static MemoryLocation getForDest(const MemIntrinsic *MI);
+
+ /// Return a location representing a particular argument of a call.
+ static MemoryLocation getForArgument(ImmutableCallSite CS, unsigned ArgIdx,
+ const TargetLibraryInfo &TLI);
+
+ explicit MemoryLocation(const Value *Ptr = nullptr,
+ uint64_t Size = UnknownSize,
+ const AAMDNodes &AATags = AAMDNodes())
+ : Ptr(Ptr), Size(Size), AATags(AATags) {}
+
+ MemoryLocation getWithNewPtr(const Value *NewPtr) const {
+ MemoryLocation Copy(*this);
+ Copy.Ptr = NewPtr;
+ return Copy;
+ }
+
+ MemoryLocation getWithNewSize(uint64_t NewSize) const {
+ MemoryLocation Copy(*this);
+ Copy.Size = NewSize;
+ return Copy;
+ }
+
+ MemoryLocation getWithoutAATags() const {
+ MemoryLocation Copy(*this);
+ Copy.AATags = AAMDNodes();
+ return Copy;
+ }
+
+ bool operator==(const MemoryLocation &Other) const {
+ return Ptr == Other.Ptr && Size == Other.Size && AATags == Other.AATags;
+ }
+};
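+
+// A minimal usage sketch (illustrative, not part of the original header),
+// assuming LI is a LoadInst*, SI a StoreInst*, and AA an AliasAnalysis:
+//
+//   MemoryLocation LoadLoc = MemoryLocation::get(LI);
+//   MemoryLocation StoreLoc = MemoryLocation::get(SI);
+//   AliasResult AR = AA.alias(LoadLoc, StoreLoc);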
+
+// Specialize DenseMapInfo for MemoryLocation.
+template <> struct DenseMapInfo<MemoryLocation> {
+ static inline MemoryLocation getEmptyKey() {
+ return MemoryLocation(DenseMapInfo<const Value *>::getEmptyKey(), 0);
+ }
+ static inline MemoryLocation getTombstoneKey() {
+ return MemoryLocation(DenseMapInfo<const Value *>::getTombstoneKey(), 0);
+ }
+ static unsigned getHashValue(const MemoryLocation &Val) {
+ return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
+ DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
+ DenseMapInfo<AAMDNodes>::getHashValue(Val.AATags);
+ }
+ static bool isEqual(const MemoryLocation &LHS, const MemoryLocation &RHS) {
+ return LHS == RHS;
+ }
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
new file mode 100644
index 0000000..ac01154
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCAliasAnalysis.h
@@ -0,0 +1,102 @@
+//===- ObjCARCAliasAnalysis.h - ObjC ARC Alias Analysis ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares a simple ARC-aware AliasAnalysis using special knowledge
+/// of Objective C to enhance other optimization passes which rely on the Alias
+/// Analysis infrastructure.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H
+#define LLVM_ANALYSIS_OBJCARCALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief This is a simple alias analysis implementation that uses knowledge
+/// of ARC constructs to answer queries.
+///
+/// TODO: This class could be generalized to know about other ObjC-specific
+/// tricks, such as knowing that ivars in the non-fragile ABI are non-aliasing
+/// even though their offsets are dynamic.
+class ObjCARCAAResult : public AAResultBase<ObjCARCAAResult> {
+ friend AAResultBase<ObjCARCAAResult>;
+
+ const DataLayout &DL;
+
+public:
+ explicit ObjCARCAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI)
+ : AAResultBase(TLI), DL(DL) {}
+ ObjCARCAAResult(ObjCARCAAResult &&Arg)
+ : AAResultBase(std::move(Arg)), DL(Arg.DL) {}
+
+ /// Handle invalidation events from the new pass manager.
+ ///
+ /// By definition, this result is stateless and so remains valid.
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+
+ using AAResultBase::getModRefBehavior;
+ FunctionModRefBehavior getModRefBehavior(const Function *F);
+
+ using AAResultBase::getModRefInfo;
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class ObjCARCAA {
+public:
+ typedef ObjCARCAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ ObjCARCAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "ObjCARCAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the ObjCARCAAResult object.
+class ObjCARCAAWrapperPass : public ImmutablePass {
+ std::unique_ptr<ObjCARCAAResult> Result;
+
+public:
+ static char ID;
+
+ ObjCARCAAWrapperPass();
+
+ ObjCARCAAResult &getResult() { return *Result; }
+ const ObjCARCAAResult &getResult() const { return *Result; }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // namespace objcarc
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h b/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
new file mode 100644
index 0000000..29d99c9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCAnalysisUtils.h
@@ -0,0 +1,287 @@
+//===- ObjCARCAnalysisUtils.h - ObjC ARC Analysis Utilities -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines common analysis utilities used by the ObjC ARC Optimizer.
+/// ARC stands for Automatic Reference Counting and is a system for managing
+/// reference counts for objects in Objective C.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ANALYSIS_OBJCARCANALYSISUTILS_H
+#define LLVM_LIB_ANALYSIS_OBJCARCANALYSISUTILS_H
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+class raw_ostream;
+}
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief A handy option to enable/disable all ARC Optimizations.
+extern bool EnableARCOpts;
+
+/// \brief Test if the given module looks interesting to run ARC optimization
+/// on.
+inline bool ModuleHasARC(const Module &M) {
+ return
+ M.getNamedValue("objc_retain") ||
+ M.getNamedValue("objc_release") ||
+ M.getNamedValue("objc_autorelease") ||
+ M.getNamedValue("objc_retainAutoreleasedReturnValue") ||
+ M.getNamedValue("objc_retainBlock") ||
+ M.getNamedValue("objc_autoreleaseReturnValue") ||
+ M.getNamedValue("objc_autoreleasePoolPush") ||
+ M.getNamedValue("objc_loadWeakRetained") ||
+ M.getNamedValue("objc_loadWeak") ||
+ M.getNamedValue("objc_destroyWeak") ||
+ M.getNamedValue("objc_storeWeak") ||
+ M.getNamedValue("objc_initWeak") ||
+ M.getNamedValue("objc_moveWeak") ||
+ M.getNamedValue("objc_copyWeak") ||
+ M.getNamedValue("objc_retainedObject") ||
+ M.getNamedValue("objc_unretainedObject") ||
+ M.getNamedValue("objc_unretainedPointer") ||
+ M.getNamedValue("clang.arc.use");
+}
+
+/// \brief This is a wrapper around GetUnderlyingObject which also knows how to
+/// look through objc_retain and objc_autorelease calls, which we know to return
+/// their argument verbatim.
+inline const Value *GetUnderlyingObjCPtr(const Value *V,
+ const DataLayout &DL) {
+ for (;;) {
+ V = GetUnderlyingObject(V, DL);
+ if (!IsForwarding(GetBasicARCInstKind(V)))
+ break;
+ V = cast<CallInst>(V)->getArgOperand(0);
+ }
+
+ return V;
+}
+
+/// The RCIdentity root of a value \p V is a dominating value U for which
+/// retaining or releasing U is equivalent to retaining or releasing V. In other
+/// words, ARC operations on \p V are equivalent to ARC operations on \p U.
+///
+/// We use this in the ARC optimizer to make it easier to match up ARC
+/// operations by always mapping ARC operations to RCIdentityRoots instead of
+/// pointers themselves.
+///
+/// The two ways that we see RCIdentical values in ObjC are via:
+///
+/// 1. PointerCasts
+/// 2. Forwarding Calls that return their argument verbatim.
+///
+/// Thus this function strips off pointer casts and forwarding calls. *NOTE*
+/// This implies that two RCIdentical values must alias.
+inline const Value *GetRCIdentityRoot(const Value *V) {
+ for (;;) {
+ V = V->stripPointerCasts();
+ if (!IsForwarding(GetBasicARCInstKind(V)))
+ break;
+ V = cast<CallInst>(V)->getArgOperand(0);
+ }
+ return V;
+}
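+
+// For example (illustrative): given
+//
+//   %y = bitcast i8* %x to i8*
+//   %z = call i8* @objc_retain(i8* %y)
+//
+// GetRCIdentityRoot(%z) looks through the forwarding objc_retain call and the
+// bitcast, returning %x, so ARC operations on %z get matched up with
+// operations on %x.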
+
+/// Helper which calls const Value *GetRCIdentityRoot(const Value *V) and just
+/// casts away the const of the result. For documentation about what an
+/// RCIdentityRoot is (and, by extension, what GetRCIdentityRoot does), see
+/// that function.
+inline Value *GetRCIdentityRoot(Value *V) {
+ return const_cast<Value *>(GetRCIdentityRoot((const Value *)V));
+}
+
+/// \brief Assuming the given instruction is one of the special calls such as
+/// objc_retain or objc_release, return the RCIdentity root of the argument of
+/// the call.
+inline Value *GetArgRCIdentityRoot(Value *Inst) {
+ return GetRCIdentityRoot(cast<CallInst>(Inst)->getArgOperand(0));
+}
+
+inline bool IsNullOrUndef(const Value *V) {
+ return isa<ConstantPointerNull>(V) || isa<UndefValue>(V);
+}
+
+inline bool IsNoopInstruction(const Instruction *I) {
+ return isa<BitCastInst>(I) ||
+ (isa<GetElementPtrInst>(I) &&
+ cast<GetElementPtrInst>(I)->hasAllZeroIndices());
+}
+
+/// \brief Test whether the given value is possibly a retainable object pointer.
+inline bool IsPotentialRetainableObjPtr(const Value *Op) {
+ // Pointers to static or stack storage are not valid retainable object
+ // pointers.
+ if (isa<Constant>(Op) || isa<AllocaInst>(Op))
+ return false;
+  // Special arguments cannot be valid retainable object pointers.
+ if (const Argument *Arg = dyn_cast<Argument>(Op))
+ if (Arg->hasByValAttr() ||
+ Arg->hasInAllocaAttr() ||
+ Arg->hasNestAttr() ||
+ Arg->hasStructRetAttr())
+ return false;
+ // Only consider values with pointer types.
+ //
+  // It seems intuitive to exclude function pointer types as well, since
+  // functions are never retainable object pointers; however, clang
+  // occasionally bitcasts retainable object pointers to function-pointer type
+  // temporarily.
+ PointerType *Ty = dyn_cast<PointerType>(Op->getType());
+ if (!Ty)
+ return false;
+ // Conservatively assume anything else is a potential retainable object
+ // pointer.
+ return true;
+}
+
+inline bool IsPotentialRetainableObjPtr(const Value *Op,
+ AliasAnalysis &AA) {
+ // First make the rudimentary check.
+ if (!IsPotentialRetainableObjPtr(Op))
+ return false;
+
+ // Objects in constant memory are not reference-counted.
+ if (AA.pointsToConstantMemory(Op))
+ return false;
+
+ // Pointers in constant memory are not pointing to reference-counted objects.
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Op))
+ if (AA.pointsToConstantMemory(LI->getPointerOperand()))
+ return false;
+
+ // Otherwise assume the worst.
+ return true;
+}
+
+/// \brief Helper for GetARCInstKind. Determines what kind of construct CS
+/// is.
+inline ARCInstKind GetCallSiteClass(ImmutableCallSite CS) {
+ for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+ I != E; ++I)
+ if (IsPotentialRetainableObjPtr(*I))
+ return CS.onlyReadsMemory() ? ARCInstKind::User : ARCInstKind::CallOrUser;
+
+ return CS.onlyReadsMemory() ? ARCInstKind::None : ARCInstKind::Call;
+}
+
+/// \brief Return true if this value refers to a distinct and identifiable
+/// object.
+///
+/// This is similar to AliasAnalysis's isIdentifiedObject, except that it uses
+/// special knowledge of ObjC conventions.
+inline bool IsObjCIdentifiedObject(const Value *V) {
+ // Assume that call results and arguments have their own "provenance".
+ // Constants (including GlobalVariables) and Allocas are never
+ // reference-counted.
+ if (isa<CallInst>(V) || isa<InvokeInst>(V) ||
+ isa<Argument>(V) || isa<Constant>(V) ||
+ isa<AllocaInst>(V))
+ return true;
+
+ if (const LoadInst *LI = dyn_cast<LoadInst>(V)) {
+ const Value *Pointer =
+ GetRCIdentityRoot(LI->getPointerOperand());
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Pointer)) {
+ // A constant pointer can't be pointing to an object on the heap. It may
+ // be reference-counted, but it won't be deleted.
+ if (GV->isConstant())
+ return true;
+ StringRef Name = GV->getName();
+ // These special variables are known to hold values which are not
+ // reference-counted pointers.
+ if (Name.startswith("\01l_objc_msgSend_fixup_"))
+ return true;
+
+ StringRef Section = GV->getSection();
+ if (Section.find("__message_refs") != StringRef::npos ||
+ Section.find("__objc_classrefs") != StringRef::npos ||
+ Section.find("__objc_superrefs") != StringRef::npos ||
+ Section.find("__objc_methname") != StringRef::npos ||
+ Section.find("__cstring") != StringRef::npos)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+enum class ARCMDKindID {
+ ImpreciseRelease,
+ CopyOnEscape,
+ NoObjCARCExceptions,
+};
+
+/// A cache of MDKinds used by various ARC optimizations.
+class ARCMDKindCache {
+ Module *M;
+
+ /// The Metadata Kind for clang.imprecise_release metadata.
+ llvm::Optional<unsigned> ImpreciseReleaseMDKind;
+
+ /// The Metadata Kind for clang.arc.copy_on_escape metadata.
+ llvm::Optional<unsigned> CopyOnEscapeMDKind;
+
+ /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
+ llvm::Optional<unsigned> NoObjCARCExceptionsMDKind;
+
+public:
+ void init(Module *Mod) {
+ M = Mod;
+ ImpreciseReleaseMDKind = NoneType::None;
+ CopyOnEscapeMDKind = NoneType::None;
+ NoObjCARCExceptionsMDKind = NoneType::None;
+ }
+
+ unsigned get(ARCMDKindID ID) {
+ switch (ID) {
+ case ARCMDKindID::ImpreciseRelease:
+ if (!ImpreciseReleaseMDKind)
+ ImpreciseReleaseMDKind =
+ M->getContext().getMDKindID("clang.imprecise_release");
+ return *ImpreciseReleaseMDKind;
+ case ARCMDKindID::CopyOnEscape:
+ if (!CopyOnEscapeMDKind)
+ CopyOnEscapeMDKind =
+ M->getContext().getMDKindID("clang.arc.copy_on_escape");
+ return *CopyOnEscapeMDKind;
+ case ARCMDKindID::NoObjCARCExceptions:
+ if (!NoObjCARCExceptionsMDKind)
+ NoObjCARCExceptionsMDKind =
+ M->getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
+ return *NoObjCARCExceptionsMDKind;
+ }
+ llvm_unreachable("Covered switch isn't covered?!");
+ }
+};
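+
+// A minimal usage sketch (illustrative; Inst and Node are assumed to exist):
+//
+//   ARCMDKindCache Cache;
+//   Cache.init(Inst->getModule());
+//   Inst->setMetadata(Cache.get(ARCMDKindID::ImpreciseRelease), Node);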
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h b/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h
new file mode 100644
index 0000000..13efb4b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ObjCARCInstKind.h
@@ -0,0 +1,123 @@
+//===- ObjCARCInstKind.h - ARC instruction equivalence classes --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_OBJCARCINSTKIND_H
+#define LLVM_ANALYSIS_OBJCARCINSTKIND_H
+
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+namespace objcarc {
+
+/// \enum ARCInstKind
+///
+/// \brief Equivalence classes of instructions in the ARC Model.
+///
+/// Since we do not have "instructions" to represent ARC concepts in LLVM IR,
+/// we instead operate on equivalence classes of instructions.
+///
+/// TODO: This should be split into two enums: a runtime entry point enum
+/// (possibly united with the ARCRuntimeEntrypoint class) and an enum that deals
+/// with effects of instructions in the ARC model (which would handle the notion
+/// of a User or CallOrUser).
+enum class ARCInstKind {
+ Retain, ///< objc_retain
+ RetainRV, ///< objc_retainAutoreleasedReturnValue
+ RetainBlock, ///< objc_retainBlock
+ Release, ///< objc_release
+ Autorelease, ///< objc_autorelease
+ AutoreleaseRV, ///< objc_autoreleaseReturnValue
+ AutoreleasepoolPush, ///< objc_autoreleasePoolPush
+ AutoreleasepoolPop, ///< objc_autoreleasePoolPop
+ NoopCast, ///< objc_retainedObject, etc.
+ FusedRetainAutorelease, ///< objc_retainAutorelease
+ FusedRetainAutoreleaseRV, ///< objc_retainAutoreleaseReturnValue
+ LoadWeakRetained, ///< objc_loadWeakRetained (primitive)
+ StoreWeak, ///< objc_storeWeak (primitive)
+ InitWeak, ///< objc_initWeak (derived)
+ LoadWeak, ///< objc_loadWeak (derived)
+ MoveWeak, ///< objc_moveWeak (derived)
+ CopyWeak, ///< objc_copyWeak (derived)
+ DestroyWeak, ///< objc_destroyWeak (derived)
+ StoreStrong, ///< objc_storeStrong (derived)
+ IntrinsicUser, ///< clang.arc.use
+ CallOrUser, ///< could call objc_release and/or "use" pointers
+ Call, ///< could call objc_release
+ User, ///< could "use" a pointer
+ None ///< anything that is inert from an ARC perspective.
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ARCInstKind Class);
+
+/// \brief Test if the given class is a kind of user.
+bool IsUser(ARCInstKind Class);
+
+/// \brief Test if the given class is objc_retain or equivalent.
+bool IsRetain(ARCInstKind Class);
+
+/// \brief Test if the given class is objc_autorelease or equivalent.
+bool IsAutorelease(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which return their
+/// argument verbatim.
+bool IsForwarding(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which do nothing if
+/// passed a null pointer.
+bool IsNoopOnNull(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are always safe
+/// to mark with the "tail" keyword.
+bool IsAlwaysTail(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are never safe
+/// to mark with the "tail" keyword.
+bool IsNeverTail(ARCInstKind Class);
+
+/// \brief Test if the given class represents instructions which are always safe
+/// to mark with the nounwind attribute.
+bool IsNoThrow(ARCInstKind Class);
+
+/// Test whether the given instruction can autorelease any pointer or cause an
+/// autoreleasepool pop.
+bool CanInterruptRV(ARCInstKind Class);
+
+/// \brief Determine if F is one of the special known Functions. If it isn't,
+/// return ARCInstKind::CallOrUser.
+ARCInstKind GetFunctionClass(const Function *F);
+
+/// \brief Determine which objc runtime call instruction class V belongs to.
+///
+/// This is similar to GetARCInstKind except that it only detects objc
+/// runtime calls. This allows it to be faster.
+///
+inline ARCInstKind GetBasicARCInstKind(const Value *V) {
+ if (const CallInst *CI = dyn_cast<CallInst>(V)) {
+ if (const Function *F = CI->getCalledFunction())
+ return GetFunctionClass(F);
+ // Otherwise, be conservative.
+ return ARCInstKind::CallOrUser;
+ }
+
+ // Otherwise, be conservative.
+ return isa<InvokeInst>(V) ? ARCInstKind::CallOrUser : ARCInstKind::User;
+}
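+
+// Illustrative use (not from the original header): cheaply filtering for
+// retain calls before doing heavier analysis:
+//
+//   if (GetBasicARCInstKind(V) == ARCInstKind::Retain)
+//     ... // V is a call to objc_retain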
+
+/// Map V to its ARCInstKind equivalence class.
+ARCInstKind GetARCInstKind(const Value *V);
+
+/// Returns false if we can conservatively prove that no instruction mapped to
+/// this kind can decrement ref counts; returns true otherwise.
+bool CanDecrementRefCount(ARCInstKind Kind);
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/OrderedBasicBlock.h b/contrib/llvm/include/llvm/Analysis/OrderedBasicBlock.h
new file mode 100644
index 0000000..5aa813e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/OrderedBasicBlock.h
@@ -0,0 +1,66 @@
+//===- llvm/Analysis/OrderedBasicBlock.h --------------------- -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OrderedBasicBlock class. OrderedBasicBlock maintains
+// an interface where clients can query if one instruction comes before another
+// in a BasicBlock. Since BasicBlock currently lacks a reliable way to query
+// the relative position between instructions, one can use OrderedBasicBlock to
+// do such queries. OrderedBasicBlock is lazily built on a source BasicBlock
+// and maintains an internal Instruction -> Position map. An OrderedBasicBlock
+// instance should be discarded whenever the source BasicBlock changes.
+//
+// It's currently used by the CaptureTracker in order to find relative
+// positions of a pair of instructions inside a BasicBlock.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_ORDEREDBASICBLOCK_H
+#define LLVM_ANALYSIS_ORDEREDBASICBLOCK_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/BasicBlock.h"
+
+namespace llvm {
+
+class Instruction;
+class BasicBlock;
+
+class OrderedBasicBlock {
+private:
+  /// \brief Map an instruction to its position in a BasicBlock.
+ SmallDenseMap<const Instruction *, unsigned, 32> NumberedInsts;
+
+ /// \brief Keep track of last instruction inserted into \p NumberedInsts.
+ /// It speeds up queries for uncached instructions by providing a start point
+ /// for new queries in OrderedBasicBlock::comesBefore.
+ BasicBlock::const_iterator LastInstFound;
+
+ /// \brief The position/number to tag the next instruction to be found.
+ unsigned NextInstPos;
+
+ /// \brief The source BasicBlock to map.
+ const BasicBlock *BB;
+
+ /// \brief Given no cached results, find if \p A comes before \p B in \p BB.
+  /// Cache and number instructions while walking \p BB.
+ bool comesBefore(const Instruction *A, const Instruction *B);
+
+public:
+ OrderedBasicBlock(const BasicBlock *BasicB);
+
+ /// \brief Find out whether \p A dominates \p B, meaning whether \p A
+ /// comes before \p B in \p BB. This is a simplification that considers
+ /// cached instruction positions and ignores other basic blocks, being
+  /// only relevant for comparing relative instruction positions inside \p BB.
+ bool dominates(const Instruction *A, const Instruction *B);
+};
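+
+// A minimal usage sketch (illustrative): assuming A and B are instructions in
+// block BB, and BB is not mutated while OBB is alive:
+//
+//   OrderedBasicBlock OBB(BB);
+//   if (OBB.dominates(A, B))
+//     ... // A comes before B in BB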
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/PHITransAddr.h b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
new file mode 100644
index 0000000..f0f34f3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/PHITransAddr.h
@@ -0,0 +1,127 @@
+//===- PHITransAddr.h - PHI Translation for Addresses -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PHITransAddr class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PHITRANSADDR_H
+#define LLVM_ANALYSIS_PHITRANSADDR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+
+namespace llvm {
+ class AssumptionCache;
+ class DominatorTree;
+ class DataLayout;
+ class TargetLibraryInfo;
+
+/// PHITransAddr - An address value which tracks and handles phi translation.
+/// As we walk "up" the CFG through predecessors, we need to ensure that the
+/// address we're tracking is kept up to date. For example, if we're analyzing
+/// an address of "&A[i]" and walk through the definition of 'i' which is a PHI
+/// node, we *must* phi translate i to get "&A[j]" or else we will analyze an
+/// incorrect pointer in the predecessor block.
+///
+/// This is designed to be a relatively small object that lives on the stack and
+/// is copyable.
+///
+class PHITransAddr {
+ /// Addr - The actual address we're analyzing.
+ Value *Addr;
+
+ /// The DataLayout we are playing with.
+ const DataLayout &DL;
+
+ /// TLI - The target library info if known, otherwise null.
+ const TargetLibraryInfo *TLI;
+
+ /// A cache of @llvm.assume calls used by SimplifyInstruction.
+ AssumptionCache *AC;
+
+ /// InstInputs - The inputs for our symbolic address.
+ SmallVector<Instruction*, 4> InstInputs;
+
+public:
+ PHITransAddr(Value *addr, const DataLayout &DL, AssumptionCache *AC)
+ : Addr(addr), DL(DL), TLI(nullptr), AC(AC) {
+ // If the address is an instruction, the whole thing is considered an input.
+ if (Instruction *I = dyn_cast<Instruction>(Addr))
+ InstInputs.push_back(I);
+ }
+
+ Value *getAddr() const { return Addr; }
+
+ /// NeedsPHITranslationFromBlock - Return true if moving from the specified
+ /// BasicBlock to its predecessors requires PHI translation.
+ bool NeedsPHITranslationFromBlock(BasicBlock *BB) const {
+ // We do need translation if one of our input instructions is defined in
+ // this block.
+ for (unsigned i = 0, e = InstInputs.size(); i != e; ++i)
+ if (InstInputs[i]->getParent() == BB)
+ return true;
+ return false;
+ }
+
+ /// IsPotentiallyPHITranslatable - If this needs PHI translation, return true
+ /// if we have some hope of doing it. This should be used as a filter to
+ /// avoid calling PHITranslateValue in hopeless situations.
+ bool IsPotentiallyPHITranslatable() const;
+
+ /// PHITranslateValue - PHI translate the current address up the CFG from
+ /// CurBB to Pred, updating our state to reflect any needed changes. If
+ /// 'MustDominate' is true, the translated value must dominate
+ /// PredBB. This returns true on failure and sets Addr to null.
+ bool PHITranslateValue(BasicBlock *CurBB, BasicBlock *PredBB,
+ const DominatorTree *DT, bool MustDominate);
+
+ /// PHITranslateWithInsertion - PHI translate this value into the specified
+ /// predecessor block, inserting a computation of the value if it is
+ /// unavailable.
+ ///
+ /// All newly created instructions are added to the NewInsts list. This
+ /// returns null on failure.
+ ///
+ Value *PHITranslateWithInsertion(BasicBlock *CurBB, BasicBlock *PredBB,
+ const DominatorTree &DT,
+ SmallVectorImpl<Instruction *> &NewInsts);
+
+ void dump() const;
+
+ /// Verify - Check internal consistency of this data structure. If the
+ /// structure is valid, it returns true. If invalid, it prints errors and
+ /// returns false.
+ bool Verify() const;
+
+private:
+ Value *PHITranslateSubExpr(Value *V, BasicBlock *CurBB, BasicBlock *PredBB,
+ const DominatorTree *DT);
+
+ /// InsertPHITranslatedSubExpr - Insert a computation of the PHI translated
+ /// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
+ /// block. All newly created instructions are added to the NewInsts list.
+ /// This returns null on failure.
+ ///
+ Value *InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
+ BasicBlock *PredBB, const DominatorTree &DT,
+ SmallVectorImpl<Instruction *> &NewInsts);
+
+ /// AddAsInput - If the specified value is an instruction, add it as an input.
+ Value *AddAsInput(Value *V) {
+ // If V is an instruction, it is now an input.
+ if (Instruction *VI = dyn_cast<Instruction>(V))
+ InstInputs.push_back(VI);
+ return V;
+ }
+};
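+
+// A minimal usage sketch (illustrative): translating a pointer from CurBB into
+// a predecessor PredBB before scanning that predecessor, assuming DL, AC, and
+// DT are available:
+//
+//   PHITransAddr Address(Ptr, DL, AC);
+//   if (Address.PHITranslateValue(CurBB, PredBB, &DT, /*MustDominate=*/false))
+//     return;                           // failure; Address.getAddr() is null
+//   Value *PredPtr = Address.getAddr(); // pointer valid in PredBB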
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/Passes.h b/contrib/llvm/include/llvm/Analysis/Passes.h
new file mode 100644
index 0000000..da17457
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Passes.h
@@ -0,0 +1,108 @@
+//===-- llvm/Analysis/Passes.h - Constructors for analyses ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the analysis libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PASSES_H
+#define LLVM_ANALYSIS_PASSES_H
+
+namespace llvm {
+ class FunctionPass;
+ class ImmutablePass;
+ class LoopPass;
+ class ModulePass;
+ class Pass;
+ class PassInfo;
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createAAEvalPass - This pass implements a simple N^2 alias analysis
+ // accuracy evaluator.
+ //
+ FunctionPass *createAAEvalPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createObjCARCAAWrapperPass - This pass implements ObjC-ARC-based
+ // alias analysis.
+ //
+ ImmutablePass *createObjCARCAAWrapperPass();
+
+ FunctionPass *createPAEvalPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ /// createLazyValueInfoPass - This creates an instance of the LazyValueInfo
+ /// pass.
+ FunctionPass *createLazyValueInfoPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createDependenceAnalysisPass - This creates an instance of the
+ // DependenceAnalysis pass.
+ //
+ FunctionPass *createDependenceAnalysisPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createCostModelAnalysisPass - This creates an instance of the
+ // CostModelAnalysis pass.
+ //
+ FunctionPass *createCostModelAnalysisPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+  // createDelinearizationPass - This pass attempts to restore
+ // multidimensional array indices from linearized expressions.
+ //
+ FunctionPass *createDelinearizationPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createDivergenceAnalysisPass - This pass determines which branches in a GPU
+ // program are divergent.
+ //
+ FunctionPass *createDivergenceAnalysisPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // Minor pass prototypes, allowing us to expose them through bugpoint and
+ // analyze.
+ FunctionPass *createInstCountPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createRegionInfoPass - This pass finds all single entry single exit regions
+ // in a function and builds the region hierarchy.
+ //
+ FunctionPass *createRegionInfoPass();
+
+ // Print module-level debug info metadata in human-readable form.
+ ModulePass *createModuleDebugInfoPrinterPass();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createMemDepPrinter - This pass exhaustively collects all memdep
+ // information and prints it with -analyze.
+ //
+ FunctionPass *createMemDepPrinter();
+
+ //===--------------------------------------------------------------------===//
+ //
+ // createMemDerefPrinter - This pass collects memory dereferenceability
+ // information and prints it with -analyze.
+ //
+ FunctionPass *createMemDerefPrinter();
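+
+  // Illustrative sketch (not part of the original header): these factories are
+  // typically handed to a legacy pass manager, e.g.:
+  //
+  //   legacy::PassManager PM;          // llvm/IR/LegacyPassManager.h
+  //   PM.add(createAAEvalPass());
+  //   PM.add(createMemDepPrinter());
+  //   PM.run(M);                       // M is a Module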
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/PostDominators.h b/contrib/llvm/include/llvm/Analysis/PostDominators.h
new file mode 100644
index 0000000..0f7e2b8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/PostDominators.h
@@ -0,0 +1,117 @@
+//=- llvm/Analysis/PostDominators.h - Post Dominator Calculation-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post dominance information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_POSTDOMINATORS_H
+#define LLVM_ANALYSIS_POSTDOMINATORS_H
+
+#include "llvm/IR/Dominators.h"
+
+namespace llvm {
+
+/// PostDominatorTree Class - A FunctionPass which computes the post-dominator
+/// tree, exposing it through a DominatorTreeBase instance.
+///
+struct PostDominatorTree : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ DominatorTreeBase<BasicBlock>* DT;
+
+ PostDominatorTree() : FunctionPass(ID) {
+ initializePostDominatorTreePass(*PassRegistry::getPassRegistry());
+ DT = new DominatorTreeBase<BasicBlock>(true);
+ }
+
+ ~PostDominatorTree() override;
+
+ bool runOnFunction(Function &F) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ inline const std::vector<BasicBlock*> &getRoots() const {
+ return DT->getRoots();
+ }
+
+ inline DomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ inline DomTreeNode *operator[](BasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ inline DomTreeNode *getNode(BasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ inline bool dominates(DomTreeNode* A, DomTreeNode* B) const {
+ return DT->dominates(A, B);
+ }
+
+ inline bool dominates(const BasicBlock* A, const BasicBlock* B) const {
+ return DT->dominates(A, B);
+ }
+
+ inline bool properlyDominates(const DomTreeNode* A, DomTreeNode* B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ inline bool properlyDominates(BasicBlock* A, BasicBlock* B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ inline BasicBlock *findNearestCommonDominator(BasicBlock *A, BasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ inline const BasicBlock *findNearestCommonDominator(const BasicBlock *A,
+ const BasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ /// Get all nodes post-dominated by R, including R itself.
+ void getDescendants(BasicBlock *R,
+ SmallVectorImpl<BasicBlock *> &Result) const {
+ DT->getDescendants(R, Result);
+ }
+
+ void releaseMemory() override {
+ DT->releaseMemory();
+ }
+
+ void print(raw_ostream &OS, const Module*) const override;
+};
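+
+// A minimal usage sketch (illustrative): a pass consuming post-dominance
+// declares the dependency, then queries the tree:
+//
+//   void getAnalysisUsage(AnalysisUsage &AU) const override {
+//     AU.addRequired<PostDominatorTree>();
+//   }
+//   ...
+//   PostDominatorTree &PDT = getAnalysis<PostDominatorTree>();
+//   if (PDT.dominates(A, B))
+//     ... // A post-dominates B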
+
+FunctionPass* createPostDomTree();
+
+template <> struct GraphTraits<PostDominatorTree*>
+ : public GraphTraits<DomTreeNode*> {
+ static NodeType *getEntryNode(PostDominatorTree *DT) {
+ return DT->getRootNode();
+ }
+
+ static nodes_iterator nodes_begin(PostDominatorTree *N) {
+ if (getEntryNode(N))
+ return df_begin(getEntryNode(N));
+ else
+ return df_end(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(PostDominatorTree *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/PtrUseVisitor.h b/contrib/llvm/include/llvm/Analysis/PtrUseVisitor.h
new file mode 100644
index 0000000..6e61fc3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/PtrUseVisitor.h
@@ -0,0 +1,285 @@
+//===- PtrUseVisitor.h - InstVisitors over a pointer's uses -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a collection of visitors which walk the (instruction)
+/// uses of a pointer. These visitors all provide the same essential behavior
+/// as an InstVisitor with similar template-based flexibility and
+/// implementation strategies.
+///
+/// These can be used, for example, to quickly analyze the uses of an alloca,
+/// global variable, or function argument.
+///
+/// FIXME: Provide a variant which doesn't track offsets and is cheaper.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_PTRUSEVISITOR_H
+#define LLVM_ANALYSIS_PTRUSEVISITOR_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+namespace detail {
+/// \brief Implementation of non-dependent functionality for \c PtrUseVisitor.
+///
+/// See \c PtrUseVisitor for the public interface and detailed comments about
+/// usage. This class is just a helper base class which is not templated and
+/// contains all common code to be shared between different instantiations of
+/// PtrUseVisitor.
+class PtrUseVisitorBase {
+public:
+ /// \brief This class provides information about the result of a visit.
+ ///
+ /// After walking all the users (recursively) of a pointer, the basic
+ /// infrastructure records some commonly useful information such as escape
+ /// analysis and whether the visit completed or aborted early.
+ class PtrInfo {
+ public:
+ PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
+
+ /// \brief Reset the pointer info, clearing all state.
+ void reset() {
+ AbortedInfo.setPointer(nullptr);
+ AbortedInfo.setInt(false);
+ EscapedInfo.setPointer(nullptr);
+ EscapedInfo.setInt(false);
+ }
+
+ /// \brief Did we abort the visit early?
+ bool isAborted() const { return AbortedInfo.getInt(); }
+
+ /// \brief Is the pointer escaped at some point?
+ bool isEscaped() const { return EscapedInfo.getInt(); }
+
+ /// \brief Get the instruction causing the visit to abort.
+ /// \returns a pointer to the instruction causing the abort if one is
+ /// available; otherwise returns null.
+ Instruction *getAbortingInst() const { return AbortedInfo.getPointer(); }
+
+ /// \brief Get the instruction causing the pointer to escape.
+ /// \returns a pointer to the instruction which escapes the pointer if one
+ /// is available; otherwise returns null.
+ Instruction *getEscapingInst() const { return EscapedInfo.getPointer(); }
+
+ /// \brief Mark the visit as aborted. Intended for use in a void return.
+ /// \param I The instruction which caused the visit to abort, if available.
+ void setAborted(Instruction *I = nullptr) {
+ AbortedInfo.setInt(true);
+ AbortedInfo.setPointer(I);
+ }
+
+ /// \brief Mark the pointer as escaped. Intended for use in a void return.
+ /// \param I The instruction which escapes the pointer, if available.
+ void setEscaped(Instruction *I = nullptr) {
+ EscapedInfo.setInt(true);
+ EscapedInfo.setPointer(I);
+ }
+
+ /// \brief Mark the pointer as escaped, and the visit as aborted. Intended
+ /// for use in a void return.
+ /// \param I The instruction which both escapes the pointer and aborts the
+ /// visit, if available.
+ void setEscapedAndAborted(Instruction *I = nullptr) {
+ setEscaped(I);
+ setAborted(I);
+ }
+
+ private:
+ PointerIntPair<Instruction *, 1, bool> AbortedInfo, EscapedInfo;
+ };
+
+protected:
+ const DataLayout &DL;
+
+ /// \name Visitation infrastructure
+ /// @{
+
+ /// \brief The info collected about the pointer being visited thus far.
+ PtrInfo PI;
+
+ /// \brief A struct of the data needed to visit a particular use.
+ ///
+  /// This is used to maintain a worklist of to-visit uses. This is used to
+  /// make the visit iterative rather than recursive.
+ struct UseToVisit {
+ typedef PointerIntPair<Use *, 1, bool> UseAndIsOffsetKnownPair;
+ UseAndIsOffsetKnownPair UseAndIsOffsetKnown;
+ APInt Offset;
+ };
+
+ /// \brief The worklist of to-visit uses.
+ SmallVector<UseToVisit, 8> Worklist;
+
+ /// \brief A set of visited uses to break cycles in unreachable code.
+ SmallPtrSet<Use *, 8> VisitedUses;
+
+ /// @}
+
+
+ /// \name Per-visit state
+ /// This state is reset for each instruction visited.
+ /// @{
+
+ /// \brief The use currently being visited.
+ Use *U;
+
+ /// \brief True if we have a known constant offset for the use currently
+ /// being visited.
+ bool IsOffsetKnown;
+
+ /// \brief The constant offset of the use if that is known.
+ APInt Offset;
+
+ /// @}
+
+
+ /// Note that the constructor is protected because this class must be a base
+  /// class; we can't create instances of this class directly.
+ PtrUseVisitorBase(const DataLayout &DL) : DL(DL) {}
+
+ /// \brief Enqueue the users of this instruction in the visit worklist.
+ ///
+ /// This will visit the users with the same offset as the current visit
+ /// (including an unknown offset if that is the current state).
+ void enqueueUsers(Instruction &I);
+
+ /// \brief Walk the operands of a GEP and adjust the offset as appropriate.
+ ///
+ /// This routine does the heavy lifting of the pointer walk by computing
+ /// offsets and looking through GEPs.
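+ ///
+ /// For example (a sketch, with the exact sizes coming from the DataLayout):
+ /// stepping through a GEP with a single constant index of 4 over an i32
+ /// pointer adds 4 * 4 = 16 bytes to the tracked offset on a target where
+ /// i32 occupies 4 bytes.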
+ bool adjustOffsetForGEP(GetElementPtrInst &GEPI);
+};
+} // end namespace detail
+
+/// \brief A base class for visitors over the uses of a pointer value.
+///
+/// Once constructed, a user can call \c visit on a pointer value, and this
+/// will walk its uses and visit each instruction using an InstVisitor. It also
+/// provides visit methods which will recurse through any pointer-to-pointer
+/// transformations such as GEPs and bitcasts.
+///
+/// During the visit, the current Use* being visited is available to the
+/// subclass, as well as the current offset from the original base pointer if
+/// known.
+///
+/// The recursive visit of uses is accomplished with a worklist, so the only
+/// ordering guarantee is that an instruction is visited before any uses of it
+/// are visited. Note that this does *not* mean before any of its users are
+/// visited! This is because users can be visited multiple times due to
+/// multiple, different uses of pointers derived from the same base.
+///
+/// A particular Use will only be visited once, but a User may be visited
+/// multiple times, once per Use. These visits may notably have different
+/// offsets.
+///
+/// All visit methods on the underlying InstVisitor return a boolean. This
+/// return value short-circuits the visit, stopping it immediately.
+///
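+/// A minimal derived-visitor sketch (the names here are illustrative only;
+/// this is a sketch, not a canonical implementation):
+/// \code
+///   struct LoadCounter : PtrUseVisitor<LoadCounter> {
+///     unsigned NumLoads = 0;
+///     LoadCounter(const DataLayout &DL) : PtrUseVisitor<LoadCounter>(DL) {}
+///     void visitLoadInst(LoadInst &LI) { ++NumLoads; }
+///   };
+/// \endcode
+/// Calling \c visitPtr on an alloca then counts the loads reachable through
+/// pointer-derived uses (GEPs, bitcasts, and so on).
+///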
+/// FIXME: Generalize this for all values rather than just instructions.
+template <typename DerivedT>
+class PtrUseVisitor : protected InstVisitor<DerivedT>,
+ public detail::PtrUseVisitorBase {
+ friend class InstVisitor<DerivedT>;
+ typedef InstVisitor<DerivedT> Base;
+
+public:
+ PtrUseVisitor(const DataLayout &DL) : PtrUseVisitorBase(DL) {}
+
+ /// \brief Recursively visit the uses of the given pointer.
+ /// \returns An info struct about the pointer. See \c PtrInfo for details.
+ PtrInfo visitPtr(Instruction &I) {
+ // This must be a pointer type. Get an integer type suitable to hold
+ // offsets on this pointer.
+ // FIXME: Support a vector of pointers.
+ assert(I.getType()->isPointerTy());
+ IntegerType *IntPtrTy = cast<IntegerType>(DL.getIntPtrType(I.getType()));
+ IsOffsetKnown = true;
+ Offset = APInt(IntPtrTy->getBitWidth(), 0);
+ PI.reset();
+
+ // Enqueue the uses of this pointer.
+ enqueueUsers(I);
+
+ // Visit all the uses off the worklist until it is empty.
+ while (!Worklist.empty()) {
+ UseToVisit ToVisit = Worklist.pop_back_val();
+ U = ToVisit.UseAndIsOffsetKnown.getPointer();
+ IsOffsetKnown = ToVisit.UseAndIsOffsetKnown.getInt();
+ if (IsOffsetKnown)
+ Offset = std::move(ToVisit.Offset);
+
+ Instruction *I = cast<Instruction>(U->getUser());
+ static_cast<DerivedT*>(this)->visit(I);
+ if (PI.isAborted())
+ break;
+ }
+ return PI;
+ }
+
+protected:
+ void visitStoreInst(StoreInst &SI) {
+ if (SI.getValueOperand() == U->get())
+ PI.setEscaped(&SI);
+ }
+
+ void visitBitCastInst(BitCastInst &BC) {
+ enqueueUsers(BC);
+ }
+
+ void visitPtrToIntInst(PtrToIntInst &I) {
+ PI.setEscaped(&I);
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ if (GEPI.use_empty())
+ return;
+
+ // If we can't walk the GEP, clear the offset.
+ if (!adjustOffsetForGEP(GEPI)) {
+ IsOffsetKnown = false;
+ Offset = APInt();
+ }
+
+ // Enqueue the users now that the offset has been adjusted.
+ enqueueUsers(GEPI);
+ }
+
+ // No-op intrinsics which we know don't escape the pointer to logic in
+ // some other function.
+ void visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {}
+ void visitMemIntrinsic(MemIntrinsic &I) {}
+ void visitIntrinsicInst(IntrinsicInst &II) {
+ switch (II.getIntrinsicID()) {
+ default:
+ return Base::visitIntrinsicInst(II);
+
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ return; // No-op intrinsics.
+ }
+ }
+
+ // Generically, arguments to calls and invokes escape the pointer to some
+ // other function. Mark that.
+ void visitCallSite(CallSite CS) {
+ PI.setEscaped(CS.getInstruction());
+ Base::visitCallSite(CS);
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfo.h b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
new file mode 100644
index 0000000..4988386
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfo.h
@@ -0,0 +1,921 @@
+//===- RegionInfo.h - SESE region analysis ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Calculate a program structure tree built out of single entry single exit
+// regions.
+// The basic ideas are taken from "The Program Structure Tree - Richard Johnson,
+// David Pearson, Keshav Pingali - 1994", however enriched with ideas from "The
+// Refined Process Structure Tree - Jussi Vanhatalo, Hagen Voelzer, Jana
+// Koehler - 2009".
+// The algorithm used to calculate these data structures, however, is
+// completely different, as it takes advantage of existing information already
+// available in the (post)dominance tree and dominance frontier passes. This
+// leads to a simpler and, in practice, hopefully better performing algorithm.
+// The runtimes of the algorithms described in the papers above are both linear
+// in graph size, O(V+E), whereas this algorithm is not, as the dominance
+// frontier information itself is not; in practice, however, the runtime
+// appears to be on the order of magnitude of a dominance tree calculation.
+//
+// WARNING: LLVM is generally very concerned about compile time, so the
+// use of additional analysis passes in the default
+// optimization sequence is avoided as much as possible.
+// Specifically, if you do not need RegionInfo and dominance
+// information would be sufficient, please base your work only
+// on the dominator tree. Most passes maintain it, so using it
+// often has near-zero cost. In contrast, RegionInfo is not
+// available by default, is not maintained by existing
+// transformations, and there is no intention to change that.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONINFO_H
+#define LLVM_ANALYSIS_REGIONINFO_H
+
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Dominators.h"
+#include <map>
+#include <memory>
+#include <set>
+
+namespace llvm {
+
+// Class to be specialized for different users of RegionInfo
+// (i.e. BasicBlocks or MachineBasicBlocks). This is only to avoid needing to
+// pass around an unreasonable number of template parameters.
+template <class FuncT_>
+struct RegionTraits {
+ // FuncT
+ // BlockT
+ // RegionT
+ // RegionNodeT
+ // RegionInfoT
+ typedef typename FuncT_::UnknownRegionTypeError BrokenT;
+};
+
+class DominatorTree;
+class DominanceFrontier;
+class Loop;
+class LoopInfo;
+struct PostDominatorTree;
+class raw_ostream;
+class Region;
+template <class RegionTr>
+class RegionBase;
+class RegionNode;
+class RegionInfo;
+template <class RegionTr>
+class RegionInfoBase;
+
+template <>
+struct RegionTraits<Function> {
+ typedef Function FuncT;
+ typedef BasicBlock BlockT;
+ typedef Region RegionT;
+ typedef RegionNode RegionNodeT;
+ typedef RegionInfo RegionInfoT;
+ typedef DominatorTree DomTreeT;
+ typedef DomTreeNode DomTreeNodeT;
+ typedef DominanceFrontier DomFrontierT;
+ typedef PostDominatorTree PostDomTreeT;
+ typedef Instruction InstT;
+ typedef Loop LoopT;
+ typedef LoopInfo LoopInfoT;
+
+ static unsigned getNumSuccessors(BasicBlock *BB) {
+ return BB->getTerminator()->getNumSuccessors();
+ }
+};
+
+/// @brief Marker class to iterate over the elements of a Region in flat mode.
+///
+/// Use this class as the graph type to iterate in Flat mode; omit it to
+/// iterate in non-flat mode. During a Flat mode iteration all Regions are
+/// entered and the iteration returns every BasicBlock. If Flat mode is not
+/// selected, just one RegionNode containing each subregion is returned.
+template <class GraphType>
+class FlatIt {};
+
+/// @brief A RegionNode represents a subregion or a BasicBlock that is part of a
+/// Region.
+template <class Tr>
+class RegionNodeBase {
+ friend class RegionBase<Tr>;
+
+public:
+ typedef typename Tr::BlockT BlockT;
+ typedef typename Tr::RegionT RegionT;
+
+private:
+ RegionNodeBase(const RegionNodeBase &) = delete;
+ const RegionNodeBase &operator=(const RegionNodeBase &) = delete;
+
+ /// This is the entry basic block that starts this region node. If this is a
+ /// BasicBlock RegionNode, then entry is just the basic block that this
+ /// RegionNode represents. Otherwise it is the entry of this (Sub)RegionNode.
+ ///
+ /// In the BBtoRegionNode map of the parent of this node, BB will always map
+ /// to this node no matter which kind of node this one is.
+ ///
+ /// The node can hold either a Region or a BasicBlock.
+ /// One bit records whether this RegionNode is a subregion or a BasicBlock
+ /// RegionNode.
+ PointerIntPair<BlockT *, 1, bool> entry;
+
+ /// @brief The parent Region of this RegionNode.
+ /// @see getParent()
+ RegionT *parent;
+
+protected:
+ /// @brief Create a RegionNode.
+ ///
+ /// @param Parent The parent of this RegionNode.
+ /// @param Entry The entry BasicBlock of the RegionNode. If this
+ /// RegionNode represents a BasicBlock, this is the
+ /// BasicBlock itself. If it represents a subregion, this
+ /// is the entry BasicBlock of the subregion.
+ /// @param isSubRegion If this RegionNode represents a SubRegion.
+ inline RegionNodeBase(RegionT *Parent, BlockT *Entry,
+ bool isSubRegion = false)
+ : entry(Entry, isSubRegion), parent(Parent) {}
+
+public:
+ /// @brief Get the parent Region of this RegionNode.
+ ///
+ /// The parent Region is the Region this RegionNode belongs to. If, for
+ /// example, a BasicBlock is an element of two Regions, there exist two
+ /// RegionNodes for this BasicBlock, each with its getParent() pointing to
+ /// the Region it belongs to.
+ ///
+ /// @return Get the parent Region of this RegionNode.
+ inline RegionT *getParent() const { return parent; }
+
+ /// @brief Get the entry BasicBlock of this RegionNode.
+ ///
+ /// If this RegionNode represents a BasicBlock, this is just the BasicBlock
+ /// itself; otherwise we return the entry BasicBlock of the subregion.
+ ///
+ /// @return The entry BasicBlock of this RegionNode.
+ inline BlockT *getEntry() const { return entry.getPointer(); }
+
+ /// @brief Get the content of this RegionNode.
+ ///
+ /// This can be either a BasicBlock or a subregion. Before calling getNodeAs()
+ /// check the type of the content with the isSubRegion() function call.
+ ///
+ /// @return The content of this RegionNode.
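+ ///
+ /// For example (a sketch mirroring the stream operator at the end of this
+ /// file):
+ /// \code
+ ///   if (Node.isSubRegion())
+ ///     OS << Node.getNodeAs<Region>()->getNameStr();
+ ///   else
+ ///     OS << Node.getNodeAs<BasicBlock>()->getName();
+ /// \endcode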
+ template <class T> inline T *getNodeAs() const;
+
+ /// @brief Is this RegionNode a subregion?
+ ///
+ /// @return True if it contains a subregion. False if it contains a
+ /// BasicBlock.
+ inline bool isSubRegion() const { return entry.getInt(); }
+};
+
+//===----------------------------------------------------------------------===//
+/// @brief A single entry single exit Region.
+///
+/// A Region is a connected subgraph of a control flow graph that has exactly
+/// two connections to the remaining graph. It can be used to analyze or
+/// optimize parts of the control flow graph.
+///
+/// A <em> simple Region </em> is connected to the remaining graph by just two
+/// edges. One edge entering the Region and another one leaving the Region.
+///
+/// An <em> extended Region </em> (or just Region) is a subgraph that can be
+/// transformed into a simple Region. The transformation is done by adding
+/// BasicBlocks that merge several entry or exit edges so that after the merge
+/// just one entry and one exit edge exists.
+///
+/// The \e Entry of a Region is the first BasicBlock that is passed after
+/// entering the Region. It is an element of the Region. The entry BasicBlock
+/// dominates all BasicBlocks in the Region.
+///
+/// The \e Exit of a Region is the first BasicBlock that is passed after
+/// leaving the Region. It is not an element of the Region. The exit BasicBlock
+/// postdominates all BasicBlocks in the Region.
+///
+/// A <em> canonical Region </em> cannot be constructed by combining smaller
+/// Regions.
+///
+/// Region A is the \e parent of Region B, if B is completely contained in A.
+///
+/// Two canonical Regions either do not intersect at all or one is
+/// the parent of the other.
+///
+/// The <em> Program Structure Tree</em> is a graph (V, E) where V is the set of
+/// Regions in the control flow graph and E is the \e parent relation of these
+/// Regions.
+///
+/// Example:
+///
+/// \verbatim
+/// A simple control flow graph, that contains two regions.
+///
+/// 1
+/// / |
+/// 2 |
+/// / \ 3
+/// 4 5 |
+/// | | |
+/// 6 7 8
+/// \ | /
+/// \ |/ Region A: 1 -> 9 {1,2,3,4,5,6,7,8}
+/// 9 Region B: 2 -> 9 {2,4,5,6,7}
+/// \endverbatim
+///
+/// You can obtain more examples by either calling
+///
+/// <tt> "opt -regions -analyze anyprogram.ll" </tt>
+/// or
+/// <tt> "opt -view-regions-only anyprogram.ll" </tt>
+///
+/// on any LLVM file you are interested in.
+///
+/// The first call returns a textual representation of the program structure
+/// tree, the second one creates a graphical representation using graphviz.
+template <class Tr>
+class RegionBase : public RegionNodeBase<Tr> {
+ typedef typename Tr::FuncT FuncT;
+ typedef typename Tr::BlockT BlockT;
+ typedef typename Tr::RegionInfoT RegionInfoT;
+ typedef typename Tr::RegionT RegionT;
+ typedef typename Tr::RegionNodeT RegionNodeT;
+ typedef typename Tr::DomTreeT DomTreeT;
+ typedef typename Tr::LoopT LoopT;
+ typedef typename Tr::LoopInfoT LoopInfoT;
+ typedef typename Tr::InstT InstT;
+
+ typedef GraphTraits<BlockT *> BlockTraits;
+ typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+ typedef typename InvBlockTraits::ChildIteratorType PredIterTy;
+
+ friend class RegionInfoBase<Tr>;
+ RegionBase(const RegionBase &) = delete;
+ const RegionBase &operator=(const RegionBase &) = delete;
+
+ // Information necessary to manage this Region.
+ RegionInfoT *RI;
+ DomTreeT *DT;
+
+ // The exit BasicBlock of this region.
+ // (The entry BasicBlock is part of RegionNode)
+ BlockT *exit;
+
+ typedef std::vector<std::unique_ptr<RegionT>> RegionSet;
+
+ // The subregions of this region.
+ RegionSet children;
+
+ typedef std::map<BlockT *, RegionNodeT *> BBNodeMapT;
+
+ // Save the BasicBlock RegionNodes that are element of this Region.
+ mutable BBNodeMapT BBNodeMap;
+
+ /// Check if a BB is in this Region. This check also works
+ /// if the region is incorrectly built. (EXPENSIVE!)
+ void verifyBBInRegion(BlockT *BB) const;
+
+ /// Walk over all the BBs of the region starting from BB and
+ /// verify that all reachable basic blocks are elements of the region.
+ /// (EXPENSIVE!)
+ void verifyWalk(BlockT *BB, std::set<BlockT *> *visitedBB) const;
+
+ /// Verify if the region and its children are valid regions (EXPENSIVE!)
+ void verifyRegionNest() const;
+
+public:
+ /// @brief Create a new region.
+ ///
+ /// @param Entry The entry basic block of the region.
+ /// @param Exit The exit basic block of the region.
+ /// @param RI The region info object that is managing this region.
+ /// @param DT The dominator tree of the current function.
+ /// @param Parent The surrounding region or NULL if this is a top level
+ /// region.
+ RegionBase(BlockT *Entry, BlockT *Exit, RegionInfoT *RI, DomTreeT *DT,
+ RegionT *Parent = nullptr);
+
+ /// Delete the Region and all its subregions.
+ ~RegionBase();
+
+ /// @brief Get the entry BasicBlock of the Region.
+ /// @return The entry BasicBlock of the region.
+ BlockT *getEntry() const {
+ return RegionNodeBase<Tr>::getEntry();
+ }
+
+ /// @brief Replace the entry basic block of the region with the new basic
+ /// block.
+ ///
+ /// @param BB The new entry basic block of the region.
+ void replaceEntry(BlockT *BB);
+
+ /// @brief Replace the exit basic block of the region with the new basic
+ /// block.
+ ///
+ /// @param BB The new exit basic block of the region.
+ void replaceExit(BlockT *BB);
+
+ /// @brief Recursively replace the entry basic block of the region.
+ ///
+ /// This function replaces the entry basic block with a new basic block. It
+ /// also updates all child regions that have the same entry basic block as
+ /// this region.
+ ///
+ /// @param NewEntry The new entry basic block.
+ void replaceEntryRecursive(BlockT *NewEntry);
+
+ /// @brief Recursively replace the exit basic block of the region.
+ ///
+ /// This function replaces the exit basic block with a new basic block. It
+ /// also updates all child regions that have the same exit basic block as
+ /// this region.
+ ///
+ /// @param NewExit The new exit basic block.
+ void replaceExitRecursive(BlockT *NewExit);
+
+ /// @brief Get the exit BasicBlock of the Region.
+ /// @return The exit BasicBlock of the Region, NULL if this is the TopLevel
+ /// Region.
+ BlockT *getExit() const { return exit; }
+
+ /// @brief Get the parent of the Region.
+ /// @return The parent of the Region or NULL if this is a top level
+ /// Region.
+ RegionT *getParent() const {
+ return RegionNodeBase<Tr>::getParent();
+ }
+
+ /// @brief Get the RegionNode representing the current Region.
+ /// @return The RegionNode representing the current Region.
+ RegionNodeT *getNode() const {
+ return const_cast<RegionNodeT *>(
+ reinterpret_cast<const RegionNodeT *>(this));
+ }
+
+ /// @brief Get the nesting level of this Region.
+ ///
+ /// A toplevel Region has depth 0.
+ ///
+ /// @return The depth of the region.
+ unsigned getDepth() const;
+
+ /// @brief Check if a Region is the TopLevel region.
+ ///
+ /// The toplevel region represents the whole function.
+ bool isTopLevelRegion() const { return exit == nullptr; }
+
+ /// @brief Return a new (non-canonical) region that is obtained by joining
+ /// this region with its predecessors.
+ ///
+ /// @return A region also starting at getEntry(), but reaching to the next
+ /// basic block that forms with getEntry() a (non-canonical) region.
+ /// NULL if such a basic block does not exist.
+ RegionT *getExpandedRegion() const;
+
+ /// @brief Return the first block of this region's single entry edge,
+ /// if one exists.
+ ///
+ /// @return The BasicBlock starting this region's single entry edge,
+ /// else NULL.
+ BlockT *getEnteringBlock() const;
+
+ /// @brief Return the first block of this region's single exit edge,
+ /// if one exists.
+ ///
+ /// @return The BasicBlock starting this region's single exit edge,
+ /// else NULL.
+ BlockT *getExitingBlock() const;
+
+ /// @brief Is this a simple region?
+ ///
+ /// A region is simple if it has exactly one exit and one entry edge.
+ ///
+ /// @return True if the Region is simple.
+ bool isSimple() const;
+
+ /// @brief Returns the name of the Region.
+ /// @return The Name of the Region.
+ std::string getNameStr() const;
+
+ /// @brief Return the RegionInfo object, that belongs to this Region.
+ RegionInfoT *getRegionInfo() const { return RI; }
+
+ /// PrintStyle - Print the region in different ways.
+ enum PrintStyle { PrintNone, PrintBB, PrintRN };
+
+ /// @brief Print the region.
+ ///
+ /// @param OS The output stream the Region is printed to.
+ /// @param printTree Print also the tree of subregions.
+ /// @param level The indentation level used for printing.
+ void print(raw_ostream &OS, bool printTree = true, unsigned level = 0,
+ PrintStyle Style = PrintNone) const;
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// @brief Print the region to stderr.
+ void dump() const;
+#endif
+
+ /// @brief Check if the region contains a BasicBlock.
+ ///
+ /// @param BB The BasicBlock that might be contained in this Region.
+ /// @return True if the block is contained in the region otherwise false.
+ bool contains(const BlockT *BB) const;
+
+ /// @brief Check if the region contains another region.
+ ///
+ /// @param SubRegion The region that might be contained in this Region.
+ /// @return True if SubRegion is contained in the region otherwise false.
+ bool contains(const RegionT *SubRegion) const {
+ // Toplevel Region.
+ if (!getExit())
+ return true;
+
+ return contains(SubRegion->getEntry()) &&
+ (contains(SubRegion->getExit()) ||
+ SubRegion->getExit() == getExit());
+ }
+
+ /// @brief Check if the region contains an Instruction.
+ ///
+ /// @param Inst The Instruction that might be contained in this region.
+ /// @return True if the Instruction is contained in the region otherwise
+ /// false.
+ bool contains(const InstT *Inst) const { return contains(Inst->getParent()); }
+
+ /// @brief Check if the region contains a loop.
+ ///
+ /// @param L The loop that might be contained in this region.
+ /// @return True if the loop is contained in the region otherwise false.
+ /// In case a NULL pointer is passed to this function the result
+ /// is false, except for the region that describes the whole function.
+ /// In that case true is returned.
+ bool contains(const LoopT *L) const;
+
+ /// @brief Get the outermost loop in the region that contains a loop.
+ ///
+ /// Find for a Loop L the outermost loop OuterL that is a parent loop of L
+ /// and is itself contained in the region.
+ ///
+ /// @param L The loop at which the lookup is started.
+ /// @return The outermost loop in the region, NULL if such a loop does not
+ /// exist or if the region describes the whole function.
+ LoopT *outermostLoopInRegion(LoopT *L) const;
+
+ /// @brief Get the outermost loop in the region that contains a basic block.
+ ///
+ /// Find for a basic block BB the outermost loop L that contains BB and is
+ /// itself contained in the region.
+ ///
+ /// @param LI A pointer to a LoopInfo analysis.
+ /// @param BB The basic block surrounded by the loop.
+ /// @return The outermost loop in the region, NULL if such a loop does not
+ /// exist or if the region describes the whole function.
+ LoopT *outermostLoopInRegion(LoopInfoT *LI, BlockT *BB) const;
+
+ /// @brief Get the subregion that starts at a BasicBlock
+ ///
+ /// @param BB The BasicBlock at which the subregion should start.
+ /// @return The Subregion if available, otherwise NULL.
+ RegionT *getSubRegionNode(BlockT *BB) const;
+
+ /// @brief Get the RegionNode for a BasicBlock
+ ///
+ /// @param BB The BasicBlock at which the RegionNode should start.
+ /// @return If available, the RegionNode that represents the subregion
+ /// starting at BB. If no subregion starts at BB, the RegionNode
+ /// representing BB.
+ RegionNodeT *getNode(BlockT *BB) const;
+
+ /// @brief Get the BasicBlock RegionNode for a BasicBlock
+ ///
+ /// @param BB The BasicBlock for which the RegionNode is requested.
+ /// @return The RegionNode representing the BB.
+ RegionNodeT *getBBNode(BlockT *BB) const;
+
+ /// @brief Add a new subregion to this Region.
+ ///
+ /// @param SubRegion The new subregion that will be added.
+ /// @param moveChildren Move the children of this region that are also
+ /// contained in SubRegion into SubRegion.
+ void addSubRegion(RegionT *SubRegion, bool moveChildren = false);
+
+ /// @brief Remove a subregion from this Region.
+ ///
+ /// The subregion is not deleted, as it will probably be inserted into another
+ /// region.
+ /// @param SubRegion The SubRegion that will be removed.
+ RegionT *removeSubRegion(RegionT *SubRegion);
+
+ /// @brief Move all direct child nodes of this Region to another Region.
+ ///
+ /// @param To The Region the child nodes will be transferred to.
+ void transferChildrenTo(RegionT *To);
+
+ /// @brief Verify if the region is a correct region.
+ ///
+ /// Check if this is a correctly built Region. This is an expensive check, as
+ /// the complete CFG of the Region will be walked.
+ void verifyRegion() const;
+
+ /// @brief Clear the cache for BB RegionNodes.
+ ///
+ /// After calling this function the BasicBlock RegionNodes will be stored at
+ /// different memory locations. RegionNodes obtained before this function is
+ /// called are therefore not comparable to RegionNodes obtained afterwards.
+ void clearNodeCache();
+
+ /// @name Subregion Iterators
+ ///
+ /// These iterators iterate over all subregions of this Region.
+ //@{
+ typedef typename RegionSet::iterator iterator;
+ typedef typename RegionSet::const_iterator const_iterator;
+
+ iterator begin() { return children.begin(); }
+ iterator end() { return children.end(); }
+
+ const_iterator begin() const { return children.begin(); }
+ const_iterator end() const { return children.end(); }
+ //@}
+
+ /// @name BasicBlock Iterators
+ ///
+ /// These iterators iterate over all BasicBlocks that are contained in this
+ /// Region. The iterator also iterates over BasicBlocks that are elements of
+ /// a subregion of this Region. It is therefore called a flat iterator.
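+ ///
+ /// For example, a flat walk over all blocks of a region R (a sketch):
+ /// \code
+ ///   for (auto *BB : R->blocks())
+ ///     ... // BB may also belong to a nested subregion of R
+ /// \endcode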
+ //@{
+ template <bool IsConst>
+ class block_iterator_wrapper
+ : public df_iterator<
+ typename std::conditional<IsConst, const BlockT, BlockT>::type *> {
+ typedef df_iterator<
+ typename std::conditional<IsConst, const BlockT, BlockT>::type *> super;
+
+ public:
+ typedef block_iterator_wrapper<IsConst> Self;
+ typedef typename super::pointer pointer;
+
+ // Construct the begin iterator.
+ block_iterator_wrapper(pointer Entry, pointer Exit)
+ : super(df_begin(Entry)) {
+ // Mark the exit of the region as visited, so that the children of the
+ // exit and the exit itself, i.e. the blocks outside the region, will
+ // never be visited.
+ super::Visited.insert(Exit);
+ }
+
+ // Construct the end iterator.
+ block_iterator_wrapper() : super(df_end<pointer>((BlockT *)nullptr)) {}
+
+ /*implicit*/ block_iterator_wrapper(super I) : super(I) {}
+
+ // FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
+ // This was introduced for backwards compatibility, but should
+ // be removed as soon as all users are fixed.
+ BlockT *operator*() const {
+ return const_cast<BlockT *>(super::operator*());
+ }
+ };
+
+ typedef block_iterator_wrapper<false> block_iterator;
+ typedef block_iterator_wrapper<true> const_block_iterator;
+
+ block_iterator block_begin() { return block_iterator(getEntry(), getExit()); }
+
+ block_iterator block_end() { return block_iterator(); }
+
+ const_block_iterator block_begin() const {
+ return const_block_iterator(getEntry(), getExit());
+ }
+ const_block_iterator block_end() const { return const_block_iterator(); }
+
+ typedef iterator_range<block_iterator> block_range;
+ typedef iterator_range<const_block_iterator> const_block_range;
+
+ /// @brief Returns a range view of the basic blocks in the region.
+ inline block_range blocks() {
+ return block_range(block_begin(), block_end());
+ }
+
+ /// @brief Returns a range view of the basic blocks in the region.
+ ///
+ /// This is the 'const' version of the range view.
+ inline const_block_range blocks() const {
+ return const_block_range(block_begin(), block_end());
+ }
+ //@}
+
+ /// @name Element Iterators
+ ///
+ /// These iterators iterate over all BasicBlock and subregion RegionNodes that
+ /// are direct children of this Region. They do not iterate over any
+ /// RegionNodes that are also elements of a subregion of this Region.
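+ ///
+ /// For example, printing the direct children of a region R, as the PrintRN
+ /// style of print() does (a sketch):
+ /// \code
+ ///   for (auto I = R->element_begin(), E = R->element_end(); I != E; ++I)
+ ///     OS << **I << ", ";
+ /// \endcode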
+ //@{
+ typedef df_iterator<RegionNodeT *, SmallPtrSet<RegionNodeT *, 8>, false,
+ GraphTraits<RegionNodeT *>> element_iterator;
+
+ typedef df_iterator<const RegionNodeT *, SmallPtrSet<const RegionNodeT *, 8>,
+ false,
+ GraphTraits<const RegionNodeT *>> const_element_iterator;
+
+ element_iterator element_begin();
+ element_iterator element_end();
+
+ const_element_iterator element_begin() const;
+ const_element_iterator element_end() const;
+ //@}
+};
+
+/// Print a RegionNode.
+template <class Tr>
+inline raw_ostream &operator<<(raw_ostream &OS, const RegionNodeBase<Tr> &Node);
+
+//===----------------------------------------------------------------------===//
+/// @brief Analysis that detects all canonical Regions.
+///
+/// The RegionInfo pass detects all canonical regions in a function. The Regions
+/// are connected using the parent relation. This builds a Program Structure
+/// Tree.
+template <class Tr>
+class RegionInfoBase {
+ typedef typename Tr::BlockT BlockT;
+ typedef typename Tr::FuncT FuncT;
+ typedef typename Tr::RegionT RegionT;
+ typedef typename Tr::RegionInfoT RegionInfoT;
+ typedef typename Tr::DomTreeT DomTreeT;
+ typedef typename Tr::DomTreeNodeT DomTreeNodeT;
+ typedef typename Tr::PostDomTreeT PostDomTreeT;
+ typedef typename Tr::DomFrontierT DomFrontierT;
+ typedef GraphTraits<BlockT *> BlockTraits;
+ typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+ typedef typename InvBlockTraits::ChildIteratorType PredIterTy;
+
+ friend class RegionInfo;
+ friend class MachineRegionInfo;
+ typedef DenseMap<BlockT *, BlockT *> BBtoBBMap;
+ typedef DenseMap<BlockT *, RegionT *> BBtoRegionMap;
+ typedef SmallPtrSet<RegionT *, 4> RegionSet;
+
+ RegionInfoBase();
+ virtual ~RegionInfoBase();
+
+ RegionInfoBase(const RegionInfoBase &) = delete;
+ const RegionInfoBase &operator=(const RegionInfoBase &) = delete;
+
+ DomTreeT *DT;
+ PostDomTreeT *PDT;
+ DomFrontierT *DF;
+
+ /// The top level region.
+ RegionT *TopLevelRegion;
+
+private:
+ /// Map every BB to the smallest region that contains BB.
+ BBtoRegionMap BBtoRegion;
+
+ // Check whether the entries of BBtoRegion for the BBs of region
+ // SR are correct. Triggers an assertion if not. Calls itself recursively for
+ // subregions.
+ void verifyBBMap(const RegionT *SR) const;
+
+ // Returns true if BB is in the dominance frontier of
+ // entry, because it was inherited from exit. Otherwise there is an
+ // edge going from entry to BB without passing exit.
+ bool isCommonDomFrontier(BlockT *BB, BlockT *entry, BlockT *exit) const;
+
+ // Check if entry and exit surround a valid region, based on
+ // dominance tree and dominance frontier.
+ bool isRegion(BlockT *entry, BlockT *exit) const;
+
+ // Saves a shortcut pointing from entry to exit.
+ // This function may extend this shortcut if possible.
+ void insertShortCut(BlockT *entry, BlockT *exit, BBtoBBMap *ShortCut) const;
+
+ // Returns the next BB that postdominates N, while skipping
+ // all post dominators that cannot finish a canonical region.
+ DomTreeNodeT *getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const;
+
+ // A region is trivial if it contains only one BB.
+ bool isTrivialRegion(BlockT *entry, BlockT *exit) const;
+
+ // Creates a single entry single exit region.
+ RegionT *createRegion(BlockT *entry, BlockT *exit);
+
+ // Detect all regions starting with bb 'entry'.
+ void findRegionsWithEntry(BlockT *entry, BBtoBBMap *ShortCut);
+
+ // Detects regions in F.
+ void scanForRegions(FuncT &F, BBtoBBMap *ShortCut);
+
+ // Get the top most parent with the same entry block.
+ RegionT *getTopMostParent(RegionT *region);
+
+ // Build the region hierarchy after all regions have been detected.
+ void buildRegionsTree(DomTreeNodeT *N, RegionT *region);
+
+ // Update statistics about created regions.
+ virtual void updateStatistics(RegionT *R) = 0;
+
+ // Detect all regions in function and build the region tree.
+ void calculate(FuncT &F);
+
+public:
+ static bool VerifyRegionInfo;
+ static typename RegionT::PrintStyle printStyle;
+
+ void print(raw_ostream &OS) const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ void dump() const;
+#endif
+
+ void releaseMemory();
+
+ /// @brief Get the smallest region that contains a BasicBlock.
+ ///
+ /// @param BB The basic block.
+ /// @return The smallest region that contains BB, or NULL if there is no
+ /// region containing BB.
+ RegionT *getRegionFor(BlockT *BB) const;
+
+ /// @brief Set the smallest region that surrounds a basic block.
+ ///
+ /// @param BB The basic block surrounded by a region.
+ /// @param R The smallest region that surrounds BB.
+ void setRegionFor(BlockT *BB, RegionT *R);
+
+ /// @brief A shortcut for getRegionFor().
+ ///
+ /// @param BB The basic block.
+ /// @return The smallest region that contains BB, or NULL if there is no
+ /// region containing BB.
+ RegionT *operator[](BlockT *BB) const;
+
+ /// @brief Return the exit of the maximal refined region that starts at a
+ /// BasicBlock.
+ ///
+ /// @param BB The BasicBlock at which the refined region starts.
+ BlockT *getMaxRegionExit(BlockT *BB) const;
+
+ /// @brief Find the smallest region that contains two regions.
+ ///
+ /// @param A The first region.
+ /// @param B The second region.
+ /// @return The smallest region containing A and B.
+ RegionT *getCommonRegion(RegionT *A, RegionT *B) const;
+
+ /// @brief Find the smallest region that contains two basic blocks.
+ ///
+ /// @param A The first basic block.
+ /// @param B The second basic block.
+ /// @return The smallest region that contains A and B.
+ RegionT *getCommonRegion(BlockT *A, BlockT *B) const {
+ return getCommonRegion(getRegionFor(A), getRegionFor(B));
+ }
+
+ /// @brief Find the smallest region that contains a set of regions.
+ ///
+ /// @param Regions A vector of regions.
+ /// @return The smallest region that contains all regions in Regions.
+ RegionT *getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const;
+
+ /// @brief Find the smallest region that contains a set of basic blocks.
+ ///
+ /// @param BBs A vector of basic blocks.
+ /// @return The smallest region that contains all basic blocks in BBs.
+ RegionT *getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const;
+
+ RegionT *getTopLevelRegion() const { return TopLevelRegion; }
+
+ /// @brief Clear the Node Cache for all Regions.
+ ///
+ /// @see Region::clearNodeCache()
+ void clearNodeCache() {
+ if (TopLevelRegion)
+ TopLevelRegion->clearNodeCache();
+ }
+
+ void verifyAnalysis() const;
+};
+
+class Region;
+
+class RegionNode : public RegionNodeBase<RegionTraits<Function>> {
+public:
+ inline RegionNode(Region *Parent, BasicBlock *Entry, bool isSubRegion = false)
+ : RegionNodeBase<RegionTraits<Function>>(Parent, Entry, isSubRegion) {}
+
+ bool operator==(const Region &RN) const {
+ return this == reinterpret_cast<const RegionNode *>(&RN);
+ }
+};
+
+class Region : public RegionBase<RegionTraits<Function>> {
+public:
+ Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo *RI, DominatorTree *DT,
+ Region *Parent = nullptr);
+ ~Region();
+
+ bool operator==(const RegionNode &RN) const {
+ return &RN == reinterpret_cast<const RegionNode *>(this);
+ }
+};
+
+class RegionInfo : public RegionInfoBase<RegionTraits<Function>> {
+public:
+ explicit RegionInfo();
+
+ ~RegionInfo() override;
+
+ // updateStatistics - Update statistic about created regions.
+ void updateStatistics(Region *R) final;
+
+ void recalculate(Function &F, DominatorTree *DT, PostDominatorTree *PDT,
+ DominanceFrontier *DF);
+
+#ifndef NDEBUG
+ /// @brief Opens a viewer to show the GraphViz visualization of the regions.
+ ///
+ /// Useful during debugging as an alternative to dump().
+ void view();
+
+ /// @brief Opens a viewer to show the GraphViz visualization of this region
+ /// without instructions in the BasicBlocks.
+ ///
+ /// Useful during debugging as an alternative to dump().
+ void viewOnly();
+#endif
+};
+
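+/// @brief Legacy FunctionPass wrapper around RegionInfo.
+///
+/// A sketch of the usual legacy pass-manager idiom for consuming it from a
+/// hypothetical client pass:
+/// \code
+///   void getAnalysisUsage(AnalysisUsage &AU) const override {
+///     AU.addRequired<RegionInfoPass>();
+///   }
+///   bool runOnFunction(Function &F) override {
+///     RegionInfo &RI = getAnalysis<RegionInfoPass>().getRegionInfo();
+///     Region *R = RI.getRegionFor(&F.getEntryBlock());
+///     ...
+///   }
+/// \endcode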
+class RegionInfoPass : public FunctionPass {
+ RegionInfo RI;
+
+public:
+ static char ID;
+ explicit RegionInfoPass();
+
+ ~RegionInfoPass() override;
+
+ RegionInfo &getRegionInfo() { return RI; }
+
+ const RegionInfo &getRegionInfo() const { return RI; }
+
+ /// @name FunctionPass interface
+ //@{
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void verifyAnalysis() const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(raw_ostream &OS, const Module *) const override;
+ void dump() const;
+ //@}
+};
+
+template <>
+template <>
+inline BasicBlock *
+RegionNodeBase<RegionTraits<Function>>::getNodeAs<BasicBlock>() const {
+ assert(!isSubRegion() && "This is not a BasicBlock RegionNode!");
+ return getEntry();
+}
+
+template <>
+template <>
+inline Region *
+RegionNodeBase<RegionTraits<Function>>::getNodeAs<Region>() const {
+ assert(isSubRegion() && "This is not a subregion RegionNode!");
+ auto Unconst = const_cast<RegionNodeBase<RegionTraits<Function>> *>(this);
+ return reinterpret_cast<Region *>(Unconst);
+}
+
+template <class Tr>
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const RegionNodeBase<Tr> &Node) {
+ typedef typename Tr::BlockT BlockT;
+ typedef typename Tr::RegionT RegionT;
+
+ if (Node.isSubRegion())
+ return OS << Node.template getNodeAs<RegionT>()->getNameStr();
+ else
+ return OS << Node.template getNodeAs<BlockT>()->getName();
+}
+
+extern template class RegionBase<RegionTraits<Function>>;
+extern template class RegionNodeBase<RegionTraits<Function>>;
+extern template class RegionInfoBase<RegionTraits<Function>>;
+
+} // End llvm namespace
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/RegionInfoImpl.h b/contrib/llvm/include/llvm/Analysis/RegionInfoImpl.h
new file mode 100644
index 0000000..134cd8f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionInfoImpl.h
@@ -0,0 +1,927 @@
+//===- RegionInfoImpl.h - SESE region detection analysis --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Detects single entry single exit regions in the control flow graph.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONINFOIMPL_H
+#define LLVM_ANALYSIS_REGIONINFOIMPL_H
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <iterator>
+#include <set>
+
+namespace llvm {
+
+#define DEBUG_TYPE "region"
+
+//===----------------------------------------------------------------------===//
+/// RegionBase Implementation
+template <class Tr>
+RegionBase<Tr>::RegionBase(BlockT *Entry, BlockT *Exit,
+ typename Tr::RegionInfoT *RInfo, DomTreeT *dt,
+ RegionT *Parent)
+ : RegionNodeBase<Tr>(Parent, Entry, true), RI(RInfo), DT(dt), exit(Exit) {}
+
+template <class Tr>
+RegionBase<Tr>::~RegionBase() {
+ // Free the cached nodes.
+ for (typename BBNodeMapT::iterator it = BBNodeMap.begin(),
+ ie = BBNodeMap.end();
+ it != ie; ++it)
+ delete it->second;
+
+ // Only clean the cache for this Region. Caches of child Regions will be
+ // cleaned when the child Regions are deleted.
+ BBNodeMap.clear();
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceEntry(BlockT *BB) {
+ this->entry.setPointer(BB);
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceExit(BlockT *BB) {
+ assert(exit && "No exit to replace!");
+ exit = BB;
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceEntryRecursive(BlockT *NewEntry) {
+ std::vector<RegionT *> RegionQueue;
+ BlockT *OldEntry = getEntry();
+
+ RegionQueue.push_back(static_cast<RegionT *>(this));
+ while (!RegionQueue.empty()) {
+ RegionT *R = RegionQueue.back();
+ RegionQueue.pop_back();
+
+ R->replaceEntry(NewEntry);
+ for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
+ RI != RE; ++RI) {
+ if ((*RI)->getEntry() == OldEntry)
+ RegionQueue.push_back(RI->get());
+ }
+ }
+}
+
+template <class Tr>
+void RegionBase<Tr>::replaceExitRecursive(BlockT *NewExit) {
+ std::vector<RegionT *> RegionQueue;
+ BlockT *OldExit = getExit();
+
+ RegionQueue.push_back(static_cast<RegionT *>(this));
+ while (!RegionQueue.empty()) {
+ RegionT *R = RegionQueue.back();
+ RegionQueue.pop_back();
+
+ R->replaceExit(NewExit);
+ for (typename RegionT::const_iterator RI = R->begin(), RE = R->end();
+ RI != RE; ++RI) {
+ if ((*RI)->getExit() == OldExit)
+ RegionQueue.push_back(RI->get());
+ }
+ }
+}
+
+template <class Tr>
+bool RegionBase<Tr>::contains(const BlockT *B) const {
+ BlockT *BB = const_cast<BlockT *>(B);
+
+ if (!DT->getNode(BB))
+ return false;
+
+ BlockT *entry = getEntry(), *exit = getExit();
+
+ // Toplevel region.
+ if (!exit)
+ return true;
+
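+ // BB belongs to the region iff the entry dominates BB, except when the
+ // exit also dominates BB while itself being dominated by the entry; in
+ // that case BB lies behind the exit and is outside the region.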
+ return (DT->dominates(entry, BB) &&
+ !(DT->dominates(exit, BB) && DT->dominates(entry, exit)));
+}
+
+template <class Tr>
+bool RegionBase<Tr>::contains(const LoopT *L) const {
+ // BBs that are not part of any loop are elements of the Loop
+ // described by the NULL pointer. This loop is not part of any region,
+ // except if the region describes the whole function.
+ if (!L)
+ return getExit() == nullptr;
+
+ if (!contains(L->getHeader()))
+ return false;
+
+ SmallVector<BlockT *, 8> ExitingBlocks;
+ L->getExitingBlocks(ExitingBlocks);
+
+ for (BlockT *BB : ExitingBlocks) {
+ if (!contains(BB))
+ return false;
+ }
+
+ return true;
+}
+
+template <class Tr>
+typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopT *L) const {
+ if (!contains(L))
+ return nullptr;
+
+ while (L && contains(L->getParentLoop())) {
+ L = L->getParentLoop();
+ }
+
+ return L;
+}
+
+template <class Tr>
+typename Tr::LoopT *RegionBase<Tr>::outermostLoopInRegion(LoopInfoT *LI,
+ BlockT *BB) const {
+ assert(LI && BB && "LI and BB cannot be null!");
+ LoopT *L = LI->getLoopFor(BB);
+ return outermostLoopInRegion(L);
+}
+
+template <class Tr>
+typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getEnteringBlock() const {
+ BlockT *entry = getEntry();
+ BlockT *Pred;
+ BlockT *enteringBlock = nullptr;
+
+ for (PredIterTy PI = InvBlockTraits::child_begin(entry),
+ PE = InvBlockTraits::child_end(entry);
+ PI != PE; ++PI) {
+ Pred = *PI;
+ if (DT->getNode(Pred) && !contains(Pred)) {
+ if (enteringBlock)
+ return nullptr;
+
+ enteringBlock = Pred;
+ }
+ }
+
+ return enteringBlock;
+}
+
+template <class Tr>
+typename RegionBase<Tr>::BlockT *RegionBase<Tr>::getExitingBlock() const {
+ BlockT *exit = getExit();
+ BlockT *Pred;
+ BlockT *exitingBlock = nullptr;
+
+ if (!exit)
+ return nullptr;
+
+ for (PredIterTy PI = InvBlockTraits::child_begin(exit),
+ PE = InvBlockTraits::child_end(exit);
+ PI != PE; ++PI) {
+ Pred = *PI;
+ if (contains(Pred)) {
+ if (exitingBlock)
+ return nullptr;
+
+ exitingBlock = Pred;
+ }
+ }
+
+ return exitingBlock;
+}
+
+template <class Tr>
+bool RegionBase<Tr>::isSimple() const {
+ return !isTopLevelRegion() && getEnteringBlock() && getExitingBlock();
+}
+
+template <class Tr>
+std::string RegionBase<Tr>::getNameStr() const {
+ std::string exitName;
+ std::string entryName;
+
+ if (getEntry()->getName().empty()) {
+ raw_string_ostream OS(entryName);
+
+ getEntry()->printAsOperand(OS, false);
+ } else
+ entryName = getEntry()->getName();
+
+ if (getExit()) {
+ if (getExit()->getName().empty()) {
+ raw_string_ostream OS(exitName);
+
+ getExit()->printAsOperand(OS, false);
+ } else
+ exitName = getExit()->getName();
+ } else
+ exitName = "<Function Return>";
+
+ return entryName + " => " + exitName;
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
+ if (!contains(BB))
+ llvm_unreachable("Broken region found: enumerated BB not in region!");
+
+ BlockT *entry = getEntry(), *exit = getExit();
+
+ for (SuccIterTy SI = BlockTraits::child_begin(BB),
+ SE = BlockTraits::child_end(BB);
+ SI != SE; ++SI) {
+ if (!contains(*SI) && exit != *SI)
+ llvm_unreachable("Broken region found: edges leaving the region must go "
+ "to the exit node!");
+ }
+
+ if (entry != BB) {
+ for (PredIterTy SI = InvBlockTraits::child_begin(BB),
+ SE = InvBlockTraits::child_end(BB);
+ SI != SE; ++SI) {
+ if (!contains(*SI))
+ llvm_unreachable("Broken region found: edges entering the region must "
+ "go to the entry node!");
+ }
+ }
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyWalk(BlockT *BB, std::set<BlockT *> *visited) const {
+ BlockT *exit = getExit();
+
+ visited->insert(BB);
+
+ verifyBBInRegion(BB);
+
+ for (SuccIterTy SI = BlockTraits::child_begin(BB),
+ SE = BlockTraits::child_end(BB);
+ SI != SE; ++SI) {
+ if (*SI != exit && visited->find(*SI) == visited->end())
+ verifyWalk(*SI, visited);
+ }
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyRegion() const {
+ // Only do verification when the user wants it; otherwise this expensive
+ // check will be invoked by PMDataManager::verifyPreservedAnalysis when
+ // a regionpass (marked PreservedAll) finishes.
+ if (!RegionInfoBase<Tr>::VerifyRegionInfo)
+ return;
+
+ std::set<BlockT *> visited;
+ verifyWalk(getEntry(), &visited);
+}
+
+template <class Tr>
+void RegionBase<Tr>::verifyRegionNest() const {
+ for (typename RegionT::const_iterator RI = begin(), RE = end(); RI != RE;
+ ++RI)
+ (*RI)->verifyRegionNest();
+
+ verifyRegion();
+}
+
+template <class Tr>
+typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_begin() {
+ return GraphTraits<RegionT *>::nodes_begin(static_cast<RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::element_iterator RegionBase<Tr>::element_end() {
+ return GraphTraits<RegionT *>::nodes_end(static_cast<RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::const_element_iterator
+RegionBase<Tr>::element_begin() const {
+ return GraphTraits<const RegionT *>::nodes_begin(
+ static_cast<const RegionT *>(this));
+}
+
+template <class Tr>
+typename RegionBase<Tr>::const_element_iterator
+RegionBase<Tr>::element_end() const {
+ return GraphTraits<const RegionT *>::nodes_end(
+ static_cast<const RegionT *>(this));
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::getSubRegionNode(BlockT *BB) const {
+ typedef typename Tr::RegionT RegionT;
+ RegionT *R = RI->getRegionFor(BB);
+
+ if (!R || R == this)
+ return nullptr;
+
+ // If we pass the BB out of this region, that means our code is broken.
+ assert(contains(R) && "BB not in current region!");
+
+ while (contains(R->getParent()) && R->getParent() != this)
+ R = R->getParent();
+
+ if (R->getEntry() != BB)
+ return nullptr;
+
+ return R;
+}
+
+template <class Tr>
+typename Tr::RegionNodeT *RegionBase<Tr>::getBBNode(BlockT *BB) const {
+ assert(contains(BB) && "Cannot get BB node out of this region!");
+
+ typename BBNodeMapT::const_iterator at = BBNodeMap.find(BB);
+
+ if (at != BBNodeMap.end())
+ return at->second;
+
+ auto Deconst = const_cast<RegionBase<Tr> *>(this);
+ RegionNodeT *NewNode = new RegionNodeT(static_cast<RegionT *>(Deconst), BB);
+ BBNodeMap.insert(std::make_pair(BB, NewNode));
+ return NewNode;
+}
+
+template <class Tr>
+typename Tr::RegionNodeT *RegionBase<Tr>::getNode(BlockT *BB) const {
+ assert(contains(BB) && "Cannot get BB node out of this region!");
+ if (RegionT *Child = getSubRegionNode(BB))
+ return Child->getNode();
+
+ return getBBNode(BB);
+}
+
+template <class Tr>
+void RegionBase<Tr>::transferChildrenTo(RegionT *To) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ (*I)->parent = To;
+ To->children.push_back(std::move(*I));
+ }
+ children.clear();
+}
+
+template <class Tr>
+void RegionBase<Tr>::addSubRegion(RegionT *SubRegion, bool moveChildren) {
+ assert(!SubRegion->parent && "SubRegion already has a parent!");
+ assert(std::find_if(begin(), end(), [&](const std::unique_ptr<RegionT> &R) {
+ return R.get() == SubRegion;
+ }) == children.end() &&
+ "Subregion already exists!");
+
+ SubRegion->parent = static_cast<RegionT *>(this);
+ children.push_back(std::unique_ptr<RegionT>(SubRegion));
+
+ if (!moveChildren)
+ return;
+
+ assert(SubRegion->children.empty() &&
+ "SubRegions that contain children are not supported");
+
+ for (element_iterator I = element_begin(), E = element_end(); I != E; ++I) {
+ if (!(*I)->isSubRegion()) {
+ BlockT *BB = (*I)->template getNodeAs<BlockT>();
+
+ if (SubRegion->contains(BB))
+ RI->setRegionFor(BB, SubRegion);
+ }
+ }
+
+ std::vector<std::unique_ptr<RegionT>> Keep;
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (SubRegion->contains(I->get()) && I->get() != SubRegion) {
+ (*I)->parent = SubRegion;
+ SubRegion->children.push_back(std::move(*I));
+ } else
+ Keep.push_back(std::move(*I));
+ }
+
+ children.clear();
+ children.insert(
+ children.begin(),
+ std::move_iterator<typename RegionSet::iterator>(Keep.begin()),
+ std::move_iterator<typename RegionSet::iterator>(Keep.end()));
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::removeSubRegion(RegionT *Child) {
+ assert(Child->parent == this && "Child is not a child of this region!");
+ Child->parent = nullptr;
+ typename RegionSet::iterator I = std::find_if(
+ children.begin(), children.end(),
+ [&](const std::unique_ptr<RegionT> &R) { return R.get() == Child; });
+ assert(I != children.end() && "Region does not exist. Unable to remove.");
+ children.erase(children.begin() + (I - begin()));
+ return Child;
+}
+
+template <class Tr>
+unsigned RegionBase<Tr>::getDepth() const {
+ unsigned Depth = 0;
+
+ for (RegionT *R = getParent(); R != nullptr; R = R->getParent())
+ ++Depth;
+
+ return Depth;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionBase<Tr>::getExpandedRegion() const {
+ unsigned NumSuccessors = Tr::getNumSuccessors(exit);
+
+ if (NumSuccessors == 0)
+ return nullptr;
+
+ RegionT *R = RI->getRegionFor(exit);
+
+ if (R->getEntry() != exit) {
+ for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
+ PE = InvBlockTraits::child_end(getExit());
+ PI != PE; ++PI)
+ if (!contains(*PI))
+ return nullptr;
+ if (Tr::getNumSuccessors(exit) == 1)
+ return new RegionT(getEntry(), *BlockTraits::child_begin(exit), RI, DT);
+ return nullptr;
+ }
+
+ while (R->getParent() && R->getParent()->getEntry() == exit)
+ R = R->getParent();
+
+ for (PredIterTy PI = InvBlockTraits::child_begin(getExit()),
+ PE = InvBlockTraits::child_end(getExit());
+ PI != PE; ++PI) {
+ if (!(contains(*PI) || R->contains(*PI)))
+ return nullptr;
+ }
+
+ return new RegionT(getEntry(), R->getExit(), RI, DT);
+}
+
+template <class Tr>
+void RegionBase<Tr>::print(raw_ostream &OS, bool print_tree, unsigned level,
+ PrintStyle Style) const {
+ if (print_tree)
+ OS.indent(level * 2) << '[' << level << "] " << getNameStr();
+ else
+ OS.indent(level * 2) << getNameStr();
+
+ OS << '\n';
+
+ if (Style != PrintNone) {
+ OS.indent(level * 2) << "{\n";
+ OS.indent(level * 2 + 2);
+
+ if (Style == PrintBB) {
+ for (const auto *BB : blocks())
+ OS << BB->getName() << ", "; // TODO: remove the last ","
+ } else if (Style == PrintRN) {
+ for (const_element_iterator I = element_begin(), E = element_end();
+ I != E; ++I) {
+ OS << **I << ", "; // TODO: remove the last ","
+ }
+ }
+
+ OS << '\n';
+ }
+
+ if (print_tree) {
+ for (const_iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->print(OS, print_tree, level + 1, Style);
+ }
+
+ if (Style != PrintNone)
+ OS.indent(level * 2) << "} \n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class Tr>
+void RegionBase<Tr>::dump() const {
+ print(dbgs(), true, getDepth(), RegionInfoBase<Tr>::printStyle);
+}
+#endif
+
+template <class Tr>
+void RegionBase<Tr>::clearNodeCache() {
+ // Free the cached nodes.
+ for (typename BBNodeMapT::iterator I = BBNodeMap.begin(),
+ IE = BBNodeMap.end();
+ I != IE; ++I)
+ delete I->second;
+
+ BBNodeMap.clear();
+ for (typename RegionT::iterator RI = begin(), RE = end(); RI != RE; ++RI)
+ (*RI)->clearNodeCache();
+}
+
+//===----------------------------------------------------------------------===//
+// RegionInfoBase implementation
+//
+
+template <class Tr>
+RegionInfoBase<Tr>::RegionInfoBase()
+ : TopLevelRegion(nullptr) {}
+
+template <class Tr>
+RegionInfoBase<Tr>::~RegionInfoBase() {
+ releaseMemory();
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
+ assert(R && "R must be non-null");
+ for (auto I = R->element_begin(), E = R->element_end(); I != E; ++I) {
+ if (I->isSubRegion()) {
+ const RegionT *SR = I->template getNodeAs<RegionT>();
+ verifyBBMap(SR);
+ } else {
+ BlockT *BB = I->template getNodeAs<BlockT>();
+ if (getRegionFor(BB) != R)
+ llvm_unreachable("BB map does not match region nesting");
+ }
+ }
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isCommonDomFrontier(BlockT *BB, BlockT *entry,
+ BlockT *exit) const {
+ for (PredIterTy PI = InvBlockTraits::child_begin(BB),
+ PE = InvBlockTraits::child_end(BB);
+ PI != PE; ++PI) {
+ BlockT *P = *PI;
+ if (DT->dominates(entry, P) && !DT->dominates(exit, P))
+ return false;
+ }
+
+ return true;
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isRegion(BlockT *entry, BlockT *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+ typedef typename DomFrontierT::DomSetType DST;
+
+ DST *entrySuccs = &DF->find(entry)->second;
+
+ // Exit is the header of a loop that contains the entry. In this case,
+ // the dominance frontier must only contain the exit.
+ if (!DT->dominates(entry, exit)) {
+ for (typename DST::iterator SI = entrySuccs->begin(),
+ SE = entrySuccs->end();
+ SI != SE; ++SI) {
+ if (*SI != exit && *SI != entry)
+ return false;
+ }
+
+ return true;
+ }
+
+ DST *exitSuccs = &DF->find(exit)->second;
+
+ // Do not allow edges leaving the region.
+ for (typename DST::iterator SI = entrySuccs->begin(), SE = entrySuccs->end();
+ SI != SE; ++SI) {
+ if (*SI == exit || *SI == entry)
+ continue;
+ if (exitSuccs->find(*SI) == exitSuccs->end())
+ return false;
+ if (!isCommonDomFrontier(*SI, entry, exit))
+ return false;
+ }
+
+ // Do not allow edges pointing into the region.
+ for (typename DST::iterator SI = exitSuccs->begin(), SE = exitSuccs->end();
+ SI != SE; ++SI) {
+ if (DT->properlyDominates(entry, *SI) && *SI != exit)
+ return false;
+ }
+
+ return true;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::insertShortCut(BlockT *entry, BlockT *exit,
+ BBtoBBMap *ShortCut) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ typename BBtoBBMap::iterator e = ShortCut->find(exit);
+
+ if (e == ShortCut->end())
+ // No further region at exit available.
+ (*ShortCut)[entry] = exit;
+ else {
+ // We found a region e that starts at exit. Therefore (entry, e->second)
+ // is also a region, one that is larger than (entry, exit). Insert the
+ // larger one.
+ BlockT *BB = e->second;
+ (*ShortCut)[entry] = BB;
+ }
+}
+
+template <class Tr>
+typename Tr::DomTreeNodeT *
+RegionInfoBase<Tr>::getNextPostDom(DomTreeNodeT *N, BBtoBBMap *ShortCut) const {
+ typename BBtoBBMap::iterator e = ShortCut->find(N->getBlock());
+
+ if (e == ShortCut->end())
+ return N->getIDom();
+
+ return PDT->getNode(e->second)->getIDom();
+}
+
+template <class Tr>
+bool RegionInfoBase<Tr>::isTrivialRegion(BlockT *entry, BlockT *exit) const {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ unsigned num_successors =
+ BlockTraits::child_end(entry) - BlockTraits::child_begin(entry);
+
+ if (num_successors <= 1 && exit == *(BlockTraits::child_begin(entry)))
+ return true;
+
+ return false;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::createRegion(BlockT *entry,
+ BlockT *exit) {
+ assert(entry && exit && "entry and exit must not be null!");
+
+ if (isTrivialRegion(entry, exit))
+ return nullptr;
+
+ RegionT *region =
+ new RegionT(entry, exit, static_cast<RegionInfoT *>(this), DT);
+ BBtoRegion.insert(std::make_pair(entry, region));
+
+#ifdef XDEBUG
+ region->verifyRegion();
+#else
+ DEBUG(region->verifyRegion());
+#endif
+
+ updateStatistics(region);
+ return region;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::findRegionsWithEntry(BlockT *entry,
+ BBtoBBMap *ShortCut) {
+ assert(entry);
+
+ DomTreeNodeT *N = PDT->getNode(entry);
+ if (!N)
+ return;
+
+ RegionT *lastRegion = nullptr;
+ BlockT *lastExit = entry;
+
+ // As only a BasicBlock that postdominates entry can finish a region, walk the
+ // post dominance tree upwards.
+ while ((N = getNextPostDom(N, ShortCut))) {
+ BlockT *exit = N->getBlock();
+
+ if (!exit)
+ break;
+
+ if (isRegion(entry, exit)) {
+ RegionT *newRegion = createRegion(entry, exit);
+
+ if (lastRegion)
+ newRegion->addSubRegion(lastRegion);
+
+ lastRegion = newRegion;
+ lastExit = exit;
+ }
+
+ // This can never be a region, so stop the search.
+ if (!DT->dominates(entry, exit))
+ break;
+ }
+
+ // Tried to create regions from entry to lastExit. Next time take a
+ // shortcut from entry to lastExit.
+ if (lastExit != entry)
+ insertShortCut(entry, lastExit, ShortCut);
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::scanForRegions(FuncT &F, BBtoBBMap *ShortCut) {
+ typedef typename std::add_pointer<FuncT>::type FuncPtrT;
+ BlockT *entry = GraphTraits<FuncPtrT>::getEntryNode(&F);
+ DomTreeNodeT *N = DT->getNode(entry);
+
+ // Iterate over the dominance tree in post order to start with the small
+ // regions from the bottom of the dominance tree. If the small regions are
+ // detected first, detection of bigger regions is faster, as we can jump
+ // over the small regions.
+ for (auto DomNode : post_order(N))
+ findRegionsWithEntry(DomNode->getBlock(), ShortCut);
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getTopMostParent(RegionT *region) {
+ while (region->getParent())
+ region = region->getParent();
+
+ return region;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::buildRegionsTree(DomTreeNodeT *N, RegionT *region) {
+ BlockT *BB = N->getBlock();
+
+ // Passed region exit
+ while (BB == region->getExit())
+ region = region->getParent();
+
+ typename BBtoRegionMap::iterator it = BBtoRegion.find(BB);
+
+ // This basic block is a start block of a region. It is already in the
+ // BBtoRegion relation. Only the child basic blocks have to be updated.
+ if (it != BBtoRegion.end()) {
+ RegionT *newRegion = it->second;
+ region->addSubRegion(getTopMostParent(newRegion));
+ region = newRegion;
+ } else {
+ BBtoRegion[BB] = region;
+ }
+
+ for (typename DomTreeNodeT::iterator CI = N->begin(), CE = N->end(); CI != CE;
+ ++CI) {
+ buildRegionsTree(*CI, region);
+ }
+}
+
+#ifdef XDEBUG
+template <class Tr>
+bool RegionInfoBase<Tr>::VerifyRegionInfo = true;
+#else
+template <class Tr>
+bool RegionInfoBase<Tr>::VerifyRegionInfo = false;
+#endif
+
+template <class Tr>
+typename Tr::RegionT::PrintStyle RegionInfoBase<Tr>::printStyle =
+ RegionBase<Tr>::PrintNone;
+
+template <class Tr>
+void RegionInfoBase<Tr>::print(raw_ostream &OS) const {
+ OS << "Region tree:\n";
+ TopLevelRegion->print(OS, true, 0, printStyle);
+ OS << "End region tree\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+template <class Tr>
+void RegionInfoBase<Tr>::dump() const { print(dbgs()); }
+#endif
+
+template <class Tr>
+void RegionInfoBase<Tr>::releaseMemory() {
+ BBtoRegion.clear();
+ if (TopLevelRegion)
+ delete TopLevelRegion;
+ TopLevelRegion = nullptr;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::verifyAnalysis() const {
+ // Only verify regions if explicitly activated using XDEBUG or
+ // -verify-region-info.
+ if (!RegionInfoBase<Tr>::VerifyRegionInfo)
+ return;
+
+ TopLevelRegion->verifyRegionNest();
+
+ verifyBBMap(TopLevelRegion);
+}
+
+// Region pass manager support.
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getRegionFor(BlockT *BB) const {
+ typename BBtoRegionMap::const_iterator I = BBtoRegion.find(BB);
+ return I != BBtoRegion.end() ? I->second : nullptr;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::setRegionFor(BlockT *BB, RegionT *R) {
+ BBtoRegion[BB] = R;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::operator[](BlockT *BB) const {
+ return getRegionFor(BB);
+}
+
+template <class Tr>
+typename RegionInfoBase<Tr>::BlockT *
+RegionInfoBase<Tr>::getMaxRegionExit(BlockT *BB) const {
+ BlockT *Exit = nullptr;
+
+ while (true) {
+ // Get largest region that starts at BB.
+ RegionT *R = getRegionFor(BB);
+ while (R && R->getParent() && R->getParent()->getEntry() == BB)
+ R = R->getParent();
+
+ // Get the single exit of BB.
+ if (R && R->getEntry() == BB)
+ Exit = R->getExit();
+ else if (++BlockTraits::child_begin(BB) == BlockTraits::child_end(BB))
+ Exit = *BlockTraits::child_begin(BB);
+ else // No single exit exists.
+ return Exit;
+
+ // Get largest region that starts at Exit.
+ RegionT *ExitR = getRegionFor(Exit);
+ while (ExitR && ExitR->getParent() &&
+ ExitR->getParent()->getEntry() == Exit)
+ ExitR = ExitR->getParent();
+
+ for (PredIterTy PI = InvBlockTraits::child_begin(Exit),
+ PE = InvBlockTraits::child_end(Exit);
+ PI != PE; ++PI) {
+ if (!R->contains(*PI) && !ExitR->contains(*PI))
+ break;
+ }
+
+ // This stops infinite cycles.
+ if (DT->dominates(Exit, BB))
+ break;
+
+ BB = Exit;
+ }
+
+ return Exit;
+}
+
+template <class Tr>
+typename Tr::RegionT *RegionInfoBase<Tr>::getCommonRegion(RegionT *A,
+ RegionT *B) const {
+ assert(A && B && "One of the Regions is NULL");
+
+ if (A->contains(B))
+ return A;
+
+ while (!B->contains(A))
+ B = B->getParent();
+
+ return B;
+}
+
+template <class Tr>
+typename Tr::RegionT *
+RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<RegionT *> &Regions) const {
+ RegionT *ret = Regions.back();
+ Regions.pop_back();
+
+ for (RegionT *R : Regions)
+ ret = getCommonRegion(ret, R);
+
+ return ret;
+}
+
+template <class Tr>
+typename Tr::RegionT *
+RegionInfoBase<Tr>::getCommonRegion(SmallVectorImpl<BlockT *> &BBs) const {
+ RegionT *ret = getRegionFor(BBs.back());
+ BBs.pop_back();
+
+ for (BlockT *BB : BBs)
+ ret = getCommonRegion(ret, getRegionFor(BB));
+
+ return ret;
+}
+
+template <class Tr>
+void RegionInfoBase<Tr>::calculate(FuncT &F) {
+ typedef typename std::add_pointer<FuncT>::type FuncPtrT;
+
+ // ShortCut maps, for every BB, the exit of the largest region starting
+ // at BB. Such regions can be treated as single BBs. This improves
+ // performance on linear CFGs.
+ BBtoBBMap ShortCut;
+
+ scanForRegions(F, &ShortCut);
+ BlockT *BB = GraphTraits<FuncPtrT>::getEntryNode(&F);
+ buildRegionsTree(DT->getNode(BB), TopLevelRegion);
+}
+
+#undef DEBUG_TYPE
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/RegionIterator.h b/contrib/llvm/include/llvm/Analysis/RegionIterator.h
new file mode 100644
index 0000000..ced58df
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionIterator.h
@@ -0,0 +1,345 @@
+//===- RegionIterator.h - Iterators to iterate over Regions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines the iterators to iterate over the elements of a Region.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_ANALYSIS_REGIONITERATOR_H
+#define LLVM_ANALYSIS_REGIONITERATOR_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+/// @brief Hierarchical RegionNode successor iterator.
+///
+/// This iterator iterates over all successors of a RegionNode.
+///
+/// For a BasicBlock RegionNode it skips all BasicBlocks that are not part of
+/// the parent Region. Furthermore, for BasicBlocks that start a subregion, a
+/// RegionNode representing the subregion is returned.
+///
+/// For a subregion RegionNode there is just one successor: the RegionNode
+/// representing the exit of the subregion.
+template<class NodeType, class BlockT, class RegionT>
+class RNSuccIterator : public std::iterator<std::forward_iterator_tag,
+ NodeType, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+
+ // The iterator works in two modes: BB mode or region mode.
+ enum ItMode {
+ // In BB mode it returns all successors of this BasicBlock as its
+ // successors.
+ ItBB,
+ // In region mode there is only one successor: the RegionNode mapping to
+ // the exit block of the region.
+ ItRgBegin, // At the beginning of the regionnode successor.
+ ItRgEnd // At the end of the regionnode successor.
+ };
+
+ // Use two bits to represent the iterator mode.
+ PointerIntPair<NodeType*, 2, ItMode> Node;
+
+ // The block successor iterator.
+ SuccIterTy BItor;
+
+ // advanceRegionSucc - A region node has only one successor. It reaches end
+ // once we advance it.
+ void advanceRegionSucc() {
+ assert(Node.getInt() == ItRgBegin && "Cannot advance region successor!");
+ Node.setInt(ItRgEnd);
+ }
+
+ NodeType* getNode() const { return Node.getPointer(); }
+
+ // isRegionMode - Is the current iterator in region mode?
+ bool isRegionMode() const { return Node.getInt() != ItBB; }
+
+ // Get the immediate successor. This function may return a Basic Block
+ // RegionNode or a subregion RegionNode.
+ NodeType* getISucc(BlockT* BB) const {
+ NodeType *succ;
+ succ = getNode()->getParent()->getNode(BB);
+ assert(succ && "BB not in Region or entered subregion!");
+ return succ;
+ }
+
+ // getRegionSucc - Return the successor basic block of a SubRegion RegionNode.
+ inline BlockT* getRegionSucc() const {
+ assert(Node.getInt() == ItRgBegin && "Cannot get the region successor!");
+ return getNode()->template getNodeAs<RegionT>()->getExit();
+ }
+
+ // isExit - Is this the exit BB of the Region?
+ inline bool isExit(BlockT* BB) const {
+ return getNode()->getParent()->getExit() == BB;
+ }
+public:
+ typedef RNSuccIterator<NodeType, BlockT, RegionT> Self;
+
+ typedef typename super::pointer pointer;
+
+ /// @brief Create begin iterator of a RegionNode.
+ inline RNSuccIterator(NodeType* node)
+ : Node(node, node->isSubRegion() ? ItRgBegin : ItBB),
+ BItor(BlockTraits::child_begin(node->getEntry())) {
+
+ // Skip the exit block
+ if (!isRegionMode())
+ while (BlockTraits::child_end(node->getEntry()) != BItor && isExit(*BItor))
+ ++BItor;
+
+ if (isRegionMode() && isExit(getRegionSucc()))
+ advanceRegionSucc();
+ }
+
+ /// @brief Create an end iterator.
+ inline RNSuccIterator(NodeType* node, bool)
+ : Node(node, node->isSubRegion() ? ItRgEnd : ItBB),
+ BItor(BlockTraits::child_end(node->getEntry())) {}
+
+ inline bool operator==(const Self& x) const {
+ assert(isRegionMode() == x.isRegionMode() && "Broken iterator!");
+ if (isRegionMode())
+ return Node.getInt() == x.Node.getInt();
+ else
+ return BItor == x.BItor;
+ }
+
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline pointer operator*() const {
+ BlockT *BB = isRegionMode() ? getRegionSucc() : *BItor;
+ assert(!isExit(BB) && "Iterator out of range!");
+ return getISucc(BB);
+ }
+
+ inline Self& operator++() {
+ if(isRegionMode()) {
+ // The Region only has 1 successor.
+ advanceRegionSucc();
+ } else {
+ // Skip the exit.
+ do
+ ++BItor;
+ while (BItor != BlockTraits::child_end(getNode()->getEntry())
+ && isExit(*BItor));
+ }
+ return *this;
+ }
+
+ inline Self operator++(int) {
+ Self tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// @brief Flat RegionNode iterator.
+///
+/// The Flat Region iterator will iterate over all BasicBlock RegionNodes that
+/// are contained in the Region and its subregions. This is close to a virtual
+/// control flow graph of the Region.
+template<class NodeType, class BlockT, class RegionT>
+class RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>
+ : public std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag, NodeType, ptrdiff_t> super;
+ typedef GraphTraits<BlockT*> BlockTraits;
+ typedef typename BlockTraits::ChildIteratorType SuccIterTy;
+
+ NodeType* Node;
+ SuccIterTy Itor;
+
+public:
+ typedef RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT> Self;
+ typedef typename super::pointer pointer;
+
+ /// @brief Create the iterator from a RegionNode.
+ ///
+ /// Note that the incoming node must be a bb node, otherwise it will trigger
+ /// an assertion when we try to get a BasicBlock.
+ inline RNSuccIterator(NodeType* node) :
+ Node(node),
+ Itor(BlockTraits::child_begin(node->getEntry())) {
+ assert(!Node->isSubRegion()
+ && "Subregion node not allowed in flat iterating mode!");
+ assert(Node->getParent() && "A BB node must have a parent!");
+
+ // Skip the exit block of the iterating region.
+ while (BlockTraits::child_end(Node->getEntry()) != Itor
+ && Node->getParent()->getExit() == *Itor)
+ ++Itor;
+ }
+
+ /// @brief Create an end iterator
+ inline RNSuccIterator(NodeType* node, bool) :
+ Node(node),
+ Itor(BlockTraits::child_end(node->getEntry())) {
+ assert(!Node->isSubRegion()
+ && "Subregion node not allowed in flat iterating mode!");
+ }
+
+ inline bool operator==(const Self& x) const {
+ assert(Node->getParent() == x.Node->getParent()
+ && "Cannot compare iterators of different regions!");
+
+ return Itor == x.Itor && Node == x.Node;
+ }
+
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline pointer operator*() const {
+ BlockT *BB = *Itor;
+
+ // Get the iterating region.
+ RegionT *Parent = Node->getParent();
+
+ // The only case in which a successor reaches outside the region is when
+ // it reaches the exit of the region.
+ assert(Parent->getExit() != BB && "iterator out of range!");
+
+ return Parent->getBBNode(BB);
+ }
+
+ inline Self& operator++() {
+ // Skip the exit block of the iterating region.
+ do
+ ++Itor;
+ while (Itor != BlockTraits::child_end(Node->getEntry())
+ && Node->getParent()->getExit() == *Itor);
+
+ return *this;
+ }
+
+ inline Self operator++(int) {
+ Self tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+template<class NodeType, class BlockT, class RegionT>
+inline RNSuccIterator<NodeType, BlockT, RegionT> succ_begin(NodeType* Node) {
+ return RNSuccIterator<NodeType, BlockT, RegionT>(Node);
+}
+
+template<class NodeType, class BlockT, class RegionT>
+inline RNSuccIterator<NodeType, BlockT, RegionT> succ_end(NodeType* Node) {
+ return RNSuccIterator<NodeType, BlockT, RegionT>(Node, true);
+}
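+
+// Illustrative usage sketch (not part of this header): given a RegionNode *N
+// obtained from an existing RegionInfo analysis, the GraphTraits
+// specializations defined below let generic graph code walk N's successors:
+//
+//   typedef GraphTraits<RegionNode *> RNTraits;
+//   for (RNTraits::ChildIteratorType I = RNTraits::child_begin(N),
+//                                    E = RNTraits::child_end(N);
+//        I != E; ++I)
+//     (*I)->getEntry()->printAsOperand(errs());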
+
+//===--------------------------------------------------------------------===//
+// RegionNode GraphTraits specialization so the BBs in the region can be
+// iterated over by generic graph iterators.
+//
+// NodeT can either be region node or const region node, otherwise child_begin
+// and child_end fail.
+
+#define RegionNodeGraphTraits(NodeT, BlockT, RegionT) \
+ template<> struct GraphTraits<NodeT*> { \
+ typedef NodeT NodeType; \
+ typedef RNSuccIterator<NodeType, BlockT, RegionT> ChildIteratorType; \
+ static NodeType *getEntryNode(NodeType* N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeType *N) { \
+ return RNSuccIterator<NodeType, BlockT, RegionT>(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeType *N) { \
+ return RNSuccIterator<NodeType, BlockT, RegionT>(N, true); \
+ } \
+}; \
+template<> struct GraphTraits<FlatIt<NodeT*>> { \
+ typedef NodeT NodeType; \
+ typedef RNSuccIterator<FlatIt<NodeT>, BlockT, RegionT > ChildIteratorType; \
+ static NodeType *getEntryNode(NodeType* N) { return N; } \
+ static inline ChildIteratorType child_begin(NodeType *N) { \
+ return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N); \
+ } \
+ static inline ChildIteratorType child_end(NodeType *N) { \
+ return RNSuccIterator<FlatIt<NodeType>, BlockT, RegionT>(N, true); \
+ } \
+}
+
+#define RegionGraphTraits(RegionT, NodeT) \
+template<> struct GraphTraits<RegionT*> \
+ : public GraphTraits<NodeT*> { \
+ typedef df_iterator<NodeType*> nodes_iterator; \
+ static NodeType *getEntryNode(RegionT* R) { \
+ return R->getNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT* R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT* R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+}; \
+template<> struct GraphTraits<FlatIt<RegionT*> > \
+ : public GraphTraits<FlatIt<NodeT*> > { \
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false, \
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator; \
+ static NodeType *getEntryNode(RegionT* R) { \
+ return R->getBBNode(R->getEntry()); \
+ } \
+ static nodes_iterator nodes_begin(RegionT* R) { \
+ return nodes_iterator::begin(getEntryNode(R)); \
+ } \
+ static nodes_iterator nodes_end(RegionT* R) { \
+ return nodes_iterator::end(getEntryNode(R)); \
+ } \
+}
+
+RegionNodeGraphTraits(RegionNode, BasicBlock, Region);
+RegionNodeGraphTraits(const RegionNode, BasicBlock, Region);
+
+RegionGraphTraits(Region, RegionNode);
+RegionGraphTraits(const Region, const RegionNode);
+
+template <> struct GraphTraits<RegionInfo*>
+ : public GraphTraits<FlatIt<RegionNode*> > {
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+
+ static NodeType *getEntryNode(RegionInfo *RI) {
+ return GraphTraits<FlatIt<Region*> >::getEntryNode(RI->getTopLevelRegion());
+ }
+ static nodes_iterator nodes_begin(RegionInfo* RI) {
+ return nodes_iterator::begin(getEntryNode(RI));
+ }
+ static nodes_iterator nodes_end(RegionInfo *RI) {
+ return nodes_iterator::end(getEntryNode(RI));
+ }
+};
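+
+// Illustrative sketch (RI is a hypothetical RegionInfo* and not part of this
+// header): with the specialization above, every BasicBlock RegionNode of a
+// function can be visited with the generic depth-first node iterators:
+//
+//   typedef GraphTraits<RegionInfo *> RITraits;
+//   for (RITraits::nodes_iterator I = RITraits::nodes_begin(RI),
+//                                 E = RITraits::nodes_end(RI);
+//        I != E; ++I)
+//     (*I)->getEntry()->printAsOperand(errs());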
+
+template <> struct GraphTraits<RegionInfoPass*>
+ : public GraphTraits<RegionInfo *> {
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+
+ static NodeType *getEntryNode(RegionInfoPass *RI) {
+ return GraphTraits<RegionInfo*>::getEntryNode(&RI->getRegionInfo());
+ }
+ static nodes_iterator nodes_begin(RegionInfoPass* RI) {
+ return GraphTraits<RegionInfo*>::nodes_begin(&RI->getRegionInfo());
+ }
+ static nodes_iterator nodes_end(RegionInfoPass *RI) {
+ return GraphTraits<RegionInfo*>::nodes_end(&RI->getRegionInfo());
+ }
+};
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/RegionPass.h b/contrib/llvm/include/llvm/Analysis/RegionPass.h
new file mode 100644
index 0000000..bd51c49
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionPass.h
@@ -0,0 +1,128 @@
+//===- RegionPass.h - RegionPass class --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegionPass class. All region-based analysis,
+// optimization and transformation passes are derived from RegionPass.
+// This class is implemented following some of the ideas of LoopPass.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONPASS_H
+#define LLVM_ANALYSIS_REGIONPASS_H
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManagers.h"
+#include "llvm/Pass.h"
+#include <deque>
+
+namespace llvm {
+
+class RGPassManager;
+class Function;
+
+//===----------------------------------------------------------------------===//
+/// @brief A pass that runs on each Region in a function.
+///
+/// RegionPass is managed by RGPassManager.
+class RegionPass : public Pass {
+public:
+ explicit RegionPass(char &pid) : Pass(PT_Region, pid) {}
+
+ //===--------------------------------------------------------------------===//
+ /// @name To be implemented by every RegionPass
+ ///
+ //@{
+ /// @brief Run the pass on a specific Region
+ ///
+ /// Accessing regions not contained in the current region is not allowed.
+ ///
+ /// @param R The region this pass is run on.
+ /// @param RGM The RegionPassManager that manages this Pass.
+ ///
+ /// @return True if the pass modifies this Region.
+ virtual bool runOnRegion(Region *R, RGPassManager &RGM) = 0;
+
+ /// @brief Get a pass to print the LLVM IR in the region.
+ ///
+ /// @param O The output stream to print the Region.
+ /// @param Banner The banner to separate different printed passes.
+ ///
+ /// @return The pass to print the LLVM IR in the region.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ virtual bool doInitialization(Region *R, RGPassManager &RGM) { return false; }
+ virtual bool doFinalization() { return false; }
+ //@}
+
+ //===--------------------------------------------------------------------===//
+ /// @name PassManager API
+ ///
+ //@{
+ void preparePassManager(PMStack &PMS) override;
+
+ void assignPassManager(PMStack &PMS,
+ PassManagerType PMT = PMT_RegionPassManager) override;
+
+ PassManagerType getPotentialPassManagerType() const override {
+ return PMT_RegionPassManager;
+ }
+ //@}
+};
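+
+/// A minimal RegionPass sketch (illustrative only; CountRegionBlocks and its
+/// pass ID are hypothetical and would still need the usual out-of-class
+/// definition `char CountRegionBlocks::ID = 0;` plus pass registration):
+///
+/// \code
+///   struct CountRegionBlocks : public RegionPass {
+///     static char ID;
+///     CountRegionBlocks() : RegionPass(ID) {}
+///     bool runOnRegion(Region *R, RGPassManager &RGM) override {
+///       unsigned NumBlocks = 0;
+///       for (BasicBlock *BB : R->blocks()) {
+///         (void)BB;
+///         ++NumBlocks;
+///       }
+///       errs() << "region has " << NumBlocks << " blocks\n";
+///       return false; // analysis only; the region is not modified
+///     }
+///   };
+/// \endcode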
+
+/// @brief The pass manager to schedule RegionPasses.
+class RGPassManager : public FunctionPass, public PMDataManager {
+ std::deque<Region*> RQ;
+ bool skipThisRegion;
+ bool redoThisRegion;
+ RegionInfo *RI;
+ Region *CurrentRegion;
+
+public:
+ static char ID;
+ explicit RGPassManager();
+
+ /// @brief Execute all of the passes scheduled for execution.
+ ///
+ /// @return True if any of the passes modifies the function.
+ bool runOnFunction(Function &F) override;
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ /// RGPassManager needs RegionInfo.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+ const char *getPassName() const override {
+ return "Region Pass Manager";
+ }
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ /// @brief Print passes managed by this manager.
+ void dumpPassStructure(unsigned Offset) override;
+
+ /// @brief Get passes contained by this manager.
+ Pass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ Pass *FP = static_cast<Pass *>(PassVector[N]);
+ return FP;
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_RegionPassManager;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/RegionPrinter.h b/contrib/llvm/include/llvm/Analysis/RegionPrinter.h
new file mode 100644
index 0000000..8f0035c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/RegionPrinter.h
@@ -0,0 +1,71 @@
+//===-- RegionPrinter.h - Region printer external interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines external functions that can be called to explicitly
+// instantiate the region printer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_REGIONPRINTER_H
+#define LLVM_ANALYSIS_REGIONPRINTER_H
+
+namespace llvm {
+ class FunctionPass;
+ class Function;
+ class RegionInfo;
+
+ FunctionPass *createRegionViewerPass();
+ FunctionPass *createRegionOnlyViewerPass();
+ FunctionPass *createRegionPrinterPass();
+ FunctionPass *createRegionOnlyPrinterPass();
+
+#ifndef NDEBUG
+ /// @brief Open a viewer to display the GraphViz visualization of the
+ /// analysis result.
+ ///
+ /// Practical to call in the debugger.
+ /// Includes the instructions in each BasicBlock.
+ ///
+ /// @param RI The analysis to display.
+ void viewRegion(llvm::RegionInfo *RI);
+
+ /// @brief Analyze the regions of a function and open its GraphViz
+ /// visualization in a viewer.
+ ///
+ /// Useful to call in the debugger.
+ /// Includes the instructions in each BasicBlock.
+ /// The result of a new analysis may differ from the RegionInfo the pass
+ /// manager currently holds.
+ ///
+ /// @param F Function to analyze.
+ void viewRegion(const llvm::Function *F);
+
+ /// @brief Open a viewer to display the GraphViz visualization of the
+ /// analysis result.
+ ///
+ /// Useful to call in the debugger.
+ /// Shows only the BasicBlock names without their instructions.
+ ///
+ /// @param RI The analysis to display.
+ void viewRegionOnly(llvm::RegionInfo *RI);
+
+ /// @brief Analyze the regions of a function and open its GraphViz
+ /// visualization in a viewer.
+ ///
+ /// Useful to call in the debugger.
+ /// Shows only the BasicBlock names without their instructions.
+ /// The result of a new analysis may differ from the RegionInfo the pass
+ /// manager currently holds.
+ ///
+ /// @param F Function to analyze.
+ void viewRegionOnly(const llvm::Function *F);
+#endif
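+
+ // Typical debugger use (illustrative; the exact command depends on your
+ // debugger): with a Function *F in scope, something like
+ //
+ //   (gdb) call llvm::viewRegion(F)
+ //
+ // renders the region tree of F with GraphViz.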
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
new file mode 100644
index 0000000..c08335d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -0,0 +1,1381 @@
+//===- llvm/Analysis/ScalarEvolution.h - Scalar Evolution -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The ScalarEvolution class is an LLVM pass which can be used to analyze and
+// categorize scalar expressions in loops. It specializes in recognizing
+// general induction variables, representing them with the abstract and opaque
+// SCEV class. Given this analysis, trip counts of loops and other important
+// properties can be obtained.
+//
+// This analysis is primarily useful for induction variable substitution and
+// strength reduction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_H
+#define LLVM_ANALYSIS_SCALAREVOLUTION_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include <map>
+
+namespace llvm {
+ class APInt;
+ class AssumptionCache;
+ class Constant;
+ class ConstantInt;
+ class DominatorTree;
+ class Type;
+ class ScalarEvolution;
+ class DataLayout;
+ class TargetLibraryInfo;
+ class LLVMContext;
+ class Operator;
+ class SCEV;
+ class SCEVAddRecExpr;
+ class SCEVConstant;
+ class SCEVExpander;
+ class SCEVPredicate;
+ class SCEVUnknown;
+
+ template <> struct FoldingSetTrait<SCEV>;
+ template <> struct FoldingSetTrait<SCEVPredicate>;
+
+ /// This class represents an analyzed expression in the program. These are
+ /// opaque objects that the client is not allowed to do much with directly.
+ ///
+ class SCEV : public FoldingSetNode {
+ friend struct FoldingSetTrait<SCEV>;
+
+ /// A reference to an Interned FoldingSetNodeID for this node. The
+ /// ScalarEvolution's BumpPtrAllocator holds the data.
+ FoldingSetNodeIDRef FastID;
+
+ // The SCEV baseclass this node corresponds to
+ const unsigned short SCEVType;
+
+ protected:
+ /// This field is initialized to zero and may be used in subclasses to store
+ /// miscellaneous information.
+ unsigned short SubclassData;
+
+ private:
+ SCEV(const SCEV &) = delete;
+ void operator=(const SCEV &) = delete;
+
+ public:
+ /// NoWrapFlags are bitfield indices into SubclassData.
+ ///
+ /// Add and Mul expressions may have no-unsigned-wrap <NUW> or
+ /// no-signed-wrap <NSW> properties, which are derived from the IR
+ /// operator. NSW is a misnomer that we use to mean no signed overflow or
+ /// underflow.
+ ///
+ /// AddRec expressions may have a no-self-wraparound <NW> property if, in
+ /// the integer domain, abs(step) * max-iteration(loop) <=
+ /// unsigned-max(bitwidth). This means that the recurrence will never reach
+ /// its start value if the step is non-zero. Computing the same value on
+ /// each iteration is not considered wrapping, and recurrences with step = 0
+ /// are trivially <NW>. <NW> is independent of the sign of step and the
+ /// value the add recurrence starts with.
+ ///
+ /// Note that NUW and NSW are also valid properties of a recurrence, and
+ /// either implies NW. For convenience, NW will be set for a recurrence
+ /// whenever either NUW or NSW are set.
+ enum NoWrapFlags { FlagAnyWrap = 0, // No guarantee.
+ FlagNW = (1 << 0), // No self-wrap.
+ FlagNUW = (1 << 1), // No unsigned wrap.
+ FlagNSW = (1 << 2), // No signed wrap.
+ NoWrapMask = (1 << 3) - 1 };
+
+ explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
+ FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}
+
+ unsigned getSCEVType() const { return SCEVType; }
+
+ /// Return the LLVM type of this SCEV expression.
+ ///
+ Type *getType() const;
+
+ /// Return true if the expression is a constant zero.
+ ///
+ bool isZero() const;
+
+ /// Return true if the expression is a constant one.
+ ///
+ bool isOne() const;
+
+ /// Return true if the expression is a constant all-ones value.
+ ///
+ bool isAllOnesValue() const;
+
+ /// Return true if the specified scev is negated, but not a constant.
+ bool isNonConstantNegative() const;
+
+ /// Print out the internal representation of this scalar to the specified
+ /// stream. This should really only be used for debugging purposes.
+ void print(raw_ostream &OS) const;
+
+ /// This method is used for debugging.
+ ///
+ void dump() const;
+ };
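+
+ // Illustrative use of the class above (SE is a hypothetical ScalarEvolution
+ // instance and V a Value* of SCEVable type; neither is part of this header):
+ //
+ //   const SCEV *S = SE.getSCEV(V);
+ //   S->print(errs()); // debugging output only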
+
+ // Specialize FoldingSetTrait for SCEV to avoid needing to compute
+ // temporary FoldingSetNodeID values.
+ template<> struct FoldingSetTrait<SCEV> : DefaultFoldingSetTrait<SCEV> {
+ static void Profile(const SCEV &X, FoldingSetNodeID& ID) {
+ ID = X.FastID;
+ }
+ static bool Equals(const SCEV &X, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) {
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SCEV &X, FoldingSetNodeID &TempID) {
+ return X.FastID.ComputeHash();
+ }
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const SCEV &S) {
+ S.print(OS);
+ return OS;
+ }
+
+ /// An object of this class is returned by queries that could not be answered.
+ /// For example, if you ask for the number of iterations of a linked-list
+ /// traversal loop, you will get one of these. None of the standard SCEV
+ /// operations are valid on this class, it is just a marker.
+ struct SCEVCouldNotCompute : public SCEV {
+ SCEVCouldNotCompute();
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const SCEV *S);
+ };
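+
+ // Illustrative usage sketch (SE and L are hypothetical ScalarEvolution and
+ // Loop pointers): queries that cannot be answered return this marker, which
+ // callers test for with isa<>:
+ //
+ //   const SCEV *BECount = SE->getBackedgeTakenCount(L);
+ //   if (isa<SCEVCouldNotCompute>(BECount))
+ //     return; // trip count unknown; give up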
+
+ /// SCEVPredicate - This class represents an assumption made using SCEV
+ /// expressions which can be checked at run-time.
+ class SCEVPredicate : public FoldingSetNode {
+ friend struct FoldingSetTrait<SCEVPredicate>;
+
+ /// A reference to an Interned FoldingSetNodeID for this node. The
+ /// ScalarEvolution's BumpPtrAllocator holds the data.
+ FoldingSetNodeIDRef FastID;
+
+ public:
+ enum SCEVPredicateKind { P_Union, P_Equal };
+
+ protected:
+ SCEVPredicateKind Kind;
+ ~SCEVPredicate() = default;
+ SCEVPredicate(const SCEVPredicate&) = default;
+ SCEVPredicate &operator=(const SCEVPredicate&) = default;
+
+ public:
+ SCEVPredicate(const FoldingSetNodeIDRef ID, SCEVPredicateKind Kind);
+
+ SCEVPredicateKind getKind() const { return Kind; }
+
+ /// \brief Returns the estimated complexity of this predicate.
+ /// This is roughly measured in the number of run-time checks required.
+ virtual unsigned getComplexity() const { return 1; }
+
+ /// \brief Returns true if the predicate is always true. This means that no
+ /// assumptions were made and nothing needs to be checked at run-time.
+ virtual bool isAlwaysTrue() const = 0;
+
+ /// \brief Returns true if this predicate implies \p N.
+ virtual bool implies(const SCEVPredicate *N) const = 0;
+
+ /// \brief Prints a textual representation of this predicate with an
+ /// indentation of \p Depth.
+ virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
+
+ /// \brief Returns the SCEV to which this predicate applies, or nullptr
+ /// if this is a SCEVUnionPredicate.
+ virtual const SCEV *getExpr() const = 0;
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
+ P.print(OS);
+ return OS;
+ }
+
+ // Specialize FoldingSetTrait for SCEVPredicate to avoid needing to compute
+ // temporary FoldingSetNodeID values.
+ template <>
+ struct FoldingSetTrait<SCEVPredicate>
+ : DefaultFoldingSetTrait<SCEVPredicate> {
+
+ static void Profile(const SCEVPredicate &X, FoldingSetNodeID &ID) {
+ ID = X.FastID;
+ }
+
+ static bool Equals(const SCEVPredicate &X, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) {
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SCEVPredicate &X,
+ FoldingSetNodeID &TempID) {
+ return X.FastID.ComputeHash();
+ }
+ };
+
+ /// SCEVEqualPredicate - This class represents an assumption that two SCEV
+ /// expressions are equal, and this can be checked at run-time. We assume
+ /// that the left hand side is a SCEVUnknown and the right hand side a
+ /// constant.
+ class SCEVEqualPredicate final : public SCEVPredicate {
+ /// We assume that LHS == RHS, where LHS is a SCEVUnknown and RHS a
+ /// constant.
+ const SCEVUnknown *LHS;
+ const SCEVConstant *RHS;
+
+ public:
+ SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEVUnknown *LHS,
+ const SCEVConstant *RHS);
+
+ /// Implementation of the SCEVPredicate interface
+ bool implies(const SCEVPredicate *N) const override;
+ void print(raw_ostream &OS, unsigned Depth = 0) const override;
+ bool isAlwaysTrue() const override;
+ const SCEV *getExpr() const override;
+
+ /// \brief Returns the left hand side of the equality.
+ const SCEVUnknown *getLHS() const { return LHS; }
+
+ /// \brief Returns the right hand side of the equality.
+ const SCEVConstant *getRHS() const { return RHS; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVPredicate *P) {
+ return P->getKind() == P_Equal;
+ }
+ };
+
+ /// SCEVUnionPredicate - This class represents a composition of other
+ /// SCEV predicates, and is the class that most clients will interact with.
+ /// This is equivalent to a logical "AND" of all the predicates in the union.
+ class SCEVUnionPredicate final : public SCEVPredicate {
+ private:
+ typedef DenseMap<const SCEV *, SmallVector<const SCEVPredicate *, 4>>
+ PredicateMap;
+
+ /// Vector with references to all predicates in this union.
+ SmallVector<const SCEVPredicate *, 16> Preds;
+ /// Maps SCEVs to predicates for quick look-ups.
+ PredicateMap SCEVToPreds;
+
+ public:
+ SCEVUnionPredicate();
+
+ const SmallVectorImpl<const SCEVPredicate *> &getPredicates() const {
+ return Preds;
+ }
+
+ /// \brief Adds a predicate to this union.
+ void add(const SCEVPredicate *N);
+
+ /// \brief Returns a reference to a vector containing all predicates
+ /// which apply to \p Expr.
+ ArrayRef<const SCEVPredicate *> getPredicatesForExpr(const SCEV *Expr);
+
+ /// Implementation of the SCEVPredicate interface
+ bool isAlwaysTrue() const override;
+ bool implies(const SCEVPredicate *N) const override;
+ void print(raw_ostream &OS, unsigned Depth) const override;
+ const SCEV *getExpr() const override;
+
+ /// \brief We estimate the complexity of a union predicate as the
+ /// number of predicates in the union.
+ unsigned getComplexity() const override { return Preds.size(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEVPredicate *P) {
+ return P->getKind() == P_Union;
+ }
+ };
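+
+ // Illustrative sketch (P1 and P2 are hypothetical SCEVPredicate pointers
+ // obtained elsewhere): predicates are accumulated into a union and the
+ // union is then consulted as a whole:
+ //
+ //   SCEVUnionPredicate Union;
+ //   Union.add(P1);
+ //   Union.add(P2);
+ //   if (!Union.isAlwaysTrue())
+ //     Union.print(errs(), /*Depth=*/2);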
+
+ /// The main scalar evolution driver. Because client code (intentionally)
+ /// can't do much with the SCEV objects directly, they must ask this class
+ /// for services.
+ class ScalarEvolution {
+ public:
+ /// An enum describing the relationship between a SCEV and a loop.
+ enum LoopDisposition {
+ LoopVariant, ///< The SCEV is loop-variant (unknown).
+ LoopInvariant, ///< The SCEV is loop-invariant.
+ LoopComputable ///< The SCEV varies predictably with the loop.
+ };
+
+ /// An enum describing the relationship between a SCEV and a basic block.
+ enum BlockDisposition {
+ DoesNotDominateBlock, ///< The SCEV does not dominate the block.
+ DominatesBlock, ///< The SCEV dominates the block.
+ ProperlyDominatesBlock ///< The SCEV properly dominates the block.
+ };
+
+ /// Convenient NoWrapFlags manipulation that hides enum casts and is
+ /// visible in the ScalarEvolution name space.
+ static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
+ maskFlags(SCEV::NoWrapFlags Flags, int Mask) {
+ return (SCEV::NoWrapFlags)(Flags & Mask);
+ }
+ static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
+ setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags) {
+ return (SCEV::NoWrapFlags)(Flags | OnFlags);
+ }
+ static SCEV::NoWrapFlags LLVM_ATTRIBUTE_UNUSED_RESULT
+ clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags) {
+ return (SCEV::NoWrapFlags)(Flags & ~OffFlags);
+ }
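+
+ // For instance (illustrative only):
+ //
+ //   SCEV::NoWrapFlags F =
+ //       ScalarEvolution::setFlags(SCEV::FlagAnyWrap, SCEV::FlagNUW);
+ //   F = ScalarEvolution::clearFlags(F, SCEV::FlagNUW); // FlagAnyWrap again
+ //   assert(ScalarEvolution::maskFlags(F, SCEV::NoWrapMask) ==
+ //          SCEV::FlagAnyWrap);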
+
+ private:
+ /// A CallbackVH to arrange for ScalarEvolution to be notified whenever a
+ /// Value is deleted.
+ class SCEVCallbackVH final : public CallbackVH {
+ ScalarEvolution *SE;
+ void deleted() override;
+ void allUsesReplacedWith(Value *New) override;
+ public:
+ SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
+ };
+
+ friend class SCEVCallbackVH;
+ friend class SCEVExpander;
+ friend class SCEVUnknown;
+
+ /// The function we are analyzing.
+ ///
+ Function &F;
+
+ /// The target library information for the target we are targeting.
+ ///
+ TargetLibraryInfo &TLI;
+
+ /// The tracker for @llvm.assume intrinsics in this function.
+ AssumptionCache &AC;
+
+ /// The dominator tree.
+ ///
+ DominatorTree &DT;
+
+ /// The loop information for the function we are currently analyzing.
+ ///
+ LoopInfo &LI;
+
+ /// This SCEV is used to represent unknown trip counts and other
+ /// unanalyzable values.
+ std::unique_ptr<SCEVCouldNotCompute> CouldNotCompute;
+
+ /// The typedef for ValueExprMap.
+ ///
+ typedef DenseMap<SCEVCallbackVH, const SCEV *, DenseMapInfo<Value *> >
+ ValueExprMapType;
+
+ /// This is a cache of the values we have analyzed so far.
+ ///
+ ValueExprMapType ValueExprMap;
+
+ /// Mark predicate values currently being processed by isImpliedCond.
+ DenseSet<Value*> PendingLoopPredicates;
+
+ /// Set to true by isLoopBackedgeGuardedByCond when we're walking the set of
+ /// conditions dominating the backedge of a loop.
+ bool WalkingBEDominatingConds;
+
+ /// Set to true by isKnownPredicateViaSplitting when we're trying to prove a
+ /// predicate by splitting it into a set of independent predicates.
+ bool ProvingSplitPredicate;
+
+ /// Information about the number of loop iterations for which a loop exit's
+ /// branch condition evaluates to the not-taken path. This is a temporary
+ /// pair of exact and max expressions that are eventually summarized in
+ /// ExitNotTakenInfo and BackedgeTakenInfo.
+ struct ExitLimit {
+ const SCEV *Exact;
+ const SCEV *Max;
+
+ /*implicit*/ ExitLimit(const SCEV *E) : Exact(E), Max(E) {}
+
+ ExitLimit(const SCEV *E, const SCEV *M) : Exact(E), Max(M) {}
+
+ /// Test whether this ExitLimit contains any computed information, or
+ /// whether it's all SCEVCouldNotCompute values.
+ bool hasAnyInfo() const {
+ return !isa<SCEVCouldNotCompute>(Exact) ||
+ !isa<SCEVCouldNotCompute>(Max);
+ }
+ };
+
+ /// Information about the number of times a particular loop exit may be
+ /// reached before exiting the loop.
+ struct ExitNotTakenInfo {
+ AssertingVH<BasicBlock> ExitingBlock;
+ const SCEV *ExactNotTaken;
+ PointerIntPair<ExitNotTakenInfo*, 1> NextExit;
+
+ ExitNotTakenInfo() : ExitingBlock(nullptr), ExactNotTaken(nullptr) {}
+
+ /// Return true if all loop exits are computable.
+ bool isCompleteList() const {
+ return NextExit.getInt() == 0;
+ }
+
+ void setIncomplete() { NextExit.setInt(1); }
+
+ /// Return a pointer to the next exit's not-taken info.
+ ExitNotTakenInfo *getNextExit() const {
+ return NextExit.getPointer();
+ }
+
+ void setNextExit(ExitNotTakenInfo *ENT) { NextExit.setPointer(ENT); }
+ };
+
+ /// Information about the backedge-taken count of a loop. This currently
+ /// includes an exact count and a maximum count.
+ ///
+ class BackedgeTakenInfo {
+ /// A list of computable exits and their not-taken counts. Loops almost
+ /// never have more than one computable exit.
+ ExitNotTakenInfo ExitNotTaken;
+
+ /// An expression indicating the least maximum backedge-taken count of the
+ /// loop that is known, or a SCEVCouldNotCompute.
+ const SCEV *Max;
+
+ public:
+ BackedgeTakenInfo() : Max(nullptr) {}
+
+ /// Initialize BackedgeTakenInfo from a list of exact exit counts.
+ BackedgeTakenInfo(
+ SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
+ bool Complete, const SCEV *MaxCount);
+
+ /// Test whether this BackedgeTakenInfo contains any computed information,
+ /// or whether it's all SCEVCouldNotCompute values.
+ bool hasAnyInfo() const {
+ return ExitNotTaken.ExitingBlock || !isa<SCEVCouldNotCompute>(Max);
+ }
+
+ /// Return an expression indicating the exact backedge-taken count of the
+ /// loop if it is known, or SCEVCouldNotCompute otherwise. This is the
+ /// number of times the loop header can be guaranteed to execute, minus
+ /// one.
+ const SCEV *getExact(ScalarEvolution *SE) const;
+
+ /// Return the number of times this loop exit may fall through to the back
+ /// edge, or SCEVCouldNotCompute. The loop is guaranteed not to exit via
+ /// this block before this number of iterations, but may exit via another
+ /// block.
+ const SCEV *getExact(BasicBlock *ExitingBlock, ScalarEvolution *SE) const;
+
+ /// Get the max backedge taken count for the loop.
+ const SCEV *getMax(ScalarEvolution *SE) const;
+
+ /// Return true if any backedge taken count expressions refer to the given
+ /// subexpression.
+ bool hasOperand(const SCEV *S, ScalarEvolution *SE) const;
+
+ /// Invalidate this result and free associated memory.
+ void clear();
+ };
+
+ /// Cache the backedge-taken count of the loops for this function as they
+ /// are computed.
+ DenseMap<const Loop*, BackedgeTakenInfo> BackedgeTakenCounts;
+
+ /// This map contains entries for all of the PHI instructions that we
+ /// attempt to compute constant evolutions for. This allows us to avoid
+ /// potentially expensive recomputation of these properties. An instruction
+ /// maps to null if we are unable to compute its exit value.
+ DenseMap<PHINode*, Constant*> ConstantEvolutionLoopExitValue;
+
+ /// This map contains entries for all the expressions that we attempt to
+ /// compute getSCEVAtScope information for, which can be expensive in
+ /// extreme cases.
+ DenseMap<const SCEV *,
+ SmallVector<std::pair<const Loop *, const SCEV *>, 2> > ValuesAtScopes;
+
+ /// Memoized computeLoopDisposition results.
+ DenseMap<const SCEV *,
+ SmallVector<PointerIntPair<const Loop *, 2, LoopDisposition>, 2>>
+ LoopDispositions;
+
+ /// Compute a LoopDisposition value.
+ LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
+
+ /// Memoized computeBlockDisposition results.
+ DenseMap<
+ const SCEV *,
+ SmallVector<PointerIntPair<const BasicBlock *, 2, BlockDisposition>, 2>>
+ BlockDispositions;
+
+ /// Compute a BlockDisposition value.
+ BlockDisposition computeBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+ /// Memoized results from getRange
+ DenseMap<const SCEV *, ConstantRange> UnsignedRanges;
+
+ /// Memoized results from getRange
+ DenseMap<const SCEV *, ConstantRange> SignedRanges;
+
+ /// Used to parameterize getRange
+ enum RangeSignHint { HINT_RANGE_UNSIGNED, HINT_RANGE_SIGNED };
+
+ /// Set the memoized range for the given SCEV.
+ const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
+ const ConstantRange &CR) {
+ DenseMap<const SCEV *, ConstantRange> &Cache =
+ Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
+
+ std::pair<DenseMap<const SCEV *, ConstantRange>::iterator, bool> Pair =
+ Cache.insert(std::make_pair(S, CR));
+ if (!Pair.second)
+ Pair.first->second = CR;
+ return Pair.first->second;
+ }
+
+ /// Determine the range for a particular SCEV.
+ ConstantRange getRange(const SCEV *S, RangeSignHint Hint);
+
+ /// We know that there is no SCEV for the specified value. Analyze the
+ /// expression.
+ const SCEV *createSCEV(Value *V);
+
+ /// Provide the special handling we need to analyze PHI SCEVs.
+ const SCEV *createNodeForPHI(PHINode *PN);
+
+ /// Helper function called from createNodeForPHI.
+ const SCEV *createAddRecFromPHI(PHINode *PN);
+
+ /// Helper function called from createNodeForPHI.
+ const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
+
+ /// Provide special handling for a select-like instruction (currently this
+ /// is either a select instruction or a phi node). \p I is the instruction
+ /// being processed, and it is assumed equivalent to "Cond ? TrueVal :
+ /// FalseVal".
+ const SCEV *createNodeForSelectOrPHI(Instruction *I, Value *Cond,
+ Value *TrueVal, Value *FalseVal);
+
+ /// Provide the special handling we need to analyze GEP SCEVs.
+ const SCEV *createNodeForGEP(GEPOperator *GEP);
+
+ /// Implementation code for getSCEVAtScope; called at most once for each
+ /// SCEV+Loop pair.
+ ///
+ const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L);
+
+ /// This looks up computed SCEV values for all instructions that depend on
+ /// the given instruction and removes them from the ValueExprMap map if they
+ /// reference SymName. This is used during PHI resolution.
+ void ForgetSymbolicName(Instruction *I, const SCEV *SymName);
+
+ /// Return the BackedgeTakenInfo for the given loop, lazily computing new
+ /// values if the loop hasn't been analyzed yet.
+ const BackedgeTakenInfo &getBackedgeTakenInfo(const Loop *L);
+
+ /// Compute the number of times the specified loop will iterate.
+ BackedgeTakenInfo computeBackedgeTakenCount(const Loop *L);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if it exits via the specified block.
+ ExitLimit computeExitLimit(const Loop *L, BasicBlock *ExitingBlock);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a conditional branch of ExitCond,
+ /// TBB, and FBB.
+ ExitLimit computeExitLimitFromCond(const Loop *L,
+ Value *ExitCond,
+ BasicBlock *TBB,
+ BasicBlock *FBB,
+ bool IsSubExpr);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a conditional branch of the ICmpInst
+ /// ExitCond, TBB, and FBB.
+ ExitLimit computeExitLimitFromICmp(const Loop *L,
+ ICmpInst *ExitCond,
+ BasicBlock *TBB,
+ BasicBlock *FBB,
+ bool IsSubExpr);
+
+ /// Compute the number of times the backedge of the specified loop will
+ /// execute if its exit condition were a switch with a single exiting case
+ /// to ExitingBB.
+ ExitLimit
+ computeExitLimitFromSingleExitSwitch(const Loop *L, SwitchInst *Switch,
+ BasicBlock *ExitingBB, bool IsSubExpr);
+
+ /// Given an exit condition of 'icmp op load X, cst', try to see if we can
+ /// compute the backedge-taken count.
+ ExitLimit computeLoadConstantCompareExitLimit(LoadInst *LI,
+ Constant *RHS,
+ const Loop *L,
+ ICmpInst::Predicate p);
+
+ /// Compute the exit limit of a loop that is controlled by a
+ /// "(IV >> 1) != 0" type comparison. We cannot compute the exact trip
+ /// count in these cases (since SCEV has no way of expressing them), but we
+ /// can still sometimes compute an upper bound.
+ ///
+ /// Return an ExitLimit for a loop whose backedge is guarded by `LHS Pred
+ /// RHS`.
+ ExitLimit computeShiftCompareExitLimit(Value *LHS, Value *RHS,
+ const Loop *L,
+ ICmpInst::Predicate Pred);
+
+ /// If the loop is known to execute a constant number of times (the
+ /// condition evolves only from constants), try to evaluate a few iterations
+ /// of the loop until the exit condition gets a value of ExitWhen
+ /// (true or false). If we cannot evaluate the exit count of the loop,
+ /// return CouldNotCompute.
+ const SCEV *computeExitCountExhaustively(const Loop *L,
+ Value *Cond,
+ bool ExitWhen);
+
+ /// Return the number of times an exit condition comparing the specified
+ /// value to zero will execute. If not computable, return CouldNotCompute.
+ ExitLimit HowFarToZero(const SCEV *V, const Loop *L, bool IsSubExpr);
+
+ /// Return the number of times an exit condition checking the specified
+ /// value for nonzero will execute. If not computable, return
+ /// CouldNotCompute.
+ ExitLimit HowFarToNonZero(const SCEV *V, const Loop *L);
+
+ /// Return the number of times an exit condition containing the specified
+ /// less-than comparison will execute. If not computable, return
+ /// CouldNotCompute. isSigned specifies whether the less-than is signed.
+ ExitLimit HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
+ const Loop *L, bool isSigned, bool IsSubExpr);
+ ExitLimit HowManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
+ const Loop *L, bool isSigned, bool IsSubExpr);
+
+ /// Return a predecessor of BB (which may not be an immediate predecessor)
+ /// which has exactly one successor from which BB is reachable, or a pair
+ /// of null pointers if no such block is found.
+ std::pair<BasicBlock *, BasicBlock *>
+ getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the given FoundCondValue value evaluates to true.
+ bool isImpliedCond(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ Value *FoundCondValue,
+ bool Inverse);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by FoundPred, FoundLHS, FoundRHS is
+ /// true.
+ bool isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, ICmpInst::Predicate FoundPred,
+ const SCEV *FoundLHS, const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ bool isImpliedCondOperands(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS, const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ bool isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true. Utility function used by isImpliedCondOperands.
+ bool isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// Test whether the condition described by Pred, LHS, and RHS is true
+ /// whenever the condition described by Pred, FoundLHS, and FoundRHS is
+ /// true.
+ ///
+ /// This routine tries to rule out certain kinds of integer overflow, and
+ /// then tries to reason about arithmetic properties of the predicates.
+ bool isImpliedCondOperandsViaNoOverflow(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS,
+ const SCEV *FoundLHS,
+ const SCEV *FoundRHS);
+
+ /// If we know that the specified Phi is in the header of its containing
+ /// loop, we know the loop executes a constant number of times, and the PHI
+ /// node is just a recurrence involving constants, fold it.
+ Constant *getConstantEvolutionLoopExitValue(PHINode *PN, const APInt& BEs,
+ const Loop *L);
+
+ /// Test if the given expression is known to satisfy the condition described
+ /// by Pred and the known constant ranges of LHS and RHS.
+ ///
+ bool isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Try to prove the condition described by "LHS Pred RHS" by ruling out
+ /// integer overflow.
+ ///
+ /// For instance, this will return true for "A s< (A + C)<nsw>" if C is
+ /// positive.
+ bool isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Try to split Pred LHS RHS into logical conjunctions (and's) and try to
+ /// prove them individually.
+ bool isKnownPredicateViaSplitting(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS);
+
+ /// Try to match the Expr as "(L + R)<Flags>".
+ bool splitBinaryAdd(const SCEV *Expr, const SCEV *&L, const SCEV *&R,
+ SCEV::NoWrapFlags &Flags);
+
+ /// Return true if More == (Less + C), where C is a constant. This is
+ /// intended to be used as a cheaper substitute for full SCEV subtraction.
+ bool computeConstantDifference(const SCEV *Less, const SCEV *More,
+ APInt &C);
+
+ /// Drop memoized information computed for S.
+ void forgetMemoizedResults(const SCEV *S);
+
+ /// Return an existing SCEV for V if there is one, otherwise return nullptr.
+ const SCEV *getExistingSCEV(Value *V);
+
+ /// Return false iff the given SCEV contains a SCEVUnknown with a null
+ /// value pointer.
+ bool checkValidity(const SCEV *S) const;
+
+ /// Return true if `ExtendOpTy`({`Start`,+,`Step`}) can be proved to be
+ /// equal to {`ExtendOpTy`(`Start`),+,`ExtendOpTy`(`Step`)}. This is
+ /// equivalent to proving no signed (resp. unsigned) wrap in
+ /// {`Start`,+,`Step`} if `ExtendOpTy` is `SCEVSignExtendExpr`
+ /// (resp. `SCEVZeroExtendExpr`).
+ ///
+ template<typename ExtendOpTy>
+ bool proveNoWrapByVaryingStart(const SCEV *Start, const SCEV *Step,
+ const Loop *L);
+
+ bool isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred, bool &Increasing);
+
+ /// Return true if, for all loop invariant X, the predicate "LHS `Pred` X"
+ /// is monotonically increasing or decreasing. In the former case set
+ /// `Increasing` to true and in the latter case set `Increasing` to false.
+ ///
+ /// A predicate is said to be monotonically increasing if it may go from
+ /// being false to being true as the loop iterates, but never the other
+ /// way around. A predicate is said to be monotonically decreasing if it
+ /// may go from being true to being false as the loop iterates, but never
+ /// the other way around.
+ bool isMonotonicPredicate(const SCEVAddRecExpr *LHS,
+ ICmpInst::Predicate Pred, bool &Increasing);
+
+ // Return SCEV no-wrap flags that can be proven based on reasoning
+ // about how poison produced from no-wrap flags on this value
+ // (e.g. a nuw add) would trigger undefined behavior on overflow.
+ SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V);
+
+ public:
+ ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC,
+ DominatorTree &DT, LoopInfo &LI);
+ ~ScalarEvolution();
+ ScalarEvolution(ScalarEvolution &&Arg);
+
+ LLVMContext &getContext() const { return F.getContext(); }
+
+ /// Test if values of the given type are analyzable within the SCEV
+ /// framework. This primarily includes integer types, and it can optionally
+ /// include pointer types if the ScalarEvolution class has access to
+ /// target-specific information.
+ bool isSCEVable(Type *Ty) const;
+
+ /// Return the size in bits of the specified type, for which isSCEVable must
+ /// return true.
+ uint64_t getTypeSizeInBits(Type *Ty) const;
+
+ /// Return a type with the same bitwidth as the given type and which
+ /// represents how SCEV will treat the given type, for which isSCEVable must
+ /// return true. For pointer types, this is the pointer-sized integer type.
+ Type *getEffectiveSCEVType(Type *Ty) const;
+
+ /// Return a SCEV expression for the full generality of the specified
+ /// expression.
+ const SCEV *getSCEV(Value *V);
+
+ const SCEV *getConstant(ConstantInt *V);
+ const SCEV *getConstant(const APInt& Val);
+ const SCEV *getConstant(Type *Ty, uint64_t V, bool isSigned = false);
+ const SCEV *getTruncateExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getZeroExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getSignExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getAnyExtendExpr(const SCEV *Op, Type *Ty);
+ const SCEV *getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ return getAddExpr(Ops, Flags);
+ }
+ const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ return getAddExpr(Ops, Flags);
+ }
+ const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
+ return getMulExpr(Ops, Flags);
+ }
+ const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
+ return getMulExpr(Ops, Flags);
+ }
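+  /// As a usage sketch for the builders above (a hypothetical helper;
+  /// assumes V0 and V1 are SCEV-analyzable values of the same integer type):
+  /// \code
+  ///   static const SCEV *buildSumTimesB(ScalarEvolution &SE, Value *V0,
+  ///                                     Value *V1) {
+  ///     const SCEV *A = SE.getSCEV(V0);
+  ///     const SCEV *B = SE.getSCEV(V1);
+  ///     // (A + B) * B; no flags are claimed, so FlagAnyWrap applies.
+  ///     return SE.getMulExpr(SE.getAddExpr(A, B), B);
+  ///   }
+  /// \endcode
+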
+ const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getAddRecExpr(const SCEV *Start, const SCEV *Step,
+ const Loop *L, SCEV::NoWrapFlags Flags);
+ const SCEV *getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags);
+ const SCEV *getAddRecExpr(const SmallVectorImpl<const SCEV *> &Operands,
+ const Loop *L, SCEV::NoWrapFlags Flags) {
+ SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
+ return getAddRecExpr(NewOp, L, Flags);
+ }
+  /// \brief Returns an expression for a GEP.
+  ///
+  /// \p PointeeType The type used as the basis for the pointer arithmetic.
+ /// \p BaseExpr The expression for the pointer operand.
+ /// \p IndexExprs The expressions for the indices.
+ /// \p InBounds Whether the GEP is in bounds.
+ const SCEV *getGEPExpr(Type *PointeeType, const SCEV *BaseExpr,
+ const SmallVectorImpl<const SCEV *> &IndexExprs,
+ bool InBounds = false);
+ const SCEV *getSMaxExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getSMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getUMaxExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
+ const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
+ const SCEV *getUnknown(Value *V);
+ const SCEV *getCouldNotCompute();
+
+ /// \brief Return a SCEV for the constant 0 of a specific type.
+ const SCEV *getZero(Type *Ty) { return getConstant(Ty, 0); }
+
+ /// \brief Return a SCEV for the constant 1 of a specific type.
+ const SCEV *getOne(Type *Ty) { return getConstant(Ty, 1); }
+
+  /// Return an expression for sizeof AllocTy of type IntTy
+ ///
+ const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
+
+  /// Return an expression for offsetof on the given field, of type IntTy
+ ///
+ const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
+
+ /// Return the SCEV object corresponding to -V.
+ ///
+ const SCEV *getNegativeSCEV(const SCEV *V,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+
+ /// Return the SCEV object corresponding to ~V.
+ ///
+ const SCEV *getNotSCEV(const SCEV *V);
+
+ /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
+ const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is zero extended.
+ const SCEV *getTruncateOrZeroExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is sign extended.
+ const SCEV *getTruncateOrSignExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is zero extended. The
+ /// conversion must not be narrowing.
+ const SCEV *getNoopOrZeroExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is sign extended. The
+ /// conversion must not be narrowing.
+ const SCEV *getNoopOrSignExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. If the type must be extended, it is extended with
+ /// unspecified bits. The conversion must not be narrowing.
+ const SCEV *getNoopOrAnyExtend(const SCEV *V, Type *Ty);
+
+ /// Return a SCEV corresponding to a conversion of the input value to the
+ /// specified type. The conversion must not be widening.
+ const SCEV *getTruncateOrNoop(const SCEV *V, Type *Ty);
+
+ /// Promote the operands to the wider of the types using zero-extension, and
+ /// then perform a umax operation with them.
+ const SCEV *getUMaxFromMismatchedTypes(const SCEV *LHS,
+ const SCEV *RHS);
+
+ /// Promote the operands to the wider of the types using zero-extension, and
+ /// then perform a umin operation with them.
+ const SCEV *getUMinFromMismatchedTypes(const SCEV *LHS,
+ const SCEV *RHS);
+
+ /// Transitively follow the chain of pointer-type operands until reaching a
+ /// SCEV that does not have a single pointer operand. This returns a
+ /// SCEVUnknown pointer for well-formed pointer-type expressions, but corner
+ /// cases do exist.
+ const SCEV *getPointerBase(const SCEV *V);
+
+  /// Return a SCEV expression for the specified value at the specified scope
+  /// in the program. The L value specifies the loop nest in which to
+  /// evaluate the expression: null means the top level, and a non-null loop
+  /// means the point immediately inside that loop.
+ ///
+ /// This method can be used to compute the exit value for a variable defined
+ /// in a loop by querying what the value will hold in the parent loop.
+ ///
+ /// In the case that a relevant loop exit value cannot be computed, the
+ /// original value V is returned.
+ const SCEV *getSCEVAtScope(const SCEV *S, const Loop *L);
+
+ /// This is a convenience function which does getSCEVAtScope(getSCEV(V), L).
+ const SCEV *getSCEVAtScope(Value *V, const Loop *L);
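+  /// For example, a sketch of querying the value a loop-defined instruction
+  /// holds once the loop has exited (Inst and L are hypothetical):
+  /// \code
+  ///   static const SCEV *getExitValue(ScalarEvolution &SE, Value *Inst,
+  ///                                   const Loop *L) {
+  ///     // The parent loop's scope (nullptr means top level) sees the
+  ///     // value after L has finished iterating.
+  ///     return SE.getSCEVAtScope(Inst, L->getParentLoop());
+  ///   }
+  /// \endcode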
+
+ /// Test whether entry to the loop is protected by a conditional between LHS
+ /// and RHS. This is used to help avoid max expressions in loop trip
+ /// counts, and to eliminate casts.
+ bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// Test whether the backedge of the loop is protected by a conditional
+  /// between LHS and RHS. This is used to eliminate casts.
+ bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
+
+ /// \brief Returns the maximum trip count of the loop if it is a single-exit
+ /// loop and we can compute a small maximum for that loop.
+ ///
+ /// Implemented in terms of the \c getSmallConstantTripCount overload with
+ /// the single exiting block passed to it. See that routine for details.
+ unsigned getSmallConstantTripCount(Loop *L);
+
+ /// Returns the maximum trip count of this loop as a normal unsigned
+ /// value. Returns 0 if the trip count is unknown or not constant. This
+ /// "trip count" assumes that control exits via ExitingBlock. More
+ /// precisely, it is the number of times that control may reach ExitingBlock
+ /// before taking the branch. For loops with multiple exits, it may not be
+  /// the number of times that the loop header executes if the loop exits
+ /// prematurely via another branch.
+ unsigned getSmallConstantTripCount(Loop *L, BasicBlock *ExitingBlock);
+
+ /// \brief Returns the largest constant divisor of the trip count of the
+ /// loop if it is a single-exit loop and we can compute a small maximum for
+ /// that loop.
+ ///
+ /// Implemented in terms of the \c getSmallConstantTripMultiple overload with
+ /// the single exiting block passed to it. See that routine for details.
+ unsigned getSmallConstantTripMultiple(Loop *L);
+
+ /// Returns the largest constant divisor of the trip count of this loop as a
+ /// normal unsigned value, if possible. This means that the actual trip
+ /// count is always a multiple of the returned value (don't forget the trip
+ /// count could very well be zero as well!). As explained in the comments
+ /// for getSmallConstantTripCount, this assumes that control exits the loop
+ /// via ExitingBlock.
+ unsigned getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock);
+
+  /// Get the expression for the number of loop iterations for which this
+  /// loop is guaranteed not to exit via ExitingBlock, or SCEVCouldNotCompute
+  /// if that number cannot be determined.
+ const SCEV *getExitCount(Loop *L, BasicBlock *ExitingBlock);
+
+ /// If the specified loop has a predictable backedge-taken count, return it,
+ /// otherwise return a SCEVCouldNotCompute object. The backedge-taken count
+ /// is the number of times the loop header will be branched to from within
+ /// the loop. This is one less than the trip count of the loop, since it
+ /// doesn't count the first iteration, when the header is branched to from
+ /// outside the loop.
+ ///
+ /// Note that it is not valid to call this method on a loop without a
+ /// loop-invariant backedge-taken count (see
+ /// hasLoopInvariantBackedgeTakenCount).
+ ///
+ const SCEV *getBackedgeTakenCount(const Loop *L);
+
+ /// Similar to getBackedgeTakenCount, except return the least SCEV value
+ /// that is known never to be less than the actual backedge taken count.
+ const SCEV *getMaxBackedgeTakenCount(const Loop *L);
+
+ /// Return true if the specified loop has an analyzable loop-invariant
+ /// backedge-taken count.
+ bool hasLoopInvariantBackedgeTakenCount(const Loop *L);
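+  /// The usual pattern is to guard the query, as in this sketch (SE and L
+  /// are hypothetical locals):
+  /// \code
+  ///   if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
+  ///     const SCEV *BTC = SE.getBackedgeTakenCount(L);
+  ///     // The trip count is BTC + 1; widen first if the addition might
+  ///     // wrap and an exact count is needed.
+  ///   }
+  /// \endcode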
+
+ /// This method should be called by the client when it has changed a loop in
+  /// a way that may affect ScalarEvolution's ability to compute a trip count,
+ /// or if the loop is deleted. This call is potentially expensive for large
+ /// loop bodies.
+ void forgetLoop(const Loop *L);
+
+ /// This method should be called by the client when it has changed a value
+  /// in a way that may affect its value, or which may disconnect it from a
+ /// def-use chain linking it to a loop.
+ void forgetValue(Value *V);
+
+ /// \brief Called when the client has changed the disposition of values in
+ /// this loop.
+ ///
+ /// We don't have a way to invalidate per-loop dispositions. Clear and
+ /// recompute is simpler.
+ void forgetLoopDispositions(const Loop *L) { LoopDispositions.clear(); }
+
+ /// Determine the minimum number of zero bits that S is guaranteed to end in
+ /// (at every loop iteration). It is, at the same time, the minimum number
+ /// of times S is divisible by 2. For example, given {4,+,8} it returns 2.
+ /// If S is guaranteed to be 0, it returns the bitwidth of S.
+ uint32_t GetMinTrailingZeros(const SCEV *S);
+
+ /// Determine the unsigned range for a particular SCEV.
+ ///
+ ConstantRange getUnsignedRange(const SCEV *S) {
+ return getRange(S, HINT_RANGE_UNSIGNED);
+ }
+
+ /// Determine the signed range for a particular SCEV.
+ ///
+ ConstantRange getSignedRange(const SCEV *S) {
+ return getRange(S, HINT_RANGE_SIGNED);
+ }
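+  /// For example, a sketch that checks whether S is provably at most 255 in
+  /// the unsigned sense:
+  /// \code
+  ///   ConstantRange CR = SE.getUnsignedRange(S);
+  ///   bool FitsInByte = CR.getUnsignedMax().ule(255);
+  /// \endcode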
+
+ /// Test if the given expression is known to be negative.
+ ///
+ bool isKnownNegative(const SCEV *S);
+
+ /// Test if the given expression is known to be positive.
+ ///
+ bool isKnownPositive(const SCEV *S);
+
+ /// Test if the given expression is known to be non-negative.
+ ///
+ bool isKnownNonNegative(const SCEV *S);
+
+ /// Test if the given expression is known to be non-positive.
+ ///
+ bool isKnownNonPositive(const SCEV *S);
+
+ /// Test if the given expression is known to be non-zero.
+ ///
+ bool isKnownNonZero(const SCEV *S);
+
+ /// Test if the given expression is known to satisfy the condition described
+ /// by Pred, LHS, and RHS.
+ ///
+ bool isKnownPredicate(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS);
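+  /// For example, a sketch (A and B are hypothetical SCEVs of equal type):
+  /// \code
+  ///   if (SE.isKnownPredicate(ICmpInst::ICMP_SLT, A, B)) {
+  ///     // A < B (signed) holds whenever both values are defined, so a
+  ///     // range check of A against B could be elided here.
+  ///   }
+  /// \endcode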
+
+ /// Return true if the result of the predicate LHS `Pred` RHS is loop
+ /// invariant with respect to L. Set InvariantPred, InvariantLHS and
+  /// InvariantRHS so that InvariantLHS `InvariantPred` InvariantRHS is the
+ /// loop invariant form of LHS `Pred` RHS.
+ bool isLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS,
+ const SCEV *RHS, const Loop *L,
+ ICmpInst::Predicate &InvariantPred,
+ const SCEV *&InvariantLHS,
+ const SCEV *&InvariantRHS);
+
+ /// Simplify LHS and RHS in a comparison with predicate Pred. Return true
+ /// iff any changes were made. If the operands are provably equal or
+ /// unequal, LHS and RHS are set to the same value and Pred is set to either
+ /// ICMP_EQ or ICMP_NE.
+ ///
+ bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
+ const SCEV *&LHS,
+ const SCEV *&RHS,
+ unsigned Depth = 0);
+
+ /// Return the "disposition" of the given SCEV with respect to the given
+ /// loop.
+ LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L);
+
+ /// Return true if the value of the given SCEV is unchanging in the
+ /// specified loop.
+ bool isLoopInvariant(const SCEV *S, const Loop *L);
+
+ /// Return true if the given SCEV changes value in a known way in the
+ /// specified loop. This property being true implies that the value is
+ /// variant in the loop AND that we can emit an expression to compute the
+ /// value of the expression at any particular loop iteration.
+ bool hasComputableLoopEvolution(const SCEV *S, const Loop *L);
+
+ /// Return the "disposition" of the given SCEV with respect to the given
+ /// block.
+ BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB);
+
+  /// Return true if the elements that make up the given SCEV dominate the
+ /// specified basic block.
+ bool dominates(const SCEV *S, const BasicBlock *BB);
+
+  /// Return true if the elements that make up the given SCEV properly
+  /// dominate the specified basic block.
+  bool properlyDominates(const SCEV *S, const BasicBlock *BB);
+
+ /// Test whether the given SCEV has Op as a direct or indirect operand.
+ bool hasOperand(const SCEV *S, const SCEV *Op) const;
+
+ /// Return the size of an element read or written by Inst.
+ const SCEV *getElementSize(Instruction *Inst);
+
+ /// Compute the array dimensions Sizes from the set of Terms extracted from
+  /// the memory access function of a SCEVAddRecExpr.
+ void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize) const;
+
+ void print(raw_ostream &OS) const;
+ void verify() const;
+
+ /// Collect parametric terms occurring in step expressions.
+ void collectParametricTerms(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Terms);
+
+ /// Return in Subscripts the access functions for each dimension in Sizes.
+ void computeAccessFunctions(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes);
+
+  /// Split a SCEVAddRecExpr into two vectors of SCEVs representing the
+ /// subscripts and sizes of an array access.
+ ///
+ /// The delinearization is a 3 step process: the first two steps compute the
+ /// sizes of each subscript and the third step computes the access functions
+ /// for the delinearized array:
+ ///
+ /// 1. Find the terms in the step functions
+ /// 2. Compute the array size
+ /// 3. Compute the access function: divide the SCEV by the array size
+ /// starting with the innermost dimensions found in step 2. The Quotient
+ /// is the SCEV to be divided in the next step of the recursion. The
+ /// Remainder is the subscript of the innermost dimension. Loop over all
+ /// array dimensions computed in step 2.
+ ///
+ /// To compute a uniform array size for several memory accesses to the same
+ /// object, one can collect in step 1 all the step terms for all the memory
+ /// accesses, and compute in step 2 a unique array shape. This guarantees
+ /// that the array shape will be the same across all memory accesses.
+ ///
+ /// FIXME: We could derive the result of steps 1 and 2 from a description of
+ /// the array shape given in metadata.
+ ///
+ /// Example:
+ ///
+ /// A[][n][m]
+ ///
+ /// for i
+ /// for j
+ /// for k
+ /// A[j+k][2i][5i] =
+ ///
+ /// The initial SCEV:
+ ///
+ /// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
+ ///
+ /// 1. Find the different terms in the step functions:
+ /// -> [2*m, 5, n*m, n*m]
+ ///
+ /// 2. Compute the array size: sort and unique them
+ /// -> [n*m, 2*m, 5]
+ /// find the GCD of all the terms = 1
+ /// divide by the GCD and erase constant terms
+ /// -> [n*m, 2*m]
+ /// GCD = m
+ /// divide by GCD -> [n, 2]
+ /// remove constant terms
+ /// -> [n]
+ /// size of the array is A[unknown][n][m]
+ ///
+ /// 3. Compute the access function
+ /// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
+ /// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
+ /// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
+ /// The remainder is the subscript of the innermost array dimension: [5i].
+ ///
+ /// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
+ /// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
+ /// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
+ /// The Remainder is the subscript of the next array dimension: [2i].
+ ///
+ /// The subscript of the outermost dimension is the Quotient: [j+k].
+ ///
+ /// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
+ void delinearize(const SCEV *Expr,
+ SmallVectorImpl<const SCEV *> &Subscripts,
+ SmallVectorImpl<const SCEV *> &Sizes,
+ const SCEV *ElementSize);
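+  /// A typical invocation, sketched for a hypothetical load or store Inst
+  /// whose address expression is AccessFn:
+  /// \code
+  ///   SmallVector<const SCEV *, 4> Subscripts, Sizes;
+  ///   SE.delinearize(AccessFn, Subscripts, Sizes, SE.getElementSize(Inst));
+  ///   // On success, Subscripts[i] indexes the dimension of extent
+  ///   // Sizes[i]; the vectors may be left empty when no array shape can
+  ///   // be recovered.
+  /// \endcode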
+
+ /// Return the DataLayout associated with the module this SCEV instance is
+ /// operating on.
+ const DataLayout &getDataLayout() const {
+ return F.getParent()->getDataLayout();
+ }
+
+ const SCEVPredicate *getEqualPredicate(const SCEVUnknown *LHS,
+ const SCEVConstant *RHS);
+
+  /// Rewrites the SCEV according to the predicates in \p A.
+ const SCEV *rewriteUsingPredicate(const SCEV *Scev, SCEVUnionPredicate &A);
+
+ private:
+ /// Compute the backedge taken count knowing the interval difference, the
+  /// stride and the presence of equality in the comparison.
+ const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
+ bool Equality);
+
+  /// Verify if a linear IV with positive stride can overflow when in a
+ /// less-than comparison, knowing the invariant term of the comparison,
+ /// the stride and the knowledge of NSW/NUW flags on the recurrence.
+ bool doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
+ bool IsSigned, bool NoWrap);
+
+  /// Verify if a linear IV with negative stride can overflow when in a
+ /// greater-than comparison, knowing the invariant term of the comparison,
+ /// the stride and the knowledge of NSW/NUW flags on the recurrence.
+ bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
+ bool IsSigned, bool NoWrap);
+
+ private:
+ FoldingSet<SCEV> UniqueSCEVs;
+ FoldingSet<SCEVPredicate> UniquePreds;
+ BumpPtrAllocator SCEVAllocator;
+
+ /// The head of a linked list of all SCEVUnknown values that have been
+ /// allocated. This is used by releaseMemory to locate them all and call
+ /// their destructors.
+ SCEVUnknown *FirstUnknown;
+ };
+
+ /// \brief Analysis pass that exposes the \c ScalarEvolution for a function.
+ class ScalarEvolutionAnalysis {
+ static char PassID;
+
+ public:
+ typedef ScalarEvolution Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Provide a name for the analysis for debugging and logging.
+ static StringRef name() { return "ScalarEvolutionAnalysis"; }
+
+ ScalarEvolution run(Function &F, AnalysisManager<Function> *AM);
+ };
+
+ /// \brief Printer pass for the \c ScalarEvolutionAnalysis results.
+ class ScalarEvolutionPrinterPass {
+ raw_ostream &OS;
+
+ public:
+ explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {}
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+ static StringRef name() { return "ScalarEvolutionPrinterPass"; }
+ };
+
+ class ScalarEvolutionWrapperPass : public FunctionPass {
+ std::unique_ptr<ScalarEvolution> SE;
+
+ public:
+ static char ID;
+
+ ScalarEvolutionWrapperPass();
+
+ ScalarEvolution &getSE() { return *SE; }
+ const ScalarEvolution &getSE() const { return *SE; }
+
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(raw_ostream &OS, const Module * = nullptr) const override;
+ void verifyAnalysis() const override;
+ };
+
+ /// An interface layer with SCEV used to manage how we see SCEV expressions
+ /// for values in the context of existing predicates. We can add new
+ /// predicates, but we cannot remove them.
+ ///
+ /// This layer has multiple purposes:
+ /// - provides a simple interface for SCEV versioning.
+ /// - guarantees that the order of transformations applied on a SCEV
+ /// expression for a single Value is consistent across two different
+ /// getSCEV calls. This means that, for example, once we've obtained
+ /// an AddRec expression for a certain value through expression
+ /// rewriting, we will continue to get an AddRec expression for that
+ /// Value.
+ /// - lowers the number of expression rewrites.
+ class PredicatedScalarEvolution {
+ public:
+ PredicatedScalarEvolution(ScalarEvolution &SE);
+ const SCEVUnionPredicate &getUnionPredicate() const;
+ /// \brief Returns the SCEV expression of V, in the context of the current
+ /// SCEV predicate.
+ /// The order of transformations applied on the expression of V returned
+ /// by ScalarEvolution is guaranteed to be preserved, even when adding new
+ /// predicates.
+ const SCEV *getSCEV(Value *V);
+ /// \brief Adds a new predicate.
+ void addPredicate(const SCEVPredicate &Pred);
+ /// \brief Returns the ScalarEvolution analysis used.
+ ScalarEvolution *getSE() const { return &SE; }
+
+ private:
+ /// \brief Increments the version number of the predicate.
+ /// This needs to be called every time the SCEV predicate changes.
+ void updateGeneration();
+ /// Holds a SCEV and the version number of the SCEV predicate used to
+ /// perform the rewrite of the expression.
+ typedef std::pair<unsigned, const SCEV *> RewriteEntry;
+ /// Maps a SCEV to the rewrite result of that SCEV at a certain version
+ /// number. If this number doesn't match the current Generation, we will
+ /// need to do a rewrite. To preserve the transformation order of previous
+ /// rewrites, we will rewrite the previous result instead of the original
+ /// SCEV.
+ DenseMap<const SCEV *, RewriteEntry> RewriteMap;
+ /// The ScalarEvolution analysis.
+ ScalarEvolution &SE;
+ /// The SCEVPredicate that forms our context. We will rewrite all
+  /// expressions assuming that this predicate is true.
+ SCEVUnionPredicate Preds;
+ /// Marks the version of the SCEV predicate used. When rewriting a SCEV
+ /// expression we mark it with the version of the predicate. We use this to
+ /// figure out if the predicate has changed from the last rewrite of the
+ /// SCEV. If so, we need to perform a new rewrite.
+ unsigned Generation;
+ };
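+  /// A usage sketch (V and NewPred are hypothetical; predicates can only be
+  /// added, never removed):
+  /// \code
+  ///   PredicatedScalarEvolution PSE(SE);
+  ///   const SCEV *First = PSE.getSCEV(V);
+  ///   PSE.addPredicate(NewPred);
+  ///   // Later queries stay consistent with earlier rewrites of V.
+  ///   const SCEV *Second = PSE.getSCEV(V);
+  /// \endcode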
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
new file mode 100644
index 0000000..7bbbf55
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionAliasAnalysis.h
@@ -0,0 +1,79 @@
+//===- ScalarEvolutionAliasAnalysis.h - SCEV-based AA -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a SCEV-based alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// A simple alias analysis implementation that uses ScalarEvolution to answer
+/// queries.
+class SCEVAAResult : public AAResultBase<SCEVAAResult> {
+ ScalarEvolution &SE;
+
+public:
+ explicit SCEVAAResult(const TargetLibraryInfo &TLI, ScalarEvolution &SE)
+ : AAResultBase(TLI), SE(SE) {}
+ SCEVAAResult(SCEVAAResult &&Arg) : AAResultBase(std::move(Arg)), SE(Arg.SE) {}
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+
+private:
+ Value *GetBaseValue(const SCEV *S);
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class SCEVAA {
+public:
+ typedef SCEVAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ SCEVAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "SCEVAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the SCEVAAResult object.
+class SCEVAAWrapperPass : public FunctionPass {
+ std::unique_ptr<SCEVAAResult> Result;
+
+public:
+ static char ID;
+
+ SCEVAAWrapperPass();
+
+ SCEVAAResult &getResult() { return *Result; }
+ const SCEVAAResult &getResult() const { return *Result; }
+
+ bool runOnFunction(Function &F) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+/// Creates an instance of \c SCEVAAWrapperPass.
+FunctionPass *createSCEVAAWrapperPass();
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
new file mode 100644
index 0000000..b993916
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -0,0 +1,312 @@
+//===---- llvm/Analysis/ScalarEvolutionExpander.h - SCEV Exprs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to generate code from scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPANDER_H
+
+#include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/ScalarEvolutionNormalization.h"
+#include "llvm/Analysis/TargetFolder.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/ValueHandle.h"
+#include <set>
+
+namespace llvm {
+ class TargetTransformInfo;
+
+ /// Return true if the given expression is safe to expand in the sense that
+ /// all materialized values are safe to speculate.
+ bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE);
+
+  /// This class uses information about analyzed scalars to
+  /// rewrite expressions in canonical form.
+ ///
+ /// Clients should create an instance of this class when rewriting is needed,
+ /// and destroy it when finished to allow the release of the associated
+ /// memory.
+ class SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
+ ScalarEvolution &SE;
+ const DataLayout &DL;
+
+    // New instructions receive a name to identify them with the current pass.
+ const char* IVName;
+
+ // InsertedExpressions caches Values for reuse, so must track RAUW.
+ std::map<std::pair<const SCEV *, Instruction *>, TrackingVH<Value> >
+ InsertedExpressions;
+ // InsertedValues only flags inserted instructions so needs no RAUW.
+ std::set<AssertingVH<Value> > InsertedValues;
+ std::set<AssertingVH<Value> > InsertedPostIncValues;
+
+ /// A memoization of the "relevant" loop for a given SCEV.
+ DenseMap<const SCEV *, const Loop *> RelevantLoops;
+
+ /// \brief Addrecs referring to any of the given loops are expanded
+ /// in post-inc mode. For example, expanding {1,+,1}<L> in post-inc mode
+ /// returns the add instruction that adds one to the phi for {0,+,1}<L>,
+ /// as opposed to a new phi starting at 1. This is only supported in
+ /// non-canonical mode.
+ PostIncLoopSet PostIncLoops;
+
+ /// \brief When this is non-null, addrecs expanded in the loop it indicates
+ /// should be inserted with increments at IVIncInsertPos.
+ const Loop *IVIncInsertLoop;
+
+ /// \brief When expanding addrecs in the IVIncInsertLoop loop, insert the IV
+ /// increment at this position.
+ Instruction *IVIncInsertPos;
+
+    /// \brief Phis that complete an IV chain. Reuse them when possible.
+ std::set<AssertingVH<PHINode> > ChainedPhis;
+
+ /// \brief When true, expressions are expanded in "canonical" form. In
+ /// particular, addrecs are expanded as arithmetic based on a canonical
+    /// induction variable. When false, expressions are expanded in a more
+ /// literal form.
+ bool CanonicalMode;
+
+ /// \brief When invoked from LSR, the expander is in "strength reduction"
+    /// mode. The only difference is that phis are only reused if they are
+ /// already in "expanded" form.
+ bool LSRMode;
+
+ typedef IRBuilder<true, TargetFolder> BuilderType;
+ BuilderType Builder;
+
+#ifndef NDEBUG
+ const char *DebugType;
+#endif
+
+ friend struct SCEVVisitor<SCEVExpander, Value*>;
+
+ public:
+ /// \brief Construct a SCEVExpander in "canonical" mode.
+ explicit SCEVExpander(ScalarEvolution &se, const DataLayout &DL,
+ const char *name)
+ : SE(se), DL(DL), IVName(name), IVIncInsertLoop(nullptr),
+ IVIncInsertPos(nullptr), CanonicalMode(true), LSRMode(false),
+ Builder(se.getContext(), TargetFolder(DL)) {
+#ifndef NDEBUG
+ DebugType = "";
+#endif
+ }
+
+#ifndef NDEBUG
+ void setDebugType(const char* s) { DebugType = s; }
+#endif
+
+ /// \brief Erase the contents of the InsertedExpressions map so that users
+ /// trying to expand the same expression into multiple BasicBlocks or
+ /// different places within the same BasicBlock can do so.
+ void clear() {
+ InsertedExpressions.clear();
+ InsertedValues.clear();
+ InsertedPostIncValues.clear();
+ ChainedPhis.clear();
+ }
+
+ /// \brief Return true for expressions that may incur non-trivial cost to
+ /// evaluate at runtime.
+ ///
+    /// At is an optional parameter which specifies the point in the code
+    /// where the user is going to expand this expression. Sometimes this
+    /// knowledge can lead to a more accurate cost estimate.
+ bool isHighCostExpansion(const SCEV *Expr, Loop *L,
+ const Instruction *At = nullptr) {
+ SmallPtrSet<const SCEV *, 8> Processed;
+ return isHighCostExpansionHelper(Expr, L, At, Processed);
+ }
+
+ /// \brief This method returns the canonical induction variable of the
+ /// specified type for the specified loop (inserting one if there is none).
+ /// A canonical induction variable starts at zero and steps by one on each
+ /// iteration.
+ PHINode *getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty);
+
+ /// \brief Return the induction variable increment's IV operand.
+ Instruction *getIVIncOperand(Instruction *IncV, Instruction *InsertPos,
+ bool allowScale);
+
+ /// \brief Utility for hoisting an IV increment.
+ bool hoistIVInc(Instruction *IncV, Instruction *InsertPos);
+
+    /// \brief Replace congruent phis with their most canonical
+ /// representative. Return the number of phis eliminated.
+ unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+ SmallVectorImpl<WeakVH> &DeadInsts,
+ const TargetTransformInfo *TTI = nullptr);
+
+ /// \brief Insert code to directly compute the specified SCEV expression
+    /// into the program. The code is inserted into the specified
+ /// block.
+ Value *expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I);
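+    /// A minimal sketch of driving the expander (SE, DL, S and InsertPt are
+    /// hypothetical):
+    /// \code
+    ///   SCEVExpander Exp(SE, DL, "scevexp");
+    ///   Value *V = Exp.expandCodeFor(S, S->getType(), InsertPt);
+    /// \endcode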
+
+ /// \brief Generates a code sequence that evaluates this predicate.
+ /// The inserted instructions will be at position \p Loc.
+ /// The result will be of type i1 and will have a value of 0 when the
+ /// predicate is false and 1 otherwise.
+ Value *expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc);
+
+ /// \brief A specialized variant of expandCodeForPredicate, handling the
+ /// case when we are expanding code for a SCEVEqualPredicate.
+ Value *expandEqualPredicate(const SCEVEqualPredicate *Pred,
+ Instruction *Loc);
+
+ /// \brief A specialized variant of expandCodeForPredicate, handling the
+ /// case when we are expanding code for a SCEVUnionPredicate.
+ Value *expandUnionPredicate(const SCEVUnionPredicate *Pred,
+ Instruction *Loc);
+
+ /// \brief Set the current IV increment loop and position.
+ void setIVIncInsertPos(const Loop *L, Instruction *Pos) {
+ assert(!CanonicalMode &&
+ "IV increment positions are not supported in CanonicalMode");
+ IVIncInsertLoop = L;
+ IVIncInsertPos = Pos;
+ }
+
+ /// \brief Enable post-inc expansion for addrecs referring to the given
+ /// loops. Post-inc expansion is only supported in non-canonical mode.
+ void setPostInc(const PostIncLoopSet &L) {
+ assert(!CanonicalMode &&
+ "Post-inc expansion is not supported in CanonicalMode");
+ PostIncLoops = L;
+ }
+
+ /// \brief Disable all post-inc expansion.
+ void clearPostInc() {
+ PostIncLoops.clear();
+
+ // When we change the post-inc loop set, cached expansions may no
+ // longer be valid.
+ InsertedPostIncValues.clear();
+ }
+
+    /// \brief Disable the behavior of expanding expressions in canonical
+    /// form; expressions will instead be expanded in a more literal form.
+    /// Non-canonical mode is useful for late optimization passes.
+ void disableCanonicalMode() { CanonicalMode = false; }
+
+ void enableLSRMode() { LSRMode = true; }
+
+ /// \brief Clear the current insertion point. This is useful if the
+ /// instruction that had been serving as the insertion point may have been
+ /// deleted.
+ void clearInsertPoint() {
+ Builder.ClearInsertionPoint();
+ }
+
+ /// \brief Return true if the specified instruction was inserted by the code
+ /// rewriter. If so, the client should not modify the instruction.
+ bool isInsertedInstruction(Instruction *I) const {
+ return InsertedValues.count(I) || InsertedPostIncValues.count(I);
+ }
+
+ void setChainedPhi(PHINode *PN) { ChainedPhis.insert(PN); }
+
+    /// \brief Try to find an existing LLVM IR value for S available at the
+    /// point At.
+    ///
+    /// L is a hint which tells in which loop to look for a suitable value.
+    /// On success, return a value equivalent to the expanded S at point At;
+    /// return nullptr if no value was found.
+    ///
+    /// Note that this function does not perform an exhaustive search; i.e.
+    /// if it didn't find any value, that does not mean that there is no
+    /// such value.
+ Value *findExistingExpansion(const SCEV *S, const Instruction *At, Loop *L);
+
+ private:
+ LLVMContext &getContext() const { return SE.getContext(); }
+
+ /// \brief Recursive helper function for isHighCostExpansion.
+ bool isHighCostExpansionHelper(const SCEV *S, Loop *L,
+ const Instruction *At,
+ SmallPtrSetImpl<const SCEV *> &Processed);
+
+ /// \brief Insert the specified binary operator, doing a small amount
+ /// of work to avoid inserting an obviously redundant operation.
+ Value *InsertBinop(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS);
+
+ /// \brief Arrange for there to be a cast of V to Ty at IP, reusing an
+ /// existing cast if a suitable one exists, moving an existing cast if a
+    /// suitable one exists but isn't in the right place, or creating a new
+ /// one.
+ Value *ReuseOrCreateCast(Value *V, Type *Ty,
+ Instruction::CastOps Op,
+ BasicBlock::iterator IP);
+
+ /// \brief Insert a cast of V to the specified type, which must be possible
+ /// with a noop cast, doing what we can to share the casts.
+ Value *InsertNoopCastOfTo(Value *V, Type *Ty);
+
+ /// \brief Expand a SCEVAddExpr with a pointer type into a GEP
+ /// instead of using ptrtoint+arithmetic+inttoptr.
+ Value *expandAddToGEP(const SCEV *const *op_begin,
+ const SCEV *const *op_end,
+ PointerType *PTy, Type *Ty, Value *V);
+
+ Value *expand(const SCEV *S);
+
+ /// \brief Insert code to directly compute the specified SCEV expression
+    /// into the program. The code is inserted at the SCEVExpander's
+ /// current insertion point. If a type is specified, the result will be
+ /// expanded to have that type, with a cast if necessary.
+ Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
+
+ /// \brief Determine the most "relevant" loop for the given SCEV.
+ const Loop *getRelevantLoop(const SCEV *);
+
+ Value *visitConstant(const SCEVConstant *S) {
+ return S->getValue();
+ }
+
+ Value *visitTruncateExpr(const SCEVTruncateExpr *S);
+
+ Value *visitZeroExtendExpr(const SCEVZeroExtendExpr *S);
+
+ Value *visitSignExtendExpr(const SCEVSignExtendExpr *S);
+
+ Value *visitAddExpr(const SCEVAddExpr *S);
+
+ Value *visitMulExpr(const SCEVMulExpr *S);
+
+ Value *visitUDivExpr(const SCEVUDivExpr *S);
+
+ Value *visitAddRecExpr(const SCEVAddRecExpr *S);
+
+ Value *visitSMaxExpr(const SCEVSMaxExpr *S);
+
+ Value *visitUMaxExpr(const SCEVUMaxExpr *S);
+
+ Value *visitUnknown(const SCEVUnknown *S) {
+ return S->getValue();
+ }
+
+ void rememberInstruction(Value *I);
+
+ bool isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+ bool isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV, const Loop *L);
+
+ Value *expandAddRecExprLiterally(const SCEVAddRecExpr *);
+ PHINode *getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
+ const Loop *L,
+ Type *ExpandTy,
+ Type *IntTy,
+ Type *&TruncTy,
+ bool &InvertStep);
+ Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
+ Type *ExpandTy, Type *IntTy, bool useSubtract);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
new file mode 100644
index 0000000..1699268
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -0,0 +1,709 @@
+//===- llvm/Analysis/ScalarEvolutionExpressions.h - SCEV Exprs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to represent and build scalar expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONEXPRESSIONS_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+ class ConstantInt;
+ class ConstantRange;
+ class DominatorTree;
+
+ enum SCEVTypes {
+ // These should be ordered in terms of increasing complexity to make the
+ // folders simpler.
+ scConstant, scTruncate, scZeroExtend, scSignExtend, scAddExpr, scMulExpr,
+ scUDivExpr, scAddRecExpr, scUMaxExpr, scSMaxExpr,
+ scUnknown, scCouldNotCompute
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVConstant - This class represents a constant integer value.
+ ///
+ class SCEVConstant : public SCEV {
+ friend class ScalarEvolution;
+
+ ConstantInt *V;
+ SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
+ SCEV(ID, scConstant), V(v) {}
+ public:
+ ConstantInt *getValue() const { return V; }
+ const APInt &getAPInt() const { return getValue()->getValue(); }
+
+ Type *getType() const { return V->getType(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scConstant;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVCastExpr - This is the base class for unary cast operator classes.
+ ///
+ class SCEVCastExpr : public SCEV {
+ protected:
+ const SCEV *Op;
+ Type *Ty;
+
+ SCEVCastExpr(const FoldingSetNodeIDRef ID,
+ unsigned SCEVTy, const SCEV *op, Type *ty);
+
+ public:
+ const SCEV *getOperand() const { return Op; }
+ Type *getType() const { return Ty; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scTruncate ||
+ S->getSCEVType() == scZeroExtend ||
+ S->getSCEVType() == scSignExtend;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVTruncateExpr - This class represents a truncation of an integer value
+ /// to a smaller integer value.
+ ///
+ class SCEVTruncateExpr : public SCEVCastExpr {
+ friend class ScalarEvolution;
+
+ SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *op, Type *ty);
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scTruncate;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVZeroExtendExpr - This class represents a zero extension of a small
+ /// integer value to a larger integer value.
+ ///
+ class SCEVZeroExtendExpr : public SCEVCastExpr {
+ friend class ScalarEvolution;
+
+ SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *op, Type *ty);
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scZeroExtend;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVSignExtendExpr - This class represents a sign extension of a small
+ /// integer value to a larger integer value.
+ ///
+ class SCEVSignExtendExpr : public SCEVCastExpr {
+ friend class ScalarEvolution;
+
+ SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *op, Type *ty);
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scSignExtend;
+ }
+ };
+
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVNAryExpr - This node is a base class providing common
+ /// functionality for n'ary operators.
+ ///
+ class SCEVNAryExpr : public SCEV {
+ protected:
+ // Since SCEVs are immutable, ScalarEvolution allocates operand
+ // arrays with its SCEVAllocator, so this class just needs a simple
+ // pointer rather than a more elaborate vector-like data structure.
+ // This also avoids the need for a non-trivial destructor.
+ const SCEV *const *Operands;
+ size_t NumOperands;
+
+ SCEVNAryExpr(const FoldingSetNodeIDRef ID,
+ enum SCEVTypes T, const SCEV *const *O, size_t N)
+ : SCEV(ID, T), Operands(O), NumOperands(N) {}
+
+ public:
+ size_t getNumOperands() const { return NumOperands; }
+ const SCEV *getOperand(unsigned i) const {
+ assert(i < NumOperands && "Operand index out of range!");
+ return Operands[i];
+ }
+
+ typedef const SCEV *const *op_iterator;
+ typedef iterator_range<op_iterator> op_range;
+ op_iterator op_begin() const { return Operands; }
+ op_iterator op_end() const { return Operands + NumOperands; }
+ op_range operands() const {
+ return make_range(op_begin(), op_end());
+ }
+
+ Type *getType() const { return getOperand(0)->getType(); }
+
+ NoWrapFlags getNoWrapFlags(NoWrapFlags Mask = NoWrapMask) const {
+ return (NoWrapFlags)(SubclassData & Mask);
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddExpr ||
+ S->getSCEVType() == scMulExpr ||
+ S->getSCEVType() == scSMaxExpr ||
+ S->getSCEVType() == scUMaxExpr ||
+ S->getSCEVType() == scAddRecExpr;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVCommutativeExpr - This node is the base class for n'ary commutative
+ /// operators.
+ ///
+ class SCEVCommutativeExpr : public SCEVNAryExpr {
+ protected:
+ SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
+ enum SCEVTypes T, const SCEV *const *O, size_t N)
+ : SCEVNAryExpr(ID, T, O, N) {}
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddExpr ||
+ S->getSCEVType() == scMulExpr ||
+ S->getSCEVType() == scSMaxExpr ||
+ S->getSCEVType() == scUMaxExpr;
+ }
+
+ /// Set flags for a non-recurrence without clearing previously set flags.
+ void setNoWrapFlags(NoWrapFlags Flags) {
+ SubclassData |= Flags;
+ }
+ };
+
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVAddExpr - This node represents an addition of some number of SCEVs.
+ ///
+ class SCEVAddExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
+
+ SCEVAddExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
+ }
+
+ public:
+ Type *getType() const {
+ // Use the type of the last operand, which is likely to be a pointer
+ // type, if there is one. This doesn't usually matter, but it can help
+ // reduce casts when the expressions are expanded.
+ return getOperand(getNumOperands() - 1)->getType();
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddExpr;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVMulExpr - This node represents multiplication of some number of SCEVs.
+ ///
+ class SCEVMulExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
+
+ SCEVMulExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scMulExpr, O, N) {
+ }
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scMulExpr;
+ }
+ };
+
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVUDivExpr - This class represents a binary unsigned division operation.
+ ///
+ class SCEVUDivExpr : public SCEV {
+ friend class ScalarEvolution;
+
+ const SCEV *LHS;
+ const SCEV *RHS;
+ SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
+ : SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}
+
+ public:
+ const SCEV *getLHS() const { return LHS; }
+ const SCEV *getRHS() const { return RHS; }
+
+ Type *getType() const {
+ // In most cases the types of LHS and RHS will be the same, but in some
+ // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
+ // depend on the type for correctness, but handling types carefully can
+ // avoid extra casts in the SCEVExpander. The LHS is more likely to be
+ // a pointer type than the RHS, so use the RHS' type here.
+ return getRHS()->getType();
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scUDivExpr;
+ }
+ };
+
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVAddRecExpr - This node represents a polynomial recurrence on the trip
+ /// count of the specified loop. This is the primary focus of the
+ /// ScalarEvolution framework; all the other SCEV subclasses are mostly just
+ /// supporting infrastructure to allow SCEVAddRecExpr expressions to be
+ /// created and analyzed.
+ ///
+ /// All operands of an AddRec are required to be loop invariant.
+ ///
+ class SCEVAddRecExpr : public SCEVNAryExpr {
+ friend class ScalarEvolution;
+
+ const Loop *L;
+
+ SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N, const Loop *l)
+ : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {}
+
+ public:
+ const SCEV *getStart() const { return Operands[0]; }
+ const Loop *getLoop() const { return L; }
+
+ /// getStepRecurrence - This method constructs and returns the recurrence
+ /// indicating how much this expression steps by. If this is a polynomial
+ /// of degree N, it returns a chrec of degree N-1.
+ /// We cannot determine whether the step recurrence has self-wraparound.
+ const SCEV *getStepRecurrence(ScalarEvolution &SE) const {
+ if (isAffine()) return getOperand(1);
+ return SE.getAddRecExpr(SmallVector<const SCEV *, 3>(op_begin()+1,
+ op_end()),
+ getLoop(), FlagAnyWrap);
+ }
+
+ /// isAffine - Return true if this represents an expression
+ /// A + B*x where A and B are loop invariant values.
+ bool isAffine() const {
+ // We know that the start value is invariant. This expression is thus
+ // affine iff the step is also invariant.
+ return getNumOperands() == 2;
+ }
+
+ /// isQuadratic - Return true if this represents an expression
+ /// A + B*x + C*x^2 where A, B and C are loop invariant values.
+ /// This corresponds to an addrec of the form {L,+,M,+,N}
+ bool isQuadratic() const {
+ return getNumOperands() == 3;
+ }
+
+ /// Set flags for a recurrence without clearing any previously set flags.
+ /// For AddRec, either NUW or NSW implies NW. Keep track of this fact here
+ /// to make it easier to propagate flags.
+ void setNoWrapFlags(NoWrapFlags Flags) {
+ if (Flags & (FlagNUW | FlagNSW))
+ Flags = ScalarEvolution::setFlags(Flags, FlagNW);
+ SubclassData |= Flags;
+ }
+
+ /// evaluateAtIteration - Return the value of this chain of recurrences at
+ /// the specified iteration number.
+ const SCEV *evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const;
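+    /// For example, evaluating the affine addrec {0,+,4} at iteration It
+    /// yields the SCEV for 4 * It (a sketch; It must have the addrec's
+    /// type):
+    /// \code
+    ///   const SCEV *ValueAtIt = AR->evaluateAtIteration(It, SE);
+    /// \endcode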
+
+ /// getNumIterationsInRange - Return the number of iterations of this loop
+ /// that produce values in the specified constant range. Another way of
+ /// looking at this is that it returns the first iteration number where the
+    /// value is not in the range, thus computing the exit count. If the
+ /// iteration count can't be computed, an instance of SCEVCouldNotCompute is
+ /// returned.
+ const SCEV *getNumIterationsInRange(ConstantRange Range,
+ ScalarEvolution &SE) const;
+
+ /// getPostIncExpr - Return an expression representing the value of
+ /// this expression one iteration of the loop ahead.
+ const SCEVAddRecExpr *getPostIncExpr(ScalarEvolution &SE) const {
+ return cast<SCEVAddRecExpr>(SE.getAddExpr(this, getStepRecurrence(SE)));
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scAddRecExpr;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVSMaxExpr - This class represents a signed maximum selection.
+ ///
+ class SCEVSMaxExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
+
+ SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
+ // Max never overflows.
+ setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+ }
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scSMaxExpr;
+ }
+ };
+
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVUMaxExpr - This class represents an unsigned maximum selection.
+ ///
+ class SCEVUMaxExpr : public SCEVCommutativeExpr {
+ friend class ScalarEvolution;
+
+ SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
+ const SCEV *const *O, size_t N)
+ : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
+ // Max never overflows.
+ setNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW));
+ }
+
+ public:
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scUMaxExpr;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SCEVUnknown - This means that we are dealing with an entirely unknown SCEV
+ /// value, and only represent it as its LLVM Value. This is the "bottom"
+ /// value for the analysis.
+ ///
+ class SCEVUnknown final : public SCEV, private CallbackVH {
+ friend class ScalarEvolution;
+
+ // Implement CallbackVH.
+ void deleted() override;
+ void allUsesReplacedWith(Value *New) override;
+
+ /// SE - The parent ScalarEvolution value. This is used to update
+ /// the parent's maps when the value associated with a SCEVUnknown
+ /// is deleted or RAUW'd.
+ ScalarEvolution *SE;
+
+ /// Next - The next pointer in the linked list of all
+ /// SCEVUnknown instances owned by a ScalarEvolution.
+ SCEVUnknown *Next;
+
+ SCEVUnknown(const FoldingSetNodeIDRef ID, Value *V,
+ ScalarEvolution *se, SCEVUnknown *next) :
+ SCEV(ID, scUnknown), CallbackVH(V), SE(se), Next(next) {}
+
+ public:
+ Value *getValue() const { return getValPtr(); }
+
+ /// isSizeOf, isAlignOf, isOffsetOf - Test whether this is a special
+ /// constant representing a type size, alignment, or field offset in
+ /// a target-independent manner, and hasn't happened to have been
+    /// folded with other operations into something unrecognizable. This
+    /// is mainly useful for pretty-printing and other situations where it
+    /// isn't absolutely required for these queries to succeed.
+ bool isSizeOf(Type *&AllocTy) const;
+ bool isAlignOf(Type *&AllocTy) const;
+ bool isOffsetOf(Type *&STy, Constant *&FieldNo) const;
+
+ Type *getType() const { return getValPtr()->getType(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const SCEV *S) {
+ return S->getSCEVType() == scUnknown;
+ }
+ };
+
+ /// SCEVVisitor - This class defines a simple visitor class that may be used
+ /// for various SCEV analysis purposes.
+ template<typename SC, typename RetVal=void>
+ struct SCEVVisitor {
+ RetVal visit(const SCEV *S) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return ((SC*)this)->visitConstant((const SCEVConstant*)S);
+ case scTruncate:
+ return ((SC*)this)->visitTruncateExpr((const SCEVTruncateExpr*)S);
+ case scZeroExtend:
+ return ((SC*)this)->visitZeroExtendExpr((const SCEVZeroExtendExpr*)S);
+ case scSignExtend:
+ return ((SC*)this)->visitSignExtendExpr((const SCEVSignExtendExpr*)S);
+ case scAddExpr:
+ return ((SC*)this)->visitAddExpr((const SCEVAddExpr*)S);
+ case scMulExpr:
+ return ((SC*)this)->visitMulExpr((const SCEVMulExpr*)S);
+ case scUDivExpr:
+ return ((SC*)this)->visitUDivExpr((const SCEVUDivExpr*)S);
+ case scAddRecExpr:
+ return ((SC*)this)->visitAddRecExpr((const SCEVAddRecExpr*)S);
+ case scSMaxExpr:
+ return ((SC*)this)->visitSMaxExpr((const SCEVSMaxExpr*)S);
+ case scUMaxExpr:
+ return ((SC*)this)->visitUMaxExpr((const SCEVUMaxExpr*)S);
+ case scUnknown:
+ return ((SC*)this)->visitUnknown((const SCEVUnknown*)S);
+ case scCouldNotCompute:
+ return ((SC*)this)->visitCouldNotCompute((const SCEVCouldNotCompute*)S);
+ default:
+ llvm_unreachable("Unknown SCEV type!");
+ }
+ }
+
+ RetVal visitCouldNotCompute(const SCEVCouldNotCompute *S) {
+ llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
+ }
+ };
+
+ /// Visit all nodes in the expression tree using worklist traversal.
+ ///
+ /// Visitor implements:
+ /// // return true to follow this node.
+ /// bool follow(const SCEV *S);
+ /// // return true to terminate the search.
+ /// bool isDone();
+ template<typename SV>
+ class SCEVTraversal {
+ SV &Visitor;
+ SmallVector<const SCEV *, 8> Worklist;
+ SmallPtrSet<const SCEV *, 8> Visited;
+
+ void push(const SCEV *S) {
+ if (Visited.insert(S).second && Visitor.follow(S))
+ Worklist.push_back(S);
+ }
+ public:
+ SCEVTraversal(SV& V): Visitor(V) {}
+
+ void visitAll(const SCEV *Root) {
+ push(Root);
+ while (!Worklist.empty() && !Visitor.isDone()) {
+ const SCEV *S = Worklist.pop_back_val();
+
+ switch (S->getSCEVType()) {
+ case scConstant:
+ case scUnknown:
+ break;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ push(cast<SCEVCastExpr>(S)->getOperand());
+ break;
+ case scAddExpr:
+ case scMulExpr:
+ case scSMaxExpr:
+ case scUMaxExpr:
+ case scAddRecExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(),
+ E = NAry->op_end(); I != E; ++I) {
+ push(*I);
+ }
+ break;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ push(UDiv->getLHS());
+ push(UDiv->getRHS());
+ break;
+ }
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ default:
+ llvm_unreachable("Unknown SCEV kind!");
+ }
+ }
+ }
+ };
+
+ /// Use SCEVTraversal to visit all nodes in the given expression tree.
+ template<typename SV>
+ void visitAll(const SCEV *Root, SV& Visitor) {
+ SCEVTraversal<SV> T(Visitor);
+ T.visitAll(Root);
+ }
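+  /// For example, a sketch of a search visitor that stops as soon as it
+  /// sees a SCEVUnknown node anywhere in the tree:
+  /// \code
+  ///   struct FindUnknown {
+  ///     bool Found = false;
+  ///     bool follow(const SCEV *S) {
+  ///       if (isa<SCEVUnknown>(S))
+  ///         Found = true;
+  ///       return !Found; // Do not descend once one is found.
+  ///     }
+  ///     bool isDone() { return Found; }
+  ///   };
+  ///   FindUnknown FU;
+  ///   visitAll(Expr, FU);
+  /// \endcode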
+
+ /// Recursively visits a SCEV expression and re-writes it.
+ template<typename SC>
+ class SCEVRewriteVisitor : public SCEVVisitor<SC, const SCEV *> {
+ protected:
+ ScalarEvolution &SE;
+ public:
+ SCEVRewriteVisitor(ScalarEvolution &SE) : SE(SE) {}
+
+ const SCEV *visitConstant(const SCEVConstant *Constant) {
+ return Constant;
+ }
+
+ const SCEV *visitTruncateExpr(const SCEVTruncateExpr *Expr) {
+ const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+ return SE.getTruncateExpr(Operand, Expr->getType());
+ }
+
+ const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
+ const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+ return SE.getZeroExtendExpr(Operand, Expr->getType());
+ }
+
+ const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
+ const SCEV *Operand = ((SC*)this)->visit(Expr->getOperand());
+ return SE.getSignExtendExpr(Operand, Expr->getType());
+ }
+
+ const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(((SC*)this)->visit(Expr->getOperand(i)));
+ return SE.getAddExpr(Operands);
+ }
+
+ const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(((SC*)this)->visit(Expr->getOperand(i)));
+ return SE.getMulExpr(Operands);
+ }
+
+ const SCEV *visitUDivExpr(const SCEVUDivExpr *Expr) {
+ return SE.getUDivExpr(((SC*)this)->visit(Expr->getLHS()),
+ ((SC*)this)->visit(Expr->getRHS()));
+ }
+
+ const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(((SC*)this)->visit(Expr->getOperand(i)));
+ return SE.getAddRecExpr(Operands, Expr->getLoop(),
+ Expr->getNoWrapFlags());
+ }
+
+ const SCEV *visitSMaxExpr(const SCEVSMaxExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(((SC*)this)->visit(Expr->getOperand(i)));
+ return SE.getSMaxExpr(Operands);
+ }
+
+ const SCEV *visitUMaxExpr(const SCEVUMaxExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(((SC*)this)->visit(Expr->getOperand(i)));
+ return SE.getUMaxExpr(Operands);
+ }
+
+ const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ return Expr;
+ }
+
+ const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
+ return Expr;
+ }
+ };
+
+ typedef DenseMap<const Value*, Value*> ValueToValueMap;
+
+/// The SCEVParameterRewriter takes a scalar evolution expression and rewrites
+/// its SCEVUnknown components according to the Map (Value -> Value).
+ class SCEVParameterRewriter : public SCEVRewriteVisitor<SCEVParameterRewriter> {
+ public:
+ static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE,
+ ValueToValueMap &Map,
+ bool InterpretConsts = false) {
+ SCEVParameterRewriter Rewriter(SE, Map, InterpretConsts);
+ return Rewriter.visit(Scev);
+ }
+
+ SCEVParameterRewriter(ScalarEvolution &SE, ValueToValueMap &M, bool C)
+ : SCEVRewriteVisitor(SE), Map(M), InterpretConsts(C) {}
+
+ const SCEV *visitUnknown(const SCEVUnknown *Expr) {
+ Value *V = Expr->getValue();
+ if (Map.count(V)) {
+ Value *NV = Map[V];
+ if (InterpretConsts && isa<ConstantInt>(NV))
+ return SE.getConstant(cast<ConstantInt>(NV));
+ return SE.getUnknown(NV);
+ }
+ return Expr;
+ }
+
+ private:
+ ValueToValueMap &Map;
+ bool InterpretConsts;
+ };
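
A hedged usage sketch (call-site names are illustrative): to substitute one
parameter for another inside an expression, populate the map and invoke the
static rewrite entry point.

  // OldParam/NewParam are llvm::Value*s chosen by the client; SE is the
  // ScalarEvolution analysis for the enclosing function.
  ValueToValueMap Map;
  Map[OldParam] = NewParam;
  const SCEV *Rewritten = SCEVParameterRewriter::rewrite(Expr, SE, Map);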
+
+ typedef DenseMap<const Loop*, const SCEV*> LoopToScevMapT;
+
+ /// The SCEVLoopAddRecRewriter takes a scalar evolution expression and applies
+ /// the Map (Loop -> SCEV) to all AddRecExprs.
+ class SCEVLoopAddRecRewriter
+ : public SCEVRewriteVisitor<SCEVLoopAddRecRewriter> {
+ public:
+ static const SCEV *rewrite(const SCEV *Scev, LoopToScevMapT &Map,
+ ScalarEvolution &SE) {
+ SCEVLoopAddRecRewriter Rewriter(SE, Map);
+ return Rewriter.visit(Scev);
+ }
+
+ SCEVLoopAddRecRewriter(ScalarEvolution &SE, LoopToScevMapT &M)
+ : SCEVRewriteVisitor(SE), Map(M) {}
+
+ const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
+ SmallVector<const SCEV *, 2> Operands;
+ for (int i = 0, e = Expr->getNumOperands(); i < e; ++i)
+ Operands.push_back(visit(Expr->getOperand(i)));
+
+ const Loop *L = Expr->getLoop();
+ const SCEV *Res = SE.getAddRecExpr(Operands, L, Expr->getNoWrapFlags());
+
+    if (!Map.count(L))
+ return Res;
+
+ const SCEVAddRecExpr *Rec = cast<SCEVAddRecExpr>(Res);
+ return Rec->evaluateAtIteration(Map[L], SE);
+ }
+
+ private:
+ LoopToScevMapT &Map;
+ };
+
+/// Applies the Map (Loop -> SCEV) to the given Scev.
+static inline const SCEV *apply(const SCEV *Scev, LoopToScevMapT &Map,
+ ScalarEvolution &SE) {
+ return SCEVLoopAddRecRewriter::rewrite(Scev, Map, SE);
+}
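
For example (hypothetical client code), evaluating every add recurrence over
loop L at its backedge-taken count folds those recurrences away:

  LoopToScevMapT LMap;
  LMap[L] = SE.getBackedgeTakenCount(L); // iteration to evaluate at
  const SCEV *AtExit = apply(Expr, LMap, SE);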
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
new file mode 100644
index 0000000..7c6423a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScalarEvolutionNormalization.h
@@ -0,0 +1,78 @@
+//===- llvm/Analysis/ScalarEvolutionNormalization.h - See below -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for working with "normalized" ScalarEvolution
+// expressions.
+//
+// The following example illustrates post-increment uses and how normalized
+// expressions help.
+//
+// for (i=0; i!=n; ++i) {
+// ...
+// }
+// use(i);
+//
+// While the expression for most uses of i inside the loop is {0,+,1}<%L>, the
+// expression for the use of i outside the loop is {1,+,1}<%L>, since i is
+// incremented at the end of the loop body. This is inconvenient, since it
+// suggests that we need two different induction variables, one that starts
+// at 0 and one that starts at 1. We'd prefer to be able to think of these as
+// the same induction variable, with uses inside the loop using the
+// "pre-incremented" value, and uses after the loop using the
+// "post-incremented" value.
+//
+// Expressions for post-incremented uses are represented as an expression
+// paired with a set of loops for which the expression is in "post-increment"
+// mode (there may be multiple loops).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
+#define LLVM_ANALYSIS_SCALAREVOLUTIONNORMALIZATION_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+
+namespace llvm {
+
+class Instruction;
+class DominatorTree;
+class Loop;
+class ScalarEvolution;
+class SCEV;
+class Value;
+
+/// TransformKind - Different types of transformations that
+/// TransformForPostIncUse can do.
+enum TransformKind {
+ /// Normalize - Normalize according to the given loops.
+ Normalize,
+ /// NormalizeAutodetect - Detect post-inc opportunities on new expressions,
+ /// update the given loop set, and normalize.
+ NormalizeAutodetect,
+ /// Denormalize - Perform the inverse transform on the expression with the
+ /// given loop set.
+ Denormalize
+};
+
+/// PostIncLoopSet - A set of loops.
+typedef SmallPtrSet<const Loop *, 2> PostIncLoopSet;
+
+/// TransformForPostIncUse - Transform the given expression according to the
+/// given transformation kind.
+const SCEV *TransformForPostIncUse(TransformKind Kind,
+ const SCEV *S,
+ Instruction *User,
+ Value *OperandValToReplace,
+ PostIncLoopSet &Loops,
+ ScalarEvolution &SE,
+ DominatorTree &DT);
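
A round-trip sketch (hypothetical call site): normalization with autodetection
fills in the loop set, and Denormalize with the same set is its inverse.

  PostIncLoopSet Loops;
  const SCEV *N = TransformForPostIncUse(NormalizeAutodetect, S, User,
                                         OperandValToReplace, Loops, SE, DT);
  const SCEV *D = TransformForPostIncUse(Denormalize, N, User,
                                         OperandValToReplace, Loops, SE, DT);
  // D should equal S again.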
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h b/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
new file mode 100644
index 0000000..1755616
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ScopedNoAliasAA.h
@@ -0,0 +1,92 @@
+//===- ScopedNoAliasAA.h - Scoped No-Alias Alias Analysis -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a metadata-based scoped no-alias analysis.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SCOPEDNOALIASAA_H
+#define LLVM_ANALYSIS_SCOPEDNOALIASAA_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// A simple AA result which uses scoped-noalias metadata to answer queries.
+class ScopedNoAliasAAResult : public AAResultBase<ScopedNoAliasAAResult> {
+ friend AAResultBase<ScopedNoAliasAAResult>;
+
+public:
+ explicit ScopedNoAliasAAResult(const TargetLibraryInfo &TLI)
+ : AAResultBase(TLI) {}
+ ScopedNoAliasAAResult(ScopedNoAliasAAResult &&Arg)
+ : AAResultBase(std::move(Arg)) {}
+
+ /// Handle invalidation events from the new pass manager.
+ ///
+ /// By definition, this result is stateless and so remains valid.
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+private:
+ bool mayAliasInScopes(const MDNode *Scopes, const MDNode *NoAlias) const;
+ void collectMDInDomain(const MDNode *List, const MDNode *Domain,
+ SmallPtrSetImpl<const MDNode *> &Nodes) const;
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class ScopedNoAliasAA {
+public:
+ typedef ScopedNoAliasAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ ScopedNoAliasAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "ScopedNoAliasAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the ScopedNoAliasAAResult object.
+class ScopedNoAliasAAWrapperPass : public ImmutablePass {
+ std::unique_ptr<ScopedNoAliasAAResult> Result;
+
+public:
+ static char ID;
+
+ ScopedNoAliasAAWrapperPass();
+
+ ScopedNoAliasAAResult &getResult() { return *Result; }
+ const ScopedNoAliasAAResult &getResult() const { return *Result; }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createScopedNoAliasAAWrapperPass - This pass implements metadata-based
+// scoped noalias analysis.
+//
+ImmutablePass *createScopedNoAliasAAWrapperPass();
+}
+
+#endif
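
A minimal sketch of wiring the legacy pass into a pipeline (hypothetical;
assumes llvm/IR/LegacyPassManager.h and an llvm::Module M):

  legacy::PassManager PM;
  PM.add(createScopedNoAliasAAWrapperPass());
  PM.run(M);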
diff --git a/contrib/llvm/include/llvm/Analysis/SparsePropagation.h b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
new file mode 100644
index 0000000..2c7f5dd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/SparsePropagation.h
@@ -0,0 +1,201 @@
+//===- SparsePropagation.h - Sparse Conditional Property Propagation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an abstract sparse conditional propagation algorithm,
+// modeled after SCCP, but with a customizable lattice function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_SPARSEPROPAGATION_H
+#define LLVM_ANALYSIS_SPARSEPROPAGATION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include <set>
+#include <vector>
+
+namespace llvm {
+class Value;
+class Constant;
+class Argument;
+class Instruction;
+class PHINode;
+class TerminatorInst;
+class BasicBlock;
+class Function;
+class SparseSolver;
+class raw_ostream;
+
+template <typename T> class SmallVectorImpl;
+
+/// AbstractLatticeFunction - This class is implemented by the dataflow instance
+/// to specify what the lattice values are and how they handle merges etc.
+/// This gives the client the power to compute lattice values from instructions,
+/// constants, etc. The requirement is that lattice values must all fit into
+/// a void*. If a void* is not sufficient, the implementation should make the
+/// lattice value a pointer into a uniquing set or similar side table.
+///
+class AbstractLatticeFunction {
+public:
+ typedef void *LatticeVal;
+
+private:
+ LatticeVal UndefVal, OverdefinedVal, UntrackedVal;
+
+public:
+ AbstractLatticeFunction(LatticeVal undefVal, LatticeVal overdefinedVal,
+ LatticeVal untrackedVal) {
+ UndefVal = undefVal;
+ OverdefinedVal = overdefinedVal;
+ UntrackedVal = untrackedVal;
+ }
+ virtual ~AbstractLatticeFunction();
+
+ LatticeVal getUndefVal() const { return UndefVal; }
+ LatticeVal getOverdefinedVal() const { return OverdefinedVal; }
+ LatticeVal getUntrackedVal() const { return UntrackedVal; }
+
+ /// IsUntrackedValue - If the specified Value is something that is obviously
+ /// uninteresting to the analysis (and would always return UntrackedVal),
+ /// this function can return true to avoid pointless work.
+ virtual bool IsUntrackedValue(Value *V) { return false; }
+
+ /// ComputeConstant - Given a constant value, compute and return a lattice
+ /// value corresponding to the specified constant.
+ virtual LatticeVal ComputeConstant(Constant *C) {
+ return getOverdefinedVal(); // always safe
+ }
+
+ /// IsSpecialCasedPHI - Given a PHI node, determine whether this PHI node is
+  /// one that we want to handle through ComputeInstructionState.
+ virtual bool IsSpecialCasedPHI(PHINode *PN) { return false; }
+
+ /// GetConstant - If the specified lattice value is representable as an LLVM
+ /// constant value, return it. Otherwise return null. The returned value
+ /// must be in the same LLVM type as Val.
+ virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) {
+ return nullptr;
+ }
+
+ /// ComputeArgument - Given a formal argument value, compute and return a
+ /// lattice value corresponding to the specified argument.
+ virtual LatticeVal ComputeArgument(Argument *I) {
+ return getOverdefinedVal(); // always safe
+ }
+
+ /// MergeValues - Compute and return the merge of the two specified lattice
+ /// values. Merging should only move one direction down the lattice to
+ /// guarantee convergence (toward overdefined).
+ virtual LatticeVal MergeValues(LatticeVal X, LatticeVal Y) {
+ return getOverdefinedVal(); // always safe, never useful.
+ }
+
+  /// ComputeInstructionState - Compute the lattice value produced by the
+  /// given instruction, querying the states of its operands via the solver.
+ virtual LatticeVal ComputeInstructionState(Instruction &I, SparseSolver &SS) {
+ return getOverdefinedVal(); // always safe, never useful.
+ }
+
+ /// PrintValue - Render the specified lattice value to the specified stream.
+ virtual void PrintValue(LatticeVal V, raw_ostream &OS);
+};
+
+/// SparseSolver - This class is a general purpose solver for Sparse Conditional
+/// Propagation with a programmable lattice function.
+///
+class SparseSolver {
+ typedef AbstractLatticeFunction::LatticeVal LatticeVal;
+
+  /// LatticeFunc - This is the object that knows the lattice and how to
+  /// compute transfer functions.
+ AbstractLatticeFunction *LatticeFunc;
+
+ DenseMap<Value *, LatticeVal> ValueState; // The state each value is in.
+ SmallPtrSet<BasicBlock *, 16> BBExecutable; // The bbs that are executable.
+
+ std::vector<Instruction *> InstWorkList; // Worklist of insts to process.
+
+ std::vector<BasicBlock *> BBWorkList; // The BasicBlock work list
+
+ /// KnownFeasibleEdges - Entries in this set are edges which have already had
+ /// PHI nodes retriggered.
+ typedef std::pair<BasicBlock*,BasicBlock*> Edge;
+ std::set<Edge> KnownFeasibleEdges;
+
+ SparseSolver(const SparseSolver&) = delete;
+ void operator=(const SparseSolver&) = delete;
+
+public:
+ explicit SparseSolver(AbstractLatticeFunction *Lattice)
+ : LatticeFunc(Lattice) {}
+ ~SparseSolver() { delete LatticeFunc; }
+
+ /// Solve - Solve for constants and executable blocks.
+ ///
+ void Solve(Function &F);
+
+ void Print(Function &F, raw_ostream &OS) const;
+
+ /// getLatticeState - Return the LatticeVal object that corresponds to the
+  /// value. If a value is not in the map, it is returned as untracked,
+ /// unlike the getOrInitValueState method.
+ LatticeVal getLatticeState(Value *V) const {
+ DenseMap<Value*, LatticeVal>::const_iterator I = ValueState.find(V);
+ return I != ValueState.end() ? I->second : LatticeFunc->getUntrackedVal();
+ }
+
+ /// getOrInitValueState - Return the LatticeVal object that corresponds to the
+ /// value, initializing the value's state if it hasn't been entered into the
+ /// map yet. This function is necessary because not all values should start
+ /// out in the underdefined state... Arguments should be overdefined, and
+ /// constants should be marked as constants.
+ ///
+ LatticeVal getOrInitValueState(Value *V);
+
+ /// isEdgeFeasible - Return true if the control flow edge from the 'From'
+ /// basic block to the 'To' basic block is currently feasible. If
+ /// AggressiveUndef is true, then this treats values with unknown lattice
+ /// values as undefined. This is generally only useful when solving the
+ /// lattice, not when querying it.
+ bool isEdgeFeasible(BasicBlock *From, BasicBlock *To,
+ bool AggressiveUndef = false);
+
+ /// isBlockExecutable - Return true if there are any known feasible
+ /// edges into the basic block. This is generally only useful when
+ /// querying the lattice.
+ bool isBlockExecutable(BasicBlock *BB) const {
+ return BBExecutable.count(BB);
+ }
+
+private:
+ /// UpdateState - When the state for some instruction is potentially updated,
+ /// this function notices and adds I to the worklist if needed.
+ void UpdateState(Instruction &Inst, LatticeVal V);
+
+ /// MarkBlockExecutable - This method can be used by clients to mark all of
+ /// the blocks that are known to be intrinsically live in the processed unit.
+ void MarkBlockExecutable(BasicBlock *BB);
+
+ /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
+ /// work list if it is not already executable.
+ void markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
+
+ /// getFeasibleSuccessors - Return a vector of booleans to indicate which
+ /// successors are reachable from a given terminator instruction.
+ void getFeasibleSuccessors(TerminatorInst &TI, SmallVectorImpl<bool> &Succs,
+ bool AggressiveUndef);
+
+ void visitInst(Instruction &I);
+ void visitPHINode(PHINode &I);
+ void visitTerminatorInst(TerminatorInst &TI);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_SPARSEPROPAGATION_H
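
A minimal sketch of a client lattice (hypothetical, and deliberately trivial):
it supplies only the three sentinel values, so every tracked value merges
straight to overdefined, which is always safe.

  struct TrivialLattice : public AbstractLatticeFunction {
    // LatticeVal is void*; distinct non-null pointers act as sentinels.
    TrivialLattice()
        : AbstractLatticeFunction((LatticeVal)1, (LatticeVal)2,
                                  (LatticeVal)3) {}
    void PrintValue(LatticeVal V, raw_ostream &OS) override {
      OS << (V == getUndefVal() ? "undef"
             : V == getOverdefinedVal() ? "overdefined" : "untracked");
    }
  };

  // The solver takes ownership of the lattice and deletes it:
  SparseSolver Solver(new TrivialLattice());
  Solver.Solve(F); // F is the llvm::Function under analysis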
diff --git a/contrib/llvm/include/llvm/Analysis/TargetFolder.h b/contrib/llvm/include/llvm/Analysis/TargetFolder.h
new file mode 100644
index 0000000..12bf9fe
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TargetFolder.h
@@ -0,0 +1,270 @@
+//====- TargetFolder.h - Constant folding helper ---------------*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TargetFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants with
+// target dependent folding, in addition to the same target-independent
+// folding that the ConstantFolder class provides. For general constant
+// creation and folding, use ConstantExpr and the routines in
+// llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETFOLDER_H
+#define LLVM_ANALYSIS_TARGETFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+class DataLayout;
+
+/// TargetFolder - Create constants with target dependent folding.
+class TargetFolder {
+ const DataLayout &DL;
+
+ /// Fold - Fold the constant using target specific information.
+ Constant *Fold(Constant *C) const {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ if (Constant *CF = ConstantFoldConstantExpression(CE, DL))
+ return CF;
+ return C;
+ }
+
+public:
+ explicit TargetFolder(const DataLayout &DL) : DL(DL) {}
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW));
+ }
+ Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getFAdd(LHS, RHS));
+ }
+ Constant *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW));
+ }
+ Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getFSub(LHS, RHS));
+ }
+ Constant *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW));
+ }
+ Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getFMul(LHS, RHS));
+ }
+ Constant *CreateUDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getUDiv(LHS, RHS, isExact));
+ }
+ Constant *CreateSDiv(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getSDiv(LHS, RHS, isExact));
+ }
+ Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getFDiv(LHS, RHS));
+ }
+ Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getURem(LHS, RHS));
+ }
+ Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getSRem(LHS, RHS));
+ }
+ Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getFRem(LHS, RHS));
+ }
+ Constant *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW));
+ }
+ Constant *CreateLShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getLShr(LHS, RHS, isExact));
+ }
+ Constant *CreateAShr(Constant *LHS, Constant *RHS, bool isExact = false)const{
+ return Fold(ConstantExpr::getAShr(LHS, RHS, isExact));
+ }
+ Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getAnd(LHS, RHS));
+ }
+ Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getOr(LHS, RHS));
+ }
+ Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::getXor(LHS, RHS));
+ }
+
+ Constant *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
+ return Fold(ConstantExpr::get(Opc, LHS, RHS));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return Fold(ConstantExpr::getNeg(C, HasNUW, HasNSW));
+ }
+ Constant *CreateFNeg(Constant *C) const {
+ return Fold(ConstantExpr::getFNeg(C));
+ }
+ Constant *CreateNot(Constant *C) const {
+ return Fold(ConstantExpr::getNot(C));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
+ }
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return Fold(ConstantExpr::getGetElementPtr(Ty, C, Idx));
+ }
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return Fold(ConstantExpr::getGetElementPtr(Ty, C, IdxList));
+ }
+
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
+ }
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx));
+ }
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return Fold(ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getCast(Op, C, DestTy));
+ }
+ Constant *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned));
+ }
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getPointerCast(C, DestTy));
+ }
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getFPCast(C, DestTy));
+ }
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getZExtOrBitCast(C, DestTy));
+ }
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getSExtOrBitCast(C, DestTy));
+ }
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getTruncOrBitCast(C, DestTy));
+ }
+
+ Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+ Type *DestTy) const {
+ if (C->getType() == DestTy)
+ return C; // avoid calling Fold
+ return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return Fold(ConstantExpr::getCompare(P, LHS, RHS));
+ }
+ Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return Fold(ConstantExpr::getCompare(P, LHS, RHS));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+ return Fold(ConstantExpr::getSelect(C, True, False));
+ }
+
+ Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ return Fold(ConstantExpr::getExtractElement(Vec, Idx));
+ }
+
+ Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
+ return Fold(ConstantExpr::getInsertElement(Vec, NewElt, Idx));
+ }
+
+ Constant *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
+ return Fold(ConstantExpr::getShuffleVector(V1, V2, Mask));
+ }
+
+ Constant *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const {
+ return Fold(ConstantExpr::getExtractValue(Agg, IdxList));
+ }
+
+ Constant *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const {
+ return Fold(ConstantExpr::getInsertValue(Agg, Val, IdxList));
+ }
+};
+
+}
+
+#endif
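
A hedged usage sketch: TargetFolder is typically handed to IRBuilder as its
constant-folding policy, but it can also be used directly (M, Ctx, and the
constants below are hypothetical client names).

  TargetFolder Folder(M.getDataLayout());
  Type *I32Ty = Type::getInt32Ty(Ctx);
  Constant *Prod = Folder.CreateMul(ConstantInt::get(I32Ty, 6),
                                    ConstantInt::get(I32Ty, 7)); // i32 42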
diff --git a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
new file mode 100644
index 0000000..7798e3c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.def
@@ -0,0 +1,1119 @@
+//===-- TargetLibraryInfo.def - Library information -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// This .def file will either fill in the enum definition or fill in the
+// string representation array definition for TargetLibraryInfo.
+// Which is defined depends on whether TLI_DEFINE_ENUM is defined or
+// TLI_DEFINE_STRING is defined. Only one should be defined at a time.
+
+#if !(defined(TLI_DEFINE_ENUM) || defined(TLI_DEFINE_STRING))
+#error "Must define TLI_DEFINE_ENUM or TLI_DEFINE_STRING for TLI .def."
+#elif defined(TLI_DEFINE_ENUM) && defined(TLI_DEFINE_STRING)
+#error "Can only define one of TLI_DEFINE_ENUM or TLI_DEFINE_STRING at a time."
+#else
+// One of TLI_DEFINE_ENUM/STRING is defined.
+
+#if defined(TLI_DEFINE_ENUM)
+#define TLI_DEFINE_ENUM_INTERNAL(enum_variant) enum_variant,
+#define TLI_DEFINE_STRING_INTERNAL(string_repr)
+#else
+#define TLI_DEFINE_ENUM_INTERNAL(enum_variant)
+#define TLI_DEFINE_STRING_INTERNAL(string_repr) string_repr,
+#endif
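
Clients expand this file once per representation; a sketch of the standard
X-macro pattern (mirroring how TargetLibraryInfo consumes it):

  // Enum side, e.g. in a header:
  namespace LibFunc {
    enum Func {
  #define TLI_DEFINE_ENUM
  #include "llvm/Analysis/TargetLibraryInfo.def"
      NumLibFuncs
    };
  }

  // String side, e.g. in the implementation file:
  static const char *const StandardNames[LibFunc::NumLibFuncs] = {
  #define TLI_DEFINE_STRING
  #include "llvm/Analysis/TargetLibraryInfo.def"
  };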
+
+/// void *new(unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_int)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPAXI@Z")
+
+/// void *new(unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_int_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPAXIABUnothrow_t@std@@@Z")
+
+/// void *new(unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_K@Z")
+
+/// void *new(unsigned long long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_longlong_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??2@YAPEAX_KAEBUnothrow_t@std@@@Z")
+
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAX@Z")
+
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXABUnothrow_t@std@@@Z")
+
+/// void operator delete(void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr32_int)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPAXI@Z")
+
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX@Z")
+
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAXAEBUnothrow_t@std@@@Z")
+
+/// void operator delete(void*, unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_ptr64_longlong)
+TLI_DEFINE_STRING_INTERNAL("??3@YAXPEAX_K@Z")
+
+/// void *new[](unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXI@Z")
+
+/// void *new[](unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_int_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPAXIABUnothrow_t@std@@@Z")
+
+/// void *new[](unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_K@Z")
+
+/// void *new[](unsigned long long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_new_array_longlong_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_U@YAPEAX_KAEBUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAX@Z")
+
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXABUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr32_int)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPAXI@Z")
+
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX@Z")
+
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_nothrow)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAXAEBUnothrow_t@std@@@Z")
+
+/// void operator delete[](void*, unsigned long long);
+TLI_DEFINE_ENUM_INTERNAL(msvc_delete_array_ptr64_longlong)
+TLI_DEFINE_STRING_INTERNAL("??_V@YAXPEAX_K@Z")
+
+/// int _IO_getc(_IO_FILE * __fp);
+TLI_DEFINE_ENUM_INTERNAL(under_IO_getc)
+TLI_DEFINE_STRING_INTERNAL("_IO_getc")
+/// int _IO_putc(int __c, _IO_FILE * __fp);
+TLI_DEFINE_ENUM_INTERNAL(under_IO_putc)
+TLI_DEFINE_STRING_INTERNAL("_IO_putc")
+/// void operator delete[](void*);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPv)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPv")
+/// void operator delete[](void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvRKSt9nothrow_t")
+/// void operator delete[](void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvj)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvj")
+/// void operator delete[](void*, unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(ZdaPvm)
+TLI_DEFINE_STRING_INTERNAL("_ZdaPvm")
+/// void operator delete(void*);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPv)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPv")
+/// void operator delete(void*, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvRKSt9nothrow_t")
+/// void operator delete(void*, unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvj)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvj")
+/// void operator delete(void*, unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(ZdlPvm)
+TLI_DEFINE_STRING_INTERNAL("_ZdlPvm")
+/// void *new[](unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(Znaj)
+TLI_DEFINE_STRING_INTERNAL("_Znaj")
+/// void *new[](unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnajRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnajRKSt9nothrow_t")
+/// void *new[](unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(Znam)
+TLI_DEFINE_STRING_INTERNAL("_Znam")
+/// void *new[](unsigned long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnamRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnamRKSt9nothrow_t")
+/// void *new(unsigned int);
+TLI_DEFINE_ENUM_INTERNAL(Znwj)
+TLI_DEFINE_STRING_INTERNAL("_Znwj")
+/// void *new(unsigned int, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnwjRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwjRKSt9nothrow_t")
+/// void *new(unsigned long);
+TLI_DEFINE_ENUM_INTERNAL(Znwm)
+TLI_DEFINE_STRING_INTERNAL("_Znwm")
+/// void *new(unsigned long, nothrow);
+TLI_DEFINE_ENUM_INTERNAL(ZnwmRKSt9nothrow_t)
+TLI_DEFINE_STRING_INTERNAL("_ZnwmRKSt9nothrow_t")
+/// double __cospi(double x);
+TLI_DEFINE_ENUM_INTERNAL(cospi)
+TLI_DEFINE_STRING_INTERNAL("__cospi")
+/// float __cospif(float x);
+TLI_DEFINE_ENUM_INTERNAL(cospif)
+TLI_DEFINE_STRING_INTERNAL("__cospif")
+/// int __cxa_atexit(void (*f)(void *), void *p, void *d);
+TLI_DEFINE_ENUM_INTERNAL(cxa_atexit)
+TLI_DEFINE_STRING_INTERNAL("__cxa_atexit")
+/// void __cxa_guard_abort(guard_t *guard);
+/// guard_t is int64_t in Itanium ABI or int32_t on ARM eabi.
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_abort)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_abort")
+/// int __cxa_guard_acquire(guard_t *guard);
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_acquire)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_acquire")
+/// void __cxa_guard_release(guard_t *guard);
+TLI_DEFINE_ENUM_INTERNAL(cxa_guard_release)
+TLI_DEFINE_STRING_INTERNAL("__cxa_guard_release")
+/// int __isoc99_scanf (const char *format, ...)
+TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_scanf)
+TLI_DEFINE_STRING_INTERNAL("__isoc99_scanf")
+/// int __isoc99_sscanf(const char *s, const char *format, ...)
+TLI_DEFINE_ENUM_INTERNAL(dunder_isoc99_sscanf)
+TLI_DEFINE_STRING_INTERNAL("__isoc99_sscanf")
+/// void *__memcpy_chk(void *s1, const void *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__memcpy_chk")
+/// void *__memmove_chk(void *s1, const void *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memmove_chk)
+TLI_DEFINE_STRING_INTERNAL("__memmove_chk")
+/// void *__memset_chk(void *s, char v, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(memset_chk)
+TLI_DEFINE_STRING_INTERNAL("__memset_chk")
+/// double __sincospi_stret(double x);
+TLI_DEFINE_ENUM_INTERNAL(sincospi_stret)
+TLI_DEFINE_STRING_INTERNAL("__sincospi_stret")
+/// float __sincospif_stret(float x);
+TLI_DEFINE_ENUM_INTERNAL(sincospif_stret)
+TLI_DEFINE_STRING_INTERNAL("__sincospif_stret")
+/// double __sinpi(double x);
+TLI_DEFINE_ENUM_INTERNAL(sinpi)
+TLI_DEFINE_STRING_INTERNAL("__sinpi")
+/// float __sinpif(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinpif)
+TLI_DEFINE_STRING_INTERNAL("__sinpif")
+/// double __sqrt_finite(double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrt_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrt_finite")
+/// float __sqrtf_finite(float x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtf_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrtf_finite")
+/// long double __sqrtl_finite(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtl_finite)
+TLI_DEFINE_STRING_INTERNAL("__sqrtl_finite")
+/// char *__stpcpy_chk(char *s1, const char *s2, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(stpcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__stpcpy_chk")
+/// char *__stpncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(stpncpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__stpncpy_chk")
+/// char *__strcpy_chk(char *s1, const char *s2, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(strcpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__strcpy_chk")
+/// char * __strdup(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strdup)
+TLI_DEFINE_STRING_INTERNAL("__strdup")
+/// char *__strncpy_chk(char *s1, const char *s2, size_t n, size_t s1size);
+TLI_DEFINE_ENUM_INTERNAL(strncpy_chk)
+TLI_DEFINE_STRING_INTERNAL("__strncpy_chk")
+/// char *__strndup(const char *s, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strndup)
+TLI_DEFINE_STRING_INTERNAL("__strndup")
+/// char * __strtok_r(char *s, const char *delim, char **save_ptr);
+TLI_DEFINE_ENUM_INTERNAL(dunder_strtok_r)
+TLI_DEFINE_STRING_INTERNAL("__strtok_r")
+/// int abs(int j);
+TLI_DEFINE_ENUM_INTERNAL(abs)
+TLI_DEFINE_STRING_INTERNAL("abs")
+/// int access(const char *path, int amode);
+TLI_DEFINE_ENUM_INTERNAL(access)
+TLI_DEFINE_STRING_INTERNAL("access")
+/// double acos(double x);
+TLI_DEFINE_ENUM_INTERNAL(acos)
+TLI_DEFINE_STRING_INTERNAL("acos")
+/// float acosf(float x);
+TLI_DEFINE_ENUM_INTERNAL(acosf)
+TLI_DEFINE_STRING_INTERNAL("acosf")
+/// double acosh(double x);
+TLI_DEFINE_ENUM_INTERNAL(acosh)
+TLI_DEFINE_STRING_INTERNAL("acosh")
+/// float acoshf(float x);
+TLI_DEFINE_ENUM_INTERNAL(acoshf)
+TLI_DEFINE_STRING_INTERNAL("acoshf")
+/// long double acoshl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acoshl)
+TLI_DEFINE_STRING_INTERNAL("acoshl")
+/// long double acosl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(acosl)
+TLI_DEFINE_STRING_INTERNAL("acosl")
+/// double asin(double x);
+TLI_DEFINE_ENUM_INTERNAL(asin)
+TLI_DEFINE_STRING_INTERNAL("asin")
+/// float asinf(float x);
+TLI_DEFINE_ENUM_INTERNAL(asinf)
+TLI_DEFINE_STRING_INTERNAL("asinf")
+/// double asinh(double x);
+TLI_DEFINE_ENUM_INTERNAL(asinh)
+TLI_DEFINE_STRING_INTERNAL("asinh")
+/// float asinhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(asinhf)
+TLI_DEFINE_STRING_INTERNAL("asinhf")
+/// long double asinhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(asinhl)
+TLI_DEFINE_STRING_INTERNAL("asinhl")
+/// long double asinl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(asinl)
+TLI_DEFINE_STRING_INTERNAL("asinl")
+/// double atan(double x);
+TLI_DEFINE_ENUM_INTERNAL(atan)
+TLI_DEFINE_STRING_INTERNAL("atan")
+/// double atan2(double y, double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2)
+TLI_DEFINE_STRING_INTERNAL("atan2")
+/// float atan2f(float y, float x);
+TLI_DEFINE_ENUM_INTERNAL(atan2f)
+TLI_DEFINE_STRING_INTERNAL("atan2f")
+/// long double atan2l(long double y, long double x);
+TLI_DEFINE_ENUM_INTERNAL(atan2l)
+TLI_DEFINE_STRING_INTERNAL("atan2l")
+/// float atanf(float x);
+TLI_DEFINE_ENUM_INTERNAL(atanf)
+TLI_DEFINE_STRING_INTERNAL("atanf")
+/// double atanh(double x);
+TLI_DEFINE_ENUM_INTERNAL(atanh)
+TLI_DEFINE_STRING_INTERNAL("atanh")
+/// float atanhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(atanhf)
+TLI_DEFINE_STRING_INTERNAL("atanhf")
+/// long double atanhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(atanhl)
+TLI_DEFINE_STRING_INTERNAL("atanhl")
+/// long double atanl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(atanl)
+TLI_DEFINE_STRING_INTERNAL("atanl")
+/// double atof(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atof)
+TLI_DEFINE_STRING_INTERNAL("atof")
+/// int atoi(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atoi)
+TLI_DEFINE_STRING_INTERNAL("atoi")
+/// long atol(const char *str);
+TLI_DEFINE_ENUM_INTERNAL(atol)
+TLI_DEFINE_STRING_INTERNAL("atol")
+/// long long atoll(const char *nptr);
+TLI_DEFINE_ENUM_INTERNAL(atoll)
+TLI_DEFINE_STRING_INTERNAL("atoll")
+/// int bcmp(const void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bcmp)
+TLI_DEFINE_STRING_INTERNAL("bcmp")
+/// void bcopy(const void *s1, void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bcopy)
+TLI_DEFINE_STRING_INTERNAL("bcopy")
+/// void bzero(void *s, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(bzero)
+TLI_DEFINE_STRING_INTERNAL("bzero")
+/// void *calloc(size_t count, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(calloc)
+TLI_DEFINE_STRING_INTERNAL("calloc")
+/// double cbrt(double x);
+TLI_DEFINE_ENUM_INTERNAL(cbrt)
+TLI_DEFINE_STRING_INTERNAL("cbrt")
+/// float cbrtf(float x);
+TLI_DEFINE_ENUM_INTERNAL(cbrtf)
+TLI_DEFINE_STRING_INTERNAL("cbrtf")
+/// long double cbrtl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(cbrtl)
+TLI_DEFINE_STRING_INTERNAL("cbrtl")
+/// double ceil(double x);
+TLI_DEFINE_ENUM_INTERNAL(ceil)
+TLI_DEFINE_STRING_INTERNAL("ceil")
+/// float ceilf(float x);
+TLI_DEFINE_ENUM_INTERNAL(ceilf)
+TLI_DEFINE_STRING_INTERNAL("ceilf")
+/// long double ceill(long double x);
+TLI_DEFINE_ENUM_INTERNAL(ceill)
+TLI_DEFINE_STRING_INTERNAL("ceill")
+/// int chmod(const char *path, mode_t mode);
+TLI_DEFINE_ENUM_INTERNAL(chmod)
+TLI_DEFINE_STRING_INTERNAL("chmod")
+/// int chown(const char *path, uid_t owner, gid_t group);
+TLI_DEFINE_ENUM_INTERNAL(chown)
+TLI_DEFINE_STRING_INTERNAL("chown")
+/// void clearerr(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(clearerr)
+TLI_DEFINE_STRING_INTERNAL("clearerr")
+/// int closedir(DIR *dirp);
+TLI_DEFINE_ENUM_INTERNAL(closedir)
+TLI_DEFINE_STRING_INTERNAL("closedir")
+/// double copysign(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(copysign)
+TLI_DEFINE_STRING_INTERNAL("copysign")
+/// float copysignf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(copysignf)
+TLI_DEFINE_STRING_INTERNAL("copysignf")
+/// long double copysignl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(copysignl)
+TLI_DEFINE_STRING_INTERNAL("copysignl")
+/// double cos(double x);
+TLI_DEFINE_ENUM_INTERNAL(cos)
+TLI_DEFINE_STRING_INTERNAL("cos")
+/// float cosf(float x);
+TLI_DEFINE_ENUM_INTERNAL(cosf)
+TLI_DEFINE_STRING_INTERNAL("cosf")
+/// double cosh(double x);
+TLI_DEFINE_ENUM_INTERNAL(cosh)
+TLI_DEFINE_STRING_INTERNAL("cosh")
+/// float coshf(float x);
+TLI_DEFINE_ENUM_INTERNAL(coshf)
+TLI_DEFINE_STRING_INTERNAL("coshf")
+/// long double coshl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(coshl)
+TLI_DEFINE_STRING_INTERNAL("coshl")
+/// long double cosl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(cosl)
+TLI_DEFINE_STRING_INTERNAL("cosl")
+/// char *ctermid(char *s);
+TLI_DEFINE_ENUM_INTERNAL(ctermid)
+TLI_DEFINE_STRING_INTERNAL("ctermid")
+/// double exp(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp)
+TLI_DEFINE_STRING_INTERNAL("exp")
+/// double exp10(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10)
+TLI_DEFINE_STRING_INTERNAL("exp10")
+/// float exp10f(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp10f)
+TLI_DEFINE_STRING_INTERNAL("exp10f")
+/// long double exp10l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp10l)
+TLI_DEFINE_STRING_INTERNAL("exp10l")
+/// double exp2(double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2)
+TLI_DEFINE_STRING_INTERNAL("exp2")
+/// float exp2f(float x);
+TLI_DEFINE_ENUM_INTERNAL(exp2f)
+TLI_DEFINE_STRING_INTERNAL("exp2f")
+/// long double exp2l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(exp2l)
+TLI_DEFINE_STRING_INTERNAL("exp2l")
+/// float expf(float x);
+TLI_DEFINE_ENUM_INTERNAL(expf)
+TLI_DEFINE_STRING_INTERNAL("expf")
+/// long double expl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(expl)
+TLI_DEFINE_STRING_INTERNAL("expl")
+/// double expm1(double x);
+TLI_DEFINE_ENUM_INTERNAL(expm1)
+TLI_DEFINE_STRING_INTERNAL("expm1")
+/// float expm1f(float x);
+TLI_DEFINE_ENUM_INTERNAL(expm1f)
+TLI_DEFINE_STRING_INTERNAL("expm1f")
+/// long double expm1l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(expm1l)
+TLI_DEFINE_STRING_INTERNAL("expm1l")
+/// double fabs(double x);
+TLI_DEFINE_ENUM_INTERNAL(fabs)
+TLI_DEFINE_STRING_INTERNAL("fabs")
+/// float fabsf(float x);
+TLI_DEFINE_ENUM_INTERNAL(fabsf)
+TLI_DEFINE_STRING_INTERNAL("fabsf")
+/// long double fabsl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(fabsl)
+TLI_DEFINE_STRING_INTERNAL("fabsl")
+/// int fclose(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fclose)
+TLI_DEFINE_STRING_INTERNAL("fclose")
+/// FILE *fdopen(int fildes, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(fdopen)
+TLI_DEFINE_STRING_INTERNAL("fdopen")
+/// int feof(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(feof)
+TLI_DEFINE_STRING_INTERNAL("feof")
+/// int ferror(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ferror)
+TLI_DEFINE_STRING_INTERNAL("ferror")
+/// int fflush(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fflush)
+TLI_DEFINE_STRING_INTERNAL("fflush")
+/// int ffs(int i);
+TLI_DEFINE_ENUM_INTERNAL(ffs)
+TLI_DEFINE_STRING_INTERNAL("ffs")
+/// int ffsl(long int i);
+TLI_DEFINE_ENUM_INTERNAL(ffsl)
+TLI_DEFINE_STRING_INTERNAL("ffsl")
+/// int ffsll(long long int i);
+TLI_DEFINE_ENUM_INTERNAL(ffsll)
+TLI_DEFINE_STRING_INTERNAL("ffsll")
+/// int fgetc(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgetc)
+TLI_DEFINE_STRING_INTERNAL("fgetc")
+/// int fgetpos(FILE *stream, fpos_t *pos);
+TLI_DEFINE_ENUM_INTERNAL(fgetpos)
+TLI_DEFINE_STRING_INTERNAL("fgetpos")
+/// char *fgets(char *s, int n, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fgets)
+TLI_DEFINE_STRING_INTERNAL("fgets")
+/// int fileno(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fileno)
+TLI_DEFINE_STRING_INTERNAL("fileno")
+/// int fiprintf(FILE *stream, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(fiprintf)
+TLI_DEFINE_STRING_INTERNAL("fiprintf")
+/// void flockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(flockfile)
+TLI_DEFINE_STRING_INTERNAL("flockfile")
+/// double floor(double x);
+TLI_DEFINE_ENUM_INTERNAL(floor)
+TLI_DEFINE_STRING_INTERNAL("floor")
+/// float floorf(float x);
+TLI_DEFINE_ENUM_INTERNAL(floorf)
+TLI_DEFINE_STRING_INTERNAL("floorf")
+/// long double floorl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(floorl)
+TLI_DEFINE_STRING_INTERNAL("floorl")
+/// int fls(int i);
+TLI_DEFINE_ENUM_INTERNAL(fls)
+TLI_DEFINE_STRING_INTERNAL("fls")
+/// int flsl(long int i);
+TLI_DEFINE_ENUM_INTERNAL(flsl)
+TLI_DEFINE_STRING_INTERNAL("flsl")
+/// int flsll(long long int i);
+TLI_DEFINE_ENUM_INTERNAL(flsll)
+TLI_DEFINE_STRING_INTERNAL("flsll")
+/// double fmax(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmax)
+TLI_DEFINE_STRING_INTERNAL("fmax")
+/// float fmaxf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fmaxf)
+TLI_DEFINE_STRING_INTERNAL("fmaxf")
+/// long double fmaxl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fmaxl)
+TLI_DEFINE_STRING_INTERNAL("fmaxl")
+/// double fmin(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmin)
+TLI_DEFINE_STRING_INTERNAL("fmin")
+/// float fminf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fminf)
+TLI_DEFINE_STRING_INTERNAL("fminf")
+/// long double fminl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fminl)
+TLI_DEFINE_STRING_INTERNAL("fminl")
+/// double fmod(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(fmod)
+TLI_DEFINE_STRING_INTERNAL("fmod")
+/// float fmodf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(fmodf)
+TLI_DEFINE_STRING_INTERNAL("fmodf")
+/// long double fmodl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(fmodl)
+TLI_DEFINE_STRING_INTERNAL("fmodl")
+/// FILE *fopen(const char *filename, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(fopen)
+TLI_DEFINE_STRING_INTERNAL("fopen")
+/// FILE *fopen64(const char *filename, const char *opentype)
+TLI_DEFINE_ENUM_INTERNAL(fopen64)
+TLI_DEFINE_STRING_INTERNAL("fopen64")
+/// int fprintf(FILE *stream, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(fprintf)
+TLI_DEFINE_STRING_INTERNAL("fprintf")
+/// int fputc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputc)
+TLI_DEFINE_STRING_INTERNAL("fputc")
+/// int fputs(const char *s, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fputs)
+TLI_DEFINE_STRING_INTERNAL("fputs")
+/// size_t fread(void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fread)
+TLI_DEFINE_STRING_INTERNAL("fread")
+/// void free(void *ptr);
+TLI_DEFINE_ENUM_INTERNAL(free)
+TLI_DEFINE_STRING_INTERNAL("free")
+/// double frexp(double num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexp)
+TLI_DEFINE_STRING_INTERNAL("frexp")
+/// float frexpf(float num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexpf)
+TLI_DEFINE_STRING_INTERNAL("frexpf")
+/// long double frexpl(long double num, int *exp);
+TLI_DEFINE_ENUM_INTERNAL(frexpl)
+TLI_DEFINE_STRING_INTERNAL("frexpl")
+/// int fscanf(FILE *stream, const char *format, ... );
+TLI_DEFINE_ENUM_INTERNAL(fscanf)
+TLI_DEFINE_STRING_INTERNAL("fscanf")
+/// int fseek(FILE *stream, long offset, int whence);
+TLI_DEFINE_ENUM_INTERNAL(fseek)
+TLI_DEFINE_STRING_INTERNAL("fseek")
+/// int fseeko(FILE *stream, off_t offset, int whence);
+TLI_DEFINE_ENUM_INTERNAL(fseeko)
+TLI_DEFINE_STRING_INTERNAL("fseeko")
+/// int fseeko64(FILE *stream, off64_t offset, int whence)
+TLI_DEFINE_ENUM_INTERNAL(fseeko64)
+TLI_DEFINE_STRING_INTERNAL("fseeko64")
+/// int fsetpos(FILE *stream, const fpos_t *pos);
+TLI_DEFINE_ENUM_INTERNAL(fsetpos)
+TLI_DEFINE_STRING_INTERNAL("fsetpos")
+/// int fstat(int fildes, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstat)
+TLI_DEFINE_STRING_INTERNAL("fstat")
+/// int fstat64(int filedes, struct stat64 *buf)
+TLI_DEFINE_ENUM_INTERNAL(fstat64)
+TLI_DEFINE_STRING_INTERNAL("fstat64")
+/// int fstatvfs(int fildes, struct statvfs *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstatvfs)
+TLI_DEFINE_STRING_INTERNAL("fstatvfs")
+/// int fstatvfs64(int fildes, struct statvfs64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(fstatvfs64)
+TLI_DEFINE_STRING_INTERNAL("fstatvfs64")
+/// long ftell(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ftell)
+TLI_DEFINE_STRING_INTERNAL("ftell")
+/// off_t ftello(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ftello)
+TLI_DEFINE_STRING_INTERNAL("ftello")
+/// off64_t ftello64(FILE *stream)
+TLI_DEFINE_ENUM_INTERNAL(ftello64)
+TLI_DEFINE_STRING_INTERNAL("ftello64")
+/// int ftrylockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(ftrylockfile)
+TLI_DEFINE_STRING_INTERNAL("ftrylockfile")
+/// void funlockfile(FILE *file);
+TLI_DEFINE_ENUM_INTERNAL(funlockfile)
+TLI_DEFINE_STRING_INTERNAL("funlockfile")
+/// size_t fwrite(const void *ptr, size_t size, size_t nitems, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(fwrite)
+TLI_DEFINE_STRING_INTERNAL("fwrite")
+/// int getc(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(getc)
+TLI_DEFINE_STRING_INTERNAL("getc")
+/// int getc_unlocked(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(getc_unlocked)
+TLI_DEFINE_STRING_INTERNAL("getc_unlocked")
+/// int getchar(void);
+TLI_DEFINE_ENUM_INTERNAL(getchar)
+TLI_DEFINE_STRING_INTERNAL("getchar")
+/// char *getenv(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(getenv)
+TLI_DEFINE_STRING_INTERNAL("getenv")
+/// int getitimer(int which, struct itimerval *value);
+TLI_DEFINE_ENUM_INTERNAL(getitimer)
+TLI_DEFINE_STRING_INTERNAL("getitimer")
+/// int getlogin_r(char *name, size_t namesize);
+TLI_DEFINE_ENUM_INTERNAL(getlogin_r)
+TLI_DEFINE_STRING_INTERNAL("getlogin_r")
+/// struct passwd *getpwnam(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(getpwnam)
+TLI_DEFINE_STRING_INTERNAL("getpwnam")
+/// char *gets(char *s);
+TLI_DEFINE_ENUM_INTERNAL(gets)
+TLI_DEFINE_STRING_INTERNAL("gets")
+/// int gettimeofday(struct timeval *tp, void *tzp);
+TLI_DEFINE_ENUM_INTERNAL(gettimeofday)
+TLI_DEFINE_STRING_INTERNAL("gettimeofday")
+/// uint32_t htonl(uint32_t hostlong);
+TLI_DEFINE_ENUM_INTERNAL(htonl)
+TLI_DEFINE_STRING_INTERNAL("htonl")
+/// uint16_t htons(uint16_t hostshort);
+TLI_DEFINE_ENUM_INTERNAL(htons)
+TLI_DEFINE_STRING_INTERNAL("htons")
+/// int iprintf(const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(iprintf)
+TLI_DEFINE_STRING_INTERNAL("iprintf")
+/// int isascii(int c);
+TLI_DEFINE_ENUM_INTERNAL(isascii)
+TLI_DEFINE_STRING_INTERNAL("isascii")
+/// int isdigit(int c);
+TLI_DEFINE_ENUM_INTERNAL(isdigit)
+TLI_DEFINE_STRING_INTERNAL("isdigit")
+/// long int labs(long int j);
+TLI_DEFINE_ENUM_INTERNAL(labs)
+TLI_DEFINE_STRING_INTERNAL("labs")
+/// int lchown(const char *path, uid_t owner, gid_t group);
+TLI_DEFINE_ENUM_INTERNAL(lchown)
+TLI_DEFINE_STRING_INTERNAL("lchown")
+/// double ldexp(double x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexp)
+TLI_DEFINE_STRING_INTERNAL("ldexp")
+/// float ldexpf(float x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexpf)
+TLI_DEFINE_STRING_INTERNAL("ldexpf")
+/// long double ldexpl(long double x, int n);
+TLI_DEFINE_ENUM_INTERNAL(ldexpl)
+TLI_DEFINE_STRING_INTERNAL("ldexpl")
+/// long long int llabs(long long int j);
+TLI_DEFINE_ENUM_INTERNAL(llabs)
+TLI_DEFINE_STRING_INTERNAL("llabs")
+/// double log(double x);
+TLI_DEFINE_ENUM_INTERNAL(log)
+TLI_DEFINE_STRING_INTERNAL("log")
+/// double log10(double x);
+TLI_DEFINE_ENUM_INTERNAL(log10)
+TLI_DEFINE_STRING_INTERNAL("log10")
+/// float log10f(float x);
+TLI_DEFINE_ENUM_INTERNAL(log10f)
+TLI_DEFINE_STRING_INTERNAL("log10f")
+/// long double log10l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log10l)
+TLI_DEFINE_STRING_INTERNAL("log10l")
+/// double log1p(double x);
+TLI_DEFINE_ENUM_INTERNAL(log1p)
+TLI_DEFINE_STRING_INTERNAL("log1p")
+/// float log1pf(float x);
+TLI_DEFINE_ENUM_INTERNAL(log1pf)
+TLI_DEFINE_STRING_INTERNAL("log1pf")
+/// long double log1pl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log1pl)
+TLI_DEFINE_STRING_INTERNAL("log1pl")
+/// double log2(double x);
+TLI_DEFINE_ENUM_INTERNAL(log2)
+TLI_DEFINE_STRING_INTERNAL("log2")
+/// float log2f(float x);
+TLI_DEFINE_ENUM_INTERNAL(log2f)
+TLI_DEFINE_STRING_INTERNAL("log2f")
+/// long double log2l(long double x);
+TLI_DEFINE_ENUM_INTERNAL(log2l)
+TLI_DEFINE_STRING_INTERNAL("log2l")
+/// double logb(double x);
+TLI_DEFINE_ENUM_INTERNAL(logb)
+TLI_DEFINE_STRING_INTERNAL("logb")
+/// float logbf(float x);
+TLI_DEFINE_ENUM_INTERNAL(logbf)
+TLI_DEFINE_STRING_INTERNAL("logbf")
+/// long double logbl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(logbl)
+TLI_DEFINE_STRING_INTERNAL("logbl")
+/// float logf(float x);
+TLI_DEFINE_ENUM_INTERNAL(logf)
+TLI_DEFINE_STRING_INTERNAL("logf")
+/// long double logl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(logl)
+TLI_DEFINE_STRING_INTERNAL("logl")
+/// int lstat(const char *path, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(lstat)
+TLI_DEFINE_STRING_INTERNAL("lstat")
+/// int lstat64(const char *path, struct stat64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(lstat64)
+TLI_DEFINE_STRING_INTERNAL("lstat64")
+/// void *malloc(size_t size);
+TLI_DEFINE_ENUM_INTERNAL(malloc)
+TLI_DEFINE_STRING_INTERNAL("malloc")
+/// void *memalign(size_t boundary, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(memalign)
+TLI_DEFINE_STRING_INTERNAL("memalign")
+/// void *memccpy(void *s1, const void *s2, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memccpy)
+TLI_DEFINE_STRING_INTERNAL("memccpy")
+/// void *memchr(const void *s, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memchr)
+TLI_DEFINE_STRING_INTERNAL("memchr")
+/// int memcmp(const void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memcmp)
+TLI_DEFINE_STRING_INTERNAL("memcmp")
+/// void *memcpy(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memcpy)
+TLI_DEFINE_STRING_INTERNAL("memcpy")
+/// void *memmove(void *s1, const void *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memmove)
+TLI_DEFINE_STRING_INTERNAL("memmove")
+/// void *memrchr(const void *s, int c, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(memrchr)
+TLI_DEFINE_STRING_INTERNAL("memrchr")
+/// void *memset(void *b, int c, size_t len);
+TLI_DEFINE_ENUM_INTERNAL(memset)
+TLI_DEFINE_STRING_INTERNAL("memset")
+/// void memset_pattern16(void *b, const void *pattern16, size_t len);
+TLI_DEFINE_ENUM_INTERNAL(memset_pattern16)
+TLI_DEFINE_STRING_INTERNAL("memset_pattern16")
+/// int mkdir(const char *path, mode_t mode);
+TLI_DEFINE_ENUM_INTERNAL(mkdir)
+TLI_DEFINE_STRING_INTERNAL("mkdir")
+/// time_t mktime(struct tm *timeptr);
+TLI_DEFINE_ENUM_INTERNAL(mktime)
+TLI_DEFINE_STRING_INTERNAL("mktime")
+/// double modf(double x, double *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modf)
+TLI_DEFINE_STRING_INTERNAL("modf")
+/// float modff(float value, float *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modff)
+TLI_DEFINE_STRING_INTERNAL("modff")
+/// long double modfl(long double value, long double *iptr);
+TLI_DEFINE_ENUM_INTERNAL(modfl)
+TLI_DEFINE_STRING_INTERNAL("modfl")
+
+/// double nearbyint(double x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyint)
+TLI_DEFINE_STRING_INTERNAL("nearbyint")
+/// float nearbyintf(float x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyintf)
+TLI_DEFINE_STRING_INTERNAL("nearbyintf")
+/// long double nearbyintl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(nearbyintl)
+TLI_DEFINE_STRING_INTERNAL("nearbyintl")
+/// uint32_t ntohl(uint32_t netlong);
+TLI_DEFINE_ENUM_INTERNAL(ntohl)
+TLI_DEFINE_STRING_INTERNAL("ntohl")
+/// uint16_t ntohs(uint16_t netshort);
+TLI_DEFINE_ENUM_INTERNAL(ntohs)
+TLI_DEFINE_STRING_INTERNAL("ntohs")
+/// int open(const char *path, int oflag, ... );
+TLI_DEFINE_ENUM_INTERNAL(open)
+TLI_DEFINE_STRING_INTERNAL("open")
+/// int open64(const char *filename, int flags[, mode_t mode]);
+TLI_DEFINE_ENUM_INTERNAL(open64)
+TLI_DEFINE_STRING_INTERNAL("open64")
+/// DIR *opendir(const char *dirname);
+TLI_DEFINE_ENUM_INTERNAL(opendir)
+TLI_DEFINE_STRING_INTERNAL("opendir")
+/// int pclose(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(pclose)
+TLI_DEFINE_STRING_INTERNAL("pclose")
+/// void perror(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(perror)
+TLI_DEFINE_STRING_INTERNAL("perror")
+/// FILE *popen(const char *command, const char *mode);
+TLI_DEFINE_ENUM_INTERNAL(popen)
+TLI_DEFINE_STRING_INTERNAL("popen")
+/// int posix_memalign(void **memptr, size_t alignment, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(posix_memalign)
+TLI_DEFINE_STRING_INTERNAL("posix_memalign")
+/// double pow(double x, double y);
+TLI_DEFINE_ENUM_INTERNAL(pow)
+TLI_DEFINE_STRING_INTERNAL("pow")
+/// float powf(float x, float y);
+TLI_DEFINE_ENUM_INTERNAL(powf)
+TLI_DEFINE_STRING_INTERNAL("powf")
+/// long double powl(long double x, long double y);
+TLI_DEFINE_ENUM_INTERNAL(powl)
+TLI_DEFINE_STRING_INTERNAL("powl")
+/// ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset);
+TLI_DEFINE_ENUM_INTERNAL(pread)
+TLI_DEFINE_STRING_INTERNAL("pread")
+/// int printf(const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(printf)
+TLI_DEFINE_STRING_INTERNAL("printf")
+/// int putc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(putc)
+TLI_DEFINE_STRING_INTERNAL("putc")
+/// int putchar(int c);
+TLI_DEFINE_ENUM_INTERNAL(putchar)
+TLI_DEFINE_STRING_INTERNAL("putchar")
+/// int puts(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(puts)
+TLI_DEFINE_STRING_INTERNAL("puts")
+/// ssize_t pwrite(int fildes, const void *buf, size_t nbyte, off_t offset);
+TLI_DEFINE_ENUM_INTERNAL(pwrite)
+TLI_DEFINE_STRING_INTERNAL("pwrite")
+/// void qsort(void *base, size_t nel, size_t width,
+/// int (*compar)(const void *, const void *));
+TLI_DEFINE_ENUM_INTERNAL(qsort)
+TLI_DEFINE_STRING_INTERNAL("qsort")
+/// ssize_t read(int fildes, void *buf, size_t nbyte);
+TLI_DEFINE_ENUM_INTERNAL(read)
+TLI_DEFINE_STRING_INTERNAL("read")
+/// ssize_t readlink(const char *path, char *buf, size_t bufsize);
+TLI_DEFINE_ENUM_INTERNAL(readlink)
+TLI_DEFINE_STRING_INTERNAL("readlink")
+/// void *realloc(void *ptr, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(realloc)
+TLI_DEFINE_STRING_INTERNAL("realloc")
+/// void *reallocf(void *ptr, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(reallocf)
+TLI_DEFINE_STRING_INTERNAL("reallocf")
+/// char *realpath(const char *file_name, char *resolved_name);
+TLI_DEFINE_ENUM_INTERNAL(realpath)
+TLI_DEFINE_STRING_INTERNAL("realpath")
+/// int remove(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(remove)
+TLI_DEFINE_STRING_INTERNAL("remove")
+/// int rename(const char *old, const char *new);
+TLI_DEFINE_ENUM_INTERNAL(rename)
+TLI_DEFINE_STRING_INTERNAL("rename")
+/// void rewind(FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(rewind)
+TLI_DEFINE_STRING_INTERNAL("rewind")
+/// double rint(double x);
+TLI_DEFINE_ENUM_INTERNAL(rint)
+TLI_DEFINE_STRING_INTERNAL("rint")
+/// float rintf(float x);
+TLI_DEFINE_ENUM_INTERNAL(rintf)
+TLI_DEFINE_STRING_INTERNAL("rintf")
+/// long double rintl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(rintl)
+TLI_DEFINE_STRING_INTERNAL("rintl")
+/// int rmdir(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(rmdir)
+TLI_DEFINE_STRING_INTERNAL("rmdir")
+/// double round(double x);
+TLI_DEFINE_ENUM_INTERNAL(round)
+TLI_DEFINE_STRING_INTERNAL("round")
+/// float roundf(float x);
+TLI_DEFINE_ENUM_INTERNAL(roundf)
+TLI_DEFINE_STRING_INTERNAL("roundf")
+/// long double roundl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(roundl)
+TLI_DEFINE_STRING_INTERNAL("roundl")
+/// int scanf(const char *restrict format, ... );
+TLI_DEFINE_ENUM_INTERNAL(scanf)
+TLI_DEFINE_STRING_INTERNAL("scanf")
+/// void setbuf(FILE *stream, char *buf);
+TLI_DEFINE_ENUM_INTERNAL(setbuf)
+TLI_DEFINE_STRING_INTERNAL("setbuf")
+/// int setitimer(int which, const struct itimerval *value,
+/// struct itimerval *ovalue);
+TLI_DEFINE_ENUM_INTERNAL(setitimer)
+TLI_DEFINE_STRING_INTERNAL("setitimer")
+/// int setvbuf(FILE *stream, char *buf, int type, size_t size);
+TLI_DEFINE_ENUM_INTERNAL(setvbuf)
+TLI_DEFINE_STRING_INTERNAL("setvbuf")
+/// double sin(double x);
+TLI_DEFINE_ENUM_INTERNAL(sin)
+TLI_DEFINE_STRING_INTERNAL("sin")
+/// float sinf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinf)
+TLI_DEFINE_STRING_INTERNAL("sinf")
+/// double sinh(double x);
+TLI_DEFINE_ENUM_INTERNAL(sinh)
+TLI_DEFINE_STRING_INTERNAL("sinh")
+/// float sinhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sinhf)
+TLI_DEFINE_STRING_INTERNAL("sinhf")
+/// long double sinhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sinhl)
+TLI_DEFINE_STRING_INTERNAL("sinhl")
+/// long double sinl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sinl)
+TLI_DEFINE_STRING_INTERNAL("sinl")
+/// int siprintf(char *str, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(siprintf)
+TLI_DEFINE_STRING_INTERNAL("siprintf")
+/// int snprintf(char *s, size_t n, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(snprintf)
+TLI_DEFINE_STRING_INTERNAL("snprintf")
+/// int sprintf(char *str, const char *format, ...);
+TLI_DEFINE_ENUM_INTERNAL(sprintf)
+TLI_DEFINE_STRING_INTERNAL("sprintf")
+/// double sqrt(double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrt)
+TLI_DEFINE_STRING_INTERNAL("sqrt")
+/// float sqrtf(float x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtf)
+TLI_DEFINE_STRING_INTERNAL("sqrtf")
+/// long double sqrtl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(sqrtl)
+TLI_DEFINE_STRING_INTERNAL("sqrtl")
+/// int sscanf(const char *s, const char *format, ... );
+TLI_DEFINE_ENUM_INTERNAL(sscanf)
+TLI_DEFINE_STRING_INTERNAL("sscanf")
+/// int stat(const char *path, struct stat *buf);
+TLI_DEFINE_ENUM_INTERNAL(stat)
+TLI_DEFINE_STRING_INTERNAL("stat")
+/// int stat64(const char *path, struct stat64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(stat64)
+TLI_DEFINE_STRING_INTERNAL("stat64")
+/// int statvfs(const char *path, struct statvfs *buf);
+TLI_DEFINE_ENUM_INTERNAL(statvfs)
+TLI_DEFINE_STRING_INTERNAL("statvfs")
+/// int statvfs64(const char *path, struct statvfs64 *buf);
+TLI_DEFINE_ENUM_INTERNAL(statvfs64)
+TLI_DEFINE_STRING_INTERNAL("statvfs64")
+/// char *stpcpy(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(stpcpy)
+TLI_DEFINE_STRING_INTERNAL("stpcpy")
+/// char *stpncpy(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(stpncpy)
+TLI_DEFINE_STRING_INTERNAL("stpncpy")
+/// int strcasecmp(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcasecmp)
+TLI_DEFINE_STRING_INTERNAL("strcasecmp")
+/// char *strcat(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcat)
+TLI_DEFINE_STRING_INTERNAL("strcat")
+/// char *strchr(const char *s, int c);
+TLI_DEFINE_ENUM_INTERNAL(strchr)
+TLI_DEFINE_STRING_INTERNAL("strchr")
+/// int strcmp(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcmp)
+TLI_DEFINE_STRING_INTERNAL("strcmp")
+/// int strcoll(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcoll)
+TLI_DEFINE_STRING_INTERNAL("strcoll")
+/// char *strcpy(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcpy)
+TLI_DEFINE_STRING_INTERNAL("strcpy")
+/// size_t strcspn(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strcspn)
+TLI_DEFINE_STRING_INTERNAL("strcspn")
+/// char *strdup(const char *s1);
+TLI_DEFINE_ENUM_INTERNAL(strdup)
+TLI_DEFINE_STRING_INTERNAL("strdup")
+/// size_t strlen(const char *s);
+TLI_DEFINE_ENUM_INTERNAL(strlen)
+TLI_DEFINE_STRING_INTERNAL("strlen")
+/// int strncasecmp(const char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncasecmp)
+TLI_DEFINE_STRING_INTERNAL("strncasecmp")
+/// char *strncat(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncat)
+TLI_DEFINE_STRING_INTERNAL("strncat")
+/// int strncmp(const char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncmp)
+TLI_DEFINE_STRING_INTERNAL("strncmp")
+/// char *strncpy(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strncpy)
+TLI_DEFINE_STRING_INTERNAL("strncpy")
+/// char *strndup(const char *s1, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strndup)
+TLI_DEFINE_STRING_INTERNAL("strndup")
+/// size_t strnlen(const char *s, size_t maxlen);
+TLI_DEFINE_ENUM_INTERNAL(strnlen)
+TLI_DEFINE_STRING_INTERNAL("strnlen")
+/// char *strpbrk(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strpbrk)
+TLI_DEFINE_STRING_INTERNAL("strpbrk")
+/// char *strrchr(const char *s, int c);
+TLI_DEFINE_ENUM_INTERNAL(strrchr)
+TLI_DEFINE_STRING_INTERNAL("strrchr")
+/// size_t strspn(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strspn)
+TLI_DEFINE_STRING_INTERNAL("strspn")
+/// char *strstr(const char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strstr)
+TLI_DEFINE_STRING_INTERNAL("strstr")
+/// double strtod(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtod)
+TLI_DEFINE_STRING_INTERNAL("strtod")
+/// float strtof(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtof)
+TLI_DEFINE_STRING_INTERNAL("strtof")
+/// char *strtok(char *s1, const char *s2);
+TLI_DEFINE_ENUM_INTERNAL(strtok)
+TLI_DEFINE_STRING_INTERNAL("strtok")
+/// char *strtok_r(char *s, const char *sep, char **lasts);
+TLI_DEFINE_ENUM_INTERNAL(strtok_r)
+TLI_DEFINE_STRING_INTERNAL("strtok_r")
+/// long int strtol(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtol)
+TLI_DEFINE_STRING_INTERNAL("strtol")
+/// long double strtold(const char *nptr, char **endptr);
+TLI_DEFINE_ENUM_INTERNAL(strtold)
+TLI_DEFINE_STRING_INTERNAL("strtold")
+/// long long int strtoll(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoll)
+TLI_DEFINE_STRING_INTERNAL("strtoll")
+/// unsigned long int strtoul(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoul)
+TLI_DEFINE_STRING_INTERNAL("strtoul")
+/// unsigned long long int strtoull(const char *nptr, char **endptr, int base);
+TLI_DEFINE_ENUM_INTERNAL(strtoull)
+TLI_DEFINE_STRING_INTERNAL("strtoull")
+/// size_t strxfrm(char *s1, const char *s2, size_t n);
+TLI_DEFINE_ENUM_INTERNAL(strxfrm)
+TLI_DEFINE_STRING_INTERNAL("strxfrm")
+/// int system(const char *command);
+TLI_DEFINE_ENUM_INTERNAL(system)
+TLI_DEFINE_STRING_INTERNAL("system")
+/// double tan(double x);
+TLI_DEFINE_ENUM_INTERNAL(tan)
+TLI_DEFINE_STRING_INTERNAL("tan")
+/// float tanf(float x);
+TLI_DEFINE_ENUM_INTERNAL(tanf)
+TLI_DEFINE_STRING_INTERNAL("tanf")
+/// double tanh(double x);
+TLI_DEFINE_ENUM_INTERNAL(tanh)
+TLI_DEFINE_STRING_INTERNAL("tanh")
+/// float tanhf(float x);
+TLI_DEFINE_ENUM_INTERNAL(tanhf)
+TLI_DEFINE_STRING_INTERNAL("tanhf")
+/// long double tanhl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(tanhl)
+TLI_DEFINE_STRING_INTERNAL("tanhl")
+/// long double tanl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(tanl)
+TLI_DEFINE_STRING_INTERNAL("tanl")
+/// clock_t times(struct tms *buffer);
+TLI_DEFINE_ENUM_INTERNAL(times)
+TLI_DEFINE_STRING_INTERNAL("times")
+/// FILE *tmpfile(void);
+TLI_DEFINE_ENUM_INTERNAL(tmpfile)
+TLI_DEFINE_STRING_INTERNAL("tmpfile")
+/// FILE *tmpfile64(void);
+TLI_DEFINE_ENUM_INTERNAL(tmpfile64)
+TLI_DEFINE_STRING_INTERNAL("tmpfile64")
+/// int toascii(int c);
+TLI_DEFINE_ENUM_INTERNAL(toascii)
+TLI_DEFINE_STRING_INTERNAL("toascii")
+/// double trunc(double x);
+TLI_DEFINE_ENUM_INTERNAL(trunc)
+TLI_DEFINE_STRING_INTERNAL("trunc")
+/// float truncf(float x);
+TLI_DEFINE_ENUM_INTERNAL(truncf)
+TLI_DEFINE_STRING_INTERNAL("truncf")
+/// long double truncl(long double x);
+TLI_DEFINE_ENUM_INTERNAL(truncl)
+TLI_DEFINE_STRING_INTERNAL("truncl")
+/// int uname(struct utsname *name);
+TLI_DEFINE_ENUM_INTERNAL(uname)
+TLI_DEFINE_STRING_INTERNAL("uname")
+/// int ungetc(int c, FILE *stream);
+TLI_DEFINE_ENUM_INTERNAL(ungetc)
+TLI_DEFINE_STRING_INTERNAL("ungetc")
+/// int unlink(const char *path);
+TLI_DEFINE_ENUM_INTERNAL(unlink)
+TLI_DEFINE_STRING_INTERNAL("unlink")
+/// int unsetenv(const char *name);
+TLI_DEFINE_ENUM_INTERNAL(unsetenv)
+TLI_DEFINE_STRING_INTERNAL("unsetenv")
+/// int utime(const char *path, const struct utimbuf *times);
+TLI_DEFINE_ENUM_INTERNAL(utime)
+TLI_DEFINE_STRING_INTERNAL("utime")
+/// int utimes(const char *path, const struct timeval times[2]);
+TLI_DEFINE_ENUM_INTERNAL(utimes)
+TLI_DEFINE_STRING_INTERNAL("utimes")
+/// void *valloc(size_t size);
+TLI_DEFINE_ENUM_INTERNAL(valloc)
+TLI_DEFINE_STRING_INTERNAL("valloc")
+/// int vfprintf(FILE *stream, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vfprintf)
+TLI_DEFINE_STRING_INTERNAL("vfprintf")
+/// int vfscanf(FILE *stream, const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vfscanf)
+TLI_DEFINE_STRING_INTERNAL("vfscanf")
+/// int vprintf(const char *restrict format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vprintf)
+TLI_DEFINE_STRING_INTERNAL("vprintf")
+/// int vscanf(const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vscanf)
+TLI_DEFINE_STRING_INTERNAL("vscanf")
+/// int vsnprintf(char *s, size_t n, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vsnprintf)
+TLI_DEFINE_STRING_INTERNAL("vsnprintf")
+/// int vsprintf(char *s, const char *format, va_list ap);
+TLI_DEFINE_ENUM_INTERNAL(vsprintf)
+TLI_DEFINE_STRING_INTERNAL("vsprintf")
+/// int vsscanf(const char *s, const char *format, va_list arg);
+TLI_DEFINE_ENUM_INTERNAL(vsscanf)
+TLI_DEFINE_STRING_INTERNAL("vsscanf")
+/// ssize_t write(int fildes, const void *buf, size_t nbyte);
+TLI_DEFINE_ENUM_INTERNAL(write)
+TLI_DEFINE_STRING_INTERNAL("write")
+
+#undef TLI_DEFINE_ENUM_INTERNAL
+#undef TLI_DEFINE_STRING_INTERNAL
+#endif // One of TLI_DEFINE_ENUM/STRING is defined.
+
+#undef TLI_DEFINE_ENUM
+#undef TLI_DEFINE_STRING
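+// Illustrative expansion of the X-macro pattern used above (a sketch; the
+// real consumers live in TargetLibraryInfo.h and its implementation file):
+//
+//   #define TLI_DEFINE_STRING
+//   static const char *const Names[] = {
+//   #include "llvm/Analysis/TargetLibraryInfo.def"
+//   };
+//
+// With TLI_DEFINE_STRING defined, each TLI_DEFINE_STRING_INTERNAL("name")
+// entry expands to "name", while the enum entries expand to nothing, so
+// Names holds one string per LibFunc in declaration order. The trailing
+// #undefs above mean no cleanup is needed at the include site.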
diff --git a/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.h b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.h
new file mode 100644
index 0000000..7becdf0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TargetLibraryInfo.h
@@ -0,0 +1,328 @@
+//===-- TargetLibraryInfo.h - Library information ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETLIBRARYINFO_H
+#define LLVM_ANALYSIS_TARGETLIBRARYINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+/// VecDesc - Describes a possible vectorization of a function.
+/// Function 'VectorFnName' is equivalent to 'ScalarFnName' vectorized
+/// by a factor of 'VectorizationFactor'.
+struct VecDesc {
+ const char *ScalarFnName;
+ const char *VectorFnName;
+ unsigned VectorizationFactor;
+};
+class PreservedAnalyses;
+
+namespace LibFunc {
+  enum Func {
+#define TLI_DEFINE_ENUM
+#include "llvm/Analysis/TargetLibraryInfo.def"
+
+    NumLibFuncs
+  };
+}
+
+/// \brief Implementation of the target library information.
+///
+/// This class constructs tables that hold the target library information and
+/// make it available. However, it is somewhat expensive to compute and only
+/// depends on the triple. So users typically interact with the \c
+/// TargetLibraryInfo wrapper below.
+class TargetLibraryInfoImpl {
+ friend class TargetLibraryInfo;
+
+ unsigned char AvailableArray[(LibFunc::NumLibFuncs+3)/4];
+ llvm::DenseMap<unsigned, std::string> CustomNames;
+ static const char *const StandardNames[LibFunc::NumLibFuncs];
+
+ enum AvailabilityState {
+ StandardName = 3, // (memset to all ones)
+ CustomName = 1,
+ Unavailable = 0 // (memset to all zeros)
+ };
+ void setState(LibFunc::Func F, AvailabilityState State) {
+ AvailableArray[F/4] &= ~(3 << 2*(F&3));
+ AvailableArray[F/4] |= State << 2*(F&3);
+ }
+ AvailabilityState getState(LibFunc::Func F) const {
+ return static_cast<AvailabilityState>((AvailableArray[F/4] >> 2*(F&3)) & 3);
+ }
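+  // The availability of each LibFunc is packed two bits per function into
+  // AvailableArray. For example, for F == 5 the state occupies bits 2..3 of
+  // AvailableArray[1]: setState(5, CustomName) clears those two bits and
+  // then ORs in (CustomName << 2).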
+
+ /// Vectorization descriptors - sorted by ScalarFnName.
+ std::vector<VecDesc> VectorDescs;
+ /// Scalarization descriptors - same content as VectorDescs but sorted based
+ /// on VectorFnName rather than ScalarFnName.
+ std::vector<VecDesc> ScalarDescs;
+
+public:
+ /// \brief List of known vector-functions libraries.
+ ///
+  /// The vector-functions library defines which functions are vectorizable
+  /// and with which vectorization factor. The library can be specified either
+  /// by the frontend or by a command-line option, and is then used by
+  /// addVectorizableFunctionsFromVecLib to fill in the tables of
+  /// vectorizable functions.
+ enum VectorLibrary {
+ NoLibrary, // Don't use any vector library.
+ Accelerate // Use Accelerate framework.
+ };
+
+ TargetLibraryInfoImpl();
+ explicit TargetLibraryInfoImpl(const Triple &T);
+
+ // Provide value semantics.
+ TargetLibraryInfoImpl(const TargetLibraryInfoImpl &TLI);
+ TargetLibraryInfoImpl(TargetLibraryInfoImpl &&TLI);
+ TargetLibraryInfoImpl &operator=(const TargetLibraryInfoImpl &TLI);
+ TargetLibraryInfoImpl &operator=(TargetLibraryInfoImpl &&TLI);
+
+ /// \brief Searches for a particular function name.
+ ///
+ /// If it is one of the known library functions, return true and set F to the
+ /// corresponding value.
+ bool getLibFunc(StringRef funcName, LibFunc::Func &F) const;
+
+ /// \brief Forces a function to be marked as unavailable.
+ void setUnavailable(LibFunc::Func F) {
+ setState(F, Unavailable);
+ }
+
+ /// \brief Forces a function to be marked as available.
+ void setAvailable(LibFunc::Func F) {
+ setState(F, StandardName);
+ }
+
+ /// \brief Forces a function to be marked as available and provide an
+ /// alternate name that must be used.
+ void setAvailableWithName(LibFunc::Func F, StringRef Name) {
+ if (StandardNames[F] != Name) {
+ setState(F, CustomName);
+ CustomNames[F] = Name;
+ assert(CustomNames.find(F) != CustomNames.end());
+ } else {
+ setState(F, StandardName);
+ }
+ }
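+  // A hedged usage sketch (the symbol name is hypothetical): a target on
+  // which memcpy must be emitted as "__my_memcpy" could configure
+  //   TLII.setAvailableWithName(LibFunc::memcpy, "__my_memcpy");
+  // after which getName(LibFunc::memcpy) on the wrapping TargetLibraryInfo
+  // returns the custom name.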
+
+ /// \brief Disables all builtins.
+ ///
+ /// This can be used for options like -fno-builtin.
+ void disableAllFunctions();
+
+ /// addVectorizableFunctions - Add a set of scalar -> vector mappings,
+ /// queryable via getVectorizedFunction and getScalarizedFunction.
+ void addVectorizableFunctions(ArrayRef<VecDesc> Fns);
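+  // For instance (the vector routine name is hypothetical, assuming some
+  // library provides a 4-wide variant of sinf):
+  //   VecDesc Descs[] = {{"sinf", "my_sinf_v4", 4}};
+  //   TLII.addVectorizableFunctions(Descs);
+  //   // isFunctionVectorizable("sinf", 4) now returns true.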
+
+ /// Calls addVectorizableFunctions with a known preset of functions for the
+ /// given vector library.
+ void addVectorizableFunctionsFromVecLib(enum VectorLibrary VecLib);
+
+ /// isFunctionVectorizable - Return true if the function F has a
+ /// vector equivalent with vectorization factor VF.
+ bool isFunctionVectorizable(StringRef F, unsigned VF) const {
+ return !getVectorizedFunction(F, VF).empty();
+ }
+
+ /// isFunctionVectorizable - Return true if the function F has a
+ /// vector equivalent with any vectorization factor.
+ bool isFunctionVectorizable(StringRef F) const;
+
+ /// getVectorizedFunction - Return the name of the equivalent of
+ /// F, vectorized with factor VF. If no such mapping exists,
+ /// return the empty string.
+ StringRef getVectorizedFunction(StringRef F, unsigned VF) const;
+
+ /// isFunctionScalarizable - Return true if the function F has a
+ /// scalar equivalent, and set VF to be the vectorization factor.
+ bool isFunctionScalarizable(StringRef F, unsigned &VF) const {
+ return !getScalarizedFunction(F, VF).empty();
+ }
+
+ /// getScalarizedFunction - Return the name of the equivalent of
+ /// F, scalarized. If no such mapping exists, return the empty string.
+ ///
+ /// Set VF to the vectorization factor.
+ StringRef getScalarizedFunction(StringRef F, unsigned &VF) const;
+};
+
+/// \brief Provides information about what library functions are available for
+/// the current target.
+///
+/// This both allows optimizations to handle them specially and frontends to
+/// disable such optimizations through -fno-builtin etc.
+class TargetLibraryInfo {
+ friend class TargetLibraryAnalysis;
+ friend class TargetLibraryInfoWrapperPass;
+
+ const TargetLibraryInfoImpl *Impl;
+
+public:
+ explicit TargetLibraryInfo(const TargetLibraryInfoImpl &Impl) : Impl(&Impl) {}
+
+ // Provide value semantics.
+ TargetLibraryInfo(const TargetLibraryInfo &TLI) : Impl(TLI.Impl) {}
+ TargetLibraryInfo(TargetLibraryInfo &&TLI) : Impl(TLI.Impl) {}
+ TargetLibraryInfo &operator=(const TargetLibraryInfo &TLI) {
+ Impl = TLI.Impl;
+ return *this;
+ }
+ TargetLibraryInfo &operator=(TargetLibraryInfo &&TLI) {
+ Impl = TLI.Impl;
+ return *this;
+ }
+
+ /// \brief Searches for a particular function name.
+ ///
+ /// If it is one of the known library functions, return true and set F to the
+ /// corresponding value.
+ bool getLibFunc(StringRef funcName, LibFunc::Func &F) const {
+ return Impl->getLibFunc(funcName, F);
+ }
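+  // A minimal lookup sketch (assumes a TargetLibraryInfo TLI and a
+  // Function *F are in scope):
+  //   LibFunc::Func LF;
+  //   if (TLI.getLibFunc(F->getName(), LF) && TLI.has(LF))
+  //     ; // LF names a recognized, available library function.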
+
+ /// \brief Tests whether a library function is available.
+ bool has(LibFunc::Func F) const {
+ return Impl->getState(F) != TargetLibraryInfoImpl::Unavailable;
+ }
+ bool isFunctionVectorizable(StringRef F, unsigned VF) const {
+ return Impl->isFunctionVectorizable(F, VF);
+ }
+ bool isFunctionVectorizable(StringRef F) const {
+ return Impl->isFunctionVectorizable(F);
+ }
+ StringRef getVectorizedFunction(StringRef F, unsigned VF) const {
+ return Impl->getVectorizedFunction(F, VF);
+ }
+
+ /// \brief Tests if the function is both available and a candidate for
+ /// optimized code generation.
+ bool hasOptimizedCodeGen(LibFunc::Func F) const {
+ if (Impl->getState(F) == TargetLibraryInfoImpl::Unavailable)
+ return false;
+ switch (F) {
+ default: break;
+ case LibFunc::copysign: case LibFunc::copysignf: case LibFunc::copysignl:
+ case LibFunc::fabs: case LibFunc::fabsf: case LibFunc::fabsl:
+ case LibFunc::sin: case LibFunc::sinf: case LibFunc::sinl:
+ case LibFunc::cos: case LibFunc::cosf: case LibFunc::cosl:
+ case LibFunc::sqrt: case LibFunc::sqrtf: case LibFunc::sqrtl:
+ case LibFunc::sqrt_finite: case LibFunc::sqrtf_finite:
+ case LibFunc::sqrtl_finite:
+ case LibFunc::fmax: case LibFunc::fmaxf: case LibFunc::fmaxl:
+ case LibFunc::fmin: case LibFunc::fminf: case LibFunc::fminl:
+ case LibFunc::floor: case LibFunc::floorf: case LibFunc::floorl:
+ case LibFunc::nearbyint: case LibFunc::nearbyintf: case LibFunc::nearbyintl:
+ case LibFunc::ceil: case LibFunc::ceilf: case LibFunc::ceill:
+ case LibFunc::rint: case LibFunc::rintf: case LibFunc::rintl:
+ case LibFunc::round: case LibFunc::roundf: case LibFunc::roundl:
+ case LibFunc::trunc: case LibFunc::truncf: case LibFunc::truncl:
+ case LibFunc::log2: case LibFunc::log2f: case LibFunc::log2l:
+ case LibFunc::exp2: case LibFunc::exp2f: case LibFunc::exp2l:
+ case LibFunc::memcmp: case LibFunc::strcmp: case LibFunc::strcpy:
+ case LibFunc::stpcpy: case LibFunc::strlen: case LibFunc::strnlen:
+ case LibFunc::memchr:
+ return true;
+ }
+ return false;
+ }
+
+ StringRef getName(LibFunc::Func F) const {
+ auto State = Impl->getState(F);
+ if (State == TargetLibraryInfoImpl::Unavailable)
+ return StringRef();
+ if (State == TargetLibraryInfoImpl::StandardName)
+ return Impl->StandardNames[F];
+ assert(State == TargetLibraryInfoImpl::CustomName);
+ return Impl->CustomNames.find(F)->second;
+ }
+
+ /// \brief Handle invalidation from the pass manager.
+ ///
+ /// If we try to invalidate this info, just return false. It cannot become
+ /// invalid even if the module changes.
+ bool invalidate(Module &, const PreservedAnalyses &) { return false; }
+};
+
+/// \brief Analysis pass providing the \c TargetLibraryInfo.
+///
+/// Note that this pass's result cannot be invalidated, it is immutable for the
+/// life of the module.
+class TargetLibraryAnalysis {
+public:
+ typedef TargetLibraryInfo Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Default construct the library analysis.
+ ///
+ /// This will use the module's triple to construct the library info for that
+ /// module.
+ TargetLibraryAnalysis() {}
+
+ /// \brief Construct a library analysis with preset info.
+ ///
+ /// This will directly copy the preset info into the result without
+ /// consulting the module's triple.
+ TargetLibraryAnalysis(TargetLibraryInfoImpl PresetInfoImpl)
+ : PresetInfoImpl(std::move(PresetInfoImpl)) {}
+
+ // Move semantics. We spell out the constructors for MSVC.
+  TargetLibraryAnalysis(TargetLibraryAnalysis &&Arg)
+      : PresetInfoImpl(std::move(Arg.PresetInfoImpl)),
+        Impls(std::move(Arg.Impls)) {}
+ TargetLibraryAnalysis &operator=(TargetLibraryAnalysis &&RHS) {
+ PresetInfoImpl = std::move(RHS.PresetInfoImpl);
+ Impls = std::move(RHS.Impls);
+ return *this;
+ }
+
+ TargetLibraryInfo run(Module &M);
+ TargetLibraryInfo run(Function &F);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "TargetLibraryAnalysis"; }
+
+private:
+ static char PassID;
+
+ Optional<TargetLibraryInfoImpl> PresetInfoImpl;
+
+ StringMap<std::unique_ptr<TargetLibraryInfoImpl>> Impls;
+
+ TargetLibraryInfoImpl &lookupInfoImpl(Triple T);
+};
+
+class TargetLibraryInfoWrapperPass : public ImmutablePass {
+ TargetLibraryInfoImpl TLIImpl;
+ TargetLibraryInfo TLI;
+
+ virtual void anchor();
+
+public:
+ static char ID;
+ TargetLibraryInfoWrapperPass();
+ explicit TargetLibraryInfoWrapperPass(const Triple &T);
+ explicit TargetLibraryInfoWrapperPass(const TargetLibraryInfoImpl &TLI);
+
+ TargetLibraryInfo &getTLI() { return TLI; }
+ const TargetLibraryInfo &getTLI() const { return TLI; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h b/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h
new file mode 100644
index 0000000..3913cc3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -0,0 +1,968 @@
+//===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This pass exposes codegen information to IR-level passes. Every
+/// transformation that uses codegen information is broken into three parts:
+/// 1. The IR-level analysis pass.
+/// 2. The IR-level transformation interface which provides the needed
+/// information.
+/// 3. Codegen-level implementation which uses target-specific hooks.
+///
+/// This file defines #2, which is the interface that IR-level transformations
+/// use for querying the codegen.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
+#define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/DataTypes.h"
+#include <functional>
+
+namespace llvm {
+
+class DataLayout;
+class Function;
+class GlobalValue;
+class Loop;
+class PreservedAnalyses;
+class Type;
+class User;
+class Value;
+
+/// \brief Information about a load/store intrinsic defined by the target.
+struct MemIntrinsicInfo {
+ MemIntrinsicInfo()
+ : ReadMem(false), WriteMem(false), IsSimple(false), MatchingId(0),
+ NumMemRefs(0), PtrVal(nullptr) {}
+ bool ReadMem;
+ bool WriteMem;
+ /// True only if this memory operation is non-volatile, non-atomic, and
+ /// unordered. (See LoadInst/StoreInst for details on each)
+ bool IsSimple;
+ // Same Id is set by the target for corresponding load/store intrinsics.
+ unsigned short MatchingId;
+ int NumMemRefs;
+ Value *PtrVal;
+};
+
+/// \brief This pass provides access to the codegen interfaces that are needed
+/// for IR-level transformations.
+class TargetTransformInfo {
+public:
+ /// \brief Construct a TTI object using a type implementing the \c Concept
+ /// API below.
+ ///
+ /// This is used by targets to construct a TTI wrapping their target-specific
+  /// implementation that encodes appropriate costs for their target.
+ template <typename T> TargetTransformInfo(T Impl);
+
+ /// \brief Construct a baseline TTI object using a minimal implementation of
+ /// the \c Concept API below.
+ ///
+ /// The TTI implementation will reflect the information in the DataLayout
+ /// provided if non-null.
+ explicit TargetTransformInfo(const DataLayout &DL);
+
+ // Provide move semantics.
+ TargetTransformInfo(TargetTransformInfo &&Arg);
+ TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
+
+ // We need to define the destructor out-of-line to define our sub-classes
+ // out-of-line.
+ ~TargetTransformInfo();
+
+ /// \brief Handle the invalidation of this information.
+ ///
+ /// When used as a result of \c TargetIRAnalysis this method will be called
+ /// when the function this was computed for changes. When it returns false,
+ /// the information is preserved across those changes.
+ bool invalidate(Function &, const PreservedAnalyses &) {
+ // FIXME: We should probably in some way ensure that the subtarget
+ // information for a function hasn't changed.
+ return false;
+ }
+
+ /// \name Generic Target Information
+ /// @{
+
+ /// \brief Underlying constants for 'cost' values in this interface.
+ ///
+ /// Many APIs in this interface return a cost. This enum defines the
+ /// fundamental values that should be used to interpret (and produce) those
+ /// costs. The costs are returned as an int rather than a member of this
+ /// enumeration because it is expected that the cost of one IR instruction
+ /// may have a multiplicative factor to it or otherwise won't fit directly
+ /// into the enum. Moreover, it is common to sum or average costs which works
+ /// better as simple integral values. Thus this enum only provides constants.
+ /// Also note that the returned costs are signed integers to make it natural
+ /// to add, subtract, and test with zero (a common boundary condition). It is
+ /// not expected that 2^32 is a realistic cost to be modeling at any point.
+ ///
+ /// Note that these costs should usually reflect the intersection of code-size
+ /// cost and execution cost. A free instruction is typically one that folds
+ /// into another instruction. For example, reg-to-reg moves can often be
+ /// skipped by renaming the registers in the CPU, but they still are encoded
+ /// and thus wouldn't be considered 'free' here.
+ enum TargetCostConstants {
+ TCC_Free = 0, ///< Expected to fold away in lowering.
+ TCC_Basic = 1, ///< The cost of a typical 'add' instruction.
+ TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
+ };
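+  // As an illustration, a sequence of two adds and one divide would be
+  // modeled as 2 * TCC_Basic + TCC_Expensive = 6 under these defaults,
+  // assuming the target does not override the per-operation costs.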
+
+ /// \brief Estimate the cost of a specific operation when lowered.
+ ///
+ /// Note that this is designed to work on an arbitrary synthetic opcode, and
+ /// thus work for hypothetical queries before an instruction has even been
+ /// formed. However, this does *not* work for GEPs, and must not be called
+ /// for a GEP instruction. Instead, use the dedicated getGEPCost interface as
+  /// analyzing a GEP's cost requires more information.
+ ///
+ /// Typically only the result type is required, and the operand type can be
+ /// omitted. However, if the opcode is one of the cast instructions, the
+ /// operand type is required.
+ ///
+ /// The returned cost is defined in terms of \c TargetCostConstants, see its
+ /// comments for a detailed explanation of the cost values.
+ int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy = nullptr) const;
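+  // Hypothetical query, assuming a TargetTransformInfo &TTI and an i32 type
+  // Int32Ty are in scope:
+  //   int Cost = TTI.getOperationCost(Instruction::Add, Int32Ty);
+  //   // Typically TCC_Basic for a legal integer add.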
+
+ /// \brief Estimate the cost of a GEP operation when lowered.
+ ///
+ /// The contract for this function is the same as \c getOperationCost except
+ /// that it supports an interface that provides extra information specific to
+ /// the GEP operation.
+ int getGEPCost(Type *PointeeType, const Value *Ptr,
+ ArrayRef<const Value *> Operands) const;
+
+ /// \brief Estimate the cost of a function call when lowered.
+ ///
+ /// The contract for this is the same as \c getOperationCost except that it
+ /// supports an interface that provides extra information specific to call
+ /// instructions.
+ ///
+ /// This is the most basic query for estimating call cost: it only knows the
+ /// function type and (potentially) the number of arguments at the call site.
+ /// The latter is only interesting for varargs function types.
+ int getCallCost(FunctionType *FTy, int NumArgs = -1) const;
+
+ /// \brief Estimate the cost of calling a specific function when lowered.
+ ///
+ /// This overload adds the ability to reason about the particular function
+ /// being called in the event it is a library call with special lowering.
+ int getCallCost(const Function *F, int NumArgs = -1) const;
+
+ /// \brief Estimate the cost of calling a specific function when lowered.
+ ///
+ /// This overload allows specifying a set of candidate argument values.
+ int getCallCost(const Function *F, ArrayRef<const Value *> Arguments) const;
+
+ /// \brief Estimate the cost of an intrinsic when lowered.
+ ///
+ /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
+ int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> ParamTys) const;
+
+ /// \brief Estimate the cost of an intrinsic when lowered.
+ ///
+ /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
+ int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) const;
+
+ /// \brief Estimate the cost of a given IR user when lowered.
+ ///
+ /// This can estimate the cost of either a ConstantExpr or Instruction when
+ /// lowered. It has two primary advantages over the \c getOperationCost and
+ /// \c getGEPCost above, and one significant disadvantage: it can only be
+ /// used when the IR construct has already been formed.
+ ///
+ /// The advantages are that it can inspect the SSA use graph to reason more
+ /// accurately about the cost. For example, all-constant-GEPs can often be
+ /// folded into a load or other instruction, but if they are used in some
+ /// other context they may not be folded. This routine can distinguish such
+ /// cases.
+ ///
+ /// The returned cost is defined in terms of \c TargetCostConstants, see its
+ /// comments for a detailed explanation of the cost values.
+ int getUserCost(const User *U) const;
+
+ /// \brief Return true if branch divergence exists.
+ ///
+ /// Branch divergence has a significantly negative impact on GPU performance
+ /// when threads in the same wavefront take different paths due to conditional
+ /// branches.
+ bool hasBranchDivergence() const;
+
+ /// \brief Returns whether V is a source of divergence.
+ ///
+ /// This function provides the target-dependent information for
+ /// the target-independent DivergenceAnalysis. DivergenceAnalysis first
+ /// builds the dependency graph, and then runs the reachability algorithm
+ /// starting with the sources of divergence.
+ bool isSourceOfDivergence(const Value *V) const;
+
+ /// \brief Test whether calls to a function lower to actual program function
+ /// calls.
+ ///
+ /// The idea is to test whether the program is likely to require a 'call'
+ /// instruction or equivalent in order to call the given function.
+ ///
+  /// FIXME: It's not clear that this is a good or useful query API. Clients
+ /// should probably move to simpler cost metrics using the above.
+ /// Alternatively, we could split the cost interface into distinct code-size
+ /// and execution-speed costs. This would allow modelling the core of this
+ /// query more accurately as a call is a single small instruction, but
+ /// incurs significant execution cost.
+ bool isLoweredToCall(const Function *F) const;
+
+ /// Parameters that control the generic loop unrolling transformation.
+ struct UnrollingPreferences {
+ /// The cost threshold for the unrolled loop. Should be relative to the
+ /// getUserCost values returned by this API, and the expectation is that
+ /// the unrolled loop's instructions when run through that interface should
+ /// not exceed this cost. However, this is only an estimate. Also, specific
+ /// loops may be unrolled even with a cost above this threshold if deemed
+ /// profitable. Set this to UINT_MAX to disable the loop body cost
+ /// restriction.
+ unsigned Threshold;
+ /// If complete unrolling will reduce the cost of the loop below its
+ /// expected dynamic cost while rolled by this percentage, apply a discount
+ /// (below) to its unrolled cost.
+ unsigned PercentDynamicCostSavedThreshold;
+ /// The discount applied to the unrolled cost when the *dynamic* cost
+ /// savings of unrolling exceed the \c PercentDynamicCostSavedThreshold.
+ unsigned DynamicCostSavingsDiscount;
+ /// The cost threshold for the unrolled loop when optimizing for size (set
+ /// to UINT_MAX to disable).
+ unsigned OptSizeThreshold;
+ /// The cost threshold for the unrolled loop, like Threshold, but used
+ /// for partial/runtime unrolling (set to UINT_MAX to disable).
+ unsigned PartialThreshold;
+ /// The cost threshold for the unrolled loop when optimizing for size, like
+ /// OptSizeThreshold, but used for partial/runtime unrolling (set to
+ /// UINT_MAX to disable).
+ unsigned PartialOptSizeThreshold;
+ /// A forced unrolling factor (the number of concatenated bodies of the
+ /// original loop in the unrolled loop body). When set to 0, the unrolling
+ /// transformation will select an unrolling factor based on the current cost
+ /// threshold and other factors.
+ unsigned Count;
+    /// Set the maximum unrolling factor. The unrolling factor may be selected
+    /// using the appropriate cost threshold, but may not exceed this number
+    /// (set to UINT_MAX to disable). This does not apply in cases where the
+    /// loop is being fully unrolled.
+ unsigned MaxCount;
+ /// Allow partial unrolling (unrolling of loops to expand the size of the
+ /// loop body, not only to eliminate small constant-trip-count loops).
+ bool Partial;
+ /// Allow runtime unrolling (unrolling of loops to expand the size of the
+ /// loop body even when the number of loop iterations is not known at
+ /// compile time).
+ bool Runtime;
+ /// Allow emitting expensive instructions (such as divisions) when computing
+ /// the trip count of a loop for runtime unrolling.
+ bool AllowExpensiveTripCount;
+ };
+
+ /// \brief Get target-customized preferences for the generic loop unrolling
+ /// transformation. The caller will initialize UP with the current
+ /// target-independent defaults.
+ void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
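+  // A sketch of how a target hook might adjust the defaults (MyTTIImpl and
+  // the chosen values are purely illustrative):
+  //   void MyTTIImpl::getUnrollingPreferences(Loop *L,
+  //                                           UnrollingPreferences &UP) {
+  //     UP.Partial = true; // Allow partial unrolling on this target.
+  //     UP.MaxCount = 4;   // But never unroll by more than a factor of 4.
+  //   }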
+
+ /// @}
+
+ /// \name Scalar Target Information
+ /// @{
+
+ /// \brief Flags indicating the kind of support for population count.
+ ///
+  /// Compared to the SW implementation, HW support is expected to
+  /// significantly boost performance when the population is dense, and it
+  /// may or may not degrade performance if the population is sparse. HW
+  /// support is considered "Fast" if it outperforms, or is on a par with,
+  /// the SW implementation when the population is sparse; otherwise, it is
+  /// considered "Slow".
+ enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
+
+  /// \brief Return true if the specified immediate is a legal add immediate;
+  /// that is, the target has add instructions which can add a register with
+  /// the immediate without having to materialize the immediate into a
+  /// register.
+ bool isLegalAddImmediate(int64_t Imm) const;
+
+  /// \brief Return true if the specified immediate is a legal icmp immediate;
+  /// that is, the target has icmp instructions which can compare a register
+  /// against the immediate without having to materialize the immediate into
+  /// a register.
+ bool isLegalICmpImmediate(int64_t Imm) const;
+
+ /// \brief Return true if the addressing mode represented by AM is legal for
+ /// this target, for a load/store of the specified type.
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type.
+ /// TODO: Handle pre/postinc as well.
+ bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace = 0) const;
+
+  /// \brief Return true if the target supports masked load/store.
+  /// AVX2 and AVX-512 targets allow masks for consecutive load and store for
+  /// 32- and 64-bit elements.
+ bool isLegalMaskedStore(Type *DataType) const;
+ bool isLegalMaskedLoad(Type *DataType) const;
+
+  /// \brief Return true if the target supports masked gather/scatter.
+  /// AVX-512 fully supports gather and scatter for vectors with 32- and
+  /// 64-bit scalar types.
+ bool isLegalMaskedScatter(Type *DataType) const;
+ bool isLegalMaskedGather(Type *DataType) const;
+
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ /// TODO: Handle pre/postinc as well.
+ int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace = 0) const;
+
+ /// \brief Return true if it's free to truncate a value of type Ty1 to type
+  /// Ty2. For example, on x86 it's free to truncate an i32 value in register
+  /// EAX to i16 by referencing its sub-register AX.
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const;
+
+  /// \brief Return true if it is profitable to hoist an instruction in the
+  /// then/else blocks to before the if.
+ bool isProfitableToHoist(Instruction *I) const;
+
+ /// \brief Return true if this type is legal.
+ bool isTypeLegal(Type *Ty) const;
+
+ /// \brief Returns the target's jmp_buf alignment in bytes.
+ unsigned getJumpBufAlignment() const;
+
+ /// \brief Returns the target's jmp_buf size in bytes.
+ unsigned getJumpBufSize() const;
+
+ /// \brief Return true if switches should be turned into lookup tables for the
+ /// target.
+ bool shouldBuildLookupTables() const;
+
+ /// \brief Don't restrict interleaved unrolling to small loops.
+ bool enableAggressiveInterleaving(bool LoopHasReductions) const;
+
+ /// \brief Enable matching of interleaved access groups.
+ bool enableInterleavedAccessVectorization() const;
+
+ /// \brief Return hardware support for population count.
+ PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
+
+ /// \brief Return true if the hardware has a fast square-root instruction.
+ bool haveFastSqrt(Type *Ty) const;
+
+ /// \brief Return the expected cost of supporting the floating point operation
+ /// of the specified type.
+ int getFPOpCost(Type *Ty) const;
+
+  /// \brief Return the expected cost of materializing the given integer
+ /// immediate of the specified type.
+ int getIntImmCost(const APInt &Imm, Type *Ty) const;
+
+ /// \brief Return the expected cost of materialization for the given integer
+ /// immediate of the specified type for a given instruction. The cost can be
+ /// zero if the immediate can be folded into the specified instruction.
+ int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+ Type *Ty) const;
+ int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) const;
+ /// @}
+
+ /// \name Vector Target Information
+ /// @{
+
+ /// \brief The various kinds of shuffle patterns for vector queries.
+ enum ShuffleKind {
+ SK_Broadcast, ///< Broadcast element 0 to all other elements.
+ SK_Reverse, ///< Reverse the order of the vector.
+ SK_Alternate, ///< Choose alternate elements from vector.
+ SK_InsertSubvector, ///< InsertSubvector. Index indicates start offset.
+    SK_ExtractSubvector ///< ExtractSubvector. Index indicates start offset.
+ };
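+  // For a <4 x i32> vector, SK_Broadcast corresponds to the IR shuffle mask
+  // <0, 0, 0, 0> and SK_Reverse to <3, 2, 1, 0>.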
+
+ /// \brief Additional information about an operand's possible values.
+ enum OperandValueKind {
+ OK_AnyValue, // Operand can have any value.
+ OK_UniformValue, // Operand is uniform (splat of a value).
+ OK_UniformConstantValue, // Operand is uniform constant.
+ OK_NonUniformConstantValue // Operand is a non uniform constant value.
+ };
+
+ /// \brief Additional properties of an operand's values.
+ enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
+
+ /// \return The number of scalar or vector registers that the target has.
+  /// If 'Vector' is true, it returns the number of vector registers. If it is
+ /// set to false, it returns the number of scalar registers.
+ unsigned getNumberOfRegisters(bool Vector) const;
+
+ /// \return The width of the largest scalar or vector register type.
+ unsigned getRegisterBitWidth(bool Vector) const;
+
+ /// \return The maximum interleave factor that any transform should try to
+ /// perform for this target. This number depends on the level of parallelism
+ /// and the number of execution units in the CPU.
+ unsigned getMaxInterleaveFactor(unsigned VF) const;
+
+ /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
+ int getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
+ OperandValueKind Opd2Info = OK_AnyValue,
+ OperandValueProperties Opd1PropInfo = OP_None,
+ OperandValueProperties Opd2PropInfo = OP_None) const;
+
+ /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
+ /// The index and subtype parameters are used by the subvector insertion and
+ /// extraction shuffle kinds.
+ int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
+ Type *SubTp = nullptr) const;
+
+ /// \return The expected cost of cast instructions, such as bitcast, trunc,
+ /// zext, etc.
+ int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;
+
+ /// \return The expected cost of control-flow related instructions such as
+ /// Phi, Ret, Br.
+ int getCFInstrCost(unsigned Opcode) const;
+
+ /// \returns The expected cost of compare and select instructions.
+ int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy = nullptr) const;
+
+ /// \return The expected cost of vector Insert and Extract.
+ /// Use -1 to indicate that there is no information on the index value.
+ int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index = -1) const;
+
+ /// \return The cost of Load and Store instructions.
+ int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const;
+
+ /// \return The cost of masked Load and Store instructions.
+ int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) const;
+
+  /// \return The cost of a Gather or Scatter operation.
+ /// \p Opcode - is a type of memory access Load or Store
+ /// \p DataTy - a vector type of the data to be loaded or stored
+ /// \p Ptr - pointer [or vector of pointers] - address[es] in memory
+ /// \p VariableMask - true when the memory access is predicated with a mask
+ /// that is not a compile-time constant
+ /// \p Alignment - alignment of single element
+ int getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+ bool VariableMask, unsigned Alignment) const;
+
+ /// \return The cost of the interleaved memory operation.
+ /// \p Opcode is the memory operation code
+ /// \p VecTy is the vector type of the interleaved access.
+ /// \p Factor is the interleave factor
+ /// \p Indices is the indices for interleaved load members (as interleaved
+ /// load allows gaps)
+ /// \p Alignment is the alignment of the memory operation
+ /// \p AddressSpace is address space of the pointer.
+ int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
+ ArrayRef<unsigned> Indices, unsigned Alignment,
+ unsigned AddressSpace) const;
+
+ /// \brief Calculate the cost of performing a vector reduction.
+ ///
+ /// This is the cost of reducing the vector value of type \p Ty to a scalar
+ /// value using the operation denoted by \p Opcode. The form of the reduction
+ /// can either be a pairwise reduction or a reduction that splits the vector
+ /// at every reduction level.
+ ///
+ /// Pairwise:
+ /// (v0, v1, v2, v3)
+  ///   ((v0+v1), (v2+v3), undef, undef)
+ /// Split:
+ /// (v0, v1, v2, v3)
+ /// ((v0+v2), (v1+v3), undef, undef)
+ int getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm) const;
+
+ /// \returns The cost of Intrinsic instructions. Types analysis only.
+ int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Type *> Tys) const;
+
+ /// \returns The cost of Intrinsic instructions. Analyses the real arguments.
+ int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Value *> Args) const;
+
+ /// \returns The cost of Call instructions.
+ int getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) const;
+
+ /// \returns The number of pieces into which the provided type must be
+ /// split during legalization. Zero is returned when the answer is unknown.
+ unsigned getNumberOfParts(Type *Tp) const;
+
+ /// \returns The cost of the address computation. For most targets this can be
+ /// merged into the instruction indexing mode. Some targets might want to
+ /// distinguish between address computation for memory operations on vector
+ /// types and scalar types. Such targets should override this function.
+ /// The 'IsComplex' parameter is a hint that the address computation is likely
+ /// to involve multiple instructions and as such unlikely to be merged into
+ /// the address indexing mode.
+ int getAddressComputationCost(Type *Ty, bool IsComplex = false) const;
+
+ /// \returns The cost, if any, of keeping values of the given types alive
+ /// over a callsite.
+ ///
+ /// Some types may require the use of register classes that do not have
+ /// any callee-saved registers, so would require a spill and fill.
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
+
+ /// \returns True if the intrinsic is a supported memory intrinsic. Info
+  /// will contain additional information: whether the intrinsic may read
+  /// from or write to memory, its volatility, and the pointer. Info is
+  /// undefined if false is returned.
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
+
+ /// \returns A value which is the result of the given memory intrinsic. New
+ /// instructions may be created to extract the result from the given intrinsic
+ /// memory operation. Returns nullptr if the target cannot create a result
+ /// from the given intrinsic.
+ Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType) const;
+
+ /// \returns True if the two functions have compatible attributes for inlining
+ /// purposes.
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const;
+
+ /// @}
+
+private:
+ /// \brief The abstract base class used to type erase specific TTI
+ /// implementations.
+ class Concept;
+
+ /// \brief The template model for the base class which wraps a concrete
+ /// implementation in a type erased interface.
+ template <typename T> class Model;
+
+ std::unique_ptr<Concept> TTIImpl;
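+  // The templated constructor above wraps any T implementing the Concept
+  // API in a Model<T>, which forwards each virtual call to T. This provides
+  // value-semantic type erasure without requiring target implementations to
+  // share a base class.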
+};
+
+class TargetTransformInfo::Concept {
+public:
+ virtual ~Concept() = 0;
+ virtual const DataLayout &getDataLayout() const = 0;
+ virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
+ virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
+ ArrayRef<const Value *> Operands) = 0;
+ virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
+ virtual int getCallCost(const Function *F, int NumArgs) = 0;
+ virtual int getCallCost(const Function *F,
+ ArrayRef<const Value *> Arguments) = 0;
+ virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> ParamTys) = 0;
+ virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) = 0;
+ virtual int getUserCost(const User *U) = 0;
+ virtual bool hasBranchDivergence() = 0;
+ virtual bool isSourceOfDivergence(const Value *V) = 0;
+ virtual bool isLoweredToCall(const Function *F) = 0;
+ virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
+ virtual bool isLegalAddImmediate(int64_t Imm) = 0;
+ virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
+ virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
+ int64_t BaseOffset, bool HasBaseReg,
+ int64_t Scale,
+ unsigned AddrSpace) = 0;
+ virtual bool isLegalMaskedStore(Type *DataType) = 0;
+ virtual bool isLegalMaskedLoad(Type *DataType) = 0;
+ virtual bool isLegalMaskedScatter(Type *DataType) = 0;
+ virtual bool isLegalMaskedGather(Type *DataType) = 0;
+ virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
+ int64_t BaseOffset, bool HasBaseReg,
+ int64_t Scale, unsigned AddrSpace) = 0;
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
+ virtual bool isProfitableToHoist(Instruction *I) = 0;
+ virtual bool isTypeLegal(Type *Ty) = 0;
+ virtual unsigned getJumpBufAlignment() = 0;
+ virtual unsigned getJumpBufSize() = 0;
+ virtual bool shouldBuildLookupTables() = 0;
+ virtual bool enableAggressiveInterleaving(bool LoopHasReductions) = 0;
+ virtual bool enableInterleavedAccessVectorization() = 0;
+ virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
+ virtual bool haveFastSqrt(Type *Ty) = 0;
+ virtual int getFPOpCost(Type *Ty) = 0;
+ virtual int getIntImmCost(const APInt &Imm, Type *Ty) = 0;
+ virtual int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+ Type *Ty) = 0;
+ virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) = 0;
+ virtual unsigned getNumberOfRegisters(bool Vector) = 0;
+ virtual unsigned getRegisterBitWidth(bool Vector) = 0;
+ virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
+ virtual unsigned
+ getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
+ OperandValueKind Opd2Info,
+ OperandValueProperties Opd1PropInfo,
+ OperandValueProperties Opd2PropInfo) = 0;
+ virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) = 0;
+ virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
+ virtual int getCFInstrCost(unsigned Opcode) = 0;
+ virtual int getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
+ Type *CondTy) = 0;
+ virtual int getVectorInstrCost(unsigned Opcode, Type *Val,
+ unsigned Index) = 0;
+ virtual int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) = 0;
+ virtual int getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
+ unsigned Alignment,
+ unsigned AddressSpace) = 0;
+ virtual int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+ Value *Ptr, bool VariableMask,
+ unsigned Alignment) = 0;
+ virtual int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+ unsigned Factor,
+ ArrayRef<unsigned> Indices,
+ unsigned Alignment,
+ unsigned AddressSpace) = 0;
+ virtual int getReductionCost(unsigned Opcode, Type *Ty,
+ bool IsPairwiseForm) = 0;
+ virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Type *> Tys) = 0;
+ virtual int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Value *> Args) = 0;
+ virtual int getCallInstrCost(Function *F, Type *RetTy,
+ ArrayRef<Type *> Tys) = 0;
+ virtual unsigned getNumberOfParts(Type *Tp) = 0;
+ virtual int getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
+ virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
+ virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+ MemIntrinsicInfo &Info) = 0;
+ virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType) = 0;
+ virtual bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const = 0;
+};
+
+template <typename T>
+class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
+ T Impl;
+
+public:
+ Model(T Impl) : Impl(std::move(Impl)) {}
+ ~Model() override {}
+
+ const DataLayout &getDataLayout() const override {
+ return Impl.getDataLayout();
+ }
+
+ int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
+ return Impl.getOperationCost(Opcode, Ty, OpTy);
+ }
+ int getGEPCost(Type *PointeeType, const Value *Ptr,
+ ArrayRef<const Value *> Operands) override {
+ return Impl.getGEPCost(PointeeType, Ptr, Operands);
+ }
+ int getCallCost(FunctionType *FTy, int NumArgs) override {
+ return Impl.getCallCost(FTy, NumArgs);
+ }
+ int getCallCost(const Function *F, int NumArgs) override {
+ return Impl.getCallCost(F, NumArgs);
+ }
+ int getCallCost(const Function *F,
+ ArrayRef<const Value *> Arguments) override {
+ return Impl.getCallCost(F, Arguments);
+ }
+ int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> ParamTys) override {
+ return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
+ }
+ int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) override {
+ return Impl.getIntrinsicCost(IID, RetTy, Arguments);
+ }
+ int getUserCost(const User *U) override { return Impl.getUserCost(U); }
+ bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
+ bool isSourceOfDivergence(const Value *V) override {
+ return Impl.isSourceOfDivergence(V);
+ }
+ bool isLoweredToCall(const Function *F) override {
+ return Impl.isLoweredToCall(F);
+ }
+ void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) override {
+ return Impl.getUnrollingPreferences(L, UP);
+ }
+ bool isLegalAddImmediate(int64_t Imm) override {
+ return Impl.isLegalAddImmediate(Imm);
+ }
+ bool isLegalICmpImmediate(int64_t Imm) override {
+ return Impl.isLegalICmpImmediate(Imm);
+ }
+ bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) override {
+ return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
+ Scale, AddrSpace);
+ }
+ bool isLegalMaskedStore(Type *DataType) override {
+ return Impl.isLegalMaskedStore(DataType);
+ }
+ bool isLegalMaskedLoad(Type *DataType) override {
+ return Impl.isLegalMaskedLoad(DataType);
+ }
+ bool isLegalMaskedScatter(Type *DataType) override {
+ return Impl.isLegalMaskedScatter(DataType);
+ }
+ bool isLegalMaskedGather(Type *DataType) override {
+ return Impl.isLegalMaskedGather(DataType);
+ }
+ int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) override {
+ return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg,
+ Scale, AddrSpace);
+ }
+ bool isTruncateFree(Type *Ty1, Type *Ty2) override {
+ return Impl.isTruncateFree(Ty1, Ty2);
+ }
+ bool isProfitableToHoist(Instruction *I) override {
+ return Impl.isProfitableToHoist(I);
+ }
+ bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
+ unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
+ unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
+ bool shouldBuildLookupTables() override {
+ return Impl.shouldBuildLookupTables();
+ }
+ bool enableAggressiveInterleaving(bool LoopHasReductions) override {
+ return Impl.enableAggressiveInterleaving(LoopHasReductions);
+ }
+ bool enableInterleavedAccessVectorization() override {
+ return Impl.enableInterleavedAccessVectorization();
+ }
+ PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
+ return Impl.getPopcntSupport(IntTyWidthInBit);
+ }
+ bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
+
+ int getFPOpCost(Type *Ty) override { return Impl.getFPOpCost(Ty); }
+
+ int getIntImmCost(const APInt &Imm, Type *Ty) override {
+ return Impl.getIntImmCost(Imm, Ty);
+ }
+ int getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
+ Type *Ty) override {
+ return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
+ }
+ int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) override {
+ return Impl.getIntImmCost(IID, Idx, Imm, Ty);
+ }
+ unsigned getNumberOfRegisters(bool Vector) override {
+ return Impl.getNumberOfRegisters(Vector);
+ }
+ unsigned getRegisterBitWidth(bool Vector) override {
+ return Impl.getRegisterBitWidth(Vector);
+ }
+ unsigned getMaxInterleaveFactor(unsigned VF) override {
+ return Impl.getMaxInterleaveFactor(VF);
+ }
+ unsigned
+ getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
+ OperandValueKind Opd2Info,
+ OperandValueProperties Opd1PropInfo,
+ OperandValueProperties Opd2PropInfo) override {
+ return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
+ Opd1PropInfo, Opd2PropInfo);
+ }
+ int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) override {
+ return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
+ }
+ int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
+ return Impl.getCastInstrCost(Opcode, Dst, Src);
+ }
+ int getCFInstrCost(unsigned Opcode) override {
+ return Impl.getCFInstrCost(Opcode);
+ }
+ int getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) override {
+ return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
+ }
+ int getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) override {
+ return Impl.getVectorInstrCost(Opcode, Val, Index);
+ }
+ int getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) override {
+ return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
+ }
+ int getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) override {
+ return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
+ }
+ int getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
+ Value *Ptr, bool VariableMask,
+ unsigned Alignment) override {
+ return Impl.getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
+ Alignment);
+ }
+ int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
+ ArrayRef<unsigned> Indices, unsigned Alignment,
+ unsigned AddressSpace) override {
+ return Impl.getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
+ Alignment, AddressSpace);
+ }
+ int getReductionCost(unsigned Opcode, Type *Ty,
+ bool IsPairwiseForm) override {
+ return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
+ }
+ int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Type *> Tys) override {
+ return Impl.getIntrinsicInstrCost(ID, RetTy, Tys);
+ }
+ int getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Value *> Args) override {
+ return Impl.getIntrinsicInstrCost(ID, RetTy, Args);
+ }
+ int getCallInstrCost(Function *F, Type *RetTy,
+ ArrayRef<Type *> Tys) override {
+ return Impl.getCallInstrCost(F, RetTy, Tys);
+ }
+ unsigned getNumberOfParts(Type *Tp) override {
+ return Impl.getNumberOfParts(Tp);
+ }
+ int getAddressComputationCost(Type *Ty, bool IsComplex) override {
+ return Impl.getAddressComputationCost(Ty, IsComplex);
+ }
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
+ return Impl.getCostOfKeepingLiveOverCall(Tys);
+ }
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst,
+ MemIntrinsicInfo &Info) override {
+ return Impl.getTgtMemIntrinsic(Inst, Info);
+ }
+ Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType) override {
+ return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
+ }
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const override {
+ return Impl.areInlineCompatible(Caller, Callee);
+ }
+};
+
+template <typename T>
+TargetTransformInfo::TargetTransformInfo(T Impl)
+ : TTIImpl(new Model<T>(Impl)) {}
+
+/// \brief Analysis pass providing the \c TargetTransformInfo.
+///
+/// The core idea of the TargetIRAnalysis is to expose an interface through
+/// which LLVM targets can analyze and provide information about the middle
+/// end's target-independent IR. This supports use cases such as target-aware
+/// cost modeling of IR constructs.
+///
+/// This is a function analysis because much of the cost modeling for targets
+/// is done in a subtarget specific way and LLVM supports compiling different
+/// functions targeting different subtargets in order to support runtime
+/// dispatch according to the observed subtarget.
+class TargetIRAnalysis {
+public:
+ typedef TargetTransformInfo Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "TargetIRAnalysis"; }
+
+ /// \brief Default construct a target IR analysis.
+ ///
+ /// This will use the module's datalayout to construct a baseline
+ /// conservative TTI result.
+ TargetIRAnalysis();
+
+ /// \brief Construct an IR analysis pass around a target-provided callback.
+ ///
+ /// The callback will be called with a particular function for which the TTI
+ /// is needed and must return a TTI object for that function.
+ TargetIRAnalysis(std::function<Result(const Function &)> TTICallback);
+
+ // Value semantics. We spell out the constructors for MSVC.
+ TargetIRAnalysis(const TargetIRAnalysis &Arg)
+ : TTICallback(Arg.TTICallback) {}
+ TargetIRAnalysis(TargetIRAnalysis &&Arg)
+ : TTICallback(std::move(Arg.TTICallback)) {}
+ TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
+ TTICallback = RHS.TTICallback;
+ return *this;
+ }
+ TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
+ TTICallback = std::move(RHS.TTICallback);
+ return *this;
+ }
+
+ Result run(const Function &F);
+
+private:
+ static char PassID;
+
+ /// \brief The callback used to produce a result.
+ ///
+ /// We use a completely opaque callback so that targets can provide whatever
+ /// mechanism they desire for constructing the TTI for a given function.
+ ///
+ /// FIXME: Should we really use std::function? It's relatively inefficient.
+ /// It might be possible to arrange for even stateful callbacks to outlive
+ /// the analysis and thus use a function_ref which would be lighter weight.
+ /// This may also be less error prone as the callback is likely to reference
+ /// the external TargetMachine, and that reference needs to never dangle.
+ std::function<Result(const Function &)> TTICallback;
+
+ /// \brief Helper function used as the callback in the default constructor.
+ static Result getDefaultTTI(const Function &F);
+};
+
+/// \brief Wrapper pass for TargetTransformInfo.
+///
+/// This pass can be constructed from a TTI object which it stores internally
+/// and is queried by passes.
+class TargetTransformInfoWrapperPass : public ImmutablePass {
+ TargetIRAnalysis TIRA;
+ Optional<TargetTransformInfo> TTI;
+
+ virtual void anchor();
+
+public:
+ static char ID;
+
+ /// \brief We must provide a default constructor for the pass but it should
+ /// never be used.
+ ///
+ /// Use the constructor below or call one of the creation routines.
+ TargetTransformInfoWrapperPass();
+
+ explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
+
+ TargetTransformInfo &getTTI(const Function &F);
+};
+
+/// \brief Create an analysis pass wrapper around a TTI object.
+///
+/// This analysis pass just holds the TTI instance and makes it available to
+/// clients.
+ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
+
+} // End llvm namespace
+
+#endif
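
The Concept/Model pair above is the standard C++ type-erasure idiom:
TargetTransformInfo holds a pointer to the abstract Concept, and Model<T>
adapts any concrete implementation T to that interface without requiring a
common base class. A minimal stand-alone sketch of the same idiom, with
hypothetical names (AnyCostModel, getOpCost):

    #include <memory>
    #include <utility>

    class AnyCostModel {
      struct Concept {
        virtual ~Concept() {}
        virtual int getOpCost(unsigned Opcode) = 0;
      };
      template <typename T> struct Model final : Concept {
        T Impl;
        Model(T Impl) : Impl(std::move(Impl)) {}
        int getOpCost(unsigned Opcode) override {
          return Impl.getOpCost(Opcode);
        }
      };
      std::unique_ptr<Concept> C;

    public:
      // Accepts any type with a getOpCost(unsigned) member; no shared
      // vtable is required of the wrapped type.
      template <typename T>
      AnyCostModel(T Impl) : C(new Model<T>(std::move(Impl))) {}
      int getOpCost(unsigned Opcode) { return C->getOpCost(Opcode); }
    };

This is why TargetTransformInfo can accept per-target implementations that
share no inheritance relationship: its template constructor stamps out a
fresh Model<T> for each one.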
diff --git a/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
new file mode 100644
index 0000000..4381523
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -0,0 +1,512 @@
+//===- TargetTransformInfoImpl.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides helpers for the implementation of
+/// a TargetTransformInfo-conforming class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
+#define LLVM_ANALYSIS_TARGETTRANSFORMINFOIMPL_H
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Analysis/VectorUtils.h"
+
+namespace llvm {
+
+/// \brief Base class for use as a mix-in that aids implementing
+/// a TargetTransformInfo-compatible class.
+class TargetTransformInfoImplBase {
+protected:
+ typedef TargetTransformInfo TTI;
+
+ const DataLayout &DL;
+
+ explicit TargetTransformInfoImplBase(const DataLayout &DL) : DL(DL) {}
+
+public:
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ TargetTransformInfoImplBase(const TargetTransformInfoImplBase &Arg)
+ : DL(Arg.DL) {}
+ TargetTransformInfoImplBase(TargetTransformInfoImplBase &&Arg) : DL(Arg.DL) {}
+
+ const DataLayout &getDataLayout() const { return DL; }
+
+ unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
+ switch (Opcode) {
+ default:
+ // By default, just classify everything as 'basic'.
+ return TTI::TCC_Basic;
+
+ case Instruction::GetElementPtr:
+ llvm_unreachable("Use getGEPCost for GEP operations!");
+
+ case Instruction::BitCast:
+ assert(OpTy && "Cast instructions must provide the operand type");
+ if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
+ // Identity and pointer-to-pointer casts are free.
+ return TTI::TCC_Free;
+
+ // Otherwise, the default basic cost is used.
+ return TTI::TCC_Basic;
+
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ case Instruction::SDiv:
+ case Instruction::SRem:
+ case Instruction::UDiv:
+ case Instruction::URem:
+ return TTI::TCC_Expensive;
+
+ case Instruction::IntToPtr: {
+ // An inttoptr cast is free so long as the input is a legal integer type
+ // which doesn't contain values outside the range of a pointer.
+ unsigned OpSize = OpTy->getScalarSizeInBits();
+ if (DL.isLegalInteger(OpSize) &&
+ OpSize <= DL.getPointerTypeSizeInBits(Ty))
+ return TTI::TCC_Free;
+
+ // Otherwise it's not a no-op.
+ return TTI::TCC_Basic;
+ }
+ case Instruction::PtrToInt: {
+ // A ptrtoint cast is free so long as the result is a legal integer type
+ // large enough to store the pointer.
+ unsigned DestSize = Ty->getScalarSizeInBits();
+ if (DL.isLegalInteger(DestSize) &&
+ DestSize >= DL.getPointerTypeSizeInBits(OpTy))
+ return TTI::TCC_Free;
+
+ // Otherwise it's not a no-op.
+ return TTI::TCC_Basic;
+ }
+ case Instruction::Trunc:
+ // trunc to a native type is free (assuming the target has compare and
+ // shift-right of the same width).
+ if (DL.isLegalInteger(DL.getTypeSizeInBits(Ty)))
+ return TTI::TCC_Free;
+
+ return TTI::TCC_Basic;
+ }
+ }
+
+ unsigned getGEPCost(Type *PointeeType, const Value *Ptr,
+ ArrayRef<const Value *> Operands) {
+ // In the basic model, we just assume that all-constant GEPs will be folded
+ // into their uses via addressing modes.
+ for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
+ if (!isa<Constant>(Operands[Idx]))
+ return TTI::TCC_Basic;
+
+ return TTI::TCC_Free;
+ }
+
+ unsigned getCallCost(FunctionType *FTy, int NumArgs) {
+ assert(FTy && "FunctionType must be provided to this routine.");
+
+ // The target-independent implementation just measures the size of the
+ // function by approximating that each argument will take on average one
+ // instruction to prepare.
+
+ if (NumArgs < 0)
+ // Set the argument number to the number of explicit arguments in the
+ // function.
+ NumArgs = FTy->getNumParams();
+
+ return TTI::TCC_Basic * (NumArgs + 1);
+ }
+
+ unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> ParamTys) {
+ switch (IID) {
+ default:
+ // Intrinsics rarely (if ever) have normal argument setup constraints.
+ // Model them as having a basic instruction cost.
+ // FIXME: This is wrong for libc intrinsics.
+ return TTI::TCC_Basic;
+
+ case Intrinsic::annotation:
+ case Intrinsic::assume:
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::objectsize:
+ case Intrinsic::ptr_annotation:
+ case Intrinsic::var_annotation:
+ case Intrinsic::experimental_gc_result:
+ case Intrinsic::experimental_gc_relocate:
+ // These intrinsics don't actually represent code after lowering.
+ return TTI::TCC_Free;
+ }
+ }
+
+ bool hasBranchDivergence() { return false; }
+
+ bool isSourceOfDivergence(const Value *V) { return false; }
+
+ bool isLoweredToCall(const Function *F) {
+ // FIXME: These should almost certainly not be handled here, and instead
+ // handled with the help of TLI or the target itself. This was largely
+ // ported from existing analysis heuristics here so that such refactorings
+ // can take place in the future.
+
+ if (F->isIntrinsic())
+ return false;
+
+ if (F->hasLocalLinkage() || !F->hasName())
+ return true;
+
+ StringRef Name = F->getName();
+
+ // These will all likely lower to a single selection DAG node.
+ if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
+ Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
+ Name == "fmin" || Name == "fminf" || Name == "fminl" ||
+ Name == "fmax" || Name == "fmaxf" || Name == "fmaxl" ||
+ Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
+ Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
+ return false;
+
+ // These are all likely to be optimized into something smaller.
+ if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
+ Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
+ Name == "floorf" || Name == "ceil" || Name == "round" ||
+ Name == "ffs" || Name == "ffsl" || Name == "abs" || Name == "labs" ||
+ Name == "llabs")
+ return false;
+
+ return true;
+ }
+
+ void getUnrollingPreferences(Loop *, TTI::UnrollingPreferences &) {}
+
+ bool isLegalAddImmediate(int64_t Imm) { return false; }
+
+ bool isLegalICmpImmediate(int64_t Imm) { return false; }
+
+ bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) {
+ // Guess that only reg and reg+reg addressing is allowed. This heuristic is
+ // taken from the implementation of LSR.
+ return !BaseGV && BaseOffset == 0 && (Scale == 0 || Scale == 1);
+ }
+
+ bool isLegalMaskedStore(Type *DataType) { return false; }
+
+ bool isLegalMaskedLoad(Type *DataType) { return false; }
+
+ bool isLegalMaskedScatter(Type *DataType) { return false; }
+
+ bool isLegalMaskedGather(Type *DataType) { return false; }
+
+ int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+ // Guess that all legal addressing modes are free.
+ if (isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
+ Scale, AddrSpace))
+ return 0;
+ return -1;
+ }
+
+ bool isTruncateFree(Type *Ty1, Type *Ty2) { return false; }
+
+ bool isProfitableToHoist(Instruction *I) { return true; }
+
+ bool isTypeLegal(Type *Ty) { return false; }
+
+ unsigned getJumpBufAlignment() { return 0; }
+
+ unsigned getJumpBufSize() { return 0; }
+
+ bool shouldBuildLookupTables() { return true; }
+
+ bool enableAggressiveInterleaving(bool LoopHasReductions) { return false; }
+
+ bool enableInterleavedAccessVectorization() { return false; }
+
+ TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
+ return TTI::PSK_Software;
+ }
+
+ bool haveFastSqrt(Type *Ty) { return false; }
+
+ unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
+
+ unsigned getIntImmCost(const APInt &Imm, Type *Ty) { return TTI::TCC_Basic; }
+
+ unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+ Type *Ty) {
+ return TTI::TCC_Free;
+ }
+
+ unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
+ Type *Ty) {
+ return TTI::TCC_Free;
+ }
+
+ unsigned getNumberOfRegisters(bool Vector) { return 8; }
+
+ unsigned getRegisterBitWidth(bool Vector) { return 32; }
+
+ unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+
+ unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info,
+ TTI::OperandValueKind Opd2Info,
+ TTI::OperandValueProperties Opd1PropInfo,
+ TTI::OperandValueProperties Opd2PropInfo) {
+ return 1;
+ }
+
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Ty, int Index,
+ Type *SubTp) {
+ return 1;
+ }
+
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { return 1; }
+
+ unsigned getCFInstrCost(unsigned Opcode) { return 1; }
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
+ return 1;
+ }
+
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+ return 1;
+ }
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) {
+ return 1;
+ }
+
+ unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) {
+ return 1;
+ }
+
+ unsigned getGatherScatterOpCost(unsigned Opcode, Type *DataTy, Value *Ptr,
+ bool VariableMask,
+ unsigned Alignment) {
+ return 1;
+ }
+
+ unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+ unsigned Factor,
+ ArrayRef<unsigned> Indices,
+ unsigned Alignment,
+ unsigned AddressSpace) {
+ return 1;
+ }
+
+ unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Type *> Tys) {
+ return 1;
+ }
+ unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
+ ArrayRef<Value *> Args) {
+ return 1;
+ }
+
+ unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
+ return 1;
+ }
+
+ unsigned getNumberOfParts(Type *Tp) { return 0; }
+
+ unsigned getAddressComputationCost(Type *Tp, bool) { return 0; }
+
+ unsigned getReductionCost(unsigned, Type *, bool) { return 1; }
+
+ unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) { return 0; }
+
+ bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) {
+ return false;
+ }
+
+ Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
+ Type *ExpectedType) {
+ return nullptr;
+ }
+
+ bool areInlineCompatible(const Function *Caller,
+ const Function *Callee) const {
+ return (Caller->getFnAttribute("target-cpu") ==
+ Callee->getFnAttribute("target-cpu")) &&
+ (Caller->getFnAttribute("target-features") ==
+ Callee->getFnAttribute("target-features"));
+ }
+};
+
+/// \brief CRTP base class for use as a mix-in that aids implementing
+/// a TargetTransformInfo-compatible class.
+template <typename T>
+class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
+private:
+ typedef TargetTransformInfoImplBase BaseT;
+
+protected:
+ explicit TargetTransformInfoImplCRTPBase(const DataLayout &DL) : BaseT(DL) {}
+
+public:
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ TargetTransformInfoImplCRTPBase(const TargetTransformInfoImplCRTPBase &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)) {}
+ TargetTransformInfoImplCRTPBase(TargetTransformInfoImplCRTPBase &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
+
+ using BaseT::getCallCost;
+
+ unsigned getCallCost(const Function *F, int NumArgs) {
+ assert(F && "A concrete function must be provided to this routine.");
+
+ if (NumArgs < 0)
+ // Set the argument number to the number of explicit arguments in the
+ // function.
+ NumArgs = F->arg_size();
+
+ if (Intrinsic::ID IID = F->getIntrinsicID()) {
+ FunctionType *FTy = F->getFunctionType();
+ SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
+ return static_cast<T *>(this)
+ ->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
+ }
+
+ if (!static_cast<T *>(this)->isLoweredToCall(F))
+ return TTI::TCC_Basic; // Give a basic cost if it will be lowered
+ // directly.
+
+ return static_cast<T *>(this)->getCallCost(F->getFunctionType(), NumArgs);
+ }
+
+ unsigned getCallCost(const Function *F, ArrayRef<const Value *> Arguments) {
+ // Simply delegate to generic handling of the call.
+ // FIXME: We should use instsimplify or something else to catch calls which
+ // will constant fold with these arguments.
+ return static_cast<T *>(this)->getCallCost(F, Arguments.size());
+ }
+
+ using BaseT::getGEPCost;
+
+ unsigned getGEPCost(Type *PointeeType, const Value *Ptr,
+ ArrayRef<const Value *> Operands) {
+ const GlobalValue *BaseGV = nullptr;
+ if (Ptr != nullptr) {
+ // TODO: will remove this when pointers have an opaque type.
+ assert(Ptr->getType()->getScalarType()->getPointerElementType() ==
+ PointeeType &&
+ "explicit pointee type doesn't match operand's pointee type");
+ BaseGV = dyn_cast<GlobalValue>(Ptr->stripPointerCasts());
+ }
+ bool HasBaseReg = (BaseGV == nullptr);
+ int64_t BaseOffset = 0;
+ int64_t Scale = 0;
+
+ // Assumes the address space is 0 when Ptr is nullptr.
+ unsigned AS =
+ (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
+ auto GTI = gep_type_begin(PointerType::get(PointeeType, AS), Operands);
+ for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
+ // We assume that the cost of Scalar GEP with constant index and the
+ // cost of Vector GEP with splat constant index are the same.
+ const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
+ if (!ConstIdx)
+ if (auto Splat = getSplatValue(*I))
+ ConstIdx = dyn_cast<ConstantInt>(Splat);
+ if (isa<SequentialType>(*GTI)) {
+ int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
+ if (ConstIdx)
+ BaseOffset += ConstIdx->getSExtValue() * ElementSize;
+ else {
+ // Needs scale register.
+ if (Scale != 0)
+ // No addressing mode takes two scale registers.
+ return TTI::TCC_Basic;
+ Scale = ElementSize;
+ }
+ } else {
+ StructType *STy = cast<StructType>(*GTI);
+ // For structures, the index is always a splat or scalar constant.
+ assert(ConstIdx && "Unexpected GEP index");
+ uint64_t Field = ConstIdx->getZExtValue();
+ BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+ }
+ }
+
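+ // If the decomposed GEP (global base, constant offset, optional scaled
+ // index) fits a legal addressing mode for the target, assume it folds
+ // into the using memory access and is free.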
+ if (static_cast<T *>(this)->isLegalAddressingMode(
+ PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+ BaseOffset, HasBaseReg, Scale, AS)) {
+ return TTI::TCC_Free;
+ }
+ return TTI::TCC_Basic;
+ }
+
+ using BaseT::getIntrinsicCost;
+
+ unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) {
+ // Delegate to the generic intrinsic handling code. This mostly provides an
+ // opportunity for targets to (for example) special case the cost of
+ // certain intrinsics based on constants used as arguments.
+ SmallVector<Type *, 8> ParamTys;
+ ParamTys.reserve(Arguments.size());
+ for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
+ ParamTys.push_back(Arguments[Idx]->getType());
+ return static_cast<T *>(this)->getIntrinsicCost(IID, RetTy, ParamTys);
+ }
+
+ unsigned getUserCost(const User *U) {
+ if (isa<PHINode>(U))
+ return TTI::TCC_Free; // Model all PHI nodes as free.
+
+ if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U)) {
+ SmallVector<Value *, 4> Indices(GEP->idx_begin(), GEP->idx_end());
+ return static_cast<T *>(this)->getGEPCost(
+ GEP->getSourceElementType(), GEP->getPointerOperand(), Indices);
+ }
+
+ if (auto CS = ImmutableCallSite(U)) {
+ const Function *F = CS.getCalledFunction();
+ if (!F) {
+ // Just use the called value type.
+ Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
+ return static_cast<T *>(this)
+ ->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
+ }
+
+ SmallVector<const Value *, 8> Arguments(CS.arg_begin(), CS.arg_end());
+ return static_cast<T *>(this)->getCallCost(F, Arguments);
+ }
+
+ if (const CastInst *CI = dyn_cast<CastInst>(U)) {
+ // Result of a cmp instruction is often extended (to be used by other
+ // cmp instructions, logical or return instructions). These are usually
+ // nop on most sane targets.
+ if (isa<CmpInst>(CI->getOperand(0)))
+ return TTI::TCC_Free;
+ }
+
+ return static_cast<T *>(this)->getOperationCost(
+ Operator::getOpcode(U), U->getType(),
+ U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
+ }
+};
+}
+
+#endif
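
A backend builds its TTI by deriving from this CRTP mix-in and overriding
only the hooks it cares about; everything else falls back to the
conservative defaults above. A sketch for a hypothetical target
(MyTargetTTIImpl is invented here; in-tree targets actually derive from
BasicTTIImplBase, which layers on top of this class):

    class MyTargetTTIImpl
        : public TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> {
      typedef TargetTransformInfoImplCRTPBase<MyTargetTTIImpl> BaseT;

    public:
      explicit MyTargetTTIImpl(const DataLayout &DL) : BaseT(DL) {}

      // Shadow the default guess of 8 registers per class.
      unsigned getNumberOfRegisters(bool Vector) { return 32; }

      // Pretend 32- and 64-bit popcount is fast in hardware.
      TTI::PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) {
        return (IntTyWidthInBit == 32 || IntTyWidthInBit == 64)
                   ? TTI::PSK_FastHardware
                   : TTI::PSK_Software;
      }
    };

Because dispatch happens statically through TargetTransformInfo::Model<T>,
these overrides need no virtual keyword; ordinary name lookup on the
concrete type picks them over the base-class defaults.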
diff --git a/contrib/llvm/include/llvm/Analysis/Trace.h b/contrib/llvm/include/llvm/Analysis/Trace.h
new file mode 100644
index 0000000..bedd654
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/Trace.h
@@ -0,0 +1,119 @@
+//===- llvm/Analysis/Trace.h - Represent one trace of LLVM code -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents a single trace of LLVM basic blocks. A trace is a
+// single entry, multiple exit, region of code that is often hot. Trace-based
+// optimizations treat traces almost like they are a large, strange, basic
+// block: because the trace path is assumed to be hot, optimizations for the
+// fall-through path are made at the expense of the non-fall-through paths.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TRACE_H
+#define LLVM_ANALYSIS_TRACE_H
+
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+ class BasicBlock;
+ class Function;
+ class Module;
+ class raw_ostream;
+
+class Trace {
+ typedef std::vector<BasicBlock *> BasicBlockListType;
+ BasicBlockListType BasicBlocks;
+
+public:
+ /// Trace ctor - Make a new trace from a vector of basic blocks,
+ /// residing in the function which is the parent of the first
+ /// basic block in the vector.
+ ///
+ Trace(const std::vector<BasicBlock *> &vBB) : BasicBlocks (vBB) {}
+
+ /// getEntryBasicBlock - Return the entry basic block (first block)
+ /// of the trace.
+ ///
+ BasicBlock *getEntryBasicBlock () const { return BasicBlocks[0]; }
+
+ /// operator[]/getBlock - Return basic block N in the trace.
+ ///
+ BasicBlock *operator[](unsigned i) const { return BasicBlocks[i]; }
+ BasicBlock *getBlock(unsigned i) const { return BasicBlocks[i]; }
+
+ /// getFunction - Return this trace's parent function.
+ ///
+ Function *getFunction () const;
+
+ /// getModule - Return the Module that contains this trace's parent
+ /// function.
+ ///
+ Module *getModule () const;
+
+ /// getBlockIndex - Return the index of the specified basic block in the
+ /// trace, or -1 if it is not in the trace.
+ int getBlockIndex(const BasicBlock *X) const {
+ for (unsigned i = 0, e = BasicBlocks.size(); i != e; ++i)
+ if (BasicBlocks[i] == X)
+ return i;
+ return -1;
+ }
+
+ /// contains - Returns true if this trace contains the given basic
+ /// block.
+ ///
+ bool contains(const BasicBlock *X) const {
+ return getBlockIndex(X) != -1;
+ }
+
+ /// Returns true if B1 occurs before B2 in the trace, or if it is the same
+ /// block as B2. Both blocks must be in the trace.
+ ///
+ bool dominates(const BasicBlock *B1, const BasicBlock *B2) const {
+ int B1Idx = getBlockIndex(B1), B2Idx = getBlockIndex(B2);
+ assert(B1Idx != -1 && B2Idx != -1 && "Block is not in the trace!");
+ return B1Idx <= B2Idx;
+ }
+
+ // BasicBlock iterators...
+ typedef BasicBlockListType::iterator iterator;
+ typedef BasicBlockListType::const_iterator const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ iterator begin() { return BasicBlocks.begin(); }
+ const_iterator begin() const { return BasicBlocks.begin(); }
+ iterator end () { return BasicBlocks.end(); }
+ const_iterator end () const { return BasicBlocks.end(); }
+
+ reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
+ const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
+ reverse_iterator rend () { return BasicBlocks.rend(); }
+ const_reverse_iterator rend () const { return BasicBlocks.rend(); }
+
+ unsigned size() const { return BasicBlocks.size(); }
+ bool empty() const { return BasicBlocks.empty(); }
+
+ iterator erase(iterator q) { return BasicBlocks.erase (q); }
+ iterator erase(iterator q1, iterator q2) { return BasicBlocks.erase (q1, q2); }
+
+ /// print - Write trace to output stream.
+ ///
+ void print(raw_ostream &O) const;
+
+ /// dump - Debugger convenience method; writes trace to standard error
+ /// output stream.
+ ///
+ void dump() const;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_ANALYSIS_TRACE_H
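
Since a Trace is just an ordered vector of blocks, dominance within it is
defined purely by position. A short usage sketch, assuming Entry, Body and
Exit are BasicBlocks of one function listed in hot-path order:

    std::vector<BasicBlock *> Blocks = {Entry, Body, Exit};
    Trace T(Blocks);
    assert(T.getEntryBasicBlock() == Entry);
    if (T.contains(Body) && T.dominates(Entry, Body)) {
      // Entry occurs no later than Body on the trace.
    }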
diff --git a/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h b/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
new file mode 100644
index 0000000..7b44ac7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/TypeBasedAliasAnalysis.h
@@ -0,0 +1,93 @@
+//===- TypeBasedAliasAnalysis.h - Type-Based Alias Analysis -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This is the interface for a metadata-based TBAA. See the source file for
+/// details on the algorithm.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
+#define LLVM_ANALYSIS_TYPEBASEDALIASANALYSIS_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+/// A simple AA result that uses TBAA metadata to answer queries.
+class TypeBasedAAResult : public AAResultBase<TypeBasedAAResult> {
+ friend AAResultBase<TypeBasedAAResult>;
+
+public:
+ explicit TypeBasedAAResult(const TargetLibraryInfo &TLI)
+ : AAResultBase(TLI) {}
+ TypeBasedAAResult(TypeBasedAAResult &&Arg) : AAResultBase(std::move(Arg)) {}
+
+ /// Handle invalidation events from the new pass manager.
+ ///
+ /// By definition, this result is stateless and so remains valid.
+ bool invalidate(Function &, const PreservedAnalyses &) { return false; }
+
+ AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
+ bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal);
+ FunctionModRefBehavior getModRefBehavior(ImmutableCallSite CS);
+ FunctionModRefBehavior getModRefBehavior(const Function *F);
+ ModRefInfo getModRefInfo(ImmutableCallSite CS, const MemoryLocation &Loc);
+ ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
+
+private:
+ bool Aliases(const MDNode *A, const MDNode *B) const;
+ bool PathAliases(const MDNode *A, const MDNode *B) const;
+};
+
+/// Analysis pass providing a never-invalidated alias analysis result.
+class TypeBasedAA {
+public:
+ typedef TypeBasedAAResult Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ TypeBasedAAResult run(Function &F, AnalysisManager<Function> *AM);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "TypeBasedAA"; }
+
+private:
+ static char PassID;
+};
+
+/// Legacy wrapper pass to provide the TypeBasedAAResult object.
+class TypeBasedAAWrapperPass : public ImmutablePass {
+ std::unique_ptr<TypeBasedAAResult> Result;
+
+public:
+ static char ID;
+
+ TypeBasedAAWrapperPass();
+
+ TypeBasedAAResult &getResult() { return *Result; }
+ const TypeBasedAAResult &getResult() const { return *Result; }
+
+ bool doInitialization(Module &M) override;
+ bool doFinalization(Module &M) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+//===--------------------------------------------------------------------===//
+//
+// createTypeBasedAAWrapperPass - This pass implements metadata-based
+// type-based alias analysis.
+//
+ImmutablePass *createTypeBasedAAWrapperPass();
+}
+
+#endif
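
With the legacy pass manager, the wrapper is registered like any other
immutable pass. A minimal sketch, assuming the surrounding pipeline setup
exists elsewhere:

    #include "llvm/IR/LegacyPassManager.h"

    void addTypeBasedAA(llvm::legacy::PassManager &PM) {
      // Make the TBAA result available to later passes that query alias
      // analysis.
      PM.add(llvm::createTypeBasedAAWrapperPass());
    }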
diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
new file mode 100644
index 0000000..8e02910
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h
@@ -0,0 +1,458 @@
+//===- llvm/Analysis/ValueTracking.h - Walk computations --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains routines that help analyze properties that chains of
+// computations have.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ANALYSIS_VALUETRACKING_H
+#define LLVM_ANALYSIS_VALUETRACKING_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/ConstantRange.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ class APInt;
+ class AddOperator;
+ class AssumptionCache;
+ class DataLayout;
+ class DominatorTree;
+ class Instruction;
+ class Loop;
+ class LoopInfo;
+ class MDNode;
+ class StringRef;
+ class TargetLibraryInfo;
+ class Value;
+
+ /// Determine which bits of V are known to be either zero or one and return
+ /// them in the KnownZero/KnownOne bit sets.
+ ///
+ /// This function is defined on values with integer type, values with pointer
+ /// type, and vectors of integers. In the case
+ /// where V is a vector, the known zero and known one values are the
+ /// same width as the vector element, and the bit is set only if it is true
+ /// for all of the elements in the vector.
+ void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+ const DataLayout &DL, unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+ /// Compute known bits from the range metadata.
+ /// \p KnownZero the set of bits that are known to be zero
+ /// \p KnownOne the set of bits that are known to be one
+ void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
+ APInt &KnownZero, APInt &KnownOne);
+ /// Return true if LHS and RHS have no common bits set.
+ bool haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// ComputeSignBit - Determine whether the sign bit is known to be zero or
+ /// one. Convenience wrapper around computeKnownBits.
+ void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
+ const DataLayout &DL, unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
+ /// exactly one bit set when defined. For vectors return true if every
+ /// element is known to be a power of two when defined. Supports values with
+ /// integer or pointer type and vectors of integers. If 'OrZero' is set then
+ /// return true if the given value is either a power of two or zero.
+ bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
+ bool OrZero = false, unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// isKnownNonZero - Return true if the given value is known to be non-zero
+ /// when defined. For vectors return true if every element is known to be
+ /// non-zero when defined. Supports values with integer or pointer type and
+ /// vectors of integers.
+ bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// Returns true if the given value is known to be non-negative.
+ bool isKnownNonNegative(Value *V, const DataLayout &DL, unsigned Depth = 0,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// isKnownNonEqual - Return true if the given values are known to be
+ /// non-equal when defined. Supports scalar integer types only.
+ bool isKnownNonEqual(Value *V1, Value *V2, const DataLayout &DL,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
+ /// this predicate to simplify operations downstream. Mask is known to be
+ /// zero for bits that V cannot have.
+ ///
+ /// This function is defined on values with integer type, values with pointer
+ /// type, and vectors of integers. In the case
+ /// where V is a vector, the mask, known zero, and known one values are the
+ /// same width as the vector element, and the bit is set only if it is true
+ /// for all of the elements in the vector.
+ bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// ComputeNumSignBits - Return the number of times the sign bit of the
+ /// register is replicated into the other bits. We know that at least 1 bit
+ /// is always equal to the sign bit (itself), but other cases can give us
+ /// information. For example, immediately after an "ashr X, 2", we know that
+ /// the top 3 bits are all equal to each other, so we return 3.
+ ///
+ /// 'Op' must have a scalar integer type.
+ ///
+ unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// ComputeMultiple - This function computes the integer multiple of Base that
+ /// equals V. If successful, it returns true and returns the multiple in
+ /// Multiple. If unsuccessful, it returns false. Also, if V can be
+ /// simplified to an integer, then the simplified V is returned in Val. Look
+ /// through sext only if LookThroughSExt=true.
+ bool ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
+ bool LookThroughSExt = false,
+ unsigned Depth = 0);
+
+ /// CannotBeNegativeZero - Return true if we can prove that the specified FP
+ /// value is never equal to -0.0.
+ ///
+ bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
+
+ /// CannotBeOrderedLessThanZero - Return true if we can prove that the
+ /// specified FP value is either a NaN or never less than 0.0.
+ ///
+ bool CannotBeOrderedLessThanZero(const Value *V, unsigned Depth = 0);
+
+ /// isBytewiseValue - If the specified value can be set by repeating the same
+ /// byte in memory, return the i8 value that it is represented with. This is
+ /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
+ /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
+ /// byte store (e.g. i16 0x1234), return null.
+ Value *isBytewiseValue(Value *V);
+
+ /// FindInsertedValue - Given an aggregate and a sequence of indices, see if
+ /// the scalar value indexed is already around as a register, for example if
+ /// it were inserted directly into the aggregate.
+ ///
+ /// If InsertBefore is not null, this function will duplicate (modified)
+ /// insertvalues when a part of a nested struct is extracted.
+ Value *FindInsertedValue(Value *V,
+ ArrayRef<unsigned> idx_range,
+ Instruction *InsertBefore = nullptr);
+
+ /// GetPointerBaseWithConstantOffset - Analyze the specified pointer to see if
+ /// it can be expressed as a base pointer plus a constant offset. Return the
+ /// base and offset to the caller.
+ Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
+ const DataLayout &DL);
+ static inline const Value *
+ GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
+ const DataLayout &DL) {
+ return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
+ DL);
+ }
+
+ /// getConstantStringInfo - This function computes the length of a
+ /// null-terminated C string pointed to by V. If successful, it returns true
+ /// and returns the string in Str. If unsuccessful, it returns false. This
+ /// does not include the trailing nul character by default. If TrimAtNul is
+ /// set to false, then this returns any trailing nul characters as well as any
+ /// other characters that come after it.
+ bool getConstantStringInfo(const Value *V, StringRef &Str,
+ uint64_t Offset = 0, bool TrimAtNul = true);
+
+ /// GetStringLength - If we can compute the length of the string pointed to by
+ /// the specified pointer, return 'len+1'. If we can't, return 0.
+ uint64_t GetStringLength(Value *V);
+
+ /// GetUnderlyingObject - This method strips off any GEP address adjustments
+ /// and pointer casts from the specified value, returning the original object
+ /// being addressed. Note that the returned value has pointer type if the
+ /// specified value does. If the MaxLookup value is non-zero, it limits the
+ /// number of instructions to be stripped off.
+ Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
+ unsigned MaxLookup = 6);
+ static inline const Value *GetUnderlyingObject(const Value *V,
+ const DataLayout &DL,
+ unsigned MaxLookup = 6) {
+ return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
+ }
+
+ /// \brief This method is similar to GetUnderlyingObject except that it can
+ /// look through phi and select instructions and return multiple objects.
+ ///
+ /// If LoopInfo is passed, loop phis are further analyzed. If a pointer
+ /// accesses different objects in each iteration, we don't look through the
+ /// phi node. E.g. consider this loop nest:
+ ///
+ /// int **A;
+ /// for (i)
+ /// for (j) {
+ /// A[i][j] = A[i-1][j] * B[j]
+ /// }
+ ///
+ /// This is transformed by Load-PRE to stash away A[i] for the next iteration
+ /// of the outer loop:
+ ///
+ /// Curr = A[0]; // Prev_0
+ /// for (i: 1..N) {
+ /// Prev = Curr; // Prev = PHI (Prev_0, Curr)
+ /// Curr = A[i];
+ /// for (j: 0..N) {
+ /// Curr[j] = Prev[j] * B[j]
+ /// }
+ /// }
+ ///
+ /// Since A[i] and A[i-1] are independent pointers, getUnderlyingObjects
+ /// should not assume that Curr and Prev share the same underlying object;
+ /// thus it shouldn't look through the phi above.
+ void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
+ const DataLayout &DL, LoopInfo *LI = nullptr,
+ unsigned MaxLookup = 6);
+
+ /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
+ /// are lifetime markers.
+ bool onlyUsedByLifetimeMarkers(const Value *V);
+
+ /// isDereferenceablePointer - Return true if this is always a dereferenceable
+ /// pointer. If the context instruction is specified perform context-sensitive
+ /// analysis and return true if the pointer is dereferenceable at the
+ /// specified instruction.
+ bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
+ /// Returns true if V is always a dereferenceable pointer with alignment
+ /// greater or equal than requested. If the context instruction is specified
+ /// performs context-sensitive analysis and returns true if the pointer is
+ /// dereferenceable at the specified instruction.
+ bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
+ const DataLayout &DL,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
+ /// isSafeToSpeculativelyExecute - Return true if the instruction does not
+ /// have any effects besides calculating the result and does not have
+ /// undefined behavior.
+ ///
+ /// This method never returns true for an instruction that returns true for
+ /// mayHaveSideEffects; however, this method also does some other checks in
+ /// addition. It checks for undefined behavior, like dividing by zero or
+ /// loading from an invalid pointer (but not for undefined results, like a
+ /// shift with a shift amount larger than the width of the result). It checks
+ /// for malloc and alloca because speculatively executing them might cause a
+ /// memory leak. It also returns false for instructions related to control
+ /// flow, specifically terminators and PHI nodes.
+ ///
+ /// If the CtxI is specified this method performs context-sensitive analysis
+ /// and returns true if it is safe to execute the instruction immediately
+ /// before the CtxI.
+ ///
+ /// If the CtxI is NOT specified this method only looks at the instruction
+ /// itself and its operands, so if this method returns true, it is safe to
+ /// move the instruction as long as the correct dominance relationships for
+ /// the operands and users hold.
+ ///
+ /// This method can return true for instructions that read memory;
+ /// for such instructions, moving them may change the resulting value.
+ bool isSafeToSpeculativelyExecute(const Value *V,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
+ /// Returns true if the result or effects of the given instruction \p I
+ /// depend on or influence global memory.
+ /// Memory dependence arises, for example, if the instruction reads from
+ /// memory or may produce effects or undefined behavior. Memory-dependent
+ /// instructions generally cannot be reordered with respect to other
+ /// memory-dependent instructions or moved into non-dominated basic blocks.
+ /// Instructions which just compute a value based on the values of their
+ /// operands are not memory dependent.
+
+ /// isKnownNonNull - Return true if this pointer couldn't possibly be null by
+ /// its definition. This returns true for allocas, non-extern-weak globals
+ /// and byval arguments.
+ bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
+
+ /// isKnownNonNullAt - Return true if this pointer couldn't possibly be null.
+ /// If the context instruction is specified perform context-sensitive analysis
+ /// and return true if the pointer couldn't possibly be null at the specified
+ /// instruction.
+ bool isKnownNonNullAt(const Value *V,
+ const Instruction *CtxI = nullptr,
+ const DominatorTree *DT = nullptr,
+ const TargetLibraryInfo *TLI = nullptr);
+
+ /// Return true if it is valid to use the assumptions provided by an
+ /// assume intrinsic, I, at the point in the control-flow identified by the
+ /// context instruction, CxtI.
+ bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
+ const DominatorTree *DT = nullptr);
+
+ enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
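+ /// Determine whether the unsigned multiply of LHS and RHS always, never,
+ /// or may overflow.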
+ OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT);
+ OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT);
+ OverflowResult computeOverflowForSignedAdd(Value *LHS, Value *RHS,
+ const DataLayout &DL,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+ /// This version also leverages the sign bit of Add if known.
+ OverflowResult computeOverflowForSignedAdd(AddOperator *Add,
+ const DataLayout &DL,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+
+ /// Return true if this function can prove that the instruction I will
+ /// always transfer execution to one of its successors (including the next
+ /// instruction that follows within a basic block). E.g. this is not
+ /// guaranteed for function calls that could loop infinitely.
+ ///
+ /// In other words, this function returns false for instructions that may
+ /// transfer execution or fail to transfer execution in a way that is not
+ /// captured in the CFG nor in the sequence of instructions within a basic
+ /// block.
+ ///
+ /// Undefined behavior is assumed not to happen, so e.g. division is
+ /// guaranteed to transfer execution to the following instruction even
+ /// though division by zero might cause undefined behavior.
+ bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
+
+ /// Return true if this function can prove that the instruction I
+ /// is executed for every iteration of the loop L.
+ ///
+ /// Note that this currently only considers the loop header.
+ bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+ const Loop *L);
+
+ /// Return true if this function can prove that I is guaranteed to yield
+ /// full-poison (all bits poison) if at least one of its operands is
+ /// full-poison (all bits poison).
+ ///
+ /// The exact rules for how poison propagates through instructions have
+ /// not been settled as of 2015-07-10, so this function is conservative
+ /// and only considers poison to be propagated in uncontroversial
+ /// cases. There is no attempt to track values that may be only partially
+ /// poison.
+ bool propagatesFullPoison(const Instruction *I);
+
+ /// Return either nullptr or an operand of I such that I will trigger
+ /// undefined behavior if I is executed and that operand has a full-poison
+ /// value (all bits poison).
+ const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
+
+ /// Return true if this function can prove that if PoisonI is executed
+ /// and yields a full-poison value (all bits poison), then that will
+ /// trigger undefined behavior.
+ ///
+ /// Note that this currently only considers the basic block that is
+ /// the parent of I.
+ bool isKnownNotFullPoison(const Instruction *PoisonI);
+
+ /// \brief Specific patterns of select instructions we can match.
+ enum SelectPatternFlavor {
+ SPF_UNKNOWN = 0,
+ SPF_SMIN, /// Signed minimum
+ SPF_UMIN, /// Unsigned minimum
+ SPF_SMAX, /// Signed maximum
+ SPF_UMAX, /// Unsigned maximum
+ SPF_FMINNUM, /// Floating point minnum
+ SPF_FMAXNUM, /// Floating point maxnum
+ SPF_ABS, /// Absolute value
+ SPF_NABS /// Negated absolute value
+ };
+ /// \brief Behavior when a floating point min/max is given one NaN and one
+ /// non-NaN as input.
+ enum SelectPatternNaNBehavior {
+ SPNB_NA = 0, /// NaN behavior not applicable.
+ SPNB_RETURNS_NAN, /// Given one NaN input, returns the NaN.
+ SPNB_RETURNS_OTHER, /// Given one NaN input, returns the non-NaN.
+ SPNB_RETURNS_ANY /// Given one NaN input, can return either (or
+ /// it has been determined that no operands can
+ /// be NaN).
+ };
+ struct SelectPatternResult {
+ SelectPatternFlavor Flavor;
+ SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
+ /// SPF_FMINNUM or SPF_FMAXNUM.
+ bool Ordered; /// When implementing this min/max pattern as
+ /// fcmp; select, does the fcmp have to be
+ /// ordered?
+
+ /// \brief Return true if \p SPF is a min or a max pattern.
+ static bool isMinOrMax(SelectPatternFlavor SPF) {
+ return !(SPF == SPF_UNKNOWN || SPF == SPF_ABS || SPF == SPF_NABS);
+ }
+ };
+ /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
+ /// and providing the out parameter results if we successfully match.
+ ///
+ /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
+ /// not match that of the original select. If this is the case, the cast
+ /// operation (one of Trunc,SExt,Zext) that must be done to transform the
+ /// type of LHS and RHS into the type of V is returned in CastOp.
+ ///
+ /// For example:
+ /// %1 = icmp slt i32 %a, 4
+ /// %2 = sext i32 %a to i64
+ /// %3 = select i1 %1, i64 %2, i64 4
+ ///
+ /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
+ ///
+ SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
+ Instruction::CastOps *CastOp = nullptr);
+
+ /// Parse out a conservative ConstantRange from !range metadata.
+ ///
+ /// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
+ ConstantRange getConstantRangeFromMetadata(MDNode &RangeMD);
+
+ /// Return true if RHS is known to be implied by LHS. LHS and RHS must be
+ /// i1 (boolean) values or vectors of such values. Note that the truth
+ /// table for implication is the same as <=u on i1 values (but not <=s!).
+ /// The truth table for both is:
+ /// | T | F (B)
+ /// T | T | F
+ /// F | T | T
+ /// (A)
+ bool isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
+ unsigned Depth = 0, AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr);
+} // end namespace llvm
+
+#endif
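For illustration, a minimal sketch of how the matchSelectPattern interface
declared above might be consumed; the wrapper name isSignedMin is invented
for the example:

  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/Value.h"

  // Returns true if V computes a signed minimum of two operands.
  static bool isSignedMin(llvm::Value *V) {
    llvm::Value *LHS = nullptr, *RHS = nullptr;
    // matchSelectPattern classifies the icmp/select idiom feeding V and
    // fills LHS/RHS with the matched operands.
    return llvm::matchSelectPattern(V, LHS, RHS).Flavor == llvm::SPF_SMIN;
  }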
diff --git a/contrib/llvm/include/llvm/Analysis/VectorUtils.h b/contrib/llvm/include/llvm/Analysis/VectorUtils.h
new file mode 100644
index 0000000..531803a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Analysis/VectorUtils.h
@@ -0,0 +1,132 @@
+//===- llvm/Analysis/VectorUtils.h - Vector utilities -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some vectorizer utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+
+namespace llvm {
+
+struct DemandedBits;
+class GetElementPtrInst;
+class Loop;
+class ScalarEvolution;
+class TargetTransformInfo;
+class Type;
+class Value;
+
+/// \brief Identify if the intrinsic is trivially vectorizable.
+/// This method returns true if the intrinsic's argument types are all
+/// scalars for the scalar form of the intrinsic and all vectors for
+/// the vector form of the intrinsic.
+bool isTriviallyVectorizable(Intrinsic::ID ID);
+
+/// \brief Identifies if the intrinsic has a scalar operand. It checks for
+/// the ctlz, cttz and powi special intrinsics, whose argument is scalar.
+bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
+
+/// \brief Identify if a call has a unary float signature.
+/// Returns the input intrinsic ID if the call has a single argument, the
+/// argument type and the call instruction type are floating point, and the
+/// call only reads memory; otherwise returns not_intrinsic.
+Intrinsic::ID checkUnaryFloatSignature(const CallInst &I,
+ Intrinsic::ID ValidIntrinsicID);
+
+/// \brief Identify if a call has a binary float signature.
+/// Returns the input intrinsic ID if the call has two arguments, the
+/// argument types and the call instruction type are floating point, and the
+/// call only reads memory; otherwise returns not_intrinsic.
+Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
+ Intrinsic::ID ValidIntrinsicID);
+
+/// \brief Returns the intrinsic ID for a call.
+/// For the input call instruction it finds the corresponding intrinsic and
+/// returns its intrinsic ID; if none is found, returns not_intrinsic.
+Intrinsic::ID getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI);
+
+/// \brief Find the operand of the GEP that should be checked for consecutive
+/// stores. This ignores trailing indices that have no effect on the final
+/// pointer.
+unsigned getGEPInductionOperand(const GetElementPtrInst *Gep);
+
+/// \brief If the argument is a GEP, then returns the operand identified by
+/// getGEPInductionOperand. However, if there is some other non-loop-invariant
+/// operand, it returns that instead.
+Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
+
+/// \brief If a value has only one user that is a CastInst, return it.
+Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty);
+
+/// \brief Get the stride of a pointer access in a loop. Looks for symbolic
+/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
+Value *getStrideFromPointer(Value *Ptr, ScalarEvolution *SE, Loop *Lp);
+
+/// \brief Given a vector and an element number, see if the scalar value is
+/// already around as a register, for example if it were inserted then extracted
+/// from the vector.
+Value *findScalarElement(Value *V, unsigned EltNo);
+
+/// \brief Get splat value if the input is a splat vector or return nullptr.
+/// The value may be extracted from a splat constant vector or from
+/// a sequence of instructions that broadcasts a single value into a vector.
+const Value *getSplatValue(const Value *V);
+
+/// \brief Compute a map of integer instructions to their minimum legal type
+/// size.
+///
+/// C semantics force sub-int-sized values (e.g. i8, i16) to be promoted to int
+/// type (e.g. i32) whenever arithmetic is performed on them.
+///
+/// For targets with native i8 or i16 operations, usually InstCombine can shrink
+/// the arithmetic type down again. However InstCombine refuses to create
+/// illegal types, so for targets without i8 or i16 registers, the lengthening
+/// and shrinking remains.
+///
+/// Most SIMD ISAs (e.g. NEON) however support vectors of i8 or i16 even when
+/// their scalar equivalents do not, so during vectorization it is important to
+/// remove these lengthens and truncates when deciding the profitability of
+/// vectorization.
+///
+/// This function analyzes the given range of instructions and determines the
+/// minimum type size each can be converted to. It attempts to remove or
+/// minimize type size changes across each def-use chain, so for example in the
+/// following code:
+///
+/// %1 = load i8, i8*
+/// %2 = add i8 %1, 2
+/// %3 = load i16, i16*
+/// %4 = zext i8 %2 to i32
+/// %5 = zext i16 %3 to i32
+/// %6 = add i32 %4, %5
+/// %7 = trunc i32 %6 to i16
+///
+/// Instruction %6 must be done at least in i16, so computeMinimumValueSizes
+/// will return: {%1: 16, %2: 16, %3: 16, %4: 16, %5: 16, %6: 16, %7: 16}.
+///
+/// If the optional TargetTransformInfo is provided, this function tries harder
+/// to do less work by only looking at illegal types.
+MapVector<Instruction*, uint64_t>
+computeMinimumValueSizes(ArrayRef<BasicBlock*> Blocks,
+ DemandedBits &DB,
+ const TargetTransformInfo *TTI=nullptr);
+
+} // llvm namespace
+
+#endif
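For illustration, a hedged sketch combining two of the helpers declared
above; laneZeroScalar is an invented name:

  #include "llvm/Analysis/VectorUtils.h"
  #include "llvm/IR/Value.h"

  // If V is a splat, return the broadcast scalar; otherwise try to
  // recover lane 0 directly (e.g. from an insert/extract chain).
  static const llvm::Value *laneZeroScalar(llvm::Value *V) {
    if (const llvm::Value *Splat = llvm::getSplatValue(V))
      return Splat;
    return llvm::findScalarElement(V, /*EltNo=*/0);
  }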
diff --git a/contrib/llvm/include/llvm/AsmParser/Parser.h b/contrib/llvm/include/llvm/AsmParser/Parser.h
new file mode 100644
index 0000000..96a15c1
--- /dev/null
+++ b/contrib/llvm/include/llvm/AsmParser/Parser.h
@@ -0,0 +1,96 @@
+//===-- Parser.h - Parser for LLVM IR text assembly files -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes are implemented by the lib/AsmParser library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ASMPARSER_PARSER_H
+#define LLVM_ASMPARSER_PARSER_H
+
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+class Constant;
+class LLVMContext;
+class Module;
+struct SlotMapping;
+class SMDiagnostic;
+
+/// This function is the main interface to the LLVM Assembly Parser. It parses
+/// an ASCII file that (presumably) contains LLVM Assembly code. It returns a
+/// Module (intermediate representation) with the corresponding features. Note
+/// that this does not verify that the generated Module is valid, so you should
+/// run the verifier after parsing the file to check that it is okay.
+/// \brief Parse LLVM Assembly from a file
+/// \param Filename The name of the file to parse
+/// \param Error Error result info.
+/// \param Context Context in which to allocate globals info.
+/// \param Slots The optional slot mapping that will be initialized during
+/// parsing.
+std::unique_ptr<Module> parseAssemblyFile(StringRef Filename,
+ SMDiagnostic &Error,
+ LLVMContext &Context,
+ SlotMapping *Slots = nullptr);
+
+/// This function is a secondary interface to the LLVM Assembly Parser. It
+/// parses an ASCII string that (presumably) contains LLVM Assembly code. It
+/// returns a Module (intermediate representation) with the corresponding
+/// features. Note that this does not verify that the generated Module is
+/// valid, so you should run the verifier after parsing the string to check
+/// that it is okay.
+/// \brief Parse LLVM Assembly from a string
+/// \param AsmString The string containing assembly
+/// \param Error Error result info.
+/// \param Context Context in which to allocate globals info.
+/// \param Slots The optional slot mapping that will be initialized during
+/// parsing.
+std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
+ SMDiagnostic &Error,
+ LLVMContext &Context,
+ SlotMapping *Slots = nullptr);
+
+/// parseAssemblyFile and parseAssemblyString are wrappers around this function.
+/// \brief Parse LLVM Assembly from a MemoryBuffer.
+/// \param F The MemoryBuffer containing assembly
+/// \param Err Error result info.
+/// \param Slots The optional slot mapping that will be initialized during
+/// parsing.
+std::unique_ptr<Module> parseAssembly(MemoryBufferRef F, SMDiagnostic &Err,
+ LLVMContext &Context,
+ SlotMapping *Slots = nullptr);
+
+/// This function is the low-level interface to the LLVM Assembly Parser.
+/// This is kept as an independent function instead of being inlined into
+/// parseAssembly for the convenience of interactive users that want to add
+/// recently parsed bits to an existing module.
+///
+/// \param F The MemoryBuffer containing assembly
+/// \param M The module to add data to.
+/// \param Err Error result info.
+/// \param Slots The optional slot mapping that will be initialized during
+/// parsing.
+/// \return true on error.
+bool parseAssemblyInto(MemoryBufferRef F, Module &M, SMDiagnostic &Err,
+ SlotMapping *Slots = nullptr);
+
+/// Parse a type and a constant value in the given string.
+///
+/// The constant value can be any LLVM constant, including a constant
+/// expression.
+///
+/// \param Slots The optional slot mapping that will restore the parsing state
+/// of the module.
+/// \return null on error.
+Constant *parseConstantValue(StringRef Asm, SMDiagnostic &Err, const Module &M,
+ const SlotMapping *Slots = nullptr);
+
+} // End llvm namespace
+
+#endif
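For illustration, a minimal sketch of the string entry point above (parseIR
is an invented wrapper; as the comments note, the caller should still run the
verifier on the result):

  #include "llvm/AsmParser/Parser.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/SourceMgr.h"
  #include "llvm/Support/raw_ostream.h"

  // Parse a module from text; prints the diagnostic and returns null on
  // a parse error.
  static std::unique_ptr<llvm::Module> parseIR(llvm::StringRef Text,
                                               llvm::LLVMContext &Ctx) {
    llvm::SMDiagnostic Err;
    std::unique_ptr<llvm::Module> M =
        llvm::parseAssemblyString(Text, Err, Ctx);
    if (!M)
      Err.print("parseIR", llvm::errs());
    return M;
  }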
diff --git a/contrib/llvm/include/llvm/AsmParser/SlotMapping.h b/contrib/llvm/include/llvm/AsmParser/SlotMapping.h
new file mode 100644
index 0000000..bd7e8fc
--- /dev/null
+++ b/contrib/llvm/include/llvm/AsmParser/SlotMapping.h
@@ -0,0 +1,42 @@
+//===-- SlotMapping.h - Slot number mapping for unnamed values --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the SlotMapping struct.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ASMPARSER_SLOTMAPPING_H
+#define LLVM_ASMPARSER_SLOTMAPPING_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class GlobalValue;
+class Type;
+
+/// This struct contains the mappings from the slot numbers to unnamed metadata
+/// nodes, global values and types. It also contains the mapping for the named
+/// types.
+/// It can be used to save the parsing state of an LLVM IR module so that the
+/// textual references to the values in the module can be parsed outside of the
+/// module's source.
+struct SlotMapping {
+ std::vector<GlobalValue *> GlobalValues;
+ std::map<unsigned, TrackingMDNodeRef> MetadataNodes;
+ StringMap<Type *> NamedTypes;
+ std::map<unsigned, Type *> Types;
+};
+
+} // end namespace llvm
+
+#endif
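For illustration, a sketch (under the assumption stated in the comment below)
of carrying a SlotMapping from parseAssemblyString into parseConstantValue,
both declared in Parser.h above:

  #include "llvm/AsmParser/Parser.h"
  #include "llvm/AsmParser/SlotMapping.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/SourceMgr.h"

  static void parseWithSlots() {
    llvm::LLVMContext Ctx;
    llvm::SMDiagnostic Err;
    llvm::SlotMapping Slots;
    std::unique_ptr<llvm::Module> M = llvm::parseAssemblyString(
        "@0 = global i32 0", Err, Ctx, &Slots);
    if (!M)
      return;
    // Assumption: Slots preserves the module's unnamed slot numbering
    // (@0 above), letting a later standalone snippet reference it.
    llvm::Constant *C = llvm::parseConstantValue("i32* @0", Err, *M, &Slots);
    (void)C; // null on error
  }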
diff --git a/contrib/llvm/include/llvm/Bitcode/BitCodes.h b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
new file mode 100644
index 0000000..96c4201
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/BitCodes.h
@@ -0,0 +1,185 @@
+//===- BitCodes.h - Enum values for the bitcode format ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values.
+//
+// The enum values defined in this file should be considered permanent. If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODES_H
+#define LLVM_BITCODE_BITCODES_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace llvm {
+namespace bitc {
+ enum StandardWidths {
+ BlockIDWidth = 8, // We use VBR-8 for block IDs.
+ CodeLenWidth = 4, // Codelen are VBR-4.
+ BlockSizeWidth = 32 // BlockSize up to 2^32 32-bit words = 16GB per block.
+ };
+
+ // The standard abbrev namespace always has a way to exit a block, enter a
+ // nested block, define abbrevs, and define an unabbreviated record.
+ enum FixedAbbrevIDs {
+ END_BLOCK = 0, // Must be zero to guarantee termination for broken bitcode.
+ ENTER_SUBBLOCK = 1,
+
+ /// DEFINE_ABBREV - Defines an abbrev for the current block. It consists
+ /// of a vbr5 for # operand infos. Each operand info is emitted with a
+ /// single bit to indicate if it is a literal encoding. If so, the value is
+ /// emitted with a vbr8. If not, the encoding is emitted as 3 bits followed
+ /// by the info value as a vbr5 if needed.
+ DEFINE_ABBREV = 2,
+
+ // UNABBREV_RECORDs are emitted with a vbr6 for the record code, followed by
+ // a vbr6 for the # operands, followed by vbr6's for each operand.
+ UNABBREV_RECORD = 3,
+
+ // This is not a code, this is a marker for the first abbrev assignment.
+ FIRST_APPLICATION_ABBREV = 4
+ };
+
+ /// StandardBlockIDs - All bitcode files can optionally include a BLOCKINFO
+ /// block, which contains metadata about other blocks in the file.
+ enum StandardBlockIDs {
+ /// BLOCKINFO_BLOCK is used to define metadata about blocks, for example,
+ /// standard abbrevs that should be available to all blocks of a specified
+ /// ID.
+ BLOCKINFO_BLOCK_ID = 0,
+
+ // Block IDs 1-7 are reserved for future expansion.
+ FIRST_APPLICATION_BLOCKID = 8
+ };
+
+ /// BlockInfoCodes - The blockinfo block contains metadata about user-defined
+ /// blocks.
+ enum BlockInfoCodes {
+ // DEFINE_ABBREV has magic semantics here, applying to the current SETBID'd
+ // block, instead of the BlockInfo block.
+
+ BLOCKINFO_CODE_SETBID = 1, // SETBID: [blockid#]
+ BLOCKINFO_CODE_BLOCKNAME = 2, // BLOCKNAME: [name]
+ BLOCKINFO_CODE_SETRECORDNAME = 3 // BLOCKINFO_CODE_SETRECORDNAME:
+ // [id, name]
+ };
+
+} // End bitc namespace
+
+/// BitCodeAbbrevOp - This describes one or more operands in an abbreviation.
+/// This is actually a union of two different things:
+/// 1. It could be a literal integer value ("the operand is always 17").
+/// 2. It could be an encoding specification ("this operand encoded like so").
+///
+class BitCodeAbbrevOp {
+ uint64_t Val; // A literal value or data for an encoding.
+ bool IsLiteral : 1; // Indicate whether this is a literal value or not.
+ unsigned Enc : 3; // The encoding to use.
+public:
+ enum Encoding {
+ Fixed = 1, // A fixed width field, Val specifies number of bits.
+ VBR = 2, // A VBR field where Val specifies the width of each chunk.
+ Array = 3, // A sequence of fields, next field specifies elt encoding.
+ Char6 = 4, // A 6-bit fixed field which maps to [a-zA-Z0-9._].
+ Blob = 5 // 32-bit aligned array of 8-bit characters.
+ };
+
+ explicit BitCodeAbbrevOp(uint64_t V) : Val(V), IsLiteral(true) {}
+ explicit BitCodeAbbrevOp(Encoding E, uint64_t Data = 0)
+ : Val(Data), IsLiteral(false), Enc(E) {}
+
+ bool isLiteral() const { return IsLiteral; }
+ bool isEncoding() const { return !IsLiteral; }
+
+ // Accessors for literals.
+ uint64_t getLiteralValue() const { assert(isLiteral()); return Val; }
+
+ // Accessors for encoding info.
+ Encoding getEncoding() const { assert(isEncoding()); return (Encoding)Enc; }
+ uint64_t getEncodingData() const {
+ assert(isEncoding() && hasEncodingData());
+ return Val;
+ }
+
+ bool hasEncodingData() const { return hasEncodingData(getEncoding()); }
+ static bool hasEncodingData(Encoding E) {
+ switch (E) {
+ case Fixed:
+ case VBR:
+ return true;
+ case Array:
+ case Char6:
+ case Blob:
+ return false;
+ }
+ report_fatal_error("Invalid encoding");
+ }
+
+ /// isChar6 - Return true if this character is legal in the Char6 encoding.
+ static bool isChar6(char C) {
+ if (C >= 'a' && C <= 'z') return true;
+ if (C >= 'A' && C <= 'Z') return true;
+ if (C >= '0' && C <= '9') return true;
+ if (C == '.' || C == '_') return true;
+ return false;
+ }
+ static unsigned EncodeChar6(char C) {
+ if (C >= 'a' && C <= 'z') return C-'a';
+ if (C >= 'A' && C <= 'Z') return C-'A'+26;
+ if (C >= '0' && C <= '9') return C-'0'+26+26;
+ if (C == '.') return 62;
+ if (C == '_') return 63;
+ llvm_unreachable("Not a valid Char6 character!");
+ }
+
+ static char DecodeChar6(unsigned V) {
+ assert((V & ~63) == 0 && "Not a Char6 encoded character!");
+ if (V < 26) return V+'a';
+ if (V < 26+26) return V-26+'A';
+ if (V < 26+26+10) return V-26-26+'0';
+ if (V == 62) return '.';
+ if (V == 63) return '_';
+ llvm_unreachable("Not a valid Char6 character!");
+ }
+
+};
+
+template <> struct isPodLike<BitCodeAbbrevOp> { static const bool value=true; };
+
+/// BitCodeAbbrev - This class represents an abbreviation record. An
+/// abbreviation allows a complex record that has redundancy to be stored in a
+/// specialized format instead of the fully-general, fully-vbr format.
+class BitCodeAbbrev : public RefCountedBase<BitCodeAbbrev> {
+ SmallVector<BitCodeAbbrevOp, 32> OperandList;
+ // Only RefCountedBase is allowed to delete.
+ ~BitCodeAbbrev() = default;
+ friend class RefCountedBase<BitCodeAbbrev>;
+
+public:
+ unsigned getNumOperandInfos() const {
+ return static_cast<unsigned>(OperandList.size());
+ }
+ const BitCodeAbbrevOp &getOperandInfo(unsigned N) const {
+ return OperandList[N];
+ }
+
+ void Add(const BitCodeAbbrevOp &OpInfo) {
+ OperandList.push_back(OpInfo);
+ }
+};
+} // End llvm namespace
+
+#endif
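As a worked example of the VBR scheme referenced above: a vbr6 chunk carries
five payload bits plus a high continuation bit, so the value 100 (binary
1100100) is emitted as the chunk 100100 (low five bits, continue) followed by
000011. And a minimal sketch of assembling an abbreviation from the operand
kinds declared here (ownership is later taken by the writer's EmitAbbrev):

  #include "llvm/Bitcode/BitCodes.h"

  // Abbreviation for a record whose code is always 4, followed by an
  // array of Char6-encoded characters (e.g. an identifier).
  static llvm::BitCodeAbbrev *makeNameAbbrev() {
    llvm::BitCodeAbbrev *Abbv = new llvm::BitCodeAbbrev();
    Abbv->Add(llvm::BitCodeAbbrevOp(4)); // literal record code
    Abbv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Array));
    Abbv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Char6));
    return Abbv;
  }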
diff --git a/contrib/llvm/include/llvm/Bitcode/BitcodeWriterPass.h b/contrib/llvm/include/llvm/Bitcode/BitcodeWriterPass.h
new file mode 100644
index 0000000..a1272cf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/BitcodeWriterPass.h
@@ -0,0 +1,71 @@
+//===-- BitcodeWriterPass.h - Bitcode writing pass --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides a bitcode writing pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITCODEWRITERPASS_H
+#define LLVM_BITCODE_BITCODEWRITERPASS_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class Module;
+class ModulePass;
+class raw_ostream;
+class PreservedAnalyses;
+
+/// \brief Create and return a pass that writes the module to the specified
+/// ostream. Note that this pass is designed for use with the legacy pass
+/// manager.
+///
+/// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
+/// reproduced when deserialized.
+///
+/// If \c EmitFunctionSummary, emit the function summary index (currently
+/// for use in ThinLTO optimization).
+ModulePass *createBitcodeWriterPass(raw_ostream &Str,
+ bool ShouldPreserveUseListOrder = false,
+ bool EmitFunctionSummary = false);
+
+/// \brief Pass for writing a module of IR out to a bitcode file.
+///
+/// Note that this is intended for use with the new pass manager. To construct
+/// a pass for the legacy pass manager, use the function above.
+class BitcodeWriterPass {
+ raw_ostream &OS;
+ bool ShouldPreserveUseListOrder;
+ bool EmitFunctionSummary;
+
+public:
+ /// \brief Construct a bitcode writer pass around a particular output stream.
+ ///
+ /// If \c ShouldPreserveUseListOrder, encode use-list order so it can be
+ /// reproduced when deserialized.
+ ///
+ /// If \c EmitFunctionSummary, emit the function summary index (currently
+ /// for use in ThinLTO optimization).
+ explicit BitcodeWriterPass(raw_ostream &OS,
+ bool ShouldPreserveUseListOrder = false,
+ bool EmitFunctionSummary = false)
+ : OS(OS), ShouldPreserveUseListOrder(ShouldPreserveUseListOrder),
+ EmitFunctionSummary(EmitFunctionSummary) {}
+
+ /// \brief Run the bitcode writer pass, and output the module to the selected
+ /// output stream.
+ PreservedAnalyses run(Module &M);
+
+ static StringRef name() { return "BitcodeWriterPass"; }
+};
+
+}
+
+#endif
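For illustration, a short sketch of the legacy-pass-manager route described
above (writeBitcode is an invented wrapper):

  #include "llvm/Bitcode/BitcodeWriterPass.h"
  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/raw_ostream.h"

  // Serialize M as bitcode to OS via createBitcodeWriterPass.
  static void writeBitcode(llvm::Module &M, llvm::raw_ostream &OS) {
    llvm::legacy::PassManager PM;
    PM.add(llvm::createBitcodeWriterPass(OS));
    PM.run(M); // OS must stay alive until run returns
  }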
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
new file mode 100644
index 0000000..c0cf6cd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamReader.h
@@ -0,0 +1,519 @@
+//===- BitstreamReader.h - Low-level bitstream reader interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the BitstreamReader class. This class can be used to
+// read an arbitrary bitstream, regardless of its contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITSTREAMREADER_H
+#define LLVM_BITCODE_BITSTREAMREADER_H
+
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/StreamingMemoryObject.h"
+#include <climits>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+/// This class is used to read from an LLVM bitcode stream, maintaining
+/// information that is global to decoding the entire file. While a file is
+/// being read, multiple cursors can be independently advanced or skipped around
+/// within the file. These are represented by the BitstreamCursor class.
+class BitstreamReader {
+public:
+ /// This contains information emitted to BLOCKINFO_BLOCK blocks. These
+ /// describe abbreviations that all blocks of the specified ID inherit.
+ struct BlockInfo {
+ unsigned BlockID;
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> Abbrevs;
+ std::string Name;
+
+ std::vector<std::pair<unsigned, std::string> > RecordNames;
+ };
+private:
+ std::unique_ptr<MemoryObject> BitcodeBytes;
+
+ std::vector<BlockInfo> BlockInfoRecords;
+
+ /// This is set to true if we don't care about the block/record name
+ /// information in the BlockInfo block. Only llvm-bcanalyzer uses this.
+ bool IgnoreBlockInfoNames;
+
+ BitstreamReader(const BitstreamReader&) = delete;
+ void operator=(const BitstreamReader&) = delete;
+public:
+ BitstreamReader() : IgnoreBlockInfoNames(true) {
+ }
+
+ BitstreamReader(const unsigned char *Start, const unsigned char *End)
+ : IgnoreBlockInfoNames(true) {
+ init(Start, End);
+ }
+
+ BitstreamReader(std::unique_ptr<MemoryObject> BitcodeBytes)
+ : BitcodeBytes(std::move(BitcodeBytes)), IgnoreBlockInfoNames(true) {}
+
+ BitstreamReader(BitstreamReader &&Other) {
+ *this = std::move(Other);
+ }
+
+ BitstreamReader &operator=(BitstreamReader &&Other) {
+ BitcodeBytes = std::move(Other.BitcodeBytes);
+ // Explicitly swap block info, so that nothing gets destroyed twice.
+ std::swap(BlockInfoRecords, Other.BlockInfoRecords);
+ IgnoreBlockInfoNames = Other.IgnoreBlockInfoNames;
+ return *this;
+ }
+
+ void init(const unsigned char *Start, const unsigned char *End) {
+ assert(((End-Start) & 3) == 0 && "Bitcode stream not a multiple of 4 bytes");
+ BitcodeBytes.reset(getNonStreamedMemoryObject(Start, End));
+ }
+
+ MemoryObject &getBitcodeBytes() { return *BitcodeBytes; }
+
+ /// This is called by clients that want block/record name information.
+ void CollectBlockInfoNames() { IgnoreBlockInfoNames = false; }
+ bool isIgnoringBlockInfoNames() { return IgnoreBlockInfoNames; }
+
+ //===--------------------------------------------------------------------===//
+ // Block Manipulation
+ //===--------------------------------------------------------------------===//
+
+ /// Return true if we've already read and processed the block info block for
+ /// this Bitstream. We only process it for the first cursor that walks over
+ /// it.
+ bool hasBlockInfoRecords() const { return !BlockInfoRecords.empty(); }
+
+ /// If there is block info for the specified ID, return it, otherwise return
+ /// null.
+ const BlockInfo *getBlockInfo(unsigned BlockID) const {
+ // Common case, the most recent entry matches BlockID.
+ if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
+ return &BlockInfoRecords.back();
+
+ for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size());
+ i != e; ++i)
+ if (BlockInfoRecords[i].BlockID == BlockID)
+ return &BlockInfoRecords[i];
+ return nullptr;
+ }
+
+ BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
+ if (const BlockInfo *BI = getBlockInfo(BlockID))
+ return *const_cast<BlockInfo*>(BI);
+
+ // Otherwise, add a new record.
+ BlockInfoRecords.emplace_back();
+ BlockInfoRecords.back().BlockID = BlockID;
+ return BlockInfoRecords.back();
+ }
+
+ /// Takes block info from the other bitstream reader.
+ ///
+ /// This is a "take" operation because BlockInfo records are non-trivial, and
+ /// indeed rather expensive.
+ void takeBlockInfo(BitstreamReader &&Other) {
+ assert(!hasBlockInfoRecords());
+ BlockInfoRecords = std::move(Other.BlockInfoRecords);
+ }
+};
+
+/// When advancing through a bitstream cursor, each advance can discover a few
+/// different kinds of entries:
+struct BitstreamEntry {
+ enum {
+ Error, // Malformed bitcode was found.
+ EndBlock, // We've reached the end of the current block (or the end of the
+ // file, which is treated like a series of EndBlock records).
+ SubBlock, // This is the start of a new subblock of a specific ID.
+ Record // This is a record with a specific AbbrevID.
+ } Kind;
+
+ unsigned ID;
+
+ static BitstreamEntry getError() {
+ BitstreamEntry E; E.Kind = Error; return E;
+ }
+ static BitstreamEntry getEndBlock() {
+ BitstreamEntry E; E.Kind = EndBlock; return E;
+ }
+ static BitstreamEntry getSubBlock(unsigned ID) {
+ BitstreamEntry E; E.Kind = SubBlock; E.ID = ID; return E;
+ }
+ static BitstreamEntry getRecord(unsigned AbbrevID) {
+ BitstreamEntry E; E.Kind = Record; E.ID = AbbrevID; return E;
+ }
+};
+
+/// This represents a position within a bitcode file. There may be multiple
+/// independent cursors reading within one bitstream, each maintaining its own
+/// local state.
+///
+/// Unlike iterators, BitstreamCursors are heavy-weight objects that should not
+/// be passed by value.
+class BitstreamCursor {
+ BitstreamReader *BitStream;
+ size_t NextChar;
+
+ // The size of the bitcode. 0 if we don't know it yet.
+ size_t Size;
+
+ /// This is the current data we have pulled from the stream but have not
+ /// returned to the client. This is specifically and intentionally defined to
+ /// follow the word size of the host machine for efficiency. We use word_t in
+ /// places that are aware of this to make it perfectly explicit what is going
+ /// on.
+ typedef size_t word_t;
+ word_t CurWord;
+
+ /// This is the number of bits in CurWord that are valid. This is always from
+ /// [0...bits_of(size_t)-1] inclusive.
+ unsigned BitsInCurWord;
+
+ // This is the declared size of code values used for the current block, in
+ // bits.
+ unsigned CurCodeSize;
+
+ /// Abbrevs installed in this block.
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> CurAbbrevs;
+
+ struct Block {
+ unsigned PrevCodeSize;
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> PrevAbbrevs;
+ explicit Block(unsigned PCS) : PrevCodeSize(PCS) {}
+ };
+
+ /// This tracks the codesize of parent blocks.
+ SmallVector<Block, 8> BlockScope;
+
+
+public:
+ static const size_t MaxChunkSize = sizeof(word_t) * 8;
+
+ BitstreamCursor() { init(nullptr); }
+
+ explicit BitstreamCursor(BitstreamReader &R) { init(&R); }
+
+ void init(BitstreamReader *R) {
+ freeState();
+
+ BitStream = R;
+ NextChar = 0;
+ Size = 0;
+ BitsInCurWord = 0;
+ CurCodeSize = 2;
+ }
+
+ void freeState();
+
+ bool canSkipToPos(size_t pos) const {
+ // pos can be skipped to if it is a valid address or one byte past the end.
+ return pos == 0 || BitStream->getBitcodeBytes().isValidAddress(
+ static_cast<uint64_t>(pos - 1));
+ }
+
+ bool AtEndOfStream() {
+ if (BitsInCurWord != 0)
+ return false;
+ if (Size != 0)
+ return Size == NextChar;
+ fillCurWord();
+ return BitsInCurWord == 0;
+ }
+
+ /// Return the number of bits used to encode an abbrev #.
+ unsigned getAbbrevIDWidth() const { return CurCodeSize; }
+
+ /// Return the bit # of the bit we are reading.
+ uint64_t GetCurrentBitNo() const {
+ return NextChar*CHAR_BIT - BitsInCurWord;
+ }
+
+ BitstreamReader *getBitStreamReader() {
+ return BitStream;
+ }
+ const BitstreamReader *getBitStreamReader() const {
+ return BitStream;
+ }
+
+ /// Flags that modify the behavior of advance().
+ enum {
+ /// If this flag is used, the advance() method does not automatically pop
+ /// the block scope when the end of a block is reached.
+ AF_DontPopBlockAtEnd = 1,
+
+ /// If this flag is used, abbrev entries are returned just like normal
+ /// records.
+ AF_DontAutoprocessAbbrevs = 2
+ };
+
+ /// Advance the current bitstream, returning the next entry in the stream.
+ BitstreamEntry advance(unsigned Flags = 0) {
+ while (1) {
+ unsigned Code = ReadCode();
+ if (Code == bitc::END_BLOCK) {
+ // Pop the end of the block unless Flags tells us not to.
+ if (!(Flags & AF_DontPopBlockAtEnd) && ReadBlockEnd())
+ return BitstreamEntry::getError();
+ return BitstreamEntry::getEndBlock();
+ }
+
+ if (Code == bitc::ENTER_SUBBLOCK)
+ return BitstreamEntry::getSubBlock(ReadSubBlockID());
+
+ if (Code == bitc::DEFINE_ABBREV &&
+ !(Flags & AF_DontAutoprocessAbbrevs)) {
+ // We read and accumulate abbrevs; the client can't do anything with
+ // them anyway.
+ ReadAbbrevRecord();
+ continue;
+ }
+
+ return BitstreamEntry::getRecord(Code);
+ }
+ }
+
+ /// This is a convenience function for clients that don't expect any
+ /// subblocks. This just skips over them automatically.
+ BitstreamEntry advanceSkippingSubblocks(unsigned Flags = 0) {
+ while (1) {
+ // If we found a normal entry, return it.
+ BitstreamEntry Entry = advance(Flags);
+ if (Entry.Kind != BitstreamEntry::SubBlock)
+ return Entry;
+
+ // If we found a sub-block, just skip over it and check the next entry.
+ if (SkipBlock())
+ return BitstreamEntry::getError();
+ }
+ }
+
+ /// Reset the stream to the specified bit number.
+ void JumpToBit(uint64_t BitNo) {
+ size_t ByteNo = size_t(BitNo/8) & ~(sizeof(word_t)-1);
+ unsigned WordBitNo = unsigned(BitNo & (sizeof(word_t)*8-1));
+ assert(canSkipToPos(ByteNo) && "Invalid location");
+
+ // Move the cursor to the right word.
+ NextChar = ByteNo;
+ BitsInCurWord = 0;
+
+ // Skip over any bits that are already consumed.
+ if (WordBitNo)
+ Read(WordBitNo);
+ }
+
+ void fillCurWord() {
+ if (Size != 0 && NextChar >= Size)
+ report_fatal_error("Unexpected end of file");
+
+ // Read the next word from the stream.
+ uint8_t Array[sizeof(word_t)] = {0};
+
+ uint64_t BytesRead =
+ BitStream->getBitcodeBytes().readBytes(Array, sizeof(Array), NextChar);
+
+ // If we run out of data, stop at the end of the stream.
+ if (BytesRead == 0) {
+ CurWord = 0;
+ BitsInCurWord = 0;
+ Size = NextChar;
+ return;
+ }
+
+ CurWord =
+ support::endian::read<word_t, support::little, support::unaligned>(
+ Array);
+ NextChar += BytesRead;
+ BitsInCurWord = BytesRead * 8;
+ }
+
+ word_t Read(unsigned NumBits) {
+ static const unsigned BitsInWord = MaxChunkSize;
+
+ assert(NumBits && NumBits <= BitsInWord &&
+ "Cannot return zero or more than BitsInWord bits!");
+
+ static const unsigned Mask = sizeof(word_t) > 4 ? 0x3f : 0x1f;
+
+ // If the field is fully contained by CurWord, return it quickly.
+ if (BitsInCurWord >= NumBits) {
+ word_t R = CurWord & (~word_t(0) >> (BitsInWord - NumBits));
+
+ // Use a mask to avoid undefined behavior.
+ CurWord >>= (NumBits & Mask);
+
+ BitsInCurWord -= NumBits;
+ return R;
+ }
+
+ word_t R = BitsInCurWord ? CurWord : 0;
+ unsigned BitsLeft = NumBits - BitsInCurWord;
+
+ fillCurWord();
+
+ // If we run out of data, stop at the end of the stream.
+ if (BitsLeft > BitsInCurWord)
+ return 0;
+
+ word_t R2 = CurWord & (~word_t(0) >> (BitsInWord - BitsLeft));
+
+ // Use a mask to avoid undefined behavior.
+ CurWord >>= (BitsLeft & Mask);
+
+ BitsInCurWord -= BitsLeft;
+
+ R |= R2 << (NumBits - BitsLeft);
+
+ return R;
+ }
+
+ uint32_t ReadVBR(unsigned NumBits) {
+ uint32_t Piece = Read(NumBits);
+ if ((Piece & (1U << (NumBits-1))) == 0)
+ return Piece;
+
+ uint32_t Result = 0;
+ unsigned NextBit = 0;
+ while (1) {
+ Result |= (Piece & ((1U << (NumBits-1))-1)) << NextBit;
+
+ if ((Piece & (1U << (NumBits-1))) == 0)
+ return Result;
+
+ NextBit += NumBits-1;
+ Piece = Read(NumBits);
+ }
+ }
+
+ // Read a VBR that may have a value up to 64 bits in size. The chunk size of
+ // the VBR must still be <= 32 bits though.
+ uint64_t ReadVBR64(unsigned NumBits) {
+ uint32_t Piece = Read(NumBits);
+ if ((Piece & (1U << (NumBits-1))) == 0)
+ return uint64_t(Piece);
+
+ uint64_t Result = 0;
+ unsigned NextBit = 0;
+ while (1) {
+ Result |= uint64_t(Piece & ((1U << (NumBits-1))-1)) << NextBit;
+
+ if ((Piece & (1U << (NumBits-1))) == 0)
+ return Result;
+
+ NextBit += NumBits-1;
+ Piece = Read(NumBits);
+ }
+ }
+
+private:
+ void SkipToFourByteBoundary() {
+ // If word_t is 64 bits and we've read less than 32 bits, just dump
+ // the bits we have up to the next 32-bit boundary.
+ if (sizeof(word_t) > 4 &&
+ BitsInCurWord >= 32) {
+ CurWord >>= BitsInCurWord-32;
+ BitsInCurWord = 32;
+ return;
+ }
+
+ BitsInCurWord = 0;
+ }
+public:
+
+ unsigned ReadCode() {
+ return Read(CurCodeSize);
+ }
+
+
+ // Block header:
+ // [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
+
+ /// Having read the ENTER_SUBBLOCK code, read the BlockID for the block.
+ unsigned ReadSubBlockID() {
+ return ReadVBR(bitc::BlockIDWidth);
+ }
+
+ /// Having read the ENTER_SUBBLOCK abbrevid and a BlockID, skip over the body
+ /// of this block. If the block record is malformed, return true.
+ bool SkipBlock() {
+ // Read and ignore the codelen value. Since we are skipping this block, we
+ // don't care what code widths are used inside of it.
+ ReadVBR(bitc::CodeLenWidth);
+ SkipToFourByteBoundary();
+ unsigned NumFourBytes = Read(bitc::BlockSizeWidth);
+
+ // Check that the block wasn't partially defined, and that the offset isn't
+ // bogus.
+ size_t SkipTo = GetCurrentBitNo() + NumFourBytes*4*8;
+ if (AtEndOfStream() || !canSkipToPos(SkipTo/8))
+ return true;
+
+ JumpToBit(SkipTo);
+ return false;
+ }
+
+ /// Having read the ENTER_SUBBLOCK abbrevid, enter the block, and return true
+ /// if the block has an error.
+ bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = nullptr);
+
+ bool ReadBlockEnd() {
+ if (BlockScope.empty()) return true;
+
+ // Block tail:
+ // [END_BLOCK, <align4bytes>]
+ SkipToFourByteBoundary();
+
+ popBlockScope();
+ return false;
+ }
+
+private:
+
+ void popBlockScope() {
+ CurCodeSize = BlockScope.back().PrevCodeSize;
+
+ CurAbbrevs = std::move(BlockScope.back().PrevAbbrevs);
+ BlockScope.pop_back();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Record Processing
+ //===--------------------------------------------------------------------===//
+
+public:
+ /// Return the abbreviation for the specified AbbrevId.
+ const BitCodeAbbrev *getAbbrev(unsigned AbbrevID) {
+ unsigned AbbrevNo = AbbrevID - bitc::FIRST_APPLICATION_ABBREV;
+ if (AbbrevNo >= CurAbbrevs.size())
+ report_fatal_error("Invalid abbrev number");
+ return CurAbbrevs[AbbrevNo].get();
+ }
+
+ /// Read the current record and discard it.
+ void skipRecord(unsigned AbbrevID);
+
+ unsigned readRecord(unsigned AbbrevID, SmallVectorImpl<uint64_t> &Vals,
+ StringRef *Blob = nullptr);
+
+ //===--------------------------------------------------------------------===//
+ // Abbrev Processing
+ //===--------------------------------------------------------------------===//
+ void ReadAbbrevRecord();
+
+ bool ReadBlockInfoBlock();
+};
+
+} // End llvm namespace
+
+#endif
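To make the cursor protocol concrete, a hedged sketch that skims a stream
using only the entry points declared above, assuming any wrapper magic has
already been consumed:

  #include "llvm/Bitcode/BitstreamReader.h"

  // Visit every entry without descending into payloads: sub-blocks are
  // skipped whole and record operands are discarded.
  static void skim(const unsigned char *Start, const unsigned char *End) {
    llvm::BitstreamReader Reader(Start, End);
    llvm::BitstreamCursor Cursor(Reader);
    while (!Cursor.AtEndOfStream()) {
      llvm::BitstreamEntry Entry = Cursor.advance();
      if (Entry.Kind == llvm::BitstreamEntry::Error)
        return; // malformed bitcode
      if (Entry.Kind == llvm::BitstreamEntry::SubBlock) {
        if (Cursor.SkipBlock()) // jump over the whole block body
          return;
      } else if (Entry.Kind == llvm::BitstreamEntry::Record) {
        Cursor.skipRecord(Entry.ID); // discard the record's operands
      }
      // EndBlock needs no action; advance() already popped the scope.
    }
  }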
diff --git a/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
new file mode 100644
index 0000000..438f4a6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/BitstreamWriter.h
@@ -0,0 +1,548 @@
+//===- BitstreamWriter.h - Low-level bitstream writer interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the BitstreamWriter class. This class can be used to
+// write an arbitrary bitstream, regardless of its contents.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_BITSTREAMWRITER_H
+#define LLVM_BITCODE_BITSTREAMWRITER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/Endian.h"
+#include <vector>
+
+namespace llvm {
+
+class BitstreamWriter {
+ SmallVectorImpl<char> &Out;
+
+ /// CurBit - Always between 0 and 31 inclusive, specifies the next bit to use.
+ unsigned CurBit;
+
+ /// CurValue - The current value. Only bits < CurBit are valid.
+ uint32_t CurValue;
+
+ /// CurCodeSize - This is the declared size of code values used for the
+ /// current block, in bits.
+ unsigned CurCodeSize;
+
+ /// BlockInfoCurBID - When emitting a BLOCKINFO_BLOCK, this is the currently
+ /// selected BLOCK ID.
+ unsigned BlockInfoCurBID;
+
+ /// CurAbbrevs - Abbrevs installed in this block.
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> CurAbbrevs;
+
+ struct Block {
+ unsigned PrevCodeSize;
+ size_t StartSizeWord;
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> PrevAbbrevs;
+ Block(unsigned PCS, size_t SSW) : PrevCodeSize(PCS), StartSizeWord(SSW) {}
+ };
+
+ /// BlockScope - This tracks the current blocks that we have entered.
+ std::vector<Block> BlockScope;
+
+ /// BlockInfo - This contains information emitted to BLOCKINFO_BLOCK blocks.
+ /// These describe abbreviations that all blocks of the specified ID inherit.
+ struct BlockInfo {
+ unsigned BlockID;
+ std::vector<IntrusiveRefCntPtr<BitCodeAbbrev>> Abbrevs;
+ };
+ std::vector<BlockInfo> BlockInfoRecords;
+
+ void WriteByte(unsigned char Value) {
+ Out.push_back(Value);
+ }
+
+ void WriteWord(unsigned Value) {
+ Value = support::endian::byte_swap<uint32_t, support::little>(Value);
+ Out.append(reinterpret_cast<const char *>(&Value),
+ reinterpret_cast<const char *>(&Value + 1));
+ }
+
+ size_t GetBufferOffset() const { return Out.size(); }
+
+ size_t GetWordIndex() const {
+ size_t Offset = GetBufferOffset();
+ assert((Offset & 3) == 0 && "Not 32-bit aligned");
+ return Offset / 4;
+ }
+
+public:
+ explicit BitstreamWriter(SmallVectorImpl<char> &O)
+ : Out(O), CurBit(0), CurValue(0), CurCodeSize(2) {}
+
+ ~BitstreamWriter() {
+ assert(CurBit == 0 && "Unflushed data remaining");
+ assert(BlockScope.empty() && CurAbbrevs.empty() && "Block imbalance");
+ }
+
+ /// \brief Retrieve the current position in the stream, in bits.
+ uint64_t GetCurrentBitNo() const { return GetBufferOffset() * 8 + CurBit; }
+
+ /// \brief Retrieve the number of bits currently used to encode an abbrev ID.
+ unsigned GetAbbrevIDWidth() const { return CurCodeSize; }
+
+ //===--------------------------------------------------------------------===//
+ // Basic Primitives for emitting bits to the stream.
+ //===--------------------------------------------------------------------===//
+
+ /// Backpatch a 32-bit word in the output at the given bit offset
+ /// with the specified value.
+ void BackpatchWord(uint64_t BitNo, unsigned NewWord) {
+ using namespace llvm::support;
+ unsigned ByteNo = BitNo / 8;
+ assert((!endian::readAtBitAlignment<uint32_t, little, unaligned>(
+ &Out[ByteNo], BitNo & 7)) &&
+ "Expected to be patching over 0-value placeholders");
+ endian::writeAtBitAlignment<uint32_t, little, unaligned>(
+ &Out[ByteNo], NewWord, BitNo & 7);
+ }
+
+ void Emit(uint32_t Val, unsigned NumBits) {
+ assert(NumBits && NumBits <= 32 && "Invalid value size!");
+ assert((Val & ~(~0U >> (32-NumBits))) == 0 && "High bits set!");
+ CurValue |= Val << CurBit;
+ if (CurBit + NumBits < 32) {
+ CurBit += NumBits;
+ return;
+ }
+
+ // Add the current word.
+ WriteWord(CurValue);
+
+ if (CurBit)
+ CurValue = Val >> (32-CurBit);
+ else
+ CurValue = 0;
+ CurBit = (CurBit+NumBits) & 31;
+ }
+
+ void Emit64(uint64_t Val, unsigned NumBits) {
+ if (NumBits <= 32)
+ Emit((uint32_t)Val, NumBits);
+ else {
+ Emit((uint32_t)Val, 32);
+ Emit((uint32_t)(Val >> 32), NumBits-32);
+ }
+ }
+
+ void FlushToWord() {
+ if (CurBit) {
+ WriteWord(CurValue);
+ CurBit = 0;
+ CurValue = 0;
+ }
+ }
+
+ void EmitVBR(uint32_t Val, unsigned NumBits) {
+ assert(NumBits <= 32 && "Too many bits to emit!");
+ uint32_t Threshold = 1U << (NumBits-1);
+
+ // Emit the bits with VBR encoding, NumBits-1 bits at a time.
+ while (Val >= Threshold) {
+ Emit((Val & ((1 << (NumBits-1))-1)) | (1 << (NumBits-1)), NumBits);
+ Val >>= NumBits-1;
+ }
+
+ Emit(Val, NumBits);
+ }
+
+ void EmitVBR64(uint64_t Val, unsigned NumBits) {
+ assert(NumBits <= 32 && "Too many bits to emit!");
+ if ((uint32_t)Val == Val)
+ return EmitVBR((uint32_t)Val, NumBits);
+
+ uint32_t Threshold = 1U << (NumBits-1);
+
+ // Emit the bits with VBR encoding, NumBits-1 bits at a time.
+ while (Val >= Threshold) {
+ Emit(((uint32_t)Val & ((1 << (NumBits-1))-1)) |
+ (1 << (NumBits-1)), NumBits);
+ Val >>= NumBits-1;
+ }
+
+ Emit((uint32_t)Val, NumBits);
+ }
+
+ /// EmitCode - Emit the specified code.
+ void EmitCode(unsigned Val) {
+ Emit(Val, CurCodeSize);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Block Manipulation
+ //===--------------------------------------------------------------------===//
+
+ /// getBlockInfo - If there is block info for the specified ID, return it,
+ /// otherwise return null.
+ BlockInfo *getBlockInfo(unsigned BlockID) {
+ // Common case, the most recent entry matches BlockID.
+ if (!BlockInfoRecords.empty() && BlockInfoRecords.back().BlockID == BlockID)
+ return &BlockInfoRecords.back();
+
+ for (unsigned i = 0, e = static_cast<unsigned>(BlockInfoRecords.size());
+ i != e; ++i)
+ if (BlockInfoRecords[i].BlockID == BlockID)
+ return &BlockInfoRecords[i];
+ return nullptr;
+ }
+
+ void EnterSubblock(unsigned BlockID, unsigned CodeLen) {
+ // Block header:
+ // [ENTER_SUBBLOCK, blockid, newcodelen, <align4bytes>, blocklen]
+ EmitCode(bitc::ENTER_SUBBLOCK);
+ EmitVBR(BlockID, bitc::BlockIDWidth);
+ EmitVBR(CodeLen, bitc::CodeLenWidth);
+ FlushToWord();
+
+ size_t BlockSizeWordIndex = GetWordIndex();
+ unsigned OldCodeSize = CurCodeSize;
+
+ // Emit a placeholder, which will be replaced when the block is popped.
+ Emit(0, bitc::BlockSizeWidth);
+
+ CurCodeSize = CodeLen;
+
+ // Push the outer block's abbrev set onto the stack, start out with an
+ // empty abbrev set.
+ BlockScope.emplace_back(OldCodeSize, BlockSizeWordIndex);
+ BlockScope.back().PrevAbbrevs.swap(CurAbbrevs);
+
+ // If there is a blockinfo for this BlockID, add all the predefined abbrevs
+ // to the abbrev list.
+ if (BlockInfo *Info = getBlockInfo(BlockID)) {
+ CurAbbrevs.insert(CurAbbrevs.end(), Info->Abbrevs.begin(),
+ Info->Abbrevs.end());
+ }
+ }
+
+ void ExitBlock() {
+ assert(!BlockScope.empty() && "Block scope imbalance!");
+ const Block &B = BlockScope.back();
+
+ // Block tail:
+ // [END_BLOCK, <align4bytes>]
+ EmitCode(bitc::END_BLOCK);
+ FlushToWord();
+
+ // Compute the size of the block, in words, not counting the size field.
+ size_t SizeInWords = GetWordIndex() - B.StartSizeWord - 1;
+ uint64_t BitNo = uint64_t(B.StartSizeWord) * 32;
+
+ // Update the block size field in the header of this sub-block.
+ BackpatchWord(BitNo, SizeInWords);
+
+ // Restore the inner block's code size and abbrev table.
+ CurCodeSize = B.PrevCodeSize;
+ CurAbbrevs = std::move(B.PrevAbbrevs);
+ BlockScope.pop_back();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Record Emission
+ //===--------------------------------------------------------------------===//
+
+private:
+ /// EmitAbbreviatedLiteral - Emit a literal value according to its abbrev
+ /// record. This is a no-op, since the abbrev specifies the literal to use.
+ template<typename uintty>
+ void EmitAbbreviatedLiteral(const BitCodeAbbrevOp &Op, uintty V) {
+ assert(Op.isLiteral() && "Not a literal");
+ // If the abbrev specifies the literal value to use, don't emit
+ // anything.
+ assert(V == Op.getLiteralValue() &&
+ "Invalid abbrev for record!");
+ }
+
+ /// EmitAbbreviatedField - Emit a single scalar field value with the specified
+ /// encoding.
+ template<typename uintty>
+ void EmitAbbreviatedField(const BitCodeAbbrevOp &Op, uintty V) {
+ assert(!Op.isLiteral() && "Literals should use EmitAbbreviatedLiteral!");
+
+ // Encode the value as we are commanded.
+ switch (Op.getEncoding()) {
+ default: llvm_unreachable("Unknown encoding!");
+ case BitCodeAbbrevOp::Fixed:
+ if (Op.getEncodingData())
+ Emit((unsigned)V, (unsigned)Op.getEncodingData());
+ break;
+ case BitCodeAbbrevOp::VBR:
+ if (Op.getEncodingData())
+ EmitVBR64(V, (unsigned)Op.getEncodingData());
+ break;
+ case BitCodeAbbrevOp::Char6:
+ Emit(BitCodeAbbrevOp::EncodeChar6((char)V), 6);
+ break;
+ }
+ }
+
+ /// EmitRecordWithAbbrevImpl - This is the core implementation of the record
+ /// emission code. If BlobData is non-null, then it specifies an array of
+ /// data that should be emitted as part of the Blob or Array operand that is
+ /// known to exist at the end of the record. If Code is specified, then
+ /// it is the record code to emit before the Vals, which must not contain
+ /// the code.
+ template <typename uintty>
+ void EmitRecordWithAbbrevImpl(unsigned Abbrev, ArrayRef<uintty> Vals,
+ StringRef Blob, Optional<unsigned> Code) {
+ const char *BlobData = Blob.data();
+ unsigned BlobLen = (unsigned) Blob.size();
+ unsigned AbbrevNo = Abbrev-bitc::FIRST_APPLICATION_ABBREV;
+ assert(AbbrevNo < CurAbbrevs.size() && "Invalid abbrev #!");
+ const BitCodeAbbrev *Abbv = CurAbbrevs[AbbrevNo].get();
+
+ EmitCode(Abbrev);
+
+ unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
+ if (Code) {
+ assert(e && "Expected non-empty abbreviation");
+ const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i++);
+
+ if (Op.isLiteral())
+ EmitAbbreviatedLiteral(Op, Code.getValue());
+ else {
+ assert(Op.getEncoding() != BitCodeAbbrevOp::Array &&
+ Op.getEncoding() != BitCodeAbbrevOp::Blob &&
+ "Expected literal or scalar");
+ EmitAbbreviatedField(Op, Code.getValue());
+ }
+ }
+
+ unsigned RecordIdx = 0;
+ for (; i != e; ++i) {
+ const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
+ if (Op.isLiteral()) {
+ assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
+ EmitAbbreviatedLiteral(Op, Vals[RecordIdx]);
+ ++RecordIdx;
+ } else if (Op.getEncoding() == BitCodeAbbrevOp::Array) {
+ // Array case.
+ assert(i + 2 == e && "array op not second to last?");
+ const BitCodeAbbrevOp &EltEnc = Abbv->getOperandInfo(++i);
+
+ // If this record has blob data, emit it, otherwise we must have record
+ // entries to encode this way.
+ if (BlobData) {
+ assert(RecordIdx == Vals.size() &&
+ "Blob data and record entries specified for array!");
+ // Emit a vbr6 to indicate the number of elements present.
+ EmitVBR(static_cast<uint32_t>(BlobLen), 6);
+
+ // Emit each field.
+ for (unsigned i = 0; i != BlobLen; ++i)
+ EmitAbbreviatedField(EltEnc, (unsigned char)BlobData[i]);
+
+ // Know that blob data is consumed for assertion below.
+ BlobData = nullptr;
+ } else {
+ // Emit a vbr6 to indicate the number of elements present.
+ EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6);
+
+ // Emit each field.
+ for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx)
+ EmitAbbreviatedField(EltEnc, Vals[RecordIdx]);
+ }
+ } else if (Op.getEncoding() == BitCodeAbbrevOp::Blob) {
+ // If this record has blob data, emit it, otherwise we must have record
+ // entries to encode this way.
+
+ // Emit a vbr6 to indicate the number of elements present.
+ if (BlobData) {
+ EmitVBR(static_cast<uint32_t>(BlobLen), 6);
+ assert(RecordIdx == Vals.size() &&
+ "Blob data and record entries specified for blob operand!");
+ } else {
+ EmitVBR(static_cast<uint32_t>(Vals.size()-RecordIdx), 6);
+ }
+
+ // Flush to a 32-bit alignment boundary.
+ FlushToWord();
+
+ // Emit each field as a literal byte.
+ if (BlobData) {
+ for (unsigned i = 0; i != BlobLen; ++i)
+ WriteByte((unsigned char)BlobData[i]);
+
+ // Know that blob data is consumed for assertion below.
+ BlobData = nullptr;
+ } else {
+ for (unsigned e = Vals.size(); RecordIdx != e; ++RecordIdx) {
+ assert(isUInt<8>(Vals[RecordIdx]) &&
+ "Value too large to emit as blob");
+ WriteByte((unsigned char)Vals[RecordIdx]);
+ }
+ }
+
+ // Align end to 32-bits.
+ while (GetBufferOffset() & 3)
+ WriteByte(0);
+ } else { // Single scalar field.
+ assert(RecordIdx < Vals.size() && "Invalid abbrev/record");
+ EmitAbbreviatedField(Op, Vals[RecordIdx]);
+ ++RecordIdx;
+ }
+ }
+ assert(RecordIdx == Vals.size() && "Not all record operands emitted!");
+ assert(BlobData == nullptr &&
+ "Blob data specified for record that doesn't use it!");
+ }
+
+public:
+
+ /// EmitRecord - Emit the specified record to the stream, using an abbrev if
+ /// we have one to compress the output.
+ template <typename Container>
+ void EmitRecord(unsigned Code, const Container &Vals, unsigned Abbrev = 0) {
+ if (!Abbrev) {
+ // If we don't have an abbrev to use, emit this in its fully unabbreviated
+ // form.
+ auto Count = static_cast<uint32_t>(makeArrayRef(Vals).size());
+ EmitCode(bitc::UNABBREV_RECORD);
+ EmitVBR(Code, 6);
+ EmitVBR(Count, 6);
+ for (unsigned i = 0, e = Count; i != e; ++i)
+ EmitVBR64(Vals[i], 6);
+ return;
+ }
+
+ EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), StringRef(), Code);
+ }
+
+ /// EmitRecordWithAbbrev - Emit a record with the specified abbreviation.
+ /// Unlike EmitRecord, the code for the record should be included in Vals as
+ /// the first entry.
+ template <typename Container>
+ void EmitRecordWithAbbrev(unsigned Abbrev, const Container &Vals) {
+ EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), StringRef(), None);
+ }
+
+ /// EmitRecordWithBlob - Emit the specified record to the stream, using an
+ /// abbrev that includes a blob at the end. The blob data to emit is
+ /// specified by the pointer and length specified at the end. In contrast to
+ /// EmitRecord, this routine expects that the first entry in Vals is the code
+ /// of the record.
+ template <typename Container>
+ void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
+ StringRef Blob) {
+ EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), Blob, None);
+ }
+ template <typename Container>
+ void EmitRecordWithBlob(unsigned Abbrev, const Container &Vals,
+ const char *BlobData, unsigned BlobLen) {
+ return EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals),
+ StringRef(BlobData, BlobLen), None);
+ }
+
+ /// EmitRecordWithArray - Just like EmitRecordWithBlob, works with records
+ /// that end with an array.
+ template <typename Container>
+ void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
+ StringRef Array) {
+ EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals), Array, None);
+ }
+ template <typename Container>
+ void EmitRecordWithArray(unsigned Abbrev, const Container &Vals,
+ const char *ArrayData, unsigned ArrayLen) {
+ return EmitRecordWithAbbrevImpl(Abbrev, makeArrayRef(Vals),
+ StringRef(ArrayData, ArrayLen), None);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Abbrev Emission
+ //===--------------------------------------------------------------------===//
+
+private:
+ // Emit the abbreviation as a DEFINE_ABBREV record.
+ void EncodeAbbrev(BitCodeAbbrev *Abbv) {
+ EmitCode(bitc::DEFINE_ABBREV);
+ EmitVBR(Abbv->getNumOperandInfos(), 5);
+ for (unsigned i = 0, e = static_cast<unsigned>(Abbv->getNumOperandInfos());
+ i != e; ++i) {
+ const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
+ Emit(Op.isLiteral(), 1);
+ if (Op.isLiteral()) {
+ EmitVBR64(Op.getLiteralValue(), 8);
+ } else {
+ Emit(Op.getEncoding(), 3);
+ if (Op.hasEncodingData())
+ EmitVBR64(Op.getEncodingData(), 5);
+ }
+ }
+ }
+public:
+
+ /// EmitAbbrev - This emits an abbreviation to the stream. Note that this
+ /// method takes ownership of the specified abbrev.
+ unsigned EmitAbbrev(BitCodeAbbrev *Abbv) {
+ // Emit the abbreviation as a record.
+ EncodeAbbrev(Abbv);
+ CurAbbrevs.push_back(Abbv);
+ return static_cast<unsigned>(CurAbbrevs.size())-1 +
+ bitc::FIRST_APPLICATION_ABBREV;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // BlockInfo Block Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EnterBlockInfoBlock - Start emitting the BLOCKINFO_BLOCK.
+ void EnterBlockInfoBlock(unsigned CodeWidth) {
+ EnterSubblock(bitc::BLOCKINFO_BLOCK_ID, CodeWidth);
+ BlockInfoCurBID = ~0U;
+ }
+private:
+ /// SwitchToBlockID - If we aren't already talking about the specified block
+ /// ID, emit a BLOCKINFO_CODE_SETBID record.
+ void SwitchToBlockID(unsigned BlockID) {
+ if (BlockInfoCurBID == BlockID) return;
+ SmallVector<unsigned, 2> V;
+ V.push_back(BlockID);
+ EmitRecord(bitc::BLOCKINFO_CODE_SETBID, V);
+ BlockInfoCurBID = BlockID;
+ }
+
+ BlockInfo &getOrCreateBlockInfo(unsigned BlockID) {
+ if (BlockInfo *BI = getBlockInfo(BlockID))
+ return *BI;
+
+ // Otherwise, add a new record.
+ BlockInfoRecords.emplace_back();
+ BlockInfoRecords.back().BlockID = BlockID;
+ return BlockInfoRecords.back();
+ }
+
+public:
+
+ /// EmitBlockInfoAbbrev - Emit a DEFINE_ABBREV record for the specified
+ /// BlockID.
+ unsigned EmitBlockInfoAbbrev(unsigned BlockID, BitCodeAbbrev *Abbv) {
+ SwitchToBlockID(BlockID);
+ EncodeAbbrev(Abbv);
+
+ // Add the abbrev to the specified block record.
+ BlockInfo &Info = getOrCreateBlockInfo(BlockID);
+ Info.Abbrevs.push_back(Abbv);
+
+ return Info.Abbrevs.size()-1+bitc::FIRST_APPLICATION_ABBREV;
+ }
+};
+
+
+} // End llvm namespace
+
+#endif
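And a matching writer-side sketch, emitting one unabbreviated record inside a
single block (a real bitcode file would additionally begin with the 'BC'
0xC0DE magic, which this fragment omits):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Bitcode/BitstreamWriter.h"

  // Produce a tiny stream: one application block holding one record.
  static void emitToy(llvm::SmallVectorImpl<char> &Buffer) {
    llvm::BitstreamWriter Writer(Buffer);
    Writer.EnterSubblock(llvm::bitc::FIRST_APPLICATION_BLOCKID,
                         /*CodeLen=*/3);
    uint64_t Vals[] = {1, 2, 3};
    Writer.EmitRecord(/*Code=*/1, llvm::makeArrayRef(Vals));
    Writer.ExitBlock(); // backpatches block length, flushes to a word
  }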
diff --git a/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
new file mode 100644
index 0000000..bcc84be
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -0,0 +1,502 @@
+//===- LLVMBitCodes.h - Enum values for the LLVM bitcode format -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values for LLVM IR bitcode files.
+//
+// The enum values defined in this file should be considered permanent. If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_LLVMBITCODES_H
+#define LLVM_BITCODE_LLVMBITCODES_H
+
+#include "llvm/Bitcode/BitCodes.h"
+
+namespace llvm {
+namespace bitc {
+ // The only top-level block type defined is for a module.
+enum BlockIDs {
+ // Blocks
+ MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID,
+
+ // Module sub-block id's.
+ PARAMATTR_BLOCK_ID,
+ PARAMATTR_GROUP_BLOCK_ID,
+
+ CONSTANTS_BLOCK_ID,
+ FUNCTION_BLOCK_ID,
+
+ // Block intended to contain information on the bitcode versioning.
+ // Can be used to provide better error messages when we fail to parse a
+ // bitcode file.
+ IDENTIFICATION_BLOCK_ID,
+
+ VALUE_SYMTAB_BLOCK_ID,
+ METADATA_BLOCK_ID,
+ METADATA_ATTACHMENT_ID,
+
+ TYPE_BLOCK_ID_NEW,
+
+ USELIST_BLOCK_ID,
+
+ MODULE_STRTAB_BLOCK_ID,
+ FUNCTION_SUMMARY_BLOCK_ID,
+
+ OPERAND_BUNDLE_TAGS_BLOCK_ID,
+
+ METADATA_KIND_BLOCK_ID
+};
+
+/// Identification block contains a string that describes the producer details,
+/// and an epoch that defines the auto-upgrade capability.
+enum IdentificationCodes {
+ IDENTIFICATION_CODE_STRING = 1, // IDENTIFICATION: [strchr x N]
+ IDENTIFICATION_CODE_EPOCH = 2, // EPOCH: [epoch#]
+};
+
+/// The epoch that defines the auto-upgrade compatibility for the bitcode.
+///
+/// LLVM guarantees in a major release that a minor release can read bitcode
+/// generated by previous minor releases. We implement this by making the
+/// reader accept only bitcode with the same epoch, except for the X.0 release,
+/// which also accepts N-1.
+enum { BITCODE_CURRENT_EPOCH = 0 };
+
+ /// MODULE blocks have a number of optional fields and subblocks.
+ enum ModuleCodes {
+ MODULE_CODE_VERSION = 1, // VERSION: [version#]
+ MODULE_CODE_TRIPLE = 2, // TRIPLE: [strchr x N]
+ MODULE_CODE_DATALAYOUT = 3, // DATALAYOUT: [strchr x N]
+ MODULE_CODE_ASM = 4, // ASM: [strchr x N]
+ MODULE_CODE_SECTIONNAME = 5, // SECTIONNAME: [strchr x N]
+
+ // FIXME: Remove DEPLIB in 4.0.
+ MODULE_CODE_DEPLIB = 6, // DEPLIB: [strchr x N]
+
+ // GLOBALVAR: [pointer type, isconst, initid,
+ // linkage, alignment, section, visibility, threadlocal]
+ MODULE_CODE_GLOBALVAR = 7,
+
+ // FUNCTION: [type, callingconv, isproto, linkage, paramattrs, alignment,
+ // section, visibility, gc, unnamed_addr]
+ MODULE_CODE_FUNCTION = 8,
+
+ // ALIAS: [alias type, aliasee val#, linkage, visibility]
+ MODULE_CODE_ALIAS_OLD = 9,
+
+ // MODULE_CODE_PURGEVALS: [numvals]
+ MODULE_CODE_PURGEVALS = 10,
+
+ MODULE_CODE_GCNAME = 11, // GCNAME: [strchr x N]
+ MODULE_CODE_COMDAT = 12, // COMDAT: [selection_kind, name]
+
+ MODULE_CODE_VSTOFFSET = 13, // VSTOFFSET: [offset]
+
+ // ALIAS: [alias value type, addrspace, aliasee val#, linkage, visibility]
+ MODULE_CODE_ALIAS = 14,
+
+ // METADATA_VALUES: [numvals]
+ MODULE_CODE_METADATA_VALUES = 15,
+ };
+
+ /// PARAMATTR blocks have code for defining a parameter attribute set.
+ enum AttributeCodes {
+ // FIXME: Remove `PARAMATTR_CODE_ENTRY_OLD' in 4.0
+ PARAMATTR_CODE_ENTRY_OLD = 1, // ENTRY: [paramidx0, attr0,
+ // paramidx1, attr1...]
+ PARAMATTR_CODE_ENTRY = 2, // ENTRY: [paramidx0, attrgrp0,
+ // paramidx1, attrgrp1, ...]
+    PARAMATTR_GRP_CODE_ENTRY = 3  // ENTRY: [id, attr0, attr1, ...]
+ };
+
+ /// TYPE blocks have codes for each type primitive they use.
+ enum TypeCodes {
+ TYPE_CODE_NUMENTRY = 1, // NUMENTRY: [numentries]
+
+ // Type Codes
+ TYPE_CODE_VOID = 2, // VOID
+ TYPE_CODE_FLOAT = 3, // FLOAT
+ TYPE_CODE_DOUBLE = 4, // DOUBLE
+ TYPE_CODE_LABEL = 5, // LABEL
+ TYPE_CODE_OPAQUE = 6, // OPAQUE
+ TYPE_CODE_INTEGER = 7, // INTEGER: [width]
+ TYPE_CODE_POINTER = 8, // POINTER: [pointee type]
+
+ TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty,
+ // paramty x N]
+
+ TYPE_CODE_HALF = 10, // HALF
+
+ TYPE_CODE_ARRAY = 11, // ARRAY: [numelts, eltty]
+ TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty]
+
+ // These are not with the other floating point types because they're
+ // a late addition, and putting them in the right place breaks
+ // binary compatibility.
+ TYPE_CODE_X86_FP80 = 13, // X86 LONG DOUBLE
+ TYPE_CODE_FP128 = 14, // LONG DOUBLE (112 bit mantissa)
+    TYPE_CODE_PPC_FP128 = 15,   // PPC LONG DOUBLE (2 doubles)
+
+ TYPE_CODE_METADATA = 16, // METADATA
+
+ TYPE_CODE_X86_MMX = 17, // X86 MMX
+
+ TYPE_CODE_STRUCT_ANON = 18, // STRUCT_ANON: [ispacked, eltty x N]
+ TYPE_CODE_STRUCT_NAME = 19, // STRUCT_NAME: [strchr x N]
+    TYPE_CODE_STRUCT_NAMED = 20, // STRUCT_NAMED: [ispacked, eltty x N]
+
+ TYPE_CODE_FUNCTION = 21, // FUNCTION: [vararg, retty, paramty x N]
+
+ TYPE_CODE_TOKEN = 22 // TOKEN
+ };
+
+ enum OperandBundleTagCode {
+ OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
+ };
+
+  // The type symbol table only has one code (TST_CODE_ENTRY).
+ enum TypeSymtabCodes {
+ TST_CODE_ENTRY = 1 // TST_ENTRY: [typeid, namechar x N]
+ };
+
+ // Value symbol table codes.
+ enum ValueSymtabCodes {
+ VST_CODE_ENTRY = 1, // VST_ENTRY: [valueid, namechar x N]
+ VST_CODE_BBENTRY = 2, // VST_BBENTRY: [bbid, namechar x N]
+ VST_CODE_FNENTRY = 3, // VST_FNENTRY: [valueid, offset, namechar x N]
+ // VST_COMBINED_FNENTRY: [offset, namechar x N]
+ VST_CODE_COMBINED_FNENTRY = 4
+ };
+
+ // The module path symbol table only has one code (MST_CODE_ENTRY).
+ enum ModulePathSymtabCodes {
+ MST_CODE_ENTRY = 1, // MST_ENTRY: [modid, namechar x N]
+ };
+
+ // The function summary section uses different codes in the per-module
+ // and combined index cases.
+ enum FunctionSummarySymtabCodes {
+ FS_CODE_PERMODULE_ENTRY = 1, // FS_ENTRY: [valueid, islocal, instcount]
+ FS_CODE_COMBINED_ENTRY = 2, // FS_ENTRY: [modid, instcount]
+ };
+
+ enum MetadataCodes {
+ METADATA_STRING = 1, // MDSTRING: [values]
+ METADATA_VALUE = 2, // VALUE: [type num, value num]
+ METADATA_NODE = 3, // NODE: [n x md num]
+ METADATA_NAME = 4, // STRING: [values]
+ METADATA_DISTINCT_NODE = 5, // DISTINCT_NODE: [n x md num]
+ METADATA_KIND = 6, // [n x [id, name]]
+ METADATA_LOCATION = 7, // [distinct, line, col, scope, inlined-at?]
+ METADATA_OLD_NODE = 8, // OLD_NODE: [n x (type num, value num)]
+ METADATA_OLD_FN_NODE = 9, // OLD_FN_NODE: [n x (type num, value num)]
+ METADATA_NAMED_NODE = 10, // NAMED_NODE: [n x mdnodes]
+    METADATA_ATTACHMENT = 11,    // [m x [value, [n x [id, mdnode]]]]
+ METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
+ METADATA_SUBRANGE = 13, // [distinct, count, lo]
+ METADATA_ENUMERATOR = 14, // [distinct, value, name]
+ METADATA_BASIC_TYPE = 15, // [distinct, tag, name, size, align, enc]
+ METADATA_FILE = 16, // [distinct, filename, directory]
+ METADATA_DERIVED_TYPE = 17, // [distinct, ...]
+ METADATA_COMPOSITE_TYPE= 18, // [distinct, ...]
+    METADATA_SUBROUTINE_TYPE = 19, // [distinct, flags, types]
+ METADATA_COMPILE_UNIT = 20, // [distinct, ...]
+ METADATA_SUBPROGRAM = 21, // [distinct, ...]
+ METADATA_LEXICAL_BLOCK = 22, // [distinct, scope, file, line, column]
+    METADATA_LEXICAL_BLOCK_FILE = 23, // [distinct, scope, file, discriminator]
+ METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line]
+ METADATA_TEMPLATE_TYPE = 25, // [distinct, scope, name, type, ...]
+ METADATA_TEMPLATE_VALUE= 26, // [distinct, scope, name, type, value, ...]
+ METADATA_GLOBAL_VAR = 27, // [distinct, ...]
+ METADATA_LOCAL_VAR = 28, // [distinct, ...]
+ METADATA_EXPRESSION = 29, // [distinct, n x element]
+ METADATA_OBJC_PROPERTY = 30, // [distinct, name, file, line, ...]
+ METADATA_IMPORTED_ENTITY=31, // [distinct, tag, scope, entity, line, name]
+ METADATA_MODULE = 32, // [distinct, scope, name, ...]
+ METADATA_MACRO = 33, // [distinct, macinfo, line, name, value]
+ METADATA_MACRO_FILE = 34, // [distinct, macinfo, line, file, ...]
+ };
+
+ // The constants block (CONSTANTS_BLOCK_ID) describes emission for each
+ // constant and maintains an implicit current type value.
+ enum ConstantsCodes {
+ CST_CODE_SETTYPE = 1, // SETTYPE: [typeid]
+ CST_CODE_NULL = 2, // NULL
+ CST_CODE_UNDEF = 3, // UNDEF
+ CST_CODE_INTEGER = 4, // INTEGER: [intval]
+ CST_CODE_WIDE_INTEGER = 5, // WIDE_INTEGER: [n x intval]
+ CST_CODE_FLOAT = 6, // FLOAT: [fpval]
+ CST_CODE_AGGREGATE = 7, // AGGREGATE: [n x value number]
+ CST_CODE_STRING = 8, // STRING: [values]
+ CST_CODE_CSTRING = 9, // CSTRING: [values]
+ CST_CODE_CE_BINOP = 10, // CE_BINOP: [opcode, opval, opval]
+ CST_CODE_CE_CAST = 11, // CE_CAST: [opcode, opty, opval]
+ CST_CODE_CE_GEP = 12, // CE_GEP: [n x operands]
+ CST_CODE_CE_SELECT = 13, // CE_SELECT: [opval, opval, opval]
+ CST_CODE_CE_EXTRACTELT = 14, // CE_EXTRACTELT: [opty, opval, opval]
+ CST_CODE_CE_INSERTELT = 15, // CE_INSERTELT: [opval, opval, opval]
+ CST_CODE_CE_SHUFFLEVEC = 16, // CE_SHUFFLEVEC: [opval, opval, opval]
+ CST_CODE_CE_CMP = 17, // CE_CMP: [opty, opval, opval, pred]
+ CST_CODE_INLINEASM_OLD = 18, // INLINEASM: [sideeffect|alignstack,
+ // asmstr,conststr]
+ CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval]
+ CST_CODE_CE_INBOUNDS_GEP = 20,// INBOUNDS_GEP: [n x operands]
+ CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
+ CST_CODE_DATA = 22, // DATA: [n x elements]
+ CST_CODE_INLINEASM = 23 // INLINEASM: [sideeffect|alignstack|
+ // asmdialect,asmstr,conststr]
+ };
+
+ /// CastOpcodes - These are values used in the bitcode files to encode which
+ /// cast a CST_CODE_CE_CAST or a XXX refers to. The values of these enums
+ /// have no fixed relation to the LLVM IR enum values. Changing these will
+ /// break compatibility with old files.
+ enum CastOpcodes {
+ CAST_TRUNC = 0,
+ CAST_ZEXT = 1,
+ CAST_SEXT = 2,
+ CAST_FPTOUI = 3,
+ CAST_FPTOSI = 4,
+ CAST_UITOFP = 5,
+ CAST_SITOFP = 6,
+ CAST_FPTRUNC = 7,
+ CAST_FPEXT = 8,
+ CAST_PTRTOINT = 9,
+ CAST_INTTOPTR = 10,
+ CAST_BITCAST = 11,
+ CAST_ADDRSPACECAST = 12
+ };
+
+ /// BinaryOpcodes - These are values used in the bitcode files to encode which
+ /// binop a CST_CODE_CE_BINOP or a XXX refers to. The values of these enums
+ /// have no fixed relation to the LLVM IR enum values. Changing these will
+ /// break compatibility with old files.
+ enum BinaryOpcodes {
+ BINOP_ADD = 0,
+ BINOP_SUB = 1,
+ BINOP_MUL = 2,
+ BINOP_UDIV = 3,
+ BINOP_SDIV = 4, // overloaded for FP
+ BINOP_UREM = 5,
+ BINOP_SREM = 6, // overloaded for FP
+ BINOP_SHL = 7,
+ BINOP_LSHR = 8,
+ BINOP_ASHR = 9,
+ BINOP_AND = 10,
+ BINOP_OR = 11,
+ BINOP_XOR = 12
+ };
+
+ /// These are values used in the bitcode files to encode AtomicRMW operations.
+ /// The values of these enums have no fixed relation to the LLVM IR enum
+ /// values. Changing these will break compatibility with old files.
+ enum RMWOperations {
+ RMW_XCHG = 0,
+ RMW_ADD = 1,
+ RMW_SUB = 2,
+ RMW_AND = 3,
+ RMW_NAND = 4,
+ RMW_OR = 5,
+ RMW_XOR = 6,
+ RMW_MAX = 7,
+ RMW_MIN = 8,
+ RMW_UMAX = 9,
+ RMW_UMIN = 10
+ };
+
+ /// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
+ /// OverflowingBinaryOperator's SubclassOptionalData contents.
+ enum OverflowingBinaryOperatorOptionalFlags {
+ OBO_NO_UNSIGNED_WRAP = 0,
+ OBO_NO_SIGNED_WRAP = 1
+ };
+
+ /// PossiblyExactOperatorOptionalFlags - Flags for serializing
+ /// PossiblyExactOperator's SubclassOptionalData contents.
+ enum PossiblyExactOperatorOptionalFlags {
+ PEO_EXACT = 0
+ };
+
+ /// Encoded AtomicOrdering values.
+ enum AtomicOrderingCodes {
+ ORDERING_NOTATOMIC = 0,
+ ORDERING_UNORDERED = 1,
+ ORDERING_MONOTONIC = 2,
+ ORDERING_ACQUIRE = 3,
+ ORDERING_RELEASE = 4,
+ ORDERING_ACQREL = 5,
+ ORDERING_SEQCST = 6
+ };
+
+ /// Encoded SynchronizationScope values.
+ enum AtomicSynchScopeCodes {
+ SYNCHSCOPE_SINGLETHREAD = 0,
+ SYNCHSCOPE_CROSSTHREAD = 1
+ };
+
+  /// Markers and flags for call instructions.
+ enum CallMarkersFlags {
+ CALL_TAIL = 0,
+ CALL_CCONV = 1,
+ CALL_MUSTTAIL = 14,
+ CALL_EXPLICIT_TYPE = 15,
+ CALL_NOTAIL = 16,
+ CALL_FMF = 17 // Call has optional fast-math-flags.
+ };
+
+ // The function body block (FUNCTION_BLOCK_ID) describes function bodies. It
+ // can contain a constant block (CONSTANTS_BLOCK_ID).
+ enum FunctionCodes {
+ FUNC_CODE_DECLAREBLOCKS = 1, // DECLAREBLOCKS: [n]
+
+ FUNC_CODE_INST_BINOP = 2, // BINOP: [opcode, ty, opval, opval]
+ FUNC_CODE_INST_CAST = 3, // CAST: [opcode, ty, opty, opval]
+ FUNC_CODE_INST_GEP_OLD = 4, // GEP: [n x operands]
+ FUNC_CODE_INST_SELECT = 5, // SELECT: [ty, opval, opval, opval]
+ FUNC_CODE_INST_EXTRACTELT = 6, // EXTRACTELT: [opty, opval, opval]
+ FUNC_CODE_INST_INSERTELT = 7, // INSERTELT: [ty, opval, opval, opval]
+ FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval]
+ FUNC_CODE_INST_CMP = 9, // CMP: [opty, opval, opval, pred]
+
+ FUNC_CODE_INST_RET = 10, // RET: [opty,opval<both optional>]
+ FUNC_CODE_INST_BR = 11, // BR: [bb#, bb#, cond] or [bb#]
+ FUNC_CODE_INST_SWITCH = 12, // SWITCH: [opty, op0, op1, ...]
+ FUNC_CODE_INST_INVOKE = 13, // INVOKE: [attr, fnty, op0,op1, ...]
+ // 14 is unused.
+ FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE
+
+ FUNC_CODE_INST_PHI = 16, // PHI: [ty, val0,bb0, ...]
+ // 17 is unused.
+ // 18 is unused.
+ FUNC_CODE_INST_ALLOCA = 19, // ALLOCA: [instty, opty, op, align]
+ FUNC_CODE_INST_LOAD = 20, // LOAD: [opty, op, align, vol]
+ // 21 is unused.
+ // 22 is unused.
+ FUNC_CODE_INST_VAARG = 23, // VAARG: [valistty, valist, instty]
+    // This store code encodes the pointer type, rather than the value type;
+    // this is so information available only in the pointer type (e.g. address
+    // spaces) is retained.
+ FUNC_CODE_INST_STORE_OLD = 24, // STORE: [ptrty,ptr,val, align, vol]
+ // 25 is unused.
+ FUNC_CODE_INST_EXTRACTVAL = 26, // EXTRACTVAL: [n x operands]
+ FUNC_CODE_INST_INSERTVAL = 27, // INSERTVAL: [n x operands]
+    // fcmp/icmp returning Int1Ty or vector of Int1Ty. Same as CMP; exists to
+ // support legacy vicmp/vfcmp instructions.
+ FUNC_CODE_INST_CMP2 = 28, // CMP2: [opty, opval, opval, pred]
+ // new select on i1 or [N x i1]
+ FUNC_CODE_INST_VSELECT = 29, // VSELECT: [ty,opval,opval,predty,pred]
+ FUNC_CODE_INST_INBOUNDS_GEP_OLD = 30, // INBOUNDS_GEP: [n x operands]
+ FUNC_CODE_INST_INDIRECTBR = 31, // INDIRECTBR: [opty, op0, op1, ...]
+ // 32 is unused.
+ FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN
+
+ FUNC_CODE_INST_CALL = 34, // CALL: [attr, cc, fnty, fnid, args...]
+
+ FUNC_CODE_DEBUG_LOC = 35, // DEBUG_LOC: [Line,Col,ScopeVal, IAVal]
+ FUNC_CODE_INST_FENCE = 36, // FENCE: [ordering, synchscope]
+ FUNC_CODE_INST_CMPXCHG_OLD = 37, // CMPXCHG: [ptrty,ptr,cmp,new, align, vol,
+ // ordering, synchscope]
+ FUNC_CODE_INST_ATOMICRMW = 38, // ATOMICRMW: [ptrty,ptr,val, operation,
+ // align, vol,
+ // ordering, synchscope]
+ FUNC_CODE_INST_RESUME = 39, // RESUME: [opval]
+ FUNC_CODE_INST_LANDINGPAD_OLD = 40, // LANDINGPAD: [ty,val,val,num,id0,val0...]
+ FUNC_CODE_INST_LOADATOMIC = 41, // LOAD: [opty, op, align, vol,
+ // ordering, synchscope]
+    FUNC_CODE_INST_STOREATOMIC_OLD = 42, // STORE: [ptrty,ptr,val, align, vol,
+                                         //         ordering, synchscope]
+ FUNC_CODE_INST_GEP = 43, // GEP: [inbounds, n x operands]
+ FUNC_CODE_INST_STORE = 44, // STORE: [ptrty,ptr,valty,val, align, vol]
+    FUNC_CODE_INST_STOREATOMIC = 45, // STORE: [ptrty,ptr,valty,val, align,
+                                     //         vol, ordering, synchscope]
+ FUNC_CODE_INST_CMPXCHG = 46, // CMPXCHG: [ptrty,ptr,valty,cmp,new, align,
+ // vol,ordering,synchscope]
+ FUNC_CODE_INST_LANDINGPAD = 47, // LANDINGPAD: [ty,val,num,id0,val0...]
+ FUNC_CODE_INST_CLEANUPRET = 48, // CLEANUPRET: [val] or [val,bb#]
+ FUNC_CODE_INST_CATCHRET = 49, // CATCHRET: [val,bb#]
+ FUNC_CODE_INST_CATCHPAD = 50, // CATCHPAD: [bb#,bb#,num,args...]
+ FUNC_CODE_INST_CLEANUPPAD = 51, // CLEANUPPAD: [num,args...]
+ FUNC_CODE_INST_CATCHSWITCH = 52, // CATCHSWITCH: [num,args...] or [num,args...,bb]
+ // 53 is unused.
+ // 54 is unused.
+ FUNC_CODE_OPERAND_BUNDLE = 55, // OPERAND_BUNDLE: [tag#, value...]
+ };
+
+ enum UseListCodes {
+ USELIST_CODE_DEFAULT = 1, // DEFAULT: [index..., value-id]
+ USELIST_CODE_BB = 2 // BB: [index..., bb-id]
+ };
+
+ enum AttributeKindCodes {
+ // = 0 is unused
+ ATTR_KIND_ALIGNMENT = 1,
+ ATTR_KIND_ALWAYS_INLINE = 2,
+ ATTR_KIND_BY_VAL = 3,
+ ATTR_KIND_INLINE_HINT = 4,
+ ATTR_KIND_IN_REG = 5,
+ ATTR_KIND_MIN_SIZE = 6,
+ ATTR_KIND_NAKED = 7,
+ ATTR_KIND_NEST = 8,
+ ATTR_KIND_NO_ALIAS = 9,
+ ATTR_KIND_NO_BUILTIN = 10,
+ ATTR_KIND_NO_CAPTURE = 11,
+ ATTR_KIND_NO_DUPLICATE = 12,
+ ATTR_KIND_NO_IMPLICIT_FLOAT = 13,
+ ATTR_KIND_NO_INLINE = 14,
+ ATTR_KIND_NON_LAZY_BIND = 15,
+ ATTR_KIND_NO_RED_ZONE = 16,
+ ATTR_KIND_NO_RETURN = 17,
+ ATTR_KIND_NO_UNWIND = 18,
+ ATTR_KIND_OPTIMIZE_FOR_SIZE = 19,
+ ATTR_KIND_READ_NONE = 20,
+ ATTR_KIND_READ_ONLY = 21,
+ ATTR_KIND_RETURNED = 22,
+ ATTR_KIND_RETURNS_TWICE = 23,
+ ATTR_KIND_S_EXT = 24,
+ ATTR_KIND_STACK_ALIGNMENT = 25,
+ ATTR_KIND_STACK_PROTECT = 26,
+ ATTR_KIND_STACK_PROTECT_REQ = 27,
+ ATTR_KIND_STACK_PROTECT_STRONG = 28,
+ ATTR_KIND_STRUCT_RET = 29,
+ ATTR_KIND_SANITIZE_ADDRESS = 30,
+ ATTR_KIND_SANITIZE_THREAD = 31,
+ ATTR_KIND_SANITIZE_MEMORY = 32,
+ ATTR_KIND_UW_TABLE = 33,
+ ATTR_KIND_Z_EXT = 34,
+ ATTR_KIND_BUILTIN = 35,
+ ATTR_KIND_COLD = 36,
+ ATTR_KIND_OPTIMIZE_NONE = 37,
+ ATTR_KIND_IN_ALLOCA = 38,
+ ATTR_KIND_NON_NULL = 39,
+ ATTR_KIND_JUMP_TABLE = 40,
+ ATTR_KIND_DEREFERENCEABLE = 41,
+ ATTR_KIND_DEREFERENCEABLE_OR_NULL = 42,
+ ATTR_KIND_CONVERGENT = 43,
+ ATTR_KIND_SAFESTACK = 44,
+ ATTR_KIND_ARGMEMONLY = 45,
+ ATTR_KIND_SWIFT_SELF = 46,
+ ATTR_KIND_SWIFT_ERROR = 47,
+ ATTR_KIND_NO_RECURSE = 48,
+ ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
+ ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50
+ };
+
+ enum ComdatSelectionKindCodes {
+ COMDAT_SELECTION_KIND_ANY = 1,
+ COMDAT_SELECTION_KIND_EXACT_MATCH = 2,
+ COMDAT_SELECTION_KIND_LARGEST = 3,
+ COMDAT_SELECTION_KIND_NO_DUPLICATES = 4,
+ COMDAT_SELECTION_KIND_SAME_SIZE = 5,
+ };
+
+} // End bitc namespace
+} // End llvm namespace
+
+#endif
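
As a hedged illustration of the implicit current type noted above for the constants block: a writer emits CST_CODE_SETTYPE once and then any number of constants of that type, so consecutive same-typed constants share a single SETTYPE record. The sketch below assumes a BitstreamWriter already positioned inside a CONSTANTS_BLOCK_ID subblock; the type-table index 3 for i32 is made up, and integers use the writer's signed-VBR convention of (V << 1) | sign.

// Sketch only: Stream is a BitstreamWriter inside a CONSTANTS_BLOCK_ID block.
SmallVector<uint64_t, 1> Record;
Record.push_back(3); // hypothetical type-table index for i32
Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record);

Record.clear();
Record.push_back(42 << 1); // signed-VBR encoding of +42
Stream.EmitRecord(bitc::CST_CODE_INTEGER, Record);

Record.clear();
Record.push_back((7 << 1) | 1); // signed-VBR encoding of -7
Stream.EmitRecord(bitc::CST_CODE_INTEGER, Record); // same implicit type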
diff --git a/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
new file mode 100644
index 0000000..60d865f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Bitcode/ReaderWriter.h
@@ -0,0 +1,213 @@
+//===-- llvm/Bitcode/ReaderWriter.h - Bitcode reader/writers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines interfaces to read and write LLVM bitcode files/streams.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_READERWRITER_H
+#define LLVM_BITCODE_READERWRITER_H
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/FunctionInfo.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+ class BitstreamWriter;
+ class DataStreamer;
+ class LLVMContext;
+ class Module;
+ class ModulePass;
+ class raw_ostream;
+
+ /// Read the header of the specified bitcode buffer and prepare for lazy
+ /// deserialization of function bodies. If ShouldLazyLoadMetadata is true,
+ /// lazily load metadata as well. If successful, this moves Buffer. On
+ /// error, this *does not* move Buffer.
+ ErrorOr<std::unique_ptr<Module>>
+ getLazyBitcodeModule(std::unique_ptr<MemoryBuffer> &&Buffer,
+ LLVMContext &Context,
+ bool ShouldLazyLoadMetadata = false);
+
+ /// Read the header of the specified stream and prepare for lazy
+ /// deserialization and streaming of function bodies.
+ ErrorOr<std::unique_ptr<Module>>
+ getStreamedBitcodeModule(StringRef Name,
+ std::unique_ptr<DataStreamer> Streamer,
+ LLVMContext &Context);
+
+ /// Read the header of the specified bitcode buffer and extract just the
+ /// triple information. If successful, this returns a string. On error, this
+ /// returns "".
+ std::string getBitcodeTargetTriple(MemoryBufferRef Buffer,
+ LLVMContext &Context);
+
+ /// Read the header of the specified bitcode buffer and extract just the
+ /// producer string information. If successful, this returns a string. On
+ /// error, this returns "".
+ std::string getBitcodeProducerString(MemoryBufferRef Buffer,
+ LLVMContext &Context);
+
+ /// Read the specified bitcode file, returning the module.
+ ErrorOr<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
+ LLVMContext &Context);
+
+ /// Check if the given bitcode buffer contains a function summary block.
+ bool hasFunctionSummary(MemoryBufferRef Buffer,
+ DiagnosticHandlerFunction DiagnosticHandler);
+
+ /// Parse the specified bitcode buffer, returning the function info index.
+  /// If IsLazy is false, parse the entire function summary into
+ /// the index. Otherwise skip the function summary section, and only create
+ /// an index object with a map from function name to function summary offset.
+ /// The index is used to perform lazy function summary reading later.
+ ErrorOr<std::unique_ptr<FunctionInfoIndex>>
+ getFunctionInfoIndex(MemoryBufferRef Buffer,
+ DiagnosticHandlerFunction DiagnosticHandler,
+ bool IsLazy = false);
+
+ /// This method supports lazy reading of function summary data from the
+ /// combined index during function importing. When reading the combined index
+ /// file, getFunctionInfoIndex is first invoked with IsLazy=true.
+ /// Then this method is called for each function considered for importing,
+ /// to parse the summary information for the given function name into
+ /// the index.
+ std::error_code readFunctionSummary(
+ MemoryBufferRef Buffer, DiagnosticHandlerFunction DiagnosticHandler,
+ StringRef FunctionName, std::unique_ptr<FunctionInfoIndex> Index);
+
+ /// \brief Write the specified module to the specified raw output stream.
+ ///
+ /// For streams where it matters, the given stream should be in "binary"
+ /// mode.
+ ///
+ /// If \c ShouldPreserveUseListOrder, encode the use-list order for each \a
+ /// Value in \c M. These will be reconstructed exactly when \a M is
+ /// deserialized.
+ ///
+ /// If \c EmitFunctionSummary, emit the function summary index (currently
+ /// for use in ThinLTO optimization).
+ void WriteBitcodeToFile(const Module *M, raw_ostream &Out,
+ bool ShouldPreserveUseListOrder = false,
+ bool EmitFunctionSummary = false);
+
+ /// Write the specified function summary index to the given raw output stream,
+ /// where it will be written in a new bitcode block. This is used when
+ /// writing the combined index file for ThinLTO.
+ void WriteFunctionSummaryToFile(const FunctionInfoIndex &Index,
+ raw_ostream &Out);
+
+ /// isBitcodeWrapper - Return true if the given bytes are the magic bytes
+ /// for an LLVM IR bitcode wrapper.
+ ///
+ inline bool isBitcodeWrapper(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
+ // See if you can find the hidden message in the magic bytes :-).
+ // (Hint: it's a little-endian encoding.)
+ return BufPtr != BufEnd &&
+ BufPtr[0] == 0xDE &&
+ BufPtr[1] == 0xC0 &&
+ BufPtr[2] == 0x17 &&
+ BufPtr[3] == 0x0B;
+ }
+
+ /// isRawBitcode - Return true if the given bytes are the magic bytes for
+ /// raw LLVM IR bitcode (without a wrapper).
+ ///
+ inline bool isRawBitcode(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
+ // These bytes sort of have a hidden message, but it's not in
+ // little-endian this time, and it's a little redundant.
+ return BufPtr != BufEnd &&
+ BufPtr[0] == 'B' &&
+ BufPtr[1] == 'C' &&
+ BufPtr[2] == 0xc0 &&
+ BufPtr[3] == 0xde;
+ }
+
+ /// isBitcode - Return true if the given bytes are the magic bytes for
+ /// LLVM IR bitcode, either with or without a wrapper.
+ ///
+ inline bool isBitcode(const unsigned char *BufPtr,
+ const unsigned char *BufEnd) {
+ return isBitcodeWrapper(BufPtr, BufEnd) ||
+ isRawBitcode(BufPtr, BufEnd);
+ }
+
+ /// SkipBitcodeWrapperHeader - Some systems wrap bc files with a special
+ /// header for padding or other reasons. The format of this header is:
+ ///
+ /// struct bc_header {
+ /// uint32_t Magic; // 0x0B17C0DE
+ /// uint32_t Version; // Version, currently always 0.
+ /// uint32_t BitcodeOffset; // Offset to traditional bitcode file.
+ /// uint32_t BitcodeSize; // Size of traditional bitcode file.
+ /// ... potentially other gunk ...
+ /// };
+ ///
+ /// This function is called when we find a file with a matching magic number.
+ /// In this case, skip down to the subsection of the file that is actually a
+ /// BC file.
+ /// If 'VerifyBufferSize' is true, check that the buffer is large enough to
+ /// contain the whole bitcode file.
+ inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
+ const unsigned char *&BufEnd,
+ bool VerifyBufferSize) {
+ enum {
+ KnownHeaderSize = 4*4, // Size of header we read.
+ OffsetField = 2*4, // Offset in bytes to Offset field.
+ SizeField = 3*4 // Offset in bytes to Size field.
+ };
+
+ // Must contain the header!
+ if (BufEnd-BufPtr < KnownHeaderSize) return true;
+
+ unsigned Offset = support::endian::read32le(&BufPtr[OffsetField]);
+ unsigned Size = support::endian::read32le(&BufPtr[SizeField]);
+
+ // Verify that Offset+Size fits in the file.
+ if (VerifyBufferSize && Offset+Size > unsigned(BufEnd-BufPtr))
+ return true;
+ BufPtr += Offset;
+ BufEnd = BufPtr+Size;
+ return false;
+ }
+
+ const std::error_category &BitcodeErrorCategory();
+ enum class BitcodeError { InvalidBitcodeSignature = 1, CorruptedBitcode };
+ inline std::error_code make_error_code(BitcodeError E) {
+ return std::error_code(static_cast<int>(E), BitcodeErrorCategory());
+ }
+
+ class BitcodeDiagnosticInfo : public DiagnosticInfo {
+ const Twine &Msg;
+ std::error_code EC;
+
+ public:
+ BitcodeDiagnosticInfo(std::error_code EC, DiagnosticSeverity Severity,
+ const Twine &Msg);
+ void print(DiagnosticPrinter &DP) const override;
+ std::error_code getError() const { return EC; }
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_Bitcode;
+ }
+ };
+
+} // End llvm namespace
+
+namespace std {
+template <> struct is_error_code_enum<llvm::BitcodeError> : std::true_type {};
+}
+
+#endif
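
A minimal consumer of the reader API above might look like this sketch; error handling is reduced to early returns, and the eager parseBitcodeFile() could be swapped for getLazyBitcodeModule() to defer function bodies. Note that isBitcode() accepts both wrapped and raw buffers.

#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"

using namespace llvm;

std::unique_ptr<Module> loadBitcode(StringRef Path, LLVMContext &Context) {
  ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrErr = MemoryBuffer::getFile(Path);
  if (!BufOrErr)
    return nullptr;
  MemoryBufferRef Ref = BufOrErr.get()->getMemBufferRef();
  const unsigned char *Start =
      reinterpret_cast<const unsigned char *>(Ref.getBufferStart());
  const unsigned char *End = Start + Ref.getBufferSize();
  if (!isBitcode(Start, End)) // accepts wrapped and raw magic alike
    return nullptr;
  ErrorOr<std::unique_ptr<Module>> ModOrErr = parseBitcodeFile(Ref, Context);
  if (!ModOrErr)
    return nullptr;
  return std::move(ModOrErr.get());
}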
diff --git a/contrib/llvm/include/llvm/CodeGen/Analysis.h b/contrib/llvm/include/llvm/CodeGen/Analysis.h
new file mode 100644
index 0000000..38e64ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/Analysis.h
@@ -0,0 +1,126 @@
+//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares several CodeGen-specific LLVM IR analysis utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ANALYSIS_H
+#define LLVM_CODEGEN_ANALYSIS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+class GlobalValue;
+class MachineBasicBlock;
+class MachineFunction;
+class TargetLoweringBase;
+class TargetLowering;
+class TargetMachine;
+class SDNode;
+class SDValue;
+class SelectionDAG;
+struct EVT;
+
+/// \brief Compute the linearized index of a member in a nested
+/// aggregate/struct/array.
+///
+/// Given an LLVM IR aggregate type and a sequence of insertvalue or
+/// extractvalue indices that identify a member, return the linearized index of
+/// the start of the member, i.e. the number of elements in memory before the
+/// sought one. This is unrelated to the member's offset in bytes.
+///
+/// \param Ty is the type indexed by \p Indices.
+/// \param Indices is an optional pointer in the indices list to the current
+/// index.
+/// \param IndicesEnd is the end of the indices list.
+/// \param CurIndex is the current index in the recursion.
+///
+/// \returns \p CurIndex plus the linear index within \p Ty of the member
+/// identified by the indices list.
+unsigned ComputeLinearIndex(Type *Ty,
+ const unsigned *Indices,
+ const unsigned *IndicesEnd,
+ unsigned CurIndex = 0);
+
+inline unsigned ComputeLinearIndex(Type *Ty,
+ ArrayRef<unsigned> Indices,
+ unsigned CurIndex = 0) {
+ return ComputeLinearIndex(Ty, Indices.begin(), Indices.end(), CurIndex);
+}
+
+/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
+/// EVTs that represent all the individual underlying
+/// non-aggregate types that comprise it.
+///
+/// If Offsets is non-null, it points to a vector to be filled in
+/// with the in-memory offsets of each of the individual values.
+///
+void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty,
+ SmallVectorImpl<EVT> &ValueVTs,
+ SmallVectorImpl<uint64_t> *Offsets = nullptr,
+ uint64_t StartingOffset = 0);
+
+/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
+GlobalValue *ExtractTypeInfo(Value *V);
+
+/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
+/// processed uses a memory 'm' constraint.
+bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
+ const TargetLowering &TLI);
+
+/// getFCmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR floating-point condition code. This includes
+/// consideration of global floating-point math flags.
+///
+ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
+
+/// getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats,
+/// return the equivalent code if we're allowed to assume that NaNs won't occur.
+ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC);
+
+/// getICmpCondCode - Return the ISD condition code corresponding to
+/// the given LLVM IR integer condition code.
+///
+ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
+
+/// Test if the given instruction is in a position to be optimized
+/// with a tail-call. This roughly means that it's in a block with
+/// a return and there's nothing that needs to be scheduled
+/// between it and the return.
+///
+/// This function only tests target-independent requirements.
+bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM);
+
+/// Test, given that the input instruction is in the tail call position,
+/// whether the return type or any attributes of the function will inhibit
+/// tail call optimization.
+bool returnTypeIsEligibleForTailCall(const Function *F,
+ const Instruction *I,
+ const ReturnInst *Ret,
+ const TargetLoweringBase &TLI);
+
+// True if GV can be left out of the object symbol table. This is the case
+// for linkonce_odr values whose address is not significant. While legal, it is
+// not normally profitable to omit them from the .o symbol table. Using this
+// analysis makes sense when the information can be passed down to the linker
+// or we are in LTO.
+bool canBeOmittedFromSymbolTable(const GlobalValue *GV);
+
+DenseMap<const MachineBasicBlock *, int>
+getFuncletMembership(const MachineFunction &MF);
+
+} // End llvm namespace
+
+#endif
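
A small worked example of ComputeLinearIndex, assuming an LLVMContext is at hand: for the aggregate { i32, { float, float }, [2 x i64] }, the member at indices {2, 1} (the second i64) is preceded by four non-aggregate elements (i32, float, float, i64), so the linearized index is 4, independent of any byte offsets.

#include "llvm/CodeGen/Analysis.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

unsigned linearIndexExample(LLVMContext &Ctx) {
  Type *F = Type::getFloatTy(Ctx);
  // %T = type { i32, { float, float }, [2 x i64] }
  StructType *Inner = StructType::get(Ctx, {F, F});
  StructType *T = StructType::get(
      Ctx, {Type::getInt32Ty(Ctx), Inner,
            ArrayType::get(Type::getInt64Ty(Ctx), 2)});
  unsigned Indices[] = {2, 1};            // second element of the [2 x i64]
  return ComputeLinearIndex(T, Indices);  // == 4
}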
diff --git a/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
new file mode 100644
index 0000000..f5e778b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -0,0 +1,557 @@
+//===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the base class for target specific
+// asm writers. This class primarily handles common functionality used by
+// all asm writers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTER_H
+#define LLVM_CODEGEN_ASMPRINTER_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class AsmPrinterHandler;
+class BlockAddress;
+class ByteStreamer;
+class GCStrategy;
+class Constant;
+class ConstantArray;
+class DIE;
+class DIEAbbrev;
+class GCMetadataPrinter;
+class GlobalValue;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineInstr;
+class MachineLocation;
+class MachineLoopInfo;
+class MachineLoop;
+class MachineConstantPoolValue;
+class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MCAsmInfo;
+class MCCFIInstruction;
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCSection;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbol;
+class MCTargetOptions;
+class MDNode;
+class DwarfDebug;
+class Mangler;
+class TargetLoweringObjectFile;
+class DataLayout;
+class TargetMachine;
+
+/// This class is intended to be used as a driving class for all asm writers.
+class AsmPrinter : public MachineFunctionPass {
+public:
+ /// Target machine description.
+ ///
+ TargetMachine &TM;
+
+ /// Target Asm Printer information.
+ ///
+ const MCAsmInfo *MAI;
+
+ /// This is the context for the output file that we are streaming. This owns
+ /// all of the global MC-related objects for the generated translation unit.
+ MCContext &OutContext;
+
+ /// This is the MCStreamer object for the file we are generating. This
+ /// contains the transient state for the current translation unit that we are
+ /// generating (such as the current section etc).
+ std::unique_ptr<MCStreamer> OutStreamer;
+
+ /// The current machine function.
+ const MachineFunction *MF;
+
+ /// This is a pointer to the current MachineModuleInfo.
+ MachineModuleInfo *MMI;
+
+ /// Name-mangler for global names.
+ ///
+ Mangler *Mang;
+
+ /// The symbol for the current function. This is recalculated at the beginning
+ /// of each call to runOnMachineFunction().
+ ///
+ MCSymbol *CurrentFnSym;
+
+ /// The symbol used to represent the start of the current function for the
+ /// purpose of calculating its size (e.g. using the .size directive). By
+ /// default, this is equal to CurrentFnSym.
+ MCSymbol *CurrentFnSymForSize;
+
+ /// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
+ /// its number of uses by other globals.
+ typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
+ MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
+
+private:
+ MCSymbol *CurrentFnBegin;
+ MCSymbol *CurrentFnEnd;
+ MCSymbol *CurExceptionSym;
+
+ // The garbage collection metadata printer table.
+ void *GCMetadataPrinters; // Really a DenseMap.
+
+ /// Emit comments in assembly output if this is true.
+ ///
+ bool VerboseAsm;
+ static char ID;
+
+ /// If VerboseAsm is set, a pointer to the loop info for this function.
+ MachineLoopInfo *LI;
+
+ struct HandlerInfo {
+ AsmPrinterHandler *Handler;
+ const char *TimerName, *TimerGroupName;
+ HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
+ const char *TimerGroupName)
+ : Handler(Handler), TimerName(TimerName),
+ TimerGroupName(TimerGroupName) {}
+ };
+ /// A vector of all debug/EH info emitters we should use. This vector
+ /// maintains ownership of the emitters.
+ SmallVector<HandlerInfo, 1> Handlers;
+
+ /// If the target supports dwarf debug info, this pointer is non-null.
+ DwarfDebug *DD;
+
+protected:
+ explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);
+
+public:
+ ~AsmPrinter() override;
+
+ DwarfDebug *getDwarfDebug() { return DD; }
+ DwarfDebug *getDwarfDebug() const { return DD; }
+
+ /// Return true if assembly output should contain comments.
+ ///
+ bool isVerbose() const { return VerboseAsm; }
+
+ /// Return a unique ID for the current function.
+ ///
+ unsigned getFunctionNumber() const;
+
+ MCSymbol *getFunctionBegin() const { return CurrentFnBegin; }
+ MCSymbol *getFunctionEnd() const { return CurrentFnEnd; }
+ MCSymbol *getCurExceptionSym();
+
+ /// Return information about object file lowering.
+ const TargetLoweringObjectFile &getObjFileLowering() const;
+
+ /// Return information about data layout.
+ const DataLayout &getDataLayout() const;
+
+ /// Return the pointer size from the TargetMachine
+ unsigned getPointerSize() const;
+
+ /// Return information about subtarget.
+ const MCSubtargetInfo &getSubtargetInfo() const;
+
+ void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
+
+ /// Return the target triple string.
+ StringRef getTargetTriple() const;
+
+ /// Return the current section we are emitting to.
+ const MCSection *getCurrentSection() const;
+
+ void getNameWithPrefix(SmallVectorImpl<char> &Name,
+ const GlobalValue *GV) const;
+
+ MCSymbol *getSymbol(const GlobalValue *GV) const;
+
+ //===------------------------------------------------------------------===//
+ // MachineFunctionPass Implementation.
+ //===------------------------------------------------------------------===//
+
+ /// Record analysis usage.
+ ///
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// Set up the AsmPrinter when we are working on a new module. If your pass
+ /// overrides this, it must make sure to explicitly call this implementation.
+ bool doInitialization(Module &M) override;
+
+ /// Shut down the asmprinter. If you override this in your pass, you must make
+ /// sure to call it explicitly.
+ bool doFinalization(Module &M) override;
+
+ /// Emit the specified function out to the OutStreamer.
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ SetupMachineFunction(MF);
+ EmitFunctionBody();
+ return false;
+ }
+
+ //===------------------------------------------------------------------===//
+ // Coarse grained IR lowering routines.
+ //===------------------------------------------------------------------===//
+
+ /// This should be called when a new MachineFunction is being processed from
+ /// runOnMachineFunction.
+ void SetupMachineFunction(MachineFunction &MF);
+
+ /// This method emits the body and trailer for a function.
+ void EmitFunctionBody();
+
+ void emitCFIInstruction(const MachineInstr &MI);
+
+ void emitFrameAlloc(const MachineInstr &MI);
+
+ enum CFIMoveType { CFI_M_None, CFI_M_EH, CFI_M_Debug };
+ CFIMoveType needsCFIMoves();
+
+ bool needsSEHMoves();
+
+ /// Print to the current output stream assembly representations of the
+ /// constants in the constant pool MCP. This is used to print out constants
+ /// which have been "spilled to memory" by the code generator.
+ ///
+ virtual void EmitConstantPool();
+
+ /// Print assembly representations of the jump tables used by the current
+ /// function to the current output stream.
+ ///
+ virtual void EmitJumpTableInfo();
+
+ /// Emit the control variable for an emulated TLS variable.
+ virtual void EmitEmulatedTLSControlVariable(const GlobalVariable *GV,
+ MCSymbol *EmittedSym,
+ bool AllZeroInitValue);
+
+ /// Emit the specified global variable to the .s file.
+ virtual void EmitGlobalVariable(const GlobalVariable *GV);
+
+ /// Check to see if the specified global is a special global used by LLVM. If
+ /// so, emit it and return true, otherwise do nothing and return false.
+ bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+
+ /// Emit an alignment directive to the specified power of two boundary. For
+ /// example, if you pass in 3 here, you will get an 8 byte alignment. If a
+ /// global value is specified, and if that global has an explicit alignment
+ /// requested, it will override the alignment request if required for
+ /// correctness.
+ ///
+ void EmitAlignment(unsigned NumBits, const GlobalObject *GO = nullptr) const;
+
+ /// Lower the specified LLVM Constant to an MCExpr.
+ const MCExpr *lowerConstant(const Constant *CV);
+
+ /// \brief Print a general LLVM constant to the .s file.
+ void EmitGlobalConstant(const DataLayout &DL, const Constant *CV);
+
+  /// \brief Unnamed constant global variables solely containing a pointer to
+  /// another global variable act like a global variable "proxy", or GOT
+  /// equivalent, i.e., they are only used to hold the address of the latter. One
+ /// optimization is to replace accesses to these proxies by using the GOT
+ /// entry for the final global instead. Hence, we select GOT equivalent
+ /// candidates among all the module global variables, avoid emitting them
+ /// unnecessarily and finally replace references to them by pc relative
+ /// accesses to GOT entries.
+ void computeGlobalGOTEquivs(Module &M);
+
+ /// \brief Constant expressions using GOT equivalent globals may not be
+ /// eligible for PC relative GOT entry conversion, in such cases we need to
+ /// emit the proxies we previously omitted in EmitGlobalVariable.
+ void emitGlobalGOTEquivs();
+
+ //===------------------------------------------------------------------===//
+ // Overridable Hooks
+ //===------------------------------------------------------------------===//
+
+ // Targets can, or in the case of EmitInstruction, must implement these to
+ // customize output.
+
+ /// This virtual method can be overridden by targets that want to emit
+ /// something at the start of their file.
+ virtual void EmitStartOfAsmFile(Module &) {}
+
+ /// This virtual method can be overridden by targets that want to emit
+ /// something at the end of their file.
+ virtual void EmitEndOfAsmFile(Module &) {}
+
+ /// Targets can override this to emit stuff before the first basic block in
+ /// the function.
+ virtual void EmitFunctionBodyStart() {}
+
+ /// Targets can override this to emit stuff after the last basic block in the
+ /// function.
+ virtual void EmitFunctionBodyEnd() {}
+
+ /// Targets can override this to emit stuff at the start of a basic block.
+ /// By default, this method prints the label for the specified
+ /// MachineBasicBlock, an alignment (if present) and a comment describing it
+ /// if appropriate.
+ virtual void EmitBasicBlockStart(const MachineBasicBlock &MBB) const;
+
+ /// Targets can override this to emit stuff at the end of a basic block.
+ virtual void EmitBasicBlockEnd(const MachineBasicBlock &MBB) {}
+
+ /// Targets should implement this to emit instructions.
+ virtual void EmitInstruction(const MachineInstr *) {
+ llvm_unreachable("EmitInstruction not implemented");
+ }
+
+ /// Return the symbol for the specified constant pool entry.
+ virtual MCSymbol *GetCPISymbol(unsigned CPID) const;
+
+ virtual void EmitFunctionEntryLabel();
+
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+ /// Targets can override this to change how global constants that are part of
+ /// a C++ static/global constructor list are emitted.
+ virtual void EmitXXStructor(const DataLayout &DL, const Constant *CV) {
+ EmitGlobalConstant(DL, CV);
+ }
+
+ /// Return true if the basic block has exactly one predecessor and the control
+ /// transfer mechanism between the predecessor and this block is a
+ /// fall-through.
+ virtual bool
+ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const;
+
+ /// Targets can override this to customize the output of IMPLICIT_DEF
+ /// instructions in verbose mode.
+ virtual void emitImplicitDef(const MachineInstr *MI) const;
+
+ //===------------------------------------------------------------------===//
+ // Symbol Lowering Routines.
+ //===------------------------------------------------------------------===//
+public:
+ MCSymbol *createTempSymbol(const Twine &Name) const;
+
+ /// Return the MCSymbol for a private symbol with global value name as its
+ /// base, with the specified suffix.
+ MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+ StringRef Suffix) const;
+
+ /// Return the MCSymbol for the specified ExternalSymbol.
+ MCSymbol *GetExternalSymbolSymbol(StringRef Sym) const;
+
+ /// Return the symbol for the specified jump table entry.
+ MCSymbol *GetJTISymbol(unsigned JTID, bool isLinkerPrivate = false) const;
+
+ /// Return the symbol for the specified jump table .set
+ /// FIXME: privatize to AsmPrinter.
+ MCSymbol *GetJTSetSymbol(unsigned UID, unsigned MBBID) const;
+
+ /// Return the MCSymbol used to satisfy BlockAddress uses of the specified
+ /// basic block.
+ MCSymbol *GetBlockAddressSymbol(const BlockAddress *BA) const;
+ MCSymbol *GetBlockAddressSymbol(const BasicBlock *BB) const;
+
+ //===------------------------------------------------------------------===//
+ // Emission Helper Routines.
+ //===------------------------------------------------------------------===//
+public:
+  /// This is just a convenient helper for printing offsets.
+ void printOffset(int64_t Offset, raw_ostream &OS) const;
+
+ /// Emit a byte directive and value.
+ ///
+ void EmitInt8(int Value) const;
+
+ /// Emit a short directive and value.
+ ///
+ void EmitInt16(int Value) const;
+
+ /// Emit a long directive and value.
+ ///
+ void EmitInt32(int Value) const;
+
+ /// Emit something like ".long Hi-Lo" where the size in bytes of the directive
+ /// is specified by Size and Hi/Lo specify the labels. This implicitly uses
+ /// .set if it is available.
+ void EmitLabelDifference(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size) const;
+
+ /// Emit something like ".long Label+Offset" where the size in bytes of the
+ /// directive is specified by Size and Label specifies the label. This
+ /// implicitly uses .set if it is available.
+ void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
+ unsigned Size, bool IsSectionRelative = false) const;
+
+ /// Emit something like ".long Label" where the size in bytes of the directive
+ /// is specified by Size and Label specifies the label.
+ void EmitLabelReference(const MCSymbol *Label, unsigned Size,
+ bool IsSectionRelative = false) const {
+ EmitLabelPlusOffset(Label, 0, Size, IsSectionRelative);
+ }
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Emission Helper Routines
+ //===------------------------------------------------------------------===//
+
+ /// Emit the specified signed leb128 value.
+ void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
+
+ /// Emit the specified unsigned leb128 value.
+ void EmitULEB128(uint64_t Value, const char *Desc = nullptr,
+ unsigned PadTo = 0) const;
+
+ /// Emit a .byte 42 directive that corresponds to an encoding. If verbose
+ /// assembly output is enabled, we output comments describing the encoding.
+ /// Desc is a string saying what the encoding is specifying (e.g. "LSDA").
+ void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
+
+ /// Return the size of the encoding in bytes.
+ unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
+
+ /// Emit reference to a ttype global with a specified encoding.
+ void EmitTTypeReference(const GlobalValue *GV, unsigned Encoding) const;
+
+ /// Emit a reference to a symbol for use in dwarf. Different object formats
+ /// represent this in different ways. Some use a relocation others encode
+ /// the label offset in its section.
+ void emitDwarfSymbolReference(const MCSymbol *Label,
+ bool ForceOffset = false) const;
+
+ /// Emit the 4-byte offset of a string from the start of its section.
+ ///
+ /// When possible, emit a DwarfStringPool section offset without any
+ /// relocations, and without using the symbol. Otherwise, defers to \a
+ /// emitDwarfSymbolReference().
+ void emitDwarfStringOffset(DwarfStringPoolEntryRef S) const;
+
+ /// Get the value for DW_AT_APPLE_isa. Zero if no isa encoding specified.
+ virtual unsigned getISAEncoding() { return 0; }
+
+ /// EmitDwarfRegOp - Emit a dwarf register operation.
+ virtual void EmitDwarfRegOp(ByteStreamer &BS,
+ const MachineLocation &MLoc) const;
+
+ //===------------------------------------------------------------------===//
+ // Dwarf Lowering Routines
+ //===------------------------------------------------------------------===//
+
+  /// \brief Emit a frame instruction to describe the layout of the frame.
+ void emitCFIInstruction(const MCCFIInstruction &Inst) const;
+
+ /// \brief Emit Dwarf abbreviation table.
+ template <typename T> void emitDwarfAbbrevs(const T &Abbrevs) const {
+ // For each abbreviation.
+ for (const auto &Abbrev : Abbrevs)
+ emitDwarfAbbrev(*Abbrev);
+
+ // Mark end of abbreviations.
+ EmitULEB128(0, "EOM(3)");
+ }
+
+ void emitDwarfAbbrev(const DIEAbbrev &Abbrev) const;
+
+ /// \brief Recursively emit Dwarf DIE tree.
+ void emitDwarfDIE(const DIE &Die) const;
+
+ //===------------------------------------------------------------------===//
+ // Inline Asm Support
+ //===------------------------------------------------------------------===//
+public:
+ // These are hooks that targets can override to implement inline asm
+ // support. These should probably be moved out of AsmPrinter someday.
+
+ /// Print information related to the specified machine instr that is
+ /// independent of the operand, and may be independent of the instr itself.
+ /// This can be useful for portably encoding the comment character or other
+ /// bits of target-specific knowledge into the asmstrings. The syntax used is
+ /// ${:comment}. Targets can override this to add support for their own
+ /// strange codes.
+ virtual void PrintSpecial(const MachineInstr *MI, raw_ostream &OS,
+ const char *Code) const;
+
+ /// Print the specified operand of MI, an INLINEASM instruction, using the
+ /// specified assembler variant. Targets should override this to format as
+ /// appropriate. This method can return true if the operand is erroneous.
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+ /// Print the specified operand of MI, an INLINEASM instruction, using the
+ /// specified assembler variant as an address. Targets should override this to
+ /// format as appropriate. This method can return true if the operand is
+ /// erroneous.
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &OS);
+
+  /// Let the target do anything it needs to do before emitting inline asm.
+ virtual void emitInlineAsmStart() const;
+
+ /// Let the target do anything it needs to do after emitting inlineasm.
+  /// This callback can be used to restore the original mode in case the
+ /// inlineasm contains directives to switch modes.
+ /// \p StartInfo - the original subtarget info before inline asm
+ /// \p EndInfo - the final subtarget info after parsing the inline asm,
+ /// or NULL if the value is unknown.
+ virtual void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
+ const MCSubtargetInfo *EndInfo) const;
+
+private:
+ /// Private state for PrintSpecial()
+ // Assign a unique ID to this machine instruction.
+ mutable const MachineInstr *LastMI;
+ mutable unsigned LastFn;
+ mutable unsigned Counter;
+
+ /// This method emits the header for the current function.
+ virtual void EmitFunctionHeader();
+
+ /// Emit a blob of inline asm to the output streamer.
+ void
+ EmitInlineAsm(StringRef Str, const MCSubtargetInfo &STI,
+ const MCTargetOptions &MCOptions,
+ const MDNode *LocMDNode = nullptr,
+ InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const;
+
+ /// This method formats and emits the specified machine instruction that is an
+ /// inline asm.
+ void EmitInlineAsm(const MachineInstr *MI) const;
+
+ //===------------------------------------------------------------------===//
+ // Internal Implementation Details
+ //===------------------------------------------------------------------===//
+
+  /// This emits visibility information about the symbol, if this is supported
+  /// by the target.
+ void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
+ bool IsDefinition = true) const;
+
+ void EmitLinkage(const GlobalValue *GV, MCSymbol *GVSym) const;
+
+ void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
+ const MachineBasicBlock *MBB, unsigned uid) const;
+ void EmitLLVMUsedList(const ConstantArray *InitList);
+ /// Emit llvm.ident metadata in an '.ident' directive.
+ void EmitModuleIdents(Module &M);
+ void EmitXXStructorList(const DataLayout &DL, const Constant *List,
+ bool isCtor);
+ GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
+};
+}
+
+#endif
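
As a rough sketch of the overridable-hooks contract above: a target subclasses AsmPrinter, lowers each MachineInstr to an MCInst, and forwards it through EmitToStreamer(). Here lowerToMCInst() is a hypothetical stand-in for the target's real MC lowering, not an API from this header.

#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"

using namespace llvm;

namespace {
// Hypothetical helper; a real target maps opcodes and operands here.
void lowerToMCInst(const MachineInstr *MI, MCInst &Out) { (void)MI; (void)Out; }

class ExampleAsmPrinter : public AsmPrinter {
public:
  ExampleAsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
      : AsmPrinter(TM, std::move(Streamer)) {}

  const char *getPassName() const override {
    return "Example Assembly Printer";
  }

  // The one hook every target must implement.
  void EmitInstruction(const MachineInstr *MI) override {
    MCInst TmpInst;
    lowerToMCInst(MI, TmpInst);
    EmitToStreamer(*OutStreamer, TmpInst);
  }
};
} // end anonymous namespace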
diff --git a/contrib/llvm/include/llvm/CodeGen/AtomicExpandUtils.h b/contrib/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
new file mode 100644
index 0000000..ac18eac
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/AtomicExpandUtils.h
@@ -0,0 +1,57 @@
+//===-- AtomicExpandUtils.h - Utilities for expanding atomic instructions -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class Value;
+class AtomicRMWInst;
+
+
+/// Parameters (see the expansion example below):
+/// (the builder, %addr, %loaded, %new_val, ordering,
+/// /* OUT */ %success, /* OUT */ %new_loaded)
+typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
+ AtomicOrdering, Value *&, Value *&)> CreateCmpXchgInstFun;
+
+/// \brief Expand an atomic RMW instruction into a loop utilizing
+/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
+/// instructions in the first place and that there isn't another, better,
+/// transformation available (for example AArch32/AArch64 have linked loads).
+///
+/// This is useful in passes which can't rewrite the more exotic RMW
+/// instructions directly into platform-specific intrinsics (because, say,
+/// those intrinsics don't exist). If such a pass is able to expand cmpxchg
+/// instructions directly however, then, with this function, it could avoid two
+/// extra module passes (avoiding passes by `-atomic-expand` and itself). A
+/// specific example would be PNaCl's `RewriteAtomics` pass.
+///
+/// Given: atomicrmw some_op iN* %addr, iN %incr ordering
+///
+/// The standard expansion we produce is:
+/// [...]
+/// %init_loaded = load atomic iN* %addr
+/// br label %loop
+/// loop:
+/// %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
+/// %new = some_op iN %loaded, %incr
+/// ; This is what -atomic-expand will produce using this function on i686 targets:
+/// %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
+/// %new_loaded = extractvalue { iN, i1 } %pair, 0
+/// %success = extractvalue { iN, i1 } %pair, 1
+/// ; End callback produced IR
+/// br i1 %success, label %atomicrmw.end, label %loop
+/// atomicrmw.end:
+/// [...]
+///
+/// Returns true if the containing function was modified.
+bool
+expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
+}
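
To make the callback contract concrete, here is a plausible CreateCmpXchgInstFun in the spirit of what a target pass might hand to expandAtomicRMWToCmpXchg(); it chooses the strongest legal failure ordering and leaves the synchronization scope at its cross-thread default.

#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Emits the cmpxchg marked "callback produced IR" in the expansion above.
static void createCmpXchgInst(IRBuilder<> &Builder, Value *Addr, Value *Loaded,
                              Value *NewVal, AtomicOrdering MemOpOrder,
                              Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}

// Typical use from a pass, given an AtomicRMWInst *AI:
//   bool MadeChange = expandAtomicRMWToCmpXchg(AI, createCmpXchgInst);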
diff --git a/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h
new file mode 100644
index 0000000..d99054e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -0,0 +1,846 @@
+//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides a helper that implements much of the TTI interface in
+/// terms of the target-independent code generator and TargetLowering
+/// interfaces.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
+#define LLVM_CODEGEN_BASICTTIIMPL_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/TargetTransformInfoImpl.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+
+namespace llvm {
+
+extern cl::opt<unsigned> PartialUnrollingThreshold;
+
+/// \brief Base class which can be used to help build a TTI implementation.
+///
+/// This class provides as much implementation of the TTI interface as is
+/// possible using the target independent parts of the code generator.
+///
+/// In order to subclass it, your class must implement a getST() method to
+/// return the subtarget, and a getTLI() method to return the target lowering.
+/// We need these methods implemented in the derived class so that this class
+/// doesn't have to duplicate storage for them.
+template <typename T>
+class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
+private:
+ typedef TargetTransformInfoImplCRTPBase<T> BaseT;
+ typedef TargetTransformInfo TTI;
+
+ /// Estimate the overhead of scalarizing an instruction. Insert and Extract
+ /// are set if the result needs to be inserted and/or extracted from vectors.
+ unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
+ assert(Ty->isVectorTy() && "Can only scalarize vectors");
+ unsigned Cost = 0;
+
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ if (Insert)
+ Cost += static_cast<T *>(this)
+ ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ if (Extract)
+ Cost += static_cast<T *>(this)
+ ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ }
+
+ return Cost;
+ }
+
+ /// Estimate the cost overhead of SK_Alternate shuffle.
+ unsigned getAltShuffleOverhead(Type *Ty) {
+ assert(Ty->isVectorTy() && "Can only shuffle vectors");
+ unsigned Cost = 0;
+    // Shuffle cost is equal to the cost of extracting elements from the
+    // arguments plus the cost of inserting them into the result vector.
+
+    // E.g. a <4 x float> shuffle with mask <0,5,2,7> extracts index 0 of the
+    // first vector, index 1 of the second vector, index 2 of the first
+    // vector, and index 3 of the second vector, and inserts them at indices
+    // <0,1,2,3> of the result vector.
+ for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
+ Cost += static_cast<T *>(this)
+ ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
+ Cost += static_cast<T *>(this)
+ ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
+ }
+ return Cost;
+ }
+
+ /// \brief Local query method delegates up to T which *must* implement this!
+ const TargetSubtargetInfo *getST() const {
+ return static_cast<const T *>(this)->getST();
+ }
+
+ /// \brief Local query method delegates up to T which *must* implement this!
+ const TargetLoweringBase *getTLI() const {
+ return static_cast<const T *>(this)->getTLI();
+ }
+
+protected:
+ explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
+ : BaseT(DL) {}
+
+ using TargetTransformInfoImplBase::DL;
+
+public:
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ BasicTTIImplBase(const BasicTTIImplBase &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)) {}
+ BasicTTIImplBase(BasicTTIImplBase &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
+
+ /// \name Scalar TTI Implementations
+ /// @{
+
+ bool hasBranchDivergence() { return false; }
+
+ bool isSourceOfDivergence(const Value *V) { return false; }
+
+ bool isLegalAddImmediate(int64_t imm) {
+ return getTLI()->isLegalAddImmediate(imm);
+ }
+
+ bool isLegalICmpImmediate(int64_t imm) {
+ return getTLI()->isLegalICmpImmediate(imm);
+ }
+
+ bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale,
+ unsigned AddrSpace) {
+ TargetLoweringBase::AddrMode AM;
+ AM.BaseGV = BaseGV;
+ AM.BaseOffs = BaseOffset;
+ AM.HasBaseReg = HasBaseReg;
+ AM.Scale = Scale;
+ return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);
+ }
+
+ int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
+ bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
+ TargetLoweringBase::AddrMode AM;
+ AM.BaseGV = BaseGV;
+ AM.BaseOffs = BaseOffset;
+ AM.HasBaseReg = HasBaseReg;
+ AM.Scale = Scale;
+ return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
+ }
+
+ bool isTruncateFree(Type *Ty1, Type *Ty2) {
+ return getTLI()->isTruncateFree(Ty1, Ty2);
+ }
+
+ bool isProfitableToHoist(Instruction *I) {
+ return getTLI()->isProfitableToHoist(I);
+ }
+
+ bool isTypeLegal(Type *Ty) {
+ EVT VT = getTLI()->getValueType(DL, Ty);
+ return getTLI()->isTypeLegal(VT);
+ }
+
+ unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<const Value *> Arguments) {
+ return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
+ }
+
+ unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> ParamTys) {
+ if (IID == Intrinsic::cttz) {
+ if (getTLI()->isCheapToSpeculateCttz())
+ return TargetTransformInfo::TCC_Basic;
+ return TargetTransformInfo::TCC_Expensive;
+ }
+
+ if (IID == Intrinsic::ctlz) {
+ if (getTLI()->isCheapToSpeculateCtlz())
+ return TargetTransformInfo::TCC_Basic;
+ return TargetTransformInfo::TCC_Expensive;
+ }
+
+ return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
+ }
+
+ unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
+
+ unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
+
+ bool shouldBuildLookupTables() {
+ const TargetLoweringBase *TLI = getTLI();
+ return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+ TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+ }
+
+ bool haveFastSqrt(Type *Ty) {
+ const TargetLoweringBase *TLI = getTLI();
+ EVT VT = TLI->getValueType(DL, Ty);
+ return TLI->isTypeLegal(VT) &&
+ TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
+ }
+
+ unsigned getFPOpCost(Type *Ty) {
+    // By default, FP instructions are no more expensive than integer
+    // instructions, since they are implemented in HW. Target-specific TTI can
+    // override this.
+ return TargetTransformInfo::TCC_Basic;
+ }
+
+ unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
+ const TargetLoweringBase *TLI = getTLI();
+ switch (Opcode) {
+ default: break;
+ case Instruction::Trunc: {
+ if (TLI->isTruncateFree(OpTy, Ty))
+ return TargetTransformInfo::TCC_Free;
+ return TargetTransformInfo::TCC_Basic;
+ }
+ case Instruction::ZExt: {
+ if (TLI->isZExtFree(OpTy, Ty))
+ return TargetTransformInfo::TCC_Free;
+ return TargetTransformInfo::TCC_Basic;
+ }
+ }
+
+ return BaseT::getOperationCost(Opcode, Ty, OpTy);
+ }
+
+ void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
+ // This unrolling functionality is target independent, but to provide some
+ // motivation for its intended use, for x86:
+
+ // According to the Intel 64 and IA-32 Architectures Optimization Reference
+ // Manual, Intel Core models and later have a loop stream detector (and
+ // associated uop queue) that can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have no more than 4 (8 for Nehalem and later) branches
+ // taken, and none of them may be calls.
+ // - The loop can have no more than 18 (28 for Nehalem and later) uops.
+
+ // According to the Software Optimization Guide for AMD Family 15h
+ // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
+ // and loop buffer which can benefit from partial unrolling.
+ // The relevant requirements are:
+ // - The loop must have fewer than 16 branches
+ // - The loop must have less than 40 uops in all executed loop branches
+
+ // The number of taken branches in a loop is hard to estimate here, and
+ // benchmarking has revealed that it is better not to be conservative when
+ // estimating the branch count. As a result, we'll ignore the branch limits
+ // until someone finds a case where it matters in practice.
+
+ unsigned MaxOps;
+ const TargetSubtargetInfo *ST = getST();
+ if (PartialUnrollingThreshold.getNumOccurrences() > 0)
+ MaxOps = PartialUnrollingThreshold;
+ else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
+ MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
+ else
+ return;
+
+ // Scan the loop: don't unroll loops with calls.
+ for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
+ ++I) {
+ BasicBlock *BB = *I;
+
+ for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+ if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+ ImmutableCallSite CS(&*J);
+ if (const Function *F = CS.getCalledFunction()) {
+ if (!static_cast<T *>(this)->isLoweredToCall(F))
+ continue;
+ }
+
+ return;
+ }
+ }
+
+ // Enable runtime and partial unrolling up to the specified size.
+ UP.Partial = UP.Runtime = true;
+ UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
+ }
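+  // For example (illustrative only): on a subtarget whose scheduling model
+  // reports LoopMicroOpBufferSize == 28, a call-free loop gets
+  // UP.Partial == UP.Runtime == true with a threshold of 28 micro-ops.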
+
+ /// @}
+
+ /// \name Vector TTI Implementations
+ /// @{
+
+ unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
+
+ unsigned getRegisterBitWidth(bool Vector) { return 32; }
+
+ unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
+
+ unsigned getArithmeticInstrCost(
+ unsigned Opcode, Type *Ty,
+ TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
+ TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
+ TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
+ TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
+ // Check if any of the operands are vector operands.
+ const TargetLoweringBase *TLI = getTLI();
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
+
+ bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
+ // Assume that floating point arithmetic operations cost twice as much as
+ // integer operations.
+ unsigned OpCost = (IsFloat ? 2 : 1);
+
+ if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+ // The operation is legal. Assume it costs 1.
+ // TODO: Once we have extract/insert subvector cost we need to use them.
+ return LT.first * OpCost;
+ }
+
+ if (!TLI->isOperationExpand(ISD, LT.second)) {
+      // If the operation is custom lowered, then assume
+      // that the code is twice as expensive.
+ return LT.first * 2 * OpCost;
+ }
+
+ // Else, assume that we need to scalarize this op.
+ if (Ty->isVectorTy()) {
+ unsigned Num = Ty->getVectorNumElements();
+ unsigned Cost = static_cast<T *>(this)
+ ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
+      // Return the cost of multiple scalar invocations plus the cost of
+      // inserting and extracting the values.
+ return getScalarizationOverhead(Ty, true, true) + Num * Cost;
+ }
+
+ // We don't know anything about this scalar instruction.
+ return OpCost;
+ }
+
+ unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
+ Type *SubTp) {
+ if (Kind == TTI::SK_Alternate) {
+ return getAltShuffleOverhead(Tp);
+ }
+ return 1;
+ }
+
+ unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
+ const TargetLoweringBase *TLI = getTLI();
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+ std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
+ std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);
+
+ // Check for NOOP conversions.
+ if (SrcLT.first == DstLT.first &&
+ SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+      // Bitcasts between types that are legalized to the same type are free.
+ if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
+ return 0;
+ }
+
+ if (Opcode == Instruction::Trunc &&
+ TLI->isTruncateFree(SrcLT.second, DstLT.second))
+ return 0;
+
+ if (Opcode == Instruction::ZExt &&
+ TLI->isZExtFree(SrcLT.second, DstLT.second))
+ return 0;
+
+ // If the cast is marked as legal (or promote) then assume low cost.
+ if (SrcLT.first == DstLT.first &&
+ TLI->isOperationLegalOrPromote(ISD, DstLT.second))
+ return 1;
+
+ // Handle scalar conversions.
+ if (!Src->isVectorTy() && !Dst->isVectorTy()) {
+
+ // Scalar bitcasts are usually free.
+ if (Opcode == Instruction::BitCast)
+ return 0;
+
+      // Just check the op cost. If the operation is legal then assume it
+      // costs 1.
+ if (!TLI->isOperationExpand(ISD, DstLT.second))
+ return 1;
+
+      // Assume that illegal scalar instructions are expensive.
+ return 4;
+ }
+
+ // Check vector-to-vector casts.
+ if (Dst->isVectorTy() && Src->isVectorTy()) {
+
+ // If the cast is between same-sized registers, then the check is simple.
+ if (SrcLT.first == DstLT.first &&
+ SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {
+
+ // Assume that Zext is done using AND.
+ if (Opcode == Instruction::ZExt)
+ return 1;
+
+ // Assume that sext is done using SHL and SRA.
+ if (Opcode == Instruction::SExt)
+ return 2;
+
+        // Just check the op cost. If the operation is legal then assume it
+        // costs 1 and multiply by the type-legalization overhead.
+ if (!TLI->isOperationExpand(ISD, DstLT.second))
+ return SrcLT.first * 1;
+ }
+
+ // If we are converting vectors and the operation is illegal, or
+ // if the vectors are legalized to different types, estimate the
+ // scalarization costs.
+ unsigned Num = Dst->getVectorNumElements();
+ unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
+ Opcode, Dst->getScalarType(), Src->getScalarType());
+
+ // Return the cost of multiple scalar invocation plus the cost of
+ // inserting and extracting the values.
+ return getScalarizationOverhead(Dst, true, true) + Num * Cost;
+ }
+
+    // We already handled vector-to-vector and scalar-to-scalar conversions.
+    // This is where we handle bitcasts between vectors and scalars. We need to
+    // assume that the conversion is scalarized in one way or another.
+ if (Opcode == Instruction::BitCast)
+ // Illegal bitcasts are done by storing and loading from a stack slot.
+ return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
+ : 0) +
+ (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
+ : 0);
+
+ llvm_unreachable("Unhandled cast");
+ }
+
+ unsigned getCFInstrCost(unsigned Opcode) {
+ // Branches are assumed to be predicted.
+ return 0;
+ }
+
+ unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
+ const TargetLoweringBase *TLI = getTLI();
+ int ISD = TLI->InstructionOpcodeToISD(Opcode);
+ assert(ISD && "Invalid opcode");
+
+ // Selects on vectors are actually vector selects.
+ if (ISD == ISD::SELECT) {
+ assert(CondTy && "CondTy must exist");
+ if (CondTy->isVectorTy())
+ ISD = ISD::VSELECT;
+ }
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
+
+ if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
+ !TLI->isOperationExpand(ISD, LT.second)) {
+ // The operation is legal. Assume it costs 1. Multiply
+ // by the type-legalization overhead.
+ return LT.first * 1;
+ }
+
+    // Otherwise, assume that the cmp/select is scalarized.
+ if (ValTy->isVectorTy()) {
+ unsigned Num = ValTy->getVectorNumElements();
+ if (CondTy)
+ CondTy = CondTy->getScalarType();
+ unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
+ Opcode, ValTy->getScalarType(), CondTy);
+
+      // Return the cost of multiple scalar invocations plus the cost of
+      // inserting and extracting the values.
+ return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
+ }
+
+ // Unknown scalar opcode.
+ return 1;
+ }
+
+ unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
+ std::pair<unsigned, MVT> LT =
+ getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());
+
+ return LT.first;
+ }
+
+ unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
+ unsigned AddressSpace) {
+ assert(!Src->isVoidTy() && "Invalid type");
+ std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);
+
+ // Assuming that all loads of legal types cost 1.
+ unsigned Cost = LT.first;
+
+ if (Src->isVectorTy() &&
+ Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
+      // This is a vector load that legalizes to a larger type than the vector
+      // itself. If the corresponding extending load or truncating store is not
+      // legal, then this will scalarize.
+ TargetLowering::LegalizeAction LA = TargetLowering::Expand;
+ EVT MemVT = getTLI()->getValueType(DL, Src);
+ if (Opcode == Instruction::Store)
+ LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
+ else
+ LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
+
+ if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
+ // This is a vector load/store for some illegal type that is scalarized.
+ // We must account for the cost of building or decomposing the vector.
+ Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
+ Opcode == Instruction::Store);
+ }
+ }
+
+ return Cost;
+ }
+
+ unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
+ unsigned Factor,
+ ArrayRef<unsigned> Indices,
+ unsigned Alignment,
+ unsigned AddressSpace) {
+ VectorType *VT = dyn_cast<VectorType>(VecTy);
+ assert(VT && "Expect a vector type for interleaved memory op");
+
+ unsigned NumElts = VT->getNumElements();
+ assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");
+
+ unsigned NumSubElts = NumElts / Factor;
+ VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);
+
+    // First, the cost of the load/store operation.
+ unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
+ Opcode, VecTy, Alignment, AddressSpace);
+
+    // Then add the cost of the interleave operation.
+    if (Opcode == Instruction::Load) {
+      // The interleave cost is similar to extracting the sub-vectors' elements
+      // from the wide vector and inserting them into the sub-vectors.
+ //
+ // E.g. An interleaved load of factor 2 (with one member of index 0):
+ // %vec = load <8 x i32>, <8 x i32>* %ptr
+ // %v0 = shuffle %vec, undef, <0, 2, 4, 6> ; Index 0
+      // The cost is estimated as extracting elements 0, 2, 4, 6 from the
+      // <8 x i32> vector and inserting them into a <4 x i32> vector.
+
+ assert(Indices.size() <= Factor &&
+ "Interleaved memory op has too many members");
+
+ for (unsigned Index : Indices) {
+ assert(Index < Factor && "Invalid index for interleaved memory op");
+
+ // Extract elements from loaded vector for each sub vector.
+ for (unsigned i = 0; i < NumSubElts; i++)
+ Cost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, VT, Index + i * Factor);
+ }
+
+ unsigned InsSubCost = 0;
+ for (unsigned i = 0; i < NumSubElts; i++)
+ InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::InsertElement, SubVT, i);
+
+ Cost += Indices.size() * InsSubCost;
+ } else {
+      // The interleave cost is extracting all elements from the sub-vectors
+      // and inserting them into the wide vector.
+      //
+      // E.g. An interleaved store of factor 2:
+      // %interleaved.vec = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
+      // store <8 x i32> %interleaved.vec, <8 x i32>* %ptr
+      // The cost is estimated as extracting all elements from both <4 x i32>
+      // vectors and inserting them into the <8 x i32> vector.
+
+ unsigned ExtSubCost = 0;
+ for (unsigned i = 0; i < NumSubElts; i++)
+ ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
+ Instruction::ExtractElement, SubVT, i);
+ Cost += ExtSubCost * Factor;
+
+ for (unsigned i = 0; i < NumElts; i++)
+ Cost += static_cast<T *>(this)
+ ->getVectorInstrCost(Instruction::InsertElement, VT, i);
+ }
+
+ return Cost;
+ }
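+  // Worked example for the above (illustrative only, assuming unit costs from
+  // the default getMemoryOpCost/getVectorInstrCost): an interleaved load of
+  // factor 2 from <8 x i32> with Indices = {0} costs 1 (wide load)
+  // + 4 (extracts at 0, 2, 4, 6) + 4 (inserts into <4 x i32>) = 9.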
+
+ /// Get intrinsic cost based on arguments
+ unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Value *> Args) {
+ switch (IID) {
+ default: {
+ SmallVector<Type *, 4> Types;
+ for (Value *Op : Args)
+ Types.push_back(Op->getType());
+ return getIntrinsicInstrCost(IID, RetTy, Types);
+ }
+ case Intrinsic::masked_scatter: {
+ Value *Mask = Args[3];
+ bool VarMask = !isa<Constant>(Mask);
+ unsigned Alignment = cast<ConstantInt>(Args[2])->getZExtValue();
+ return
+ static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Store,
+ Args[0]->getType(),
+ Args[1], VarMask,
+ Alignment);
+ }
+ case Intrinsic::masked_gather: {
+ Value *Mask = Args[2];
+ bool VarMask = !isa<Constant>(Mask);
+ unsigned Alignment = cast<ConstantInt>(Args[1])->getZExtValue();
+ return
+ static_cast<T *>(this)->getGatherScatterOpCost(Instruction::Load,
+ RetTy, Args[0], VarMask,
+ Alignment);
+ }
+ }
+ }
+
+ /// Get intrinsic cost based on argument types
+ unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
+ ArrayRef<Type *> Tys) {
+ unsigned ISD = 0;
+ switch (IID) {
+ default: {
+ // Assume that we need to scalarize this intrinsic.
+ unsigned ScalarizationCost = 0;
+ unsigned ScalarCalls = 1;
+ Type *ScalarRetTy = RetTy;
+ if (RetTy->isVectorTy()) {
+ ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+ ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+ ScalarRetTy = RetTy->getScalarType();
+ }
+ SmallVector<Type *, 4> ScalarTys;
+ for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+ Type *Ty = Tys[i];
+ if (Ty->isVectorTy()) {
+ ScalarizationCost += getScalarizationOverhead(Ty, false, true);
+ ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+ Ty = Ty->getScalarType();
+ }
+ ScalarTys.push_back(Ty);
+ }
+ if (ScalarCalls == 1)
+ return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
+
+ unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+ IID, ScalarRetTy, ScalarTys);
+
+ return ScalarCalls * ScalarCost + ScalarizationCost;
+ }
+ // Look for intrinsics that can be lowered directly or turned into a scalar
+ // intrinsic call.
+ case Intrinsic::sqrt:
+ ISD = ISD::FSQRT;
+ break;
+ case Intrinsic::sin:
+ ISD = ISD::FSIN;
+ break;
+ case Intrinsic::cos:
+ ISD = ISD::FCOS;
+ break;
+ case Intrinsic::exp:
+ ISD = ISD::FEXP;
+ break;
+ case Intrinsic::exp2:
+ ISD = ISD::FEXP2;
+ break;
+ case Intrinsic::log:
+ ISD = ISD::FLOG;
+ break;
+ case Intrinsic::log10:
+ ISD = ISD::FLOG10;
+ break;
+ case Intrinsic::log2:
+ ISD = ISD::FLOG2;
+ break;
+ case Intrinsic::fabs:
+ ISD = ISD::FABS;
+ break;
+ case Intrinsic::minnum:
+ ISD = ISD::FMINNUM;
+ break;
+ case Intrinsic::maxnum:
+ ISD = ISD::FMAXNUM;
+ break;
+ case Intrinsic::copysign:
+ ISD = ISD::FCOPYSIGN;
+ break;
+ case Intrinsic::floor:
+ ISD = ISD::FFLOOR;
+ break;
+ case Intrinsic::ceil:
+ ISD = ISD::FCEIL;
+ break;
+ case Intrinsic::trunc:
+ ISD = ISD::FTRUNC;
+ break;
+ case Intrinsic::nearbyint:
+ ISD = ISD::FNEARBYINT;
+ break;
+ case Intrinsic::rint:
+ ISD = ISD::FRINT;
+ break;
+ case Intrinsic::round:
+ ISD = ISD::FROUND;
+ break;
+ case Intrinsic::pow:
+ ISD = ISD::FPOW;
+ break;
+ case Intrinsic::fma:
+ ISD = ISD::FMA;
+ break;
+ case Intrinsic::fmuladd:
+ ISD = ISD::FMA;
+ break;
+ // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ return 0;
+ case Intrinsic::masked_store:
+ return static_cast<T *>(this)
+ ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
+ case Intrinsic::masked_load:
+ return static_cast<T *>(this)
+ ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
+ }
+
+ const TargetLoweringBase *TLI = getTLI();
+ std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
+
+ if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
+ // The operation is legal. Assume it costs 1.
+      // If the type is split into multiple registers, assume that there is
+ // overhead to this.
+ // TODO: Once we have extract/insert subvector cost we need to use them.
+ if (LT.first > 1)
+ return LT.first * 2;
+ return LT.first * 1;
+ }
+
+ if (!TLI->isOperationExpand(ISD, LT.second)) {
+      // If the operation is custom lowered, then assume
+      // that the code is twice as expensive.
+ return LT.first * 2;
+ }
+
+    // If we can't lower fmuladd into an FMA, estimate the cost as a
+    // floating-point mul followed by an add.
+ if (IID == Intrinsic::fmuladd)
+ return static_cast<T *>(this)
+ ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
+ static_cast<T *>(this)
+ ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
+
+ // Else, assume that we need to scalarize this intrinsic. For math builtins
+ // this will emit a costly libcall, adding call overhead and spills. Make it
+ // very expensive.
+ if (RetTy->isVectorTy()) {
+ unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+ unsigned ScalarCalls = RetTy->getVectorNumElements();
+ SmallVector<Type *, 4> ScalarTys;
+ for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+ Type *Ty = Tys[i];
+ if (Ty->isVectorTy())
+ Ty = Ty->getScalarType();
+ ScalarTys.push_back(Ty);
+ }
+ unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+ IID, RetTy->getScalarType(), ScalarTys);
+ for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+ if (Tys[i]->isVectorTy()) {
+ ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
+ ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+ }
+ }
+
+ return ScalarCalls * ScalarCost + ScalarizationCost;
+ }
+
+ // This is going to be turned into a library call, make it expensive.
+ return 10;
+ }
+
+ /// \brief Compute a cost of the given call instruction.
+ ///
+ /// Compute the cost of calling function F with return type RetTy and
+ /// argument types Tys. F might be nullptr, in this case the cost of an
+ /// arbitrary call with the specified signature will be returned.
+  /// This is used, for instance, when we estimate a call to the vector
+  /// counterpart of the given function.
+  /// \param F Called function, might be nullptr.
+  /// \param RetTy Return value type.
+  /// \param Tys Argument types.
+  /// \returns The cost of the call instruction.
+ unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
+ return 10;
+ }
+
+ unsigned getNumberOfParts(Type *Tp) {
+ std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
+ return LT.first;
+ }
+
+ unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }
+
+ unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
+ assert(Ty->isVectorTy() && "Expect a vector type");
+ unsigned NumVecElts = Ty->getVectorNumElements();
+ unsigned NumReduxLevels = Log2_32(NumVecElts);
+ unsigned ArithCost =
+ NumReduxLevels *
+ static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
+ // Assume the pairwise shuffles add a cost.
+ unsigned ShuffleCost =
+ NumReduxLevels * (IsPairwise + 1) *
+ static_cast<T *>(this)
+ ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
+ return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
+ }
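+  // Worked example for the above (illustrative only, unit costs assumed): a
+  // non-pairwise reduction over <8 x i32> uses NumReduxLevels = log2(8) = 3
+  // levels, so ArithCost = 3 and ShuffleCost = 3 * 1 * 1 = 3; adding the
+  // extract overhead of 8 gives a total of 14.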
+
+ /// @}
+};
+
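+// A minimal subclassing sketch (illustrative only; the "MyTarget*" names are
+// hypothetical). A backend derives from BasicTTIImplBase via CRTP and supplies
+// the two accessors the base class requires:
+//
+//   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
+//     typedef BasicTTIImplBase<MyTargetTTIImpl> BaseT;
+//     friend BaseT;
+//
+//     const MyTargetSubtargetInfo *ST;
+//     const MyTargetLowering *TLI;
+//
+//     const MyTargetSubtargetInfo *getST() const { return ST; }
+//     const MyTargetLowering *getTLI() const { return TLI; }
+//
+//   public:
+//     explicit MyTargetTTIImpl(const MyTargetMachine *TM, const Function &F)
+//         : BaseT(TM, F.getParent()->getDataLayout()),
+//           ST(TM->getSubtargetImpl(F)), TLI(ST->getTargetLowering()) {}
+//   };
+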
+/// \brief Concrete BasicTTIImpl that can be used if no further customization
+/// is needed.
+class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
+ typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
+ friend class BasicTTIImplBase<BasicTTIImpl>;
+
+ const TargetSubtargetInfo *ST;
+ const TargetLoweringBase *TLI;
+
+ const TargetSubtargetInfo *getST() const { return ST; }
+ const TargetLoweringBase *getTLI() const { return TLI; }
+
+public:
+  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);
+
+ // Provide value semantics. MSVC requires that we spell all of these out.
+ BasicTTIImpl(const BasicTTIImpl &Arg)
+ : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
+ BasicTTIImpl(BasicTTIImpl &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
+ TLI(std::move(Arg.TLI)) {}
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h b/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h
new file mode 100644
index 0000000..17c9415
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/CalcSpillWeights.h
@@ -0,0 +1,82 @@
+//===---------------- lib/CodeGen/CalcSpillWeights.h ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+#define LLVM_CODEGEN_CALCSPILLWEIGHTS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+
+namespace llvm {
+
+ class LiveInterval;
+ class LiveIntervals;
+ class MachineBlockFrequencyInfo;
+ class MachineLoopInfo;
+ class VirtRegMap;
+
+ /// \brief Normalize the spill weight of a live interval
+ ///
+ /// The spill weight of a live interval is computed as:
+ ///
+ /// (sum(use freq) + sum(def freq)) / (K + size)
+ ///
+ /// @param UseDefFreq Expected number of executed use and def instructions
+ /// per function call. Derived from block frequencies.
+  /// @param Size Size of live interval as returned by getSize()
+ /// @param NumInstr Number of instructions using this live interval
+ ///
+ static inline float normalizeSpillWeight(float UseDefFreq, unsigned Size,
+ unsigned NumInstr) {
+ // The constant 25 instructions is added to avoid depending too much on
+ // accidental SlotIndex gaps for small intervals. The effect is that small
+ // intervals have a spill weight that is mostly proportional to the number
+ // of uses, while large intervals get a spill weight that is closer to a use
+ // density.
+ return UseDefFreq / (Size + 25*SlotIndex::InstrDist);
+ }
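+  // Worked example for the above (illustrative numbers only): with
+  // UseDefFreq == 300, an interval of Size == 5 * SlotIndex::InstrDist gets
+  // weight 300 / ((5 + 25) * InstrDist), while an interval of
+  // Size == 1000 * InstrDist gets 300 / ((1000 + 25) * InstrDist).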
+
+ /// \brief Calculate auxiliary information for a virtual register such as its
+ /// spill weight and allocation hint.
+ class VirtRegAuxInfo {
+ public:
+ typedef float (*NormalizingFn)(float, unsigned, unsigned);
+
+ private:
+ MachineFunction &MF;
+ LiveIntervals &LIS;
+ VirtRegMap *VRM;
+ const MachineLoopInfo &Loops;
+ const MachineBlockFrequencyInfo &MBFI;
+ DenseMap<unsigned, float> Hint;
+ NormalizingFn normalize;
+
+ public:
+ VirtRegAuxInfo(MachineFunction &mf, LiveIntervals &lis,
+ VirtRegMap *vrm, const MachineLoopInfo &loops,
+ const MachineBlockFrequencyInfo &mbfi,
+ NormalizingFn norm = normalizeSpillWeight)
+ : MF(mf), LIS(lis), VRM(vrm), Loops(loops), MBFI(mbfi), normalize(norm) {}
+
+ /// \brief (re)compute li's spill weight and allocation hint.
+ void calculateSpillWeightAndHint(LiveInterval &li);
+ };
+
+ /// \brief Compute spill weights and allocation hints for all virtual register
+ /// live intervals.
+ void calculateSpillWeightsAndHints(LiveIntervals &LIS, MachineFunction &MF,
+ VirtRegMap *VRM,
+ const MachineLoopInfo &MLI,
+ const MachineBlockFrequencyInfo &MBFI,
+ VirtRegAuxInfo::NormalizingFn norm =
+ normalizeSpillWeight);
+}
+
+#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H
diff --git a/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
new file mode 100644
index 0000000..415abb9
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/CallingConvLower.h
@@ -0,0 +1,519 @@
+//===-- llvm/CallingConvLower.h - Calling Conventions -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CCState and CCValAssign classes, used for lowering
+// and implementing calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Target/TargetCallingConv.h"
+
+namespace llvm {
+class CCState;
+class MVT;
+class TargetMachine;
+class TargetRegisterInfo;
+
+/// CCValAssign - Represent assignment of one arg/retval to a location.
+class CCValAssign {
+public:
+ enum LocInfo {
+ Full, // The value fills the full location.
+ SExt, // The value is sign extended in the location.
+ ZExt, // The value is zero extended in the location.
+ AExt, // The value is extended with undefined upper bits.
+ SExtUpper, // The value is in the upper bits of the location and should be
+ // sign extended when retrieved.
+ ZExtUpper, // The value is in the upper bits of the location and should be
+ // zero extended when retrieved.
+ AExtUpper, // The value is in the upper bits of the location and should be
+ // extended with undefined upper bits when retrieved.
+ BCvt, // The value is bit-converted in the location.
+ VExt, // The value is vector-widened in the location.
+ // FIXME: Not implemented yet. Code that uses AExt to mean
+ // vector-widen should be fixed to use VExt instead.
+ FPExt, // The floating-point value is fp-extended in the location.
+    Indirect // The location contains a pointer to the value.
+ // TODO: a subset of the value is in the location.
+ };
+
+private:
+  /// ValNo - This is the value number being assigned (e.g. an argument number).
+ unsigned ValNo;
+
+ /// Loc is either a stack offset or a register number.
+ unsigned Loc;
+
+ /// isMem - True if this is a memory loc, false if it is a register loc.
+ unsigned isMem : 1;
+
+ /// isCustom - True if this arg/retval requires special handling.
+ unsigned isCustom : 1;
+
+ /// Information about how the value is assigned.
+ LocInfo HTP : 6;
+
+ /// ValVT - The type of the value being assigned.
+ MVT ValVT;
+
+ /// LocVT - The type of the location being assigned to.
+ MVT LocVT;
+public:
+
+ static CCValAssign getReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret.ValNo = ValNo;
+ Ret.Loc = RegNo;
+ Ret.isMem = false;
+ Ret.isCustom = false;
+ Ret.HTP = HTP;
+ Ret.ValVT = ValVT;
+ Ret.LocVT = LocVT;
+ return Ret;
+ }
+
+ static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
+ Ret.isCustom = true;
+ return Ret;
+ }
+
+ static CCValAssign getMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret.ValNo = ValNo;
+ Ret.Loc = Offset;
+ Ret.isMem = true;
+ Ret.isCustom = false;
+ Ret.HTP = HTP;
+ Ret.ValVT = ValVT;
+ Ret.LocVT = LocVT;
+ return Ret;
+ }
+
+ static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
+ Ret.isCustom = true;
+ return Ret;
+ }
+
+ // There is no need to differentiate between a pending CCValAssign and other
+ // kinds, as they are stored in a different list.
+ static CCValAssign getPending(unsigned ValNo, MVT ValVT, MVT LocVT,
+ LocInfo HTP, unsigned ExtraInfo = 0) {
+ return getReg(ValNo, ValVT, ExtraInfo, LocVT, HTP);
+ }
+
+ void convertToReg(unsigned RegNo) {
+ Loc = RegNo;
+ isMem = false;
+ }
+
+ void convertToMem(unsigned Offset) {
+ Loc = Offset;
+ isMem = true;
+ }
+
+ unsigned getValNo() const { return ValNo; }
+ MVT getValVT() const { return ValVT; }
+
+ bool isRegLoc() const { return !isMem; }
+ bool isMemLoc() const { return isMem; }
+
+ bool needsCustom() const { return isCustom; }
+
+ unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+ unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
+ unsigned getExtraInfo() const { return Loc; }
+ MVT getLocVT() const { return LocVT; }
+
+ LocInfo getLocInfo() const { return HTP; }
+ bool isExtInLoc() const {
+ return (HTP == AExt || HTP == SExt || HTP == ZExt);
+ }
+
+ bool isUpperBitsInLoc() const {
+ return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
+ }
+};
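+// Usage sketch (illustrative only; ValNo and Reg are hypothetical): a
+// calling-convention implementation typically builds assignments with the
+// factory functions above, e.g. an i32 value sign-extended into a 64-bit
+// register, or placed on the stack at offset 8:
+//
+//   CCValAssign VA =
+//       CCValAssign::getReg(ValNo, MVT::i32, Reg, MVT::i64, CCValAssign::SExt);
+//   CCValAssign Mem =
+//       CCValAssign::getMem(ValNo, MVT::i32, 8, MVT::i32, CCValAssign::Full);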
+
+/// Describes a register that needs to be forwarded from the prologue to a
+/// musttail call.
+struct ForwardedRegister {
+ ForwardedRegister(unsigned VReg, MCPhysReg PReg, MVT VT)
+ : VReg(VReg), PReg(PReg), VT(VT) {}
+ unsigned VReg;
+ MCPhysReg PReg;
+ MVT VT;
+};
+
+/// CCAssignFn - This function assigns a location for Val, updating State to
+/// reflect the change. It returns 'true' if it failed to handle Val.
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+/// CCCustomFn - This function assigns a location for Val, possibly updating
+/// all args to reflect changes, and indicates if it handled the value. If it
+/// handles the arg, it must set isCustom and return true.
+typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State);
+
+/// ParmContext - This enum tracks whether calling convention lowering is in
+/// the context of prologue or call generation. Not all backends make use of
+/// this information.
+typedef enum { Unknown, Prologue, Call } ParmContext;
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values. It captures which registers are already assigned and which
+/// stack slots are used. It provides accessors to allocate these values.
+class CCState {
+private:
+ CallingConv::ID CallingConv;
+ bool IsVarArg;
+ MachineFunction &MF;
+ const TargetRegisterInfo &TRI;
+ SmallVectorImpl<CCValAssign> &Locs;
+ LLVMContext &Context;
+
+ unsigned StackOffset;
+ unsigned MaxStackArgAlign;
+ SmallVector<uint32_t, 16> UsedRegs;
+ SmallVector<CCValAssign, 4> PendingLocs;
+
+ // ByValInfo and SmallVector<ByValInfo, 4> ByValRegs:
+ //
+  // The vector of ByValInfo instances (ByValRegs) is introduced to track
+  // byval registers.
+  // In other words, it tracks byval parameters that are stored in
+  // general purpose registers.
+  //
+  // For 4-byte stack alignment, the instance index is the byval parameter
+  // number in the formal arguments set. Assume we have some "struct_type" of
+  // size 4 bytes; then, for function "foo":
+ //
+ // i32 foo(i32 %p, %struct_type* %r, i32 %s, %struct_type* %t)
+ //
+ // ByValRegs[0] describes how "%r" is stored (Begin == r1, End == r2)
+ // ByValRegs[1] describes how "%t" is stored (Begin == r3, End == r4).
+ //
+  // In the case of 8-byte stack alignment,
+  // ByValRegs may also contain information about wasted registers.
+  // In the function shown above, r3 would be wasted according to AAPCS rules.
+  // In that case ByValRegs[1].Waste would be "true".
+  // The ByValRegs vector size would still be 2,
+  // but "%t" goes to the stack: it wouldn't be described in ByValRegs.
+ //
+  // Intended use-case for this collection:
+  // 1. Initially ByValRegs is empty, InRegsParamsProcessed is 0.
+  // 2. HandleByVal fills ByValRegs.
+  // 3. Argument analysis (LowerFormalArguments, for example). After
+  //    a byval argument is analyzed, InRegsParamsProcessed is incremented.
+ struct ByValInfo {
+ ByValInfo(unsigned B, unsigned E, bool IsWaste = false) :
+ Begin(B), End(E), Waste(IsWaste) {}
+ // First register allocated for current parameter.
+ unsigned Begin;
+
+ // First after last register allocated for current parameter.
+ unsigned End;
+
+    // Indicates that the current range of registers doesn't belong to any
+    // parameter; it was wasted due to stack alignment rules.
+ // For more information see:
+ // AAPCS, 5.5 Parameter Passing, Stage C, C.3.
+ bool Waste;
+ };
+  SmallVector<ByValInfo, 4> ByValRegs;
+
+  // InRegsParamsProcessed - shows how many entries of ByValRegs have been
+  // processed during argument analysis.
+ unsigned InRegsParamsProcessed;
+
+protected:
+ ParmContext CallOrPrologue;
+
+public:
+ CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
+ SmallVectorImpl<CCValAssign> &locs, LLVMContext &C);
+
+ void addLoc(const CCValAssign &V) {
+ Locs.push_back(V);
+ }
+
+ LLVMContext &getContext() const { return Context; }
+ MachineFunction &getMachineFunction() const { return MF; }
+ CallingConv::ID getCallingConv() const { return CallingConv; }
+ bool isVarArg() const { return IsVarArg; }
+
+ /// getNextStackOffset - Return the next stack offset such that all stack
+ /// slots satisfy their alignment requirements.
+ unsigned getNextStackOffset() const {
+ return StackOffset;
+ }
+
+ /// getAlignedCallFrameSize - Return the size of the call frame needed to
+ /// be able to store all arguments and such that the alignment requirement
+ /// of each of the arguments is satisfied.
+ unsigned getAlignedCallFrameSize() const {
+ return RoundUpToAlignment(StackOffset, MaxStackArgAlign);
+ }
+
+ /// isAllocated - Return true if the specified register (or an alias) is
+ /// allocated.
+ bool isAllocated(unsigned Reg) const {
+ return UsedRegs[Reg/32] & (1 << (Reg&31));
+ }
+
+ /// AnalyzeFormalArguments - Analyze an array of argument values,
+ /// incorporating info about the formals into this state.
+ void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn);
+
+ /// AnalyzeReturn - Analyze the returned values of a return,
+ /// incorporating info about the result values into this state.
+ void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn);
+
+ /// CheckReturn - Analyze the return values of a function, returning
+ /// true if the return can be performed without sret-demotion, and
+ /// false otherwise.
+ bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
+ CCAssignFn Fn);
+
+ /// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+ /// incorporating info about the passed values into this state.
+ void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+ CCAssignFn Fn);
+
+ /// AnalyzeCallOperands - Same as above except it takes vectors of types
+ /// and argument flags.
+ void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Analyze the return values of a call,
+ /// incorporating info about the passed values into this state.
+ void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
+ CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Same as above except it's specialized for calls which
+ /// produce a single value.
+ void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+
+ /// getFirstUnallocated - Return the index of the first unallocated register
+ /// in the set, or Regs.size() if they are all allocated.
+ unsigned getFirstUnallocated(ArrayRef<MCPhysReg> Regs) const {
+ for (unsigned i = 0; i < Regs.size(); ++i)
+ if (!isAllocated(Regs[i]))
+ return i;
+ return Regs.size();
+ }
+
+ /// AllocateReg - Attempt to allocate one register. If it is not available,
+ /// return zero. Otherwise, return the register, marking it and any aliases
+ /// as allocated.
+ unsigned AllocateReg(unsigned Reg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with extra register to be shadowed.
+ unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateReg - Attempt to allocate one of the specified registers. If none
+ /// are available, return zero. Otherwise, return the first one available,
+ /// marking it and any aliases as allocated.
+ unsigned AllocateReg(ArrayRef<MCPhysReg> Regs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs);
+ if (FirstUnalloc == Regs.size())
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc];
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+  /// AllocateRegBlock - Attempt to allocate a block of RegsRequired
+  /// consecutive registers. If this is not possible, return zero. Otherwise,
+  /// return the first register of the block that was allocated, marking the
+  /// entire block as allocated.
+ unsigned AllocateRegBlock(ArrayRef<MCPhysReg> Regs, unsigned RegsRequired) {
+ if (RegsRequired > Regs.size())
+ return 0;
+
+ for (unsigned StartIdx = 0; StartIdx <= Regs.size() - RegsRequired;
+ ++StartIdx) {
+ bool BlockAvailable = true;
+ // Check for already-allocated regs in this block
+ for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+ if (isAllocated(Regs[StartIdx + BlockIdx])) {
+ BlockAvailable = false;
+ break;
+ }
+ }
+ if (BlockAvailable) {
+ // Mark the entire block as allocated
+ for (unsigned BlockIdx = 0; BlockIdx < RegsRequired; ++BlockIdx) {
+ MarkAllocated(Regs[StartIdx + BlockIdx]);
+ }
+ return Regs[StartIdx];
+ }
+ }
+ // No block was available
+ return 0;
+ }
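+  // Worked example for the above (illustrative; R0-R3 are hypothetical
+  // registers): with Regs = {R0, R1, R2, R3} and R1 already allocated,
+  // AllocateRegBlock(Regs, 2) rejects the blocks starting at R0 and R1,
+  // marks R2 and R3 allocated, and returns R2.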
+
+ /// Version of AllocateReg with list of registers to be shadowed.
+ unsigned AllocateReg(ArrayRef<MCPhysReg> Regs, const MCPhysReg *ShadowRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs);
+ if (FirstUnalloc == Regs.size())
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateStack - Allocate a chunk of stack space with the specified size
+ /// and alignment.
+ unsigned AllocateStack(unsigned Size, unsigned Align) {
+ assert(Align && ((Align - 1) & Align) == 0); // Align is power of 2.
+ StackOffset = RoundUpToAlignment(StackOffset, Align);
+ unsigned Result = StackOffset;
+ StackOffset += Size;
+ MaxStackArgAlign = std::max(Align, MaxStackArgAlign);
+ MF.getFrameInfo()->ensureMaxAlignment(Align);
+ return Result;
+ }
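+  // Worked example for the above (illustrative): with StackOffset == 4,
+  // AllocateStack(8, 8) rounds the offset up to 8, returns 8, and leaves
+  // StackOffset == 16 with MaxStackArgAlign at least 8.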
+
+ /// Version of AllocateStack with extra register to be shadowed.
+ unsigned AllocateStack(unsigned Size, unsigned Align, unsigned ShadowReg) {
+ MarkAllocated(ShadowReg);
+ return AllocateStack(Size, Align);
+ }
+
+ /// Version of AllocateStack with list of extra registers to be shadowed.
+ /// Note that, unlike AllocateReg, this shadows ALL of the shadow registers.
+ unsigned AllocateStack(unsigned Size, unsigned Align,
+ ArrayRef<MCPhysReg> ShadowRegs) {
+ for (unsigned i = 0; i < ShadowRegs.size(); ++i)
+ MarkAllocated(ShadowRegs[i]);
+ return AllocateStack(Size, Align);
+ }
+
+ // HandleByVal - Allocate a stack slot large enough to pass an argument by
+ // value. The size and alignment information of the argument is encoded in its
+ // parameter attribute.
+ void HandleByVal(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+ // Returns count of byval arguments that are to be stored (even partly)
+ // in registers.
+ unsigned getInRegsParamsCount() const { return ByValRegs.size(); }
+
+  // Returns the count of byval in-regs arguments processed.
+ unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }
+
+ // Get information about N-th byval parameter that is stored in registers.
+ // Here "ByValParamIndex" is N.
+ void getInRegsParamInfo(unsigned InRegsParamRecordIndex,
+ unsigned& BeginReg, unsigned& EndReg) const {
+ assert(InRegsParamRecordIndex < ByValRegs.size() &&
+ "Wrong ByVal parameter index");
+
+ const ByValInfo& info = ByValRegs[InRegsParamRecordIndex];
+ BeginReg = info.Begin;
+ EndReg = info.End;
+ }
+
+ // Add information about parameter that is kept in registers.
+ void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd) {
+ ByValRegs.push_back(ByValInfo(RegBegin, RegEnd));
+ }
+
+  // Advances either to the next byval parameter (excluding "waste" records),
+  // or to the end of the collection.
+  // Returns false if the end is reached.
+ bool nextInRegsParam() {
+ unsigned e = ByValRegs.size();
+ if (InRegsParamsProcessed < e)
+ ++InRegsParamsProcessed;
+ return InRegsParamsProcessed < e;
+ }
+
+ // Clear byval registers tracking info.
+ void clearByValRegsInfo() {
+ InRegsParamsProcessed = 0;
+ ByValRegs.clear();
+ }
+
+ // Rewind byval registers tracking info.
+ void rewindByValRegsInfo() {
+ InRegsParamsProcessed = 0;
+ }
+
+ ParmContext getCallOrPrologue() const { return CallOrPrologue; }
+
+  // Get the list of pending assignments.
+ SmallVectorImpl<llvm::CCValAssign> &getPendingLocs() {
+ return PendingLocs;
+ }
+
+ /// Compute the remaining unused register parameters that would be used for
+ /// the given value type. This is useful when varargs are passed in the
+ /// registers that normal prototyped parameters would be passed in, or for
+ /// implementing perfect forwarding.
+ void getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs, MVT VT,
+ CCAssignFn Fn);
+
+ /// Compute the set of registers that need to be preserved and forwarded to
+ /// any musttail calls.
+ void analyzeMustTailForwardedRegisters(
+ SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
+ CCAssignFn Fn);
+
+private:
+ /// MarkAllocated - Mark a register and all of its aliases as allocated.
+ void MarkAllocated(unsigned Reg);
+};
+
+
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/CommandFlags.h b/contrib/llvm/include/llvm/CodeGen/CommandFlags.h
new file mode 100644
index 0000000..0d37dc0
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/CommandFlags.h
@@ -0,0 +1,380 @@
+//===-- CommandFlags.h - Command Line Flags Interface -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains codegen-specific flags that are shared between different
+// command line tools. The tools "llc" and "opt" both use this file to prevent
+// flag duplication.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_COMMANDFLAGS_H
+#define LLVM_CODEGEN_COMMANDFLAGS_H
+
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCTargetOptionsCommandFlags.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRecip.h"
+#include <string>
+using namespace llvm;
+
+cl::opt<std::string>
+MArch("march", cl::desc("Architecture to generate code for (see --version)"));
+
+cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+cl::opt<ThreadModel::Model>
+TMModel("thread-model",
+ cl::desc("Choose threading model"),
+ cl::init(ThreadModel::POSIX),
+ cl::values(clEnumValN(ThreadModel::POSIX, "posix",
+ "POSIX thread model"),
+ clEnumValN(ThreadModel::Single, "single",
+ "Single thread model"),
+ clEnumValEnd));
+
+cl::opt<llvm::CodeModel::Model>
+CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::Default),
+ cl::values(clEnumValN(CodeModel::Default, "default",
+ "Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+cl::opt<TargetMachine::CodeGenFileType>
+FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+ cl::desc("Choose a file type (not all types are supported by all targets):"),
+ cl::values(
+ clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+ "Emit an assembly ('.s') file"),
+ clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+ "Emit a native object ('.o') file"),
+ clEnumValN(TargetMachine::CGFT_Null, "null",
+ "Emit nothing, for performance testing"),
+ clEnumValEnd));
+
+cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
+
+cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+ cl::Hidden,
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::init(false));
+
+cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+
+cl::opt<llvm::FPOpFusion::FPOpFusionMode>
+FuseFPOps("fp-contract",
+ cl::desc("Enable aggressive formation of fused FP ops"),
+ cl::init(FPOpFusion::Standard),
+ cl::values(
+ clEnumValN(FPOpFusion::Fast, "fast",
+ "Fuse FP ops whenever profitable"),
+ clEnumValN(FPOpFusion::Standard, "on",
+ "Only fuse 'blessed' FP ops."),
+ clEnumValN(FPOpFusion::Strict, "off",
+ "Only fuse FP ops when the result won't be affected."),
+ clEnumValEnd));
+
+cl::list<std::string>
+ReciprocalOps("recip",
+ cl::CommaSeparated,
+ cl::desc("Choose reciprocal operation types and parameters."),
+ cl::value_desc("all,none,default,divf,!vec-sqrtd,vec-divd:0,sqrt:9..."));
+
+cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::init(false));
+
+cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+ cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+ cl::init(false));
+
+cl::opt<bool>
+DisableTailCalls("disable-tail-calls",
+ cl::desc("Never emit tail calls"),
+ cl::init(false));
+
+cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::init(0));
+
+cl::opt<bool>
+StackRealign("stackrealign",
+ cl::desc("Force align the stack to the minimum alignment"),
+ cl::init(false));
+
+cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+ cl::desc("Emit a call to trap function rather than a trap instruction"),
+ cl::init(""));
+
+cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+cl::opt<bool>
+UseCtors("use-ctors",
+ cl::desc("Use .ctors instead of .init_array."),
+ cl::init(false));
+
+cl::opt<std::string> StopAfter("stop-after",
+ cl::desc("Stop compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+cl::opt<std::string> StartAfter("start-after",
+ cl::desc("Resume compilation after a specific pass"),
+ cl::value_desc("pass-name"),
+ cl::init(""));
+
+cl::opt<std::string>
+ RunPass("run-pass", cl::desc("Run compiler only for one specific pass"),
+ cl::value_desc("pass-name"), cl::init(""));
+
+cl::opt<bool> DataSections("data-sections",
+ cl::desc("Emit data into separate sections"),
+ cl::init(false));
+
+cl::opt<bool>
+FunctionSections("function-sections",
+ cl::desc("Emit functions into separate sections"),
+ cl::init(false));
+
+cl::opt<bool> EmulatedTLS("emulated-tls",
+ cl::desc("Use emulated TLS model"),
+ cl::init(false));
+
+cl::opt<bool> UniqueSectionNames("unique-section-names",
+ cl::desc("Give unique names to every section"),
+ cl::init(true));
+
+cl::opt<llvm::JumpTable::JumpTableType>
+JTableType("jump-table-type",
+ cl::desc("Choose the type of Jump-Instruction Table for jumptable."),
+ cl::init(JumpTable::Single),
+ cl::values(
+ clEnumValN(JumpTable::Single, "single",
+ "Create a single table for all jumptable functions"),
+ clEnumValN(JumpTable::Arity, "arity",
+ "Create one table per number of parameters."),
+ clEnumValN(JumpTable::Simplified, "simplified",
+ "Create one table per simplified function type."),
+ clEnumValN(JumpTable::Full, "full",
+ "Create one table per unique function type."),
+ clEnumValEnd));
+
+cl::opt<llvm::EABI> EABIVersion(
+ "meabi", cl::desc("Set EABI type (default depends on triple):"),
+ cl::init(EABI::Default),
+ cl::values(clEnumValN(EABI::Default, "default",
+ "Triple default EABI version"),
+ clEnumValN(EABI::EABI4, "4", "EABI version 4"),
+ clEnumValN(EABI::EABI5, "5", "EABI version 5"),
+ clEnumValN(EABI::GNU, "gnu", "EABI GNU"), clEnumValEnd));
+
+cl::opt<DebuggerKind>
+DebuggerTuningOpt("debugger-tune",
+ cl::desc("Tune debug info for a particular debugger"),
+ cl::init(DebuggerKind::Default),
+ cl::values(
+ clEnumValN(DebuggerKind::GDB, "gdb", "gdb"),
+ clEnumValN(DebuggerKind::LLDB, "lldb", "lldb"),
+ clEnumValN(DebuggerKind::SCE, "sce",
+ "SCE targets (e.g. PS4)"),
+ clEnumValEnd));
+
+// Common utility function tightly tied to the options listed here. Initializes
+// a TargetOptions object with CodeGen flags and returns it.
+static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
+ TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
+ Options.AllowFPOpFusion = FuseFPOps;
+ Options.Reciprocals = TargetRecip(ReciprocalOps);
+ Options.UnsafeFPMath = EnableUnsafeFPMath;
+ Options.NoInfsFPMath = EnableNoInfsFPMath;
+ Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.HonorSignDependentRoundingFPMathOption =
+ EnableHonorSignDependentRoundingFPMath;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ Options.NoZerosInBSS = DontPlaceZerosInBSS;
+ Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+ Options.StackAlignmentOverride = OverrideStackAlignment;
+ Options.PositionIndependentExecutable = EnablePIE;
+ Options.UseInitArray = !UseCtors;
+ Options.DataSections = DataSections;
+ Options.FunctionSections = FunctionSections;
+ Options.UniqueSectionNames = UniqueSectionNames;
+ Options.EmulatedTLS = EmulatedTLS;
+
+ Options.MCOptions = InitMCTargetOptionsFromFlags();
+ Options.JTType = JTableType;
+
+ Options.ThreadModel = TMModel;
+ Options.EABIVersion = EABIVersion;
+ Options.DebuggerTuning = DebuggerTuningOpt;
+
+ return Options;
+}
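+// Usage sketch (illustrative only; TheTarget, TheTriple and OLvl are assumed
+// to be set up elsewhere, as in llc):
+//
+//   TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
+//   std::unique_ptr<TargetMachine> TM(TheTarget->createTargetMachine(
+//       TheTriple.getTriple(), getCPUStr(), getFeaturesStr(), Options,
+//       RelocModel, CMModel, OLvl));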
+
+static inline std::string getCPUStr() {
+  // If the user asked for the 'native' CPU, autodetect it here. If
+  // autodetection fails, this will set the CPU to an empty string, which
+  // tells the target to pick a basic default.
+ if (MCPU == "native")
+ return sys::getHostCPUName();
+
+ return MCPU;
+}
+
+static inline std::string getFeaturesStr() {
+ SubtargetFeatures Features;
+
+  // If the user asked for the 'native' CPU, we need to autodetect features.
+ // This is necessary for x86 where the CPU might not support all the
+ // features the autodetected CPU name lists in the target. For example,
+ // not all Sandybridge processors support AVX.
+ if (MCPU == "native") {
+ StringMap<bool> HostFeatures;
+ if (sys::getHostCPUFeatures(HostFeatures))
+ for (auto &F : HostFeatures)
+ Features.AddFeature(F.first(), F.second);
+ }
+
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+
+ return Features.getString();
+}
+
+/// \brief Set function attributes of functions in Module M based on CPU,
+/// Features, and command line flags.
+static inline void setFunctionAttributes(StringRef CPU, StringRef Features,
+ Module &M) {
+ for (auto &F : M) {
+ auto &Ctx = F.getContext();
+ AttributeSet Attrs = F.getAttributes(), NewAttrs;
+
+ if (!CPU.empty())
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "target-cpu", CPU);
+
+ if (!Features.empty())
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "target-features", Features);
+
+ if (DisableFPElim.getNumOccurrences() > 0)
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "no-frame-pointer-elim",
+ DisableFPElim ? "true" : "false");
+
+ if (DisableTailCalls.getNumOccurrences() > 0)
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "disable-tail-calls",
+ toStringRef(DisableTailCalls));
+
+ if (StackRealign)
+ NewAttrs = NewAttrs.addAttribute(Ctx, AttributeSet::FunctionIndex,
+ "stackrealign");
+
+ if (TrapFuncName.getNumOccurrences() > 0)
+ for (auto &B : F)
+ for (auto &I : B)
+ if (auto *Call = dyn_cast<CallInst>(&I))
+ if (const auto *F = Call->getCalledFunction())
+ if (F->getIntrinsicID() == Intrinsic::debugtrap ||
+ F->getIntrinsicID() == Intrinsic::trap)
+ Call->addAttribute(llvm::AttributeSet::FunctionIndex,
+ "trap-func-name", TrapFuncName);
+
+ // Let NewAttrs override Attrs.
+ NewAttrs = Attrs.addAttributes(Ctx, AttributeSet::FunctionIndex, NewAttrs);
+ F.setAttributes(NewAttrs);
+ }
+}
+
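+// A hedged usage sketch (not part of this header): an llc-style driver
+// typically combines the helpers above roughly as follows. `TheTarget`,
+// `TheTriple`, `RelocModel`, `CMModel`, `OLvl`, and the parsed Module `M`
+// are illustrative names assumed to be in scope.
+//
+//   TargetOptions Options = InitTargetOptionsFromCodeGenFlags();
+//   std::string CPUStr = getCPUStr(), FeaturesStr = getFeaturesStr();
+//   setFunctionAttributes(CPUStr, FeaturesStr, *M);
+//   std::unique_ptr<TargetMachine> TM(TheTarget->createTargetMachine(
+//       TheTriple.getTriple(), CPUStr, FeaturesStr, Options, RelocModel,
+//       CMModel, OLvl));
+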
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/DAGCombine.h b/contrib/llvm/include/llvm/CodeGen/DAGCombine.h
new file mode 100644
index 0000000..8b591900
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DAGCombine.h
@@ -0,0 +1,25 @@
+//===-- llvm/CodeGen/DAGCombine.h ------- SelectionDAG Nodes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_CODEGEN_DAGCOMBINE_H
+#define LLVM_CODEGEN_DAGCOMBINE_H
+
+namespace llvm {
+
+enum CombineLevel {
+ BeforeLegalizeTypes,
+ AfterLegalizeTypes,
+ AfterLegalizeVectorOps,
+ AfterLegalizeDAG
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
new file mode 100644
index 0000000..40ec201
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DFAPacketizer.h
@@ -0,0 +1,214 @@
+//=- llvm/CodeGen/DFAPacketizer.h - DFA Packetizer for VLIW ---*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This class implements a deterministic finite automaton (DFA) based
+// packetizing mechanism for VLIW architectures. It provides APIs to
+// determine whether there exists a legal mapping of instructions to
+// functional unit assignments in a packet. The DFA is auto-generated from
+// the target's Schedule.td file.
+//
+// A DFA consists of 3 major elements: states, inputs, and transitions. For
+// the packetizing mechanism, the input is the set of instruction classes for
+// a target. The state models all possible combinations of functional unit
+// consumption for a given set of instructions in a packet. A transition
+// models the addition of an instruction to a packet. In the DFA constructed
+// by this class, if an instruction can be added to a packet, then a valid
+// transition exists from the corresponding state. Invalid transitions
+// indicate that the instruction cannot be added to the current packet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DFAPACKETIZER_H
+#define LLVM_CODEGEN_DFAPACKETIZER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include <map>
+
+namespace llvm {
+
+class MCInstrDesc;
+class MachineInstr;
+class MachineLoopInfo;
+class MachineDominatorTree;
+class InstrItineraryData;
+class DefaultVLIWScheduler;
+class SUnit;
+
+// --------------------------------------------------------------------
+// Definitions shared between DFAPacketizer.cpp and DFAPacketizerEmitter.cpp
+
+// DFA_MAX_RESTERMS * DFA_MAX_RESOURCES must fit within sizeof DFAInput.
+// This is verified in DFAPacketizer.cpp:DFAPacketizer::DFAPacketizer.
+//
+// e.g. terms x resource bit combinations that fit in uint32_t:
+// 4 terms x 8 bits = 32 bits
+// 3 terms x 10 bits = 30 bits
+// 2 terms x 16 bits = 32 bits
+//
+// e.g. terms x resource bit combinations that fit in uint64_t:
+// 8 terms x 8 bits = 64 bits
+// 7 terms x 9 bits = 63 bits
+// 6 terms x 10 bits = 60 bits
+// 5 terms x 12 bits = 60 bits
+// 4 terms x 16 bits = 64 bits <--- current
+// 3 terms x 21 bits = 63 bits
+// 2 terms x 32 bits = 64 bits
+//
+#define DFA_MAX_RESTERMS 4 // The max # of AND'ed resource terms.
+#define DFA_MAX_RESOURCES 16 // The max # of resource bits in one term.
+
+typedef uint64_t DFAInput;
+typedef int64_t DFAStateInput;
+#define DFA_TBLTYPE "int64_t" // For generating DFAStateInputTable.
+// --------------------------------------------------------------------
+
+class DFAPacketizer {
+private:
+ typedef std::pair<unsigned, DFAInput> UnsignPair;
+
+ const InstrItineraryData *InstrItins;
+ int CurrentState;
+ const DFAStateInput (*DFAStateInputTable)[2];
+ const unsigned *DFAStateEntryTable;
+
+ // CachedTable is a map from <FromState, Input> to ToState.
+ DenseMap<UnsignPair, unsigned> CachedTable;
+
+ // ReadTable - Read the DFA transition table and update CachedTable.
+ void ReadTable(unsigned state);
+
+public:
+ DFAPacketizer(const InstrItineraryData *I, const DFAStateInput (*SIT)[2],
+ const unsigned *SET);
+
+ // Reset the current state to make all resources available.
+ void clearResources() {
+ CurrentState = 0;
+ }
+
+ // getInsnInput - Return the DFAInput for an instruction class.
+ DFAInput getInsnInput(unsigned InsnClass);
+
+ // getInsnInput - Return the DFAInput for an instruction class input vector.
+ static DFAInput getInsnInput(const std::vector<unsigned> &InsnClass);
+
+ // canReserveResources - Check if the resources occupied by a MCInstrDesc
+ // are available in the current state.
+ bool canReserveResources(const llvm::MCInstrDesc *MID);
+
+ // reserveResources - Reserve the resources occupied by a MCInstrDesc and
+ // change the current state to reflect that change.
+ void reserveResources(const llvm::MCInstrDesc *MID);
+
+ // canReserveResources - Check if the resources occupied by a machine
+ // instruction are available in the current state.
+ bool canReserveResources(llvm::MachineInstr *MI);
+
+ // reserveResources - Reserve the resources occupied by a machine
+ // instruction and change the current state to reflect that change.
+ void reserveResources(llvm::MachineInstr *MI);
+
+ const InstrItineraryData *getInstrItins() const { return InstrItins; }
+};
+
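+// A hedged usage sketch (illustrative only): a target packetizer typically
+// drives the DFA like this, where `MIs` is a hypothetical list of candidate
+// instructions and `STI` the subtarget.
+//
+//   DFAPacketizer *RT = TII->CreateTargetScheduleState(STI);
+//   RT->clearResources();
+//   for (MachineInstr *MI : MIs) {
+//     if (!RT->canReserveResources(MI))
+//       break;                    // Packet is full; start a new one.
+//     RT->reserveResources(MI);   // MI joins the current packet.
+//   }
+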
+// VLIWPacketizerList - Implements a simple VLIW packetizer using a DFA. The
+// packetizer works on machine basic blocks. For each instruction I in BB, the
+// packetizer consults the DFA to see if machine resources are available to
+// execute I. If so, the packetizer checks if I depends on any instruction J in
+// the current packet. If no dependency is found, I is added to the current
+// packet and the machine resources are marked as taken. If a dependency is
+// found, a target API call is made to prune the dependence.
+class VLIWPacketizerList {
+protected:
+ MachineFunction &MF;
+ const TargetInstrInfo *TII;
+ AliasAnalysis *AA;
+
+ // The VLIW Scheduler.
+ DefaultVLIWScheduler *VLIWScheduler;
+
+ // Vector of instructions assigned to the current packet.
+ std::vector<MachineInstr*> CurrentPacketMIs;
+ // DFA resource tracker.
+ DFAPacketizer *ResourceTracker;
+
+ // Generate MI -> SU map.
+ std::map<MachineInstr*, SUnit*> MIToSUnit;
+
+public:
+ // The AliasAnalysis parameter can be nullptr.
+ VLIWPacketizerList(MachineFunction &MF, MachineLoopInfo &MLI,
+ AliasAnalysis *AA);
+
+ virtual ~VLIWPacketizerList();
+
+ // PacketizeMIs - Implement this API in the backend to bundle instructions.
+ void PacketizeMIs(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator BeginItr,
+ MachineBasicBlock::iterator EndItr);
+
+  // getResourceTracker - Return the DFA resource tracker.
+ DFAPacketizer *getResourceTracker() {return ResourceTracker;}
+
+ // addToPacket - Add MI to the current packet.
+ virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
+ MachineBasicBlock::iterator MII = MI;
+ CurrentPacketMIs.push_back(MI);
+ ResourceTracker->reserveResources(MI);
+ return MII;
+ }
+
+ // End the current packet and reset the state of the packetizer.
+ // Overriding this function allows the target-specific packetizer
+ // to perform custom finalization.
+ virtual void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
+
+  // initPacketizerState - Perform initialization before packetizing an
+  // instruction. This function is supposed to be overridden by the
+  // target-dependent packetizer.
+ virtual void initPacketizerState() { return; }
+
+ // ignorePseudoInstruction - Ignore bundling of pseudo instructions.
+ virtual bool ignorePseudoInstruction(const MachineInstr *I,
+ const MachineBasicBlock *MBB) {
+ return false;
+ }
+
+  // isSoloInstruction - Return true if instruction MI cannot be packetized
+  // with any other instruction, which means that MI itself is a packet.
+ virtual bool isSoloInstruction(const MachineInstr *MI) {
+ return true;
+ }
+
+  // Check if the packetizer should try to add the given instruction to
+  // the current packet. One reason it may not be desirable to include an
+  // instruction in the current packet is that it would cause a stall.
+ // If this function returns "false", the current packet will be ended,
+ // and the instruction will be added to the next packet.
+ virtual bool shouldAddToPacket(const MachineInstr *MI) {
+ return true;
+ }
+
+ // isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
+  // together?
+ virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
+ return false;
+ }
+
+  // isLegalToPruneDependencies - Is it legal to prune the dependence between
+  // SUI and SUJ?
+ virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
+ return false;
+ }
+
+};
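+
+// A hedged subclassing sketch (illustrative only): a target packetizer
+// overrides the legality hooks above and then runs PacketizeMIs over each
+// basic block.
+//
+//   class MyPacketizer : public VLIWPacketizerList {
+//   public:
+//     MyPacketizer(MachineFunction &MF, MachineLoopInfo &MLI)
+//         : VLIWPacketizerList(MF, MLI, /*AA=*/nullptr) {}
+//     bool isSoloInstruction(const MachineInstr *MI) override {
+//       return MI->isInlineAsm(); // e.g. keep inline asm in its own packet.
+//     }
+//     bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) override {
+//       return true; // Target-specific dependence checks go here.
+//     }
+//   };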
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/DIE.h b/contrib/llvm/include/llvm/CodeGen/DIE.h
new file mode 100644
index 0000000..fa612d9
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DIE.h
@@ -0,0 +1,764 @@
+//===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures for DWARF info entries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+#define LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/Support/Dwarf.h"
+#include <vector>
+
+namespace llvm {
+class AsmPrinter;
+class MCExpr;
+class MCSymbol;
+class raw_ostream;
+class DwarfTypeUnit;
+
+//===--------------------------------------------------------------------===//
+/// DIEAbbrevData - Dwarf abbreviation data, describes one attribute of a
+/// Dwarf abbreviation.
+class DIEAbbrevData {
+ /// Attribute - Dwarf attribute code.
+ ///
+ dwarf::Attribute Attribute;
+
+ /// Form - Dwarf form code.
+ ///
+ dwarf::Form Form;
+
+public:
+ DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
+
+ // Accessors.
+ dwarf::Attribute getAttribute() const { return Attribute; }
+ dwarf::Form getForm() const { return Form; }
+
+ /// Profile - Used to gather unique data for the abbreviation folding set.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEAbbrev - Dwarf abbreviation, describes the organization of a debug
+/// information object.
+class DIEAbbrev : public FoldingSetNode {
+ /// Unique number for node.
+ ///
+ unsigned Number;
+
+ /// Tag - Dwarf tag code.
+ ///
+ dwarf::Tag Tag;
+
+ /// Children - Whether or not this node has children.
+ ///
+ // This cheats a bit in all of the uses since the values in the standard
+ // are 0 and 1 for no children and children respectively.
+ bool Children;
+
+ /// Data - Raw data bytes for abbreviation.
+ ///
+ SmallVector<DIEAbbrevData, 12> Data;
+
+public:
+ DIEAbbrev(dwarf::Tag T, bool C) : Tag(T), Children(C), Data() {}
+
+ // Accessors.
+ dwarf::Tag getTag() const { return Tag; }
+ unsigned getNumber() const { return Number; }
+ bool hasChildren() const { return Children; }
+ const SmallVectorImpl<DIEAbbrevData> &getData() const { return Data; }
+ void setChildrenFlag(bool hasChild) { Children = hasChild; }
+ void setNumber(unsigned N) { Number = N; }
+
+ /// AddAttribute - Adds another set of attribute information to the
+ /// abbreviation.
+ void AddAttribute(dwarf::Attribute Attribute, dwarf::Form Form) {
+ Data.push_back(DIEAbbrevData(Attribute, Form));
+ }
+
+ /// Profile - Used to gather unique data for the abbreviation folding set.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// Emit - Print the abbreviation using the specified asm printer.
+ ///
+ void Emit(const AsmPrinter *AP) const;
+
+ void print(raw_ostream &O);
+ void dump();
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEInteger - An integer value DIE.
+///
+class DIEInteger {
+ uint64_t Integer;
+
+public:
+ explicit DIEInteger(uint64_t I) : Integer(I) {}
+
+ /// BestForm - Choose the best form for integer.
+ ///
+ static dwarf::Form BestForm(bool IsSigned, uint64_t Int) {
+ if (IsSigned) {
+ const int64_t SignedInt = Int;
+ if ((char)Int == SignedInt)
+ return dwarf::DW_FORM_data1;
+ if ((short)Int == SignedInt)
+ return dwarf::DW_FORM_data2;
+ if ((int)Int == SignedInt)
+ return dwarf::DW_FORM_data4;
+ } else {
+ if ((unsigned char)Int == Int)
+ return dwarf::DW_FORM_data1;
+ if ((unsigned short)Int == Int)
+ return dwarf::DW_FORM_data2;
+ if ((unsigned int)Int == Int)
+ return dwarf::DW_FORM_data4;
+ }
+ return dwarf::DW_FORM_data8;
+ }
+
+ uint64_t getValue() const { return Integer; }
+ void setValue(uint64_t Val) { Integer = Val; }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEExpr - An expression DIE.
+//
+class DIEExpr {
+ const MCExpr *Expr;
+
+public:
+ explicit DIEExpr(const MCExpr *E) : Expr(E) {}
+
+ /// getValue - Get MCExpr.
+ ///
+ const MCExpr *getValue() const { return Expr; }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIELabel - A label DIE.
+//
+class DIELabel {
+ const MCSymbol *Label;
+
+public:
+ explicit DIELabel(const MCSymbol *L) : Label(L) {}
+
+ /// getValue - Get MCSymbol.
+ ///
+ const MCSymbol *getValue() const { return Label; }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEDelta - A simple label difference DIE.
+///
+class DIEDelta {
+ const MCSymbol *LabelHi;
+ const MCSymbol *LabelLo;
+
+public:
+ DIEDelta(const MCSymbol *Hi, const MCSymbol *Lo) : LabelHi(Hi), LabelLo(Lo) {}
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEString - A container for string values.
+///
+class DIEString {
+ DwarfStringPoolEntryRef S;
+
+public:
+ DIEString(DwarfStringPoolEntryRef S) : S(S) {}
+
+ /// getString - Grab the string out of the object.
+ StringRef getString() const { return S.getString(); }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEEntry - A pointer to another debug information entry. An instance of
+/// this class can also be used as a proxy for a debug information entry not
+/// yet defined (i.e., types).
+class DIE;
+class DIEEntry {
+ DIE *Entry;
+
+ DIEEntry() = delete;
+
+public:
+ explicit DIEEntry(DIE &E) : Entry(&E) {}
+
+ DIE &getEntry() const { return *Entry; }
+
+ /// Returns size of a ref_addr entry.
+ static unsigned getRefAddrSize(const AsmPrinter *AP);
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+ return Form == dwarf::DW_FORM_ref_addr ? getRefAddrSize(AP)
+ : sizeof(int32_t);
+ }
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// \brief A signature reference to a type unit.
+class DIETypeSignature {
+ const DwarfTypeUnit *Unit;
+
+ DIETypeSignature() = delete;
+
+public:
+ explicit DIETypeSignature(const DwarfTypeUnit &Unit) : Unit(&Unit) {}
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
+ assert(Form == dwarf::DW_FORM_ref_sig8);
+ return 8;
+ }
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIELocList - Represents a pointer to a location list in the debug_loc
+/// section.
+//
+class DIELocList {
+ // Index into the .debug_loc vector.
+ size_t Index;
+
+public:
+ DIELocList(size_t I) : Index(I) {}
+
+ /// getValue - Grab the current index out.
+ size_t getValue() const { return Index; }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEValue - A debug information entry value. Some of these roughly correlate
+/// to DWARF attribute classes.
+///
+class DIEBlock;
+class DIELoc;
+class DIEValue {
+public:
+ enum Type {
+ isNone,
+#define HANDLE_DIEVALUE(T) is##T,
+#include "llvm/CodeGen/DIEValue.def"
+ };
+
+private:
+ /// Ty - Type of data stored in the value.
+ ///
+ Type Ty = isNone;
+ dwarf::Attribute Attribute = (dwarf::Attribute)0;
+ dwarf::Form Form = (dwarf::Form)0;
+
+ /// Storage for the value.
+ ///
+ /// All values that aren't standard layout (or are larger than 8 bytes)
+ /// should be stored by reference instead of by value.
+ typedef AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
+ DIEDelta *, DIEEntry, DIETypeSignature,
+ DIEBlock *, DIELoc *, DIELocList> ValTy;
+ static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
+ sizeof(ValTy) <= sizeof(void *),
+ "Expected all large types to be stored via pointer");
+
+ /// Underlying stored value.
+ ValTy Val;
+
+ template <class T> void construct(T V) {
+ static_assert(std::is_standard_layout<T>::value ||
+ std::is_pointer<T>::value,
+ "Expected standard layout or pointer");
+ new (reinterpret_cast<void *>(Val.buffer)) T(V);
+ }
+
+ template <class T> T *get() { return reinterpret_cast<T *>(Val.buffer); }
+ template <class T> const T *get() const {
+ return reinterpret_cast<const T *>(Val.buffer);
+ }
+ template <class T> void destruct() { get<T>()->~T(); }
+
+ /// Destroy the underlying value.
+ ///
+ /// This should get optimized down to a no-op. We could skip it if we could
+ /// add a static assert on \a std::is_trivially_copyable(), but we currently
+ /// support versions of GCC that don't understand that.
+ void destroyVal() {
+ switch (Ty) {
+ case isNone:
+ return;
+#define HANDLE_DIEVALUE_SMALL(T) \
+  case is##T: \
+    destruct<DIE##T>(); \
+    return;
+#define HANDLE_DIEVALUE_LARGE(T) \
+  case is##T: \
+    destruct<const DIE##T *>(); \
+    return;
+#include "llvm/CodeGen/DIEValue.def"
+ }
+ }
+
+ /// Copy the underlying value.
+ ///
+ /// This should get optimized down to a simple copy. We need to actually
+ /// construct the value, rather than calling memcpy, to satisfy strict
+ /// aliasing rules.
+ void copyVal(const DIEValue &X) {
+ switch (Ty) {
+ case isNone:
+ return;
+#define HANDLE_DIEVALUE_SMALL(T) \
+ case is##T: \
+ construct<DIE##T>(*X.get<DIE##T>()); \
+ return;
+#define HANDLE_DIEVALUE_LARGE(T) \
+ case is##T: \
+ construct<const DIE##T *>(*X.get<const DIE##T *>()); \
+ return;
+#include "llvm/CodeGen/DIEValue.def"
+ }
+ }
+
+public:
+ DIEValue() = default;
+ DIEValue(const DIEValue &X) : Ty(X.Ty), Attribute(X.Attribute), Form(X.Form) {
+ copyVal(X);
+ }
+ DIEValue &operator=(const DIEValue &X) {
+ destroyVal();
+ Ty = X.Ty;
+ Attribute = X.Attribute;
+ Form = X.Form;
+ copyVal(X);
+ return *this;
+ }
+ ~DIEValue() { destroyVal(); }
+
+#define HANDLE_DIEVALUE_SMALL(T) \
+ DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T &V) \
+ : Ty(is##T), Attribute(Attribute), Form(Form) { \
+ construct<DIE##T>(V); \
+ }
+#define HANDLE_DIEVALUE_LARGE(T) \
+ DIEValue(dwarf::Attribute Attribute, dwarf::Form Form, const DIE##T *V) \
+ : Ty(is##T), Attribute(Attribute), Form(Form) { \
+ assert(V && "Expected valid value"); \
+ construct<const DIE##T *>(V); \
+ }
+#include "llvm/CodeGen/DIEValue.def"
+
+ // Accessors
+ Type getType() const { return Ty; }
+ dwarf::Attribute getAttribute() const { return Attribute; }
+ dwarf::Form getForm() const { return Form; }
+ explicit operator bool() const { return Ty; }
+
+#define HANDLE_DIEVALUE_SMALL(T) \
+ const DIE##T &getDIE##T() const { \
+ assert(getType() == is##T && "Expected " #T); \
+ return *get<DIE##T>(); \
+ }
+#define HANDLE_DIEVALUE_LARGE(T) \
+ const DIE##T &getDIE##T() const { \
+ assert(getType() == is##T && "Expected " #T); \
+ return **get<const DIE##T *>(); \
+ }
+#include "llvm/CodeGen/DIEValue.def"
+
+ /// EmitValue - Emit value via the Dwarf writer.
+ ///
+ void EmitValue(const AsmPrinter *AP) const;
+
+ /// SizeOf - Return the size of a value in bytes.
+ ///
+ unsigned SizeOf(const AsmPrinter *AP) const;
+
+ void print(raw_ostream &O) const;
+ void dump() const;
+};
+
+struct IntrusiveBackListNode {
+ PointerIntPair<IntrusiveBackListNode *, 1> Next;
+ IntrusiveBackListNode() : Next(this, true) {}
+
+ IntrusiveBackListNode *getNext() const {
+ return Next.getInt() ? nullptr : Next.getPointer();
+ }
+};
+
+struct IntrusiveBackListBase {
+ typedef IntrusiveBackListNode Node;
+ Node *Last = nullptr;
+
+ bool empty() const { return !Last; }
+ void push_back(Node &N) {
+ assert(N.Next.getPointer() == &N && "Expected unlinked node");
+ assert(N.Next.getInt() == true && "Expected unlinked node");
+
+ if (Last) {
+ N.Next = Last->Next;
+ Last->Next.setPointerAndInt(&N, false);
+ }
+ Last = &N;
+ }
+};
+
+template <class T> class IntrusiveBackList : IntrusiveBackListBase {
+public:
+ using IntrusiveBackListBase::empty;
+ void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
+ T &back() { return *static_cast<T *>(Last); }
+ const T &back() const { return *static_cast<T *>(Last); }
+
+ class const_iterator;
+ class iterator
+ : public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
+ friend class const_iterator;
+ Node *N = nullptr;
+
+ public:
+ iterator() = default;
+ explicit iterator(T *N) : N(N) {}
+
+ iterator &operator++() {
+ N = N->getNext();
+ return *this;
+ }
+
+ explicit operator bool() const { return N; }
+ T &operator*() const { return *static_cast<T *>(N); }
+
+ bool operator==(const iterator &X) const { return N == X.N; }
+ bool operator!=(const iterator &X) const { return N != X.N; }
+ };
+
+ class const_iterator
+ : public iterator_facade_base<const_iterator, std::forward_iterator_tag,
+ const T> {
+ const Node *N = nullptr;
+
+ public:
+ const_iterator() = default;
+ // Placate MSVC by explicitly scoping 'iterator'.
+ const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
+ explicit const_iterator(const T *N) : N(N) {}
+
+ const_iterator &operator++() {
+ N = N->getNext();
+ return *this;
+ }
+
+ explicit operator bool() const { return N; }
+ const T &operator*() const { return *static_cast<const T *>(N); }
+
+ bool operator==(const const_iterator &X) const { return N == X.N; }
+ bool operator!=(const const_iterator &X) const { return N != X.N; }
+ };
+
+ iterator begin() {
+ return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
+ }
+ const_iterator begin() const {
+ return const_cast<IntrusiveBackList *>(this)->begin();
+ }
+ iterator end() { return iterator(); }
+ const_iterator end() const { return const_iterator(); }
+
+ static iterator toIterator(T &N) { return iterator(&N); }
+ static const_iterator toIterator(const T &N) { return const_iterator(&N); }
+};
+
+/// A list of DIE values.
+///
+/// This is a singly-linked list, but instead of reversing the order of
+/// insertion, we keep a pointer to the back of the list so we can push in
+/// order.
+///
+/// There are two main reasons to choose a linked list over a customized
+/// vector-like data structure.
+///
+/// 1. For teardown efficiency, we want DIEs to be BumpPtrAllocated. Using a
+/// linked list here makes this way easier to accomplish.
+/// 2. Carrying an extra pointer per \a DIEValue isn't expensive. 45% of DIEs
+/// have 2 or fewer values, and 90% have 5 or fewer. A vector would be
+/// over-allocated by 50% on average anyway, the same cost as the
+/// linked-list node.
+class DIEValueList {
+ struct Node : IntrusiveBackListNode {
+ DIEValue V;
+ explicit Node(DIEValue V) : V(V) {}
+ };
+
+ typedef IntrusiveBackList<Node> ListTy;
+ ListTy List;
+
+public:
+ class const_value_iterator;
+ class value_iterator
+ : public iterator_adaptor_base<value_iterator, ListTy::iterator,
+ std::forward_iterator_tag, DIEValue> {
+ friend class const_value_iterator;
+ typedef iterator_adaptor_base<value_iterator, ListTy::iterator,
+ std::forward_iterator_tag,
+ DIEValue> iterator_adaptor;
+
+ public:
+ value_iterator() = default;
+ explicit value_iterator(ListTy::iterator X) : iterator_adaptor(X) {}
+
+ explicit operator bool() const { return bool(wrapped()); }
+ DIEValue &operator*() const { return wrapped()->V; }
+ };
+
+ class const_value_iterator : public iterator_adaptor_base<
+ const_value_iterator, ListTy::const_iterator,
+ std::forward_iterator_tag, const DIEValue> {
+ typedef iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
+ std::forward_iterator_tag,
+ const DIEValue> iterator_adaptor;
+
+ public:
+ const_value_iterator() = default;
+ const_value_iterator(DIEValueList::value_iterator X)
+ : iterator_adaptor(X.wrapped()) {}
+ explicit const_value_iterator(ListTy::const_iterator X)
+ : iterator_adaptor(X) {}
+
+ explicit operator bool() const { return bool(wrapped()); }
+ const DIEValue &operator*() const { return wrapped()->V; }
+ };
+
+ typedef iterator_range<value_iterator> value_range;
+ typedef iterator_range<const_value_iterator> const_value_range;
+
+ value_iterator addValue(BumpPtrAllocator &Alloc, DIEValue V) {
+ List.push_back(*new (Alloc) Node(V));
+ return value_iterator(ListTy::toIterator(List.back()));
+ }
+ template <class T>
+ value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
+ dwarf::Form Form, T &&Value) {
+ return addValue(Alloc, DIEValue(Attribute, Form, std::forward<T>(Value)));
+ }
+
+ value_range values() {
+ return llvm::make_range(value_iterator(List.begin()),
+ value_iterator(List.end()));
+ }
+ const_value_range values() const {
+ return llvm::make_range(const_value_iterator(List.begin()),
+ const_value_iterator(List.end()));
+ }
+};
+
+//===--------------------------------------------------------------------===//
+/// DIE - A structured debug information entry. Has an abbreviation which
+/// describes its organization.
+class DIE : IntrusiveBackListNode, public DIEValueList {
+ friend class IntrusiveBackList<DIE>;
+
+ /// Offset - Offset in debug info section.
+ ///
+ unsigned Offset;
+
+ /// Size - Size of instance + children.
+ ///
+ unsigned Size;
+
+ unsigned AbbrevNumber = ~0u;
+
+ /// Tag - Dwarf tag code.
+ ///
+ dwarf::Tag Tag = (dwarf::Tag)0;
+
+ /// Children DIEs.
+ IntrusiveBackList<DIE> Children;
+
+ DIE *Parent = nullptr;
+
+ DIE() = delete;
+ explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag) {}
+
+public:
+ static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
+ return new (Alloc) DIE(Tag);
+ }
+
+ // Accessors.
+ unsigned getAbbrevNumber() const { return AbbrevNumber; }
+ dwarf::Tag getTag() const { return Tag; }
+ unsigned getOffset() const { return Offset; }
+ unsigned getSize() const { return Size; }
+ bool hasChildren() const { return !Children.empty(); }
+
+ typedef IntrusiveBackList<DIE>::iterator child_iterator;
+ typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator;
+ typedef iterator_range<child_iterator> child_range;
+ typedef iterator_range<const_child_iterator> const_child_range;
+
+ child_range children() {
+ return llvm::make_range(Children.begin(), Children.end());
+ }
+ const_child_range children() const {
+ return llvm::make_range(Children.begin(), Children.end());
+ }
+
+ DIE *getParent() const { return Parent; }
+
+ /// Generate the abbreviation for this DIE.
+ ///
+ /// Calculate the abbreviation for this, which should be uniqued and
+ /// eventually used to call \a setAbbrevNumber().
+ DIEAbbrev generateAbbrev() const;
+
+ /// Set the abbreviation number for this DIE.
+ void setAbbrevNumber(unsigned I) { AbbrevNumber = I; }
+
+ /// Climb up the parent chain to get the compile or type unit DIE this DIE
+ /// belongs to.
+ const DIE *getUnit() const;
+  /// Similar to getUnit, but returns null when the DIE has not yet been
+  /// added to an owner.
+ const DIE *getUnitOrNull() const;
+ void setOffset(unsigned O) { Offset = O; }
+ void setSize(unsigned S) { Size = S; }
+
+ /// Add a child to the DIE.
+ DIE &addChild(DIE *Child) {
+ assert(!Child->getParent() && "Child should be orphaned");
+ Child->Parent = this;
+ Children.push_back(*Child);
+ return Children.back();
+ }
+
+ /// Find a value in the DIE with the attribute given.
+ ///
+ /// Returns a default-constructed DIEValue (where \a DIEValue::getType()
+ /// gives \a DIEValue::isNone) if no such attribute exists.
+ DIEValue findAttribute(dwarf::Attribute Attribute) const;
+
+ void print(raw_ostream &O, unsigned IndentCount = 0) const;
+ void dump();
+};
+
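+// A hedged construction sketch (illustrative only): DWARF emitters build a
+// DIE tree roughly like this, where `Alloc` is a BumpPtrAllocator owned by
+// the unit and `Name` a DwarfStringPoolEntryRef; both names are assumptions.
+//
+//   DIE *CU = DIE::get(Alloc, dwarf::DW_TAG_compile_unit);
+//   DIE &SP = CU->addChild(DIE::get(Alloc, dwarf::DW_TAG_subprogram));
+//   SP.addValue(Alloc, dwarf::DW_AT_name, dwarf::DW_FORM_strp,
+//               DIEString(Name));
+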
+//===--------------------------------------------------------------------===//
+/// DIELoc - Represents an expression location.
+//
+class DIELoc : public DIEValueList {
+ mutable unsigned Size; // Size in bytes excluding size header.
+
+public:
+ DIELoc() : Size(0) {}
+
+ /// ComputeSize - Calculate the size of the location expression.
+ ///
+ unsigned ComputeSize(const AsmPrinter *AP) const;
+
+ /// BestForm - Choose the best form for data.
+ ///
+ dwarf::Form BestForm(unsigned DwarfVersion) const {
+ if (DwarfVersion > 3)
+ return dwarf::DW_FORM_exprloc;
+ // Pre-DWARF4 location expressions were blocks and not exprloc.
+ if ((unsigned char)Size == Size)
+ return dwarf::DW_FORM_block1;
+ if ((unsigned short)Size == Size)
+ return dwarf::DW_FORM_block2;
+ if ((unsigned int)Size == Size)
+ return dwarf::DW_FORM_block4;
+ return dwarf::DW_FORM_block;
+ }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+//===--------------------------------------------------------------------===//
+/// DIEBlock - Represents a block of values.
+//
+class DIEBlock : public DIEValueList {
+ mutable unsigned Size; // Size in bytes excluding size header.
+
+public:
+ DIEBlock() : Size(0) {}
+
+ /// ComputeSize - Calculate the size of the location expression.
+ ///
+ unsigned ComputeSize(const AsmPrinter *AP) const;
+
+ /// BestForm - Choose the best form for data.
+ ///
+ dwarf::Form BestForm() const {
+ if ((unsigned char)Size == Size)
+ return dwarf::DW_FORM_block1;
+ if ((unsigned short)Size == Size)
+ return dwarf::DW_FORM_block2;
+ if ((unsigned int)Size == Size)
+ return dwarf::DW_FORM_block4;
+ return dwarf::DW_FORM_block;
+ }
+
+ void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
+ unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
+
+ void print(raw_ostream &O) const;
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/DIEValue.def b/contrib/llvm/include/llvm/CodeGen/DIEValue.def
new file mode 100644
index 0000000..2cfae7b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DIEValue.def
@@ -0,0 +1,47 @@
+//===- llvm/CodeGen/DIEValue.def - DIEValue types ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through all types of DIEValue.
+//
+//===----------------------------------------------------------------------===//
+
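+// A hedged usage note: as with other X-macro .def files, a client defines one
+// of the handler macros and then includes this file; e.g. DIEValue::Type in
+// DIE.h stamps out one enumerator per value type:
+//
+//   #define HANDLE_DIEVALUE(T) is##T,
+//   #include "llvm/CodeGen/DIEValue.def"
+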
+#if !(defined HANDLE_DIEVALUE || defined HANDLE_DIEVALUE_SMALL || \
+ defined HANDLE_DIEVALUE_LARGE)
+#error "Missing macro definition of HANDLE_DIEVALUE"
+#endif
+
+// Handler for all values.
+#ifndef HANDLE_DIEVALUE
+#define HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for small values.
+#ifndef HANDLE_DIEVALUE_SMALL
+#define HANDLE_DIEVALUE_SMALL(T) HANDLE_DIEVALUE(T)
+#endif
+
+// Handler for large values.
+#ifndef HANDLE_DIEVALUE_LARGE
+#define HANDLE_DIEVALUE_LARGE(T) HANDLE_DIEVALUE(T)
+#endif
+
+HANDLE_DIEVALUE_SMALL(Integer)
+HANDLE_DIEVALUE_SMALL(String)
+HANDLE_DIEVALUE_SMALL(Expr)
+HANDLE_DIEVALUE_SMALL(Label)
+HANDLE_DIEVALUE_LARGE(Delta)
+HANDLE_DIEVALUE_SMALL(Entry)
+HANDLE_DIEVALUE_SMALL(TypeSignature)
+HANDLE_DIEVALUE_LARGE(Block)
+HANDLE_DIEVALUE_LARGE(Loc)
+HANDLE_DIEVALUE_SMALL(LocList)
+
+#undef HANDLE_DIEVALUE
+#undef HANDLE_DIEVALUE_SMALL
+#undef HANDLE_DIEVALUE_LARGE
diff --git a/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h b/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h
new file mode 100644
index 0000000..fc2b5dd
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/DwarfStringPoolEntry.h
@@ -0,0 +1,51 @@
+//===- llvm/CodeGen/DwarfStringPoolEntry.h - String pool entry --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+#define LLVM_CODEGEN_DWARFSTRINGPOOLENTRY_H
+
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// Data for a string pool entry.
+struct DwarfStringPoolEntry {
+ MCSymbol *Symbol;
+ unsigned Offset;
+ unsigned Index;
+};
+
+/// String pool entry reference.
+struct DwarfStringPoolEntryRef {
+ const StringMapEntry<DwarfStringPoolEntry> *I = nullptr;
+
+public:
+ DwarfStringPoolEntryRef() = default;
+ explicit DwarfStringPoolEntryRef(
+ const StringMapEntry<DwarfStringPoolEntry> &I)
+ : I(&I) {}
+
+ explicit operator bool() const { return I; }
+ MCSymbol *getSymbol() const {
+ assert(I->second.Symbol && "No symbol available!");
+ return I->second.Symbol;
+ }
+ unsigned getOffset() const { return I->second.Offset; }
+ unsigned getIndex() const { return I->second.Index; }
+ StringRef getString() const { return I->first(); }
+
+ bool operator==(const DwarfStringPoolEntryRef &X) const { return I == X.I; }
+ bool operator!=(const DwarfStringPoolEntryRef &X) const { return I != X.I; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
new file mode 100644
index 0000000..c31fad2
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/EdgeBundles.h
@@ -0,0 +1,64 @@
+//===-------- EdgeBundles.h - Bundles of CFG edges --------------*- c++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The EdgeBundles analysis forms equivalence classes of CFG edges such that
+// all edges leaving a machine basic block are in the same bundle, and all
+// edges entering a machine basic block are in the same bundle.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_EDGEBUNDLES_H
+#define LLVM_CODEGEN_EDGEBUNDLES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class EdgeBundles : public MachineFunctionPass {
+ const MachineFunction *MF;
+
+ /// EC - Each edge bundle is an equivalence class. The keys are:
+ /// 2*BB->getNumber() -> Ingoing bundle.
+ /// 2*BB->getNumber()+1 -> Outgoing bundle.
+ IntEqClasses EC;
+
+ /// Blocks - Map each bundle to a list of basic block numbers.
+ SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
+
+public:
+ static char ID;
+ EdgeBundles() : MachineFunctionPass(ID) {}
+
+  /// getBundle - Return the ingoing (Out = false) or outgoing (Out = true)
+  /// bundle number for basic block #N.
+ unsigned getBundle(unsigned N, bool Out) const { return EC[2 * N + Out]; }
+
+ /// getNumBundles - Return the total number of bundles in the CFG.
+ unsigned getNumBundles() const { return EC.getNumClasses(); }
+
+ /// getBlocks - Return an array of blocks that are connected to Bundle.
+ ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
+
+ /// getMachineFunction - Return the last machine function computed.
+ const MachineFunction *getMachineFunction() const { return MF; }
+
+ /// view - Visualize the annotated bipartite CFG with Graphviz.
+ void view() const;
+
+private:
+ bool runOnMachineFunction(MachineFunction&) override;
+ void getAnalysisUsage(AnalysisUsage&) const override;
+};
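+
+// A hedged query sketch (illustrative only): a machine pass that required
+// this analysis in getAnalysisUsage can ask for the bundles of a block MBB:
+//
+//   EdgeBundles &EB = getAnalysis<EdgeBundles>();
+//   unsigned Ins  = EB.getBundle(MBB->getNumber(), /*Out=*/false);
+//   unsigned Outs = EB.getBundle(MBB->getNumber(), /*Out=*/true);
+//   for (unsigned BlockNum : EB.getBlocks(Outs))
+//     ; // Visit the blocks connected to the outgoing bundle.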
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/FastISel.h b/contrib/llvm/include/llvm/CodeGen/FastISel.h
new file mode 100644
index 0000000..cc4e370
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/FastISel.h
@@ -0,0 +1,579 @@
+//===-- FastISel.h - Definition of the FastISel class ---*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the FastISel class.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FASTISEL_H
+#define LLVM_CODEGEN_FASTISEL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+
+/// \brief This is a fast-path instruction selection class that generates poor
+/// code and doesn't support illegal types or non-trivial lowering, but runs
+/// quickly.
+class FastISel {
+public:
+ struct ArgListEntry {
+ Value *Val;
+ Type *Ty;
+ bool IsSExt : 1;
+ bool IsZExt : 1;
+ bool IsInReg : 1;
+ bool IsSRet : 1;
+ bool IsNest : 1;
+ bool IsByVal : 1;
+ bool IsInAlloca : 1;
+ bool IsReturned : 1;
+ uint16_t Alignment;
+
+ ArgListEntry()
+ : Val(nullptr), Ty(nullptr), IsSExt(false), IsZExt(false),
+ IsInReg(false), IsSRet(false), IsNest(false), IsByVal(false),
+ IsInAlloca(false), IsReturned(false), Alignment(0) {}
+
+ /// \brief Set CallLoweringInfo attribute flags based on a call instruction
+ /// and called function attributes.
+ void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
+ };
+ typedef std::vector<ArgListEntry> ArgListTy;
+
+ struct CallLoweringInfo {
+ Type *RetTy;
+ bool RetSExt : 1;
+ bool RetZExt : 1;
+ bool IsVarArg : 1;
+ bool IsInReg : 1;
+ bool DoesNotReturn : 1;
+ bool IsReturnValueUsed : 1;
+
+    // \brief IsTailCall should be modified by implementations of
+    // fastLowerCall that perform tail call conversions.
+ bool IsTailCall;
+
+ unsigned NumFixedArgs;
+ CallingConv::ID CallConv;
+ const Value *Callee;
+ MCSymbol *Symbol;
+ ArgListTy Args;
+ ImmutableCallSite *CS;
+ MachineInstr *Call;
+ unsigned ResultReg;
+ unsigned NumResultRegs;
+
+ bool IsPatchPoint;
+
+ SmallVector<Value *, 16> OutVals;
+ SmallVector<ISD::ArgFlagsTy, 16> OutFlags;
+ SmallVector<unsigned, 16> OutRegs;
+ SmallVector<ISD::InputArg, 4> Ins;
+ SmallVector<unsigned, 4> InRegs;
+
+ CallLoweringInfo()
+ : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
+ IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
+ IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
+ Callee(nullptr), Symbol(nullptr), CS(nullptr), Call(nullptr),
+ ResultReg(0), NumResultRegs(0), IsPatchPoint(false) {}
+
+ CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+ const Value *Target, ArgListTy &&ArgsList,
+ ImmutableCallSite &Call) {
+ RetTy = ResultTy;
+ Callee = Target;
+
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
+ IsVarArg = FuncTy->isVarArg();
+ IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
+
+ CallConv = Call.getCallingConv();
+ Args = std::move(ArgsList);
+ NumFixedArgs = FuncTy->getNumParams();
+
+ CS = &Call;
+
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
+ MCSymbol *Target, ArgListTy &&ArgsList,
+ ImmutableCallSite &Call,
+ unsigned FixedArgs = ~0U) {
+ RetTy = ResultTy;
+ Callee = Call.getCalledValue();
+ Symbol = Target;
+
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
+ IsVarArg = FuncTy->isVarArg();
+ IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
+
+ CallConv = Call.getCallingConv();
+ Args = std::move(ArgsList);
+ NumFixedArgs = (FixedArgs == ~0U) ? FuncTy->getNumParams() : FixedArgs;
+
+ CS = &Call;
+
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+ const Value *Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = ~0U) {
+ RetTy = ResultTy;
+ Callee = Target;
+ CallConv = CC;
+ Args = std::move(ArgsList);
+ NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
+ CallingConv::ID CC, Type *ResultTy,
+ const char *Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = ~0U);
+
+ CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
+ MCSymbol *Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = ~0U) {
+ RetTy = ResultTy;
+ Symbol = Target;
+ CallConv = CC;
+ Args = std::move(ArgsList);
+ NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
+ return *this;
+ }
+
+ CallLoweringInfo &setTailCall(bool Value = true) {
+ IsTailCall = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+ IsPatchPoint = Value;
+ return *this;
+ }
+
+ ArgListTy &getArgs() { return Args; }
+
+ void clearOuts() {
+ OutVals.clear();
+ OutFlags.clear();
+ OutRegs.clear();
+ }
+
+ void clearIns() {
+ Ins.clear();
+ InRegs.clear();
+ }
+ };
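+
+  // A hedged usage sketch (illustrative only): within a FastISel subclass, a
+  // CallLoweringInfo is filled in with the fluent setters above and handed to
+  // lowerCallTo. `RetTy`, `FTy`, `Callee`, `Args`, and `CS` are assumed names.
+  //
+  //   CallLoweringInfo CLI;
+  //   CLI.setCallee(RetTy, FTy, Callee, std::move(Args), CS)
+  //      .setTailCall(true);
+  //   if (!lowerCallTo(CLI))
+  //     return false; // Fall back to SelectionDAG.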
+
+protected:
+ DenseMap<const Value *, unsigned> LocalValueMap;
+ FunctionLoweringInfo &FuncInfo;
+ MachineFunction *MF;
+ MachineRegisterInfo &MRI;
+ MachineFrameInfo &MFI;
+ MachineConstantPool &MCP;
+ DebugLoc DbgLoc;
+ const TargetMachine &TM;
+ const DataLayout &DL;
+ const TargetInstrInfo &TII;
+ const TargetLowering &TLI;
+ const TargetRegisterInfo &TRI;
+ const TargetLibraryInfo *LibInfo;
+ bool SkipTargetIndependentISel;
+
+ /// \brief The position of the last instruction for materializing constants
+ /// for use in the current block. It resets to EmitStartPt when it makes sense
+ /// (for example, it's usually profitable to avoid function calls between the
+  /// definition and the use).
+ MachineInstr *LastLocalValue;
+
+  /// \brief The topmost instruction in the current block that is allowed for
+  /// emitting local variables. LastLocalValue resets to EmitStartPt when it
+  /// makes sense (for example, on function calls).
+ MachineInstr *EmitStartPt;
+
+public:
+ /// \brief Return the position of the last instruction emitted for
+ /// materializing constants for use in the current block.
+ MachineInstr *getLastLocalValue() { return LastLocalValue; }
+
+ /// \brief Update the position of the last instruction emitted for
+ /// materializing constants for use in the current block.
+ void setLastLocalValue(MachineInstr *I) {
+ EmitStartPt = I;
+ LastLocalValue = I;
+ }
+
+ /// \brief Set the current block to which generated machine instructions will
+ /// be appended, and clear the local CSE map.
+ void startNewBlock();
+
+ /// \brief Return current debug location information.
+ DebugLoc getCurDebugLoc() const { return DbgLoc; }
+
+ /// \brief Do "fast" instruction selection for function arguments and append
+ /// the machine instructions to the current block. Returns true when
+ /// successful.
+ bool lowerArguments();
+
+ /// \brief Do "fast" instruction selection for the given LLVM IR instruction
+ /// and append the generated machine instructions to the current block.
+ /// Returns true if selection was successful.
+ bool selectInstruction(const Instruction *I);
+
+ /// \brief Do "fast" instruction selection for the given LLVM IR operator
+ /// (Instruction or ConstantExpr), and append generated machine instructions
+ /// to the current block. Return true if selection was successful.
+ bool selectOperator(const User *I, unsigned Opcode);
+
+ /// \brief Create a virtual register and arrange for it to be assigned the
+ /// value for the given LLVM value.
+ unsigned getRegForValue(const Value *V);
+
+  /// \brief Look up the value to see if it is already cached in a
+ /// register. It may be defined by instructions across blocks or defined
+ /// locally.
+ unsigned lookUpRegForValue(const Value *V);
+
+ /// \brief This is a wrapper around getRegForValue that also takes care of
+ /// truncating or sign-extending the given getelementptr index value.
+ std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
+
+ /// \brief We're checking to see if we can fold \p LI into \p FoldInst. Note
+ /// that we could have a sequence where multiple LLVM IR instructions are
+ /// folded into the same machineinstr. For example we could have:
+ ///
+ /// A: x = load i32 *P
+ /// B: y = icmp A, 42
+ /// C: br y, ...
+ ///
+ /// In this scenario, \p LI is "A", and \p FoldInst is "C". We know about "B"
+ /// (and any other folded instructions) because it is between A and C.
+ ///
+ /// If we succeed folding, return true.
+ bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
+
+ /// \brief The specified machine instr operand is a vreg, and that vreg is
+ /// being provided by the specified load instruction. If possible, try to
+  /// fold the load as an operand to the instruction, returning true on
+  /// success.
+ ///
+ /// This method should be implemented by targets.
+ virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
+ const LoadInst * /*LI*/) {
+ return false;
+ }
+
+ /// \brief Reset InsertPt to prepare for inserting instructions into the
+ /// current block.
+ void recomputeInsertPt();
+
+  /// \brief Remove all dead instructions between I and E.
+ void removeDeadCode(MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator E);
+
+ struct SavePoint {
+ MachineBasicBlock::iterator InsertPt;
+ DebugLoc DL;
+ };
+
+ /// \brief Prepare InsertPt to begin inserting instructions into the local
+ /// value area and return the old insert position.
+ SavePoint enterLocalValueArea();
+
+ /// \brief Reset InsertPt to the given old insert position.
+ void leaveLocalValueArea(SavePoint Old);
+
+ virtual ~FastISel();
+
+protected:
+ explicit FastISel(FunctionLoweringInfo &FuncInfo,
+ const TargetLibraryInfo *LibInfo,
+ bool SkipTargetIndependentISel = false);
+
+ /// \brief This method is called by target-independent code when the normal
+ /// FastISel process fails to select an instruction. This gives targets a
+ /// chance to emit code for anything that doesn't fit into FastISel's
+ /// framework. It returns true if it was successful.
+ virtual bool fastSelectInstruction(const Instruction *I) = 0;
+
+ /// \brief This method is called by target-independent code to do target-
+ /// specific argument lowering. It returns true if it was successful.
+ virtual bool fastLowerArguments();
+
+ /// \brief This method is called by target-independent code to do target-
+ /// specific call lowering. It returns true if it was successful.
+ virtual bool fastLowerCall(CallLoweringInfo &CLI);
+
+ /// \brief This method is called by target-independent code to do target-
+ /// specific intrinsic lowering. It returns true if it was successful.
+ virtual bool fastLowerIntrinsicCall(const IntrinsicInst *II);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type and opcode be emitted.
+ virtual unsigned fastEmit_(MVT VT, MVT RetVT, unsigned Opcode);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and register operand be emitted.
+ virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ bool Op0IsKill);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and register operands be emitted.
+ virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and register and immediate
+  /// operands be emitted.
+ virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ bool Op0IsKill, uint64_t Imm);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and register and floating-point
+ /// immediate operands be emitted.
+ virtual unsigned fastEmit_rf(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
+ bool Op0IsKill, const ConstantFP *FPImm);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and register and immediate
+ /// operands be emitted.
+ virtual unsigned fastEmit_rri(MVT VT, MVT RetVT, unsigned Opcode,
+ unsigned Op0, bool Op0IsKill, unsigned Op1,
+ bool Op1IsKill, uint64_t Imm);
+
+ /// \brief This method is a wrapper of fastEmit_ri.
+ ///
+ /// It first tries to emit an instruction with an immediate operand using
+ /// fastEmit_ri. If that fails, it materializes the immediate into a register
+  /// and tries fastEmit_rr instead.
+ unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
+ uint64_t Imm, MVT ImmType);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and immediate operand be emitted.
+ virtual unsigned fastEmit_i(MVT VT, MVT RetVT, unsigned Opcode, uint64_t Imm);
+
+ /// \brief This method is called by target-independent code to request that an
+ /// instruction with the given type, opcode, and floating-point immediate
+ /// operand be emitted.
+ virtual unsigned fastEmit_f(MVT VT, MVT RetVT, unsigned Opcode,
+ const ConstantFP *FPImm);
+
+ /// \brief Emit a MachineInstr with no operands and a result register in the
+ /// given register class.
+ unsigned fastEmitInst_(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC);
+
+ /// \brief Emit a MachineInstr with one register operand and a result register
+ /// in the given register class.
+ unsigned fastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill);
+
+ /// \brief Emit a MachineInstr with two register operands and a result
+ /// register in the given register class.
+ unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill, unsigned Op1, bool Op1IsKill);
+
+ /// \brief Emit a MachineInstr with three register operands and a result
+ /// register in the given register class.
+ unsigned fastEmitInst_rrr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+ unsigned Op2, bool Op2IsKill);
+
+ /// \brief Emit a MachineInstr with a register operand, an immediate, and a
+ /// result register in the given register class.
+ unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill, uint64_t Imm);
+
+ /// \brief Emit a MachineInstr with one register operand and two immediate
+ /// operands.
+ unsigned fastEmitInst_rii(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill, uint64_t Imm1, uint64_t Imm2);
+
+ /// \brief Emit a MachineInstr with a floating point immediate, and a result
+ /// register in the given register class.
+ unsigned fastEmitInst_f(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ const ConstantFP *FPImm);
+
+ /// \brief Emit a MachineInstr with two register operands, an immediate, and a
+ /// result register in the given register class.
+ unsigned fastEmitInst_rri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC, unsigned Op0,
+ bool Op0IsKill, unsigned Op1, bool Op1IsKill,
+ uint64_t Imm);
+
+ /// \brief Emit a MachineInstr with a single immediate operand, and a result
+ /// register in the given register class.
+ unsigned fastEmitInst_i(unsigned MachineInstrOpcode,
+ const TargetRegisterClass *RC, uint64_t Imm);
+
+ /// \brief Emit a MachineInstr for an extract_subreg from a specified index of
+ /// a superregister to a specified type.
+ unsigned fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0, bool Op0IsKill,
+ uint32_t Idx);
+
+ /// \brief Emit MachineInstrs to compute the value of Op with all but the
+ /// least significant bit set to zero.
+ unsigned fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill);
+
+ /// \brief Emit an unconditional branch to the given block, unless it is the
+ /// immediate (fall-through) successor, and update the CFG.
+ void fastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
+
+  /// Emit an unconditional branch to \p FalseMBB, obtain the branch weight,
+  /// and add TrueMBB and FalseMBB to the successor list.
+ void finishCondBranch(const BasicBlock *BranchBB, MachineBasicBlock *TrueMBB,
+ MachineBasicBlock *FalseMBB);
+
+ /// \brief Update the value map to include the new mapping for this
+  /// instruction, or insert an extra copy to get the result in a previously
+ /// determined register.
+ ///
+ /// NOTE: This is only necessary because we might select a block that uses a
+ /// value before we select the block that defines the value. It might be
+ /// possible to fix this by selecting blocks in reverse postorder.
+ void updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs = 1);
+
+ unsigned createResultReg(const TargetRegisterClass *RC);
+
+ /// \brief Try to constrain Op so that it is usable by argument OpNum of the
+ /// provided MCInstrDesc. If this fails, create a new virtual register in the
+ /// correct class and COPY the value there.
+ unsigned constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
+ unsigned OpNum);
+
+ /// \brief Emit a constant in a register using target-specific logic, such as
+ /// constant pool loads.
+ virtual unsigned fastMaterializeConstant(const Constant *C) { return 0; }
+
+ /// \brief Emit an alloca address in a register using target-specific logic.
+ virtual unsigned fastMaterializeAlloca(const AllocaInst *C) { return 0; }
+
+ /// \brief Emit the floating-point constant +0.0 in a register using target-
+ /// specific logic.
+ virtual unsigned fastMaterializeFloatZero(const ConstantFP *CF) {
+ return 0;
+ }
+
+ /// \brief Check if \c Add is an add that can be safely folded into \c GEP.
+ ///
+ /// \c Add can be folded into \c GEP if:
+ /// - \c Add is an add,
+ /// - \c Add's size matches \c GEP's,
+ /// - \c Add is in the same basic block as \c GEP, and
+ /// - \c Add has a constant operand.
+ bool canFoldAddIntoGEP(const User *GEP, const Value *Add);
+
+ /// \brief Test whether the given value has exactly one use.
+ bool hasTrivialKill(const Value *V);
+
+ /// \brief Create a machine mem operand from the given instruction.
+ MachineMemOperand *createMachineMemOperandFor(const Instruction *I) const;
+
+ CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
+
+ bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
+ bool lowerCallTo(const CallInst *CI, const char *SymbolName,
+ unsigned NumArgs);
+ bool lowerCallTo(CallLoweringInfo &CLI);
+
+ bool isCommutativeIntrinsic(IntrinsicInst const *II) {
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::sadd_with_overflow:
+ case Intrinsic::uadd_with_overflow:
+ case Intrinsic::smul_with_overflow:
+ case Intrinsic::umul_with_overflow:
+ return true;
+ default:
+ return false;
+ }
+ }
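+
+  // Illustrative use (hypothetical): a caller might canonicalize a
+  // commutative intrinsic so a constant operand ends up on the right, e.g.
+  //   if (isCommutativeIntrinsic(II) && isa<ConstantInt>(II->getArgOperand(0)))
+  //     std::swap(LHS, RHS); // LHS/RHS are the caller's operand values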
+
+ bool lowerCall(const CallInst *I);
+ /// \brief Select and emit code for a binary operator instruction, which has
+ /// an opcode which directly corresponds to the given ISD opcode.
+ bool selectBinaryOp(const User *I, unsigned ISDOpcode);
+ bool selectFNeg(const User *I);
+ bool selectGetElementPtr(const User *I);
+ bool selectStackmap(const CallInst *I);
+ bool selectPatchpoint(const CallInst *I);
+ bool selectCall(const User *Call);
+ bool selectIntrinsicCall(const IntrinsicInst *II);
+ bool selectBitCast(const User *I);
+ bool selectCast(const User *I, unsigned Opcode);
+ bool selectExtractValue(const User *I);
+ bool selectInsertValue(const User *I);
+
+private:
+ /// \brief Handle PHI nodes in successor blocks.
+ ///
+ /// Emit code to ensure constants are copied into registers when needed.
+ /// Remember the virtual registers that need to be added to the Machine PHI
+ /// nodes as input. We cannot just directly add them, because expansion might
+ /// result in multiple MBB's for one BB. As such, the start of the BB might
+ /// correspond to a different MBB than the end.
+ bool handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
+
+ /// \brief Helper for materializeRegForValue to materialize a constant in a
+ /// target-independent way.
+ unsigned materializeConstant(const Value *V, MVT VT);
+
+  /// \brief Helper for getRegForValue. This function is called when the value
+ /// isn't already available in a register and must be materialized with new
+ /// instructions.
+ unsigned materializeRegForValue(const Value *V, MVT VT);
+
+ /// \brief Clears LocalValueMap and moves the area for the new local variables
+ /// to the beginning of the block. It helps to avoid spilling cached variables
+ /// across heavy instructions like calls.
+ void flushLocalValueMap();
+
+  /// \brief Removes dead local value instructions after SavedLastLocalValue.
+ void removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue);
+
+ /// \brief Insertion point before trying to select the current instruction.
+ MachineBasicBlock::iterator SavedInsertPt;
+
+ /// \brief Add a stackmap or patchpoint intrinsic call's live variable
+ /// operands to a stackmap or patchpoint machine instruction.
+ bool addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
+ const CallInst *CI, unsigned StartIdx);
+ bool lowerCallOperands(const CallInst *CI, unsigned ArgIdx, unsigned NumArgs,
+ const Value *Callee, bool ForceRetVoidTy,
+ CallLoweringInfo &CLI);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/FaultMaps.h b/contrib/llvm/include/llvm/CodeGen/FaultMaps.h
new file mode 100644
index 0000000..f4b6463
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/FaultMaps.h
@@ -0,0 +1,220 @@
+//===------------------- FaultMaps.h - The "FaultMaps" section --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FAULTMAPS_H
+#define LLVM_CODEGEN_FAULTMAPS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Format.h"
+
+#include <vector>
+#include <map>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class MCSymbol;
+class MCStreamer;
+
+class FaultMaps {
+public:
+ enum FaultKind { FaultingLoad = 1, FaultKindMax };
+
+ static const char *faultTypeToString(FaultKind);
+
+ explicit FaultMaps(AsmPrinter &AP);
+
+ void recordFaultingOp(FaultKind FaultTy, const MCSymbol *HandlerLabel);
+ void serializeToFaultMapSection();
+
+private:
+ static const char *WFMP;
+
+ struct FaultInfo {
+ FaultKind Kind;
+ const MCExpr *FaultingOffsetExpr;
+ const MCExpr *HandlerOffsetExpr;
+
+ FaultInfo()
+ : Kind(FaultKindMax), FaultingOffsetExpr(nullptr),
+ HandlerOffsetExpr(nullptr) {}
+
+ explicit FaultInfo(FaultMaps::FaultKind Kind, const MCExpr *FaultingOffset,
+ const MCExpr *HandlerOffset)
+ : Kind(Kind), FaultingOffsetExpr(FaultingOffset),
+ HandlerOffsetExpr(HandlerOffset) {}
+ };
+
+ typedef std::vector<FaultInfo> FunctionFaultInfos;
+
+ // We'd like to keep a stable iteration order for FunctionInfos to help
+ // FileCheck based testing.
+ struct MCSymbolComparator {
+ bool operator()(const MCSymbol *LHS, const MCSymbol *RHS) const {
+ return LHS->getName() < RHS->getName();
+ }
+ };
+
+ std::map<const MCSymbol *, FunctionFaultInfos, MCSymbolComparator>
+ FunctionInfos;
+ AsmPrinter &AP;
+
+ void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
+};
+
+/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
+/// above. This parser is version locked with the __llvm_faultmaps section
+/// generated by the version of LLVM that includes it. No guarantees are made
+/// with respect to forward or backward compatibility.
+class FaultMapParser {
+ typedef uint8_t FaultMapVersionType;
+ static const size_t FaultMapVersionOffset = 0;
+
+ typedef uint8_t Reserved0Type;
+ static const size_t Reserved0Offset =
+ FaultMapVersionOffset + sizeof(FaultMapVersionType);
+
+ typedef uint16_t Reserved1Type;
+ static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
+
+ typedef uint32_t NumFunctionsType;
+ static const size_t NumFunctionsOffset =
+ Reserved1Offset + sizeof(Reserved1Type);
+
+ static const size_t FunctionInfosOffset =
+ NumFunctionsOffset + sizeof(NumFunctionsType);
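+
+  // Overall layout implied by the offsets above (fields are little-endian;
+  // illustrative summary):
+  //   uint8  FaultMapVersion
+  //   uint8  Reserved0
+  //   uint16 Reserved1
+  //   uint32 NumFunctions
+  //   FunctionInfo[NumFunctions] (variable-size records; see the accessors)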
+
+ const uint8_t *P;
+ const uint8_t *E;
+
+ template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
+ assert(P + sizeof(T) <= E && "out of bounds read!");
+ return support::endian::read<T, support::little, 1>(P);
+ }
+
+public:
+ class FunctionFaultInfoAccessor {
+ typedef uint32_t FaultKindType;
+ static const size_t FaultKindOffset = 0;
+
+ typedef uint32_t FaultingPCOffsetType;
+ static const size_t FaultingPCOffsetOffset =
+ FaultKindOffset + sizeof(FaultKindType);
+
+ typedef uint32_t HandlerPCOffsetType;
+ static const size_t HandlerPCOffsetOffset =
+ FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
+
+ const uint8_t *P;
+ const uint8_t *E;
+
+ public:
+ static const size_t Size =
+ HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);
+
+ explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
+ : P(P), E(E) {}
+
+ FaultKindType getFaultKind() const {
+ return read<FaultKindType>(P + FaultKindOffset, E);
+ }
+
+ FaultingPCOffsetType getFaultingPCOffset() const {
+ return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
+ }
+
+ HandlerPCOffsetType getHandlerPCOffset() const {
+ return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
+ }
+ };
+
+ class FunctionInfoAccessor {
+ typedef uint64_t FunctionAddrType;
+ static const size_t FunctionAddrOffset = 0;
+
+ typedef uint32_t NumFaultingPCsType;
+ static const size_t NumFaultingPCsOffset =
+ FunctionAddrOffset + sizeof(FunctionAddrType);
+
+ typedef uint32_t ReservedType;
+ static const size_t ReservedOffset =
+ NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
+
+ static const size_t FunctionFaultInfosOffset =
+ ReservedOffset + sizeof(ReservedType);
+
+ static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
+
+ const uint8_t *P;
+ const uint8_t *E;
+
+ public:
+ FunctionInfoAccessor() : P(nullptr), E(nullptr) {}
+
+ explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
+ : P(P), E(E) {}
+
+ FunctionAddrType getFunctionAddr() const {
+ return read<FunctionAddrType>(P + FunctionAddrOffset, E);
+ }
+
+ NumFaultingPCsType getNumFaultingPCs() const {
+ return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
+ }
+
+ FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
+ assert(Index < getNumFaultingPCs() && "index out of bounds!");
+ const uint8_t *Begin = P + FunctionFaultInfosOffset +
+ FunctionFaultInfoAccessor::Size * Index;
+ return FunctionFaultInfoAccessor(Begin, E);
+ }
+
+ FunctionInfoAccessor getNextFunctionInfo() const {
+ size_t MySize = FunctionInfoHeaderSize +
+ getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;
+
+ const uint8_t *Begin = P + MySize;
+ assert(Begin < E && "out of bounds!");
+ return FunctionInfoAccessor(Begin, E);
+ }
+ };
+
+ explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
+ : P(Begin), E(End) {}
+
+ FaultMapVersionType getFaultMapVersion() const {
+ auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
+ assert(Version == 1 && "only version 1 supported!");
+ return Version;
+ }
+
+ NumFunctionsType getNumFunctions() const {
+ return read<NumFunctionsType>(P + NumFunctionsOffset, E);
+ }
+
+ FunctionInfoAccessor getFirstFunctionInfo() const {
+ const uint8_t *Begin = P + FunctionInfosOffset;
+ return FunctionInfoAccessor(Begin, E);
+ }
+};
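+
+// Illustrative usage sketch (Start and End are hypothetical pointers bounding
+// an in-memory copy of the __llvm_faultmaps section):
+//   FaultMapParser FMP(Start, End);
+//   auto FI = FMP.getFirstFunctionInfo();
+//   for (uint32_t I = 0, E = FMP.getNumFunctions(); I != E; ++I) {
+//     for (uint32_t J = 0; J < FI.getNumFaultingPCs(); ++J)
+//       visit(FI.getFunctionFaultInfoAt(J)); // visit is hypothetical
+//     if (I + 1 != E)
+//       FI = FI.getNextFunctionInfo();
+//   }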
+
+raw_ostream &
+operator<<(raw_ostream &OS, const FaultMapParser::FunctionFaultInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const FaultMapParser::FunctionInfoAccessor &);
+
+raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
new file mode 100644
index 0000000..09a9991
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -0,0 +1,262 @@
+//===-- FunctionLoweringInfo.h - Lower functions from LLVM IR to CodeGen --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This implements routines for translating functions from LLVM IR into
+// Machine IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+#define LLVM_CODEGEN_FUNCTIONLOWERINGINFO_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <vector>
+
+namespace llvm {
+
+class AllocaInst;
+class BasicBlock;
+class BranchProbabilityInfo;
+class CallInst;
+class Function;
+class GlobalVariable;
+class Instruction;
+class MachineInstr;
+class MachineBasicBlock;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineRegisterInfo;
+class SelectionDAG;
+class MVT;
+class TargetLowering;
+class Value;
+
+//===--------------------------------------------------------------------===//
+/// FunctionLoweringInfo - This contains information that is global to a
+/// function that is used when lowering a region of the function.
+///
+class FunctionLoweringInfo {
+public:
+ const Function *Fn;
+ MachineFunction *MF;
+ const TargetLowering *TLI;
+ MachineRegisterInfo *RegInfo;
+ BranchProbabilityInfo *BPI;
+ /// CanLowerReturn - true iff the function's return value can be lowered to
+ /// registers.
+ bool CanLowerReturn;
+
+ /// True if part of the CSRs will be handled via explicit copies.
+ bool SplitCSR;
+
+ /// DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg
+ /// allocated to hold a pointer to the hidden sret parameter.
+ unsigned DemoteRegister;
+
+ /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
+ DenseMap<const BasicBlock*, MachineBasicBlock *> MBBMap;
+
+ /// ValueMap - Since we emit code for the function a basic block at a time,
+  /// we must remember which virtual registers hold cross-basic-block values.
+ DenseMap<const Value *, unsigned> ValueMap;
+
+ /// Track virtual registers created for exception pointers.
+ DenseMap<const Value *, unsigned> CatchPadExceptionPointers;
+
+ // Keep track of frame indices allocated for statepoints as they could be used
+ // across basic block boundaries.
+  // The key of the map is the statepoint instruction; the value is a map from
+  // a spilled llvm Value to the optional stack slot index.
+  // If the optional is unspecified, it means that we have visited this value
+  // but didn't spill it.
+ typedef DenseMap<const Value*, Optional<int>> StatepointSpilledValueMapTy;
+ DenseMap<const Instruction*, StatepointSpilledValueMapTy>
+ StatepointRelocatedValues;
+
+ /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
+ /// the entry block. This allows the allocas to be efficiently referenced
+ /// anywhere in the function.
+ DenseMap<const AllocaInst*, int> StaticAllocaMap;
+
+ /// ByValArgFrameIndexMap - Keep track of frame indices for byval arguments.
+ DenseMap<const Argument*, int> ByValArgFrameIndexMap;
+
+ /// ArgDbgValues - A list of DBG_VALUE instructions created during isel for
+ /// function arguments that are inserted after scheduling is completed.
+ SmallVector<MachineInstr*, 8> ArgDbgValues;
+
+ /// RegFixups - Registers which need to be replaced after isel is done.
+ DenseMap<unsigned, unsigned> RegFixups;
+
+ /// StatepointStackSlots - A list of temporary stack slots (frame indices)
+ /// used to spill values at a statepoint. We store them here to enable
+ /// reuse of the same stack slots across different statepoints in different
+ /// basic blocks.
+ SmallVector<unsigned, 50> StatepointStackSlots;
+
+ /// MBB - The current block.
+ MachineBasicBlock *MBB;
+
+  /// InsertPt - The current insert position inside the current block.
+ MachineBasicBlock::iterator InsertPt;
+
+ struct LiveOutInfo {
+ unsigned NumSignBits : 31;
+ bool IsValid : 1;
+ APInt KnownOne, KnownZero;
+ LiveOutInfo() : NumSignBits(0), IsValid(true), KnownOne(1, 0),
+ KnownZero(1, 0) {}
+ };
+
+ /// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
+ /// for a value.
+ DenseMap<const Value *, ISD::NodeType> PreferredExtendType;
+
+ /// VisitedBBs - The set of basic blocks visited thus far by instruction
+ /// selection.
+ SmallPtrSet<const BasicBlock*, 4> VisitedBBs;
+
+ /// PHINodesToUpdate - A list of phi instructions whose operand list will
+ /// be updated after processing the current basic block.
+ /// TODO: This isn't per-function state, it's per-basic-block state. But
+ /// there's no other convenient place for it to live right now.
+ std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;
+ unsigned OrigNumPHINodesToUpdate;
+
+ /// If the current MBB is a landing pad, the exception pointer and exception
+ /// selector registers are copied into these virtual registers by
+ /// SelectionDAGISel::PrepareEHLandingPad().
+ unsigned ExceptionPointerVirtReg, ExceptionSelectorVirtReg;
+
+ /// set - Initialize this FunctionLoweringInfo with the given Function
+ /// and its associated MachineFunction.
+ ///
+ void set(const Function &Fn, MachineFunction &MF, SelectionDAG *DAG);
+
+ /// clear - Clear out all the function-specific state. This returns this
+ /// FunctionLoweringInfo to an empty state, ready to be used for a
+ /// different function.
+ void clear();
+
+ /// isExportedInst - Return true if the specified value is an instruction
+ /// exported from its block.
+ bool isExportedInst(const Value *V) {
+ return ValueMap.count(V);
+ }
+
+ unsigned CreateReg(MVT VT);
+
+ unsigned CreateRegs(Type *Ty);
+
+ unsigned InitializeRegForValue(const Value *V) {
+ // Tokens never live in vregs.
+ if (V->getType()->isTokenTy())
+ return 0;
+ unsigned &R = ValueMap[V];
+ assert(R == 0 && "Already initialized this value register!");
+ return R = CreateRegs(V->getType());
+ }
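+
+  // Illustrative (hypothetical caller): instruction selection typically
+  // creates the vreg for a cross-block value when its definition is visited:
+  //   unsigned Reg = FuncInfo->InitializeRegForValue(&I);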
+
+ /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+ /// register is a PHI destination and the PHI's LiveOutInfo is not valid.
+ const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg) {
+ if (!LiveOutRegInfo.inBounds(Reg))
+ return nullptr;
+
+ const LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
+ if (!LOI->IsValid)
+ return nullptr;
+
+ return LOI;
+ }
+
+ /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
+ /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
+ /// the register's LiveOutInfo is for a smaller bit width, it is extended to
+ /// the larger bit width by zero extension. The bit width must be no smaller
+ /// than the LiveOutInfo's existing bit width.
+ const LiveOutInfo *GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth);
+
+ /// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
+ void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
+ const APInt &KnownZero, const APInt &KnownOne) {
+ // Only install this information if it tells us something.
+ if (NumSignBits == 1 && KnownZero == 0 && KnownOne == 0)
+ return;
+
+ LiveOutRegInfo.grow(Reg);
+ LiveOutInfo &LOI = LiveOutRegInfo[Reg];
+ LOI.NumSignBits = NumSignBits;
+ LOI.KnownOne = KnownOne;
+ LOI.KnownZero = KnownZero;
+ }
+
+ /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
+ /// register based on the LiveOutInfo of its operands.
+ void ComputePHILiveOutRegInfo(const PHINode*);
+
+ /// InvalidatePHILiveOutRegInfo - Invalidates a PHI's LiveOutInfo, to be
+ /// called when a block is visited before all of its predecessors.
+ void InvalidatePHILiveOutRegInfo(const PHINode *PN) {
+ // PHIs with no uses have no ValueMap entry.
+ DenseMap<const Value*, unsigned>::const_iterator It = ValueMap.find(PN);
+ if (It == ValueMap.end())
+ return;
+
+ unsigned Reg = It->second;
+ if (Reg == 0)
+ return;
+
+ LiveOutRegInfo.grow(Reg);
+ LiveOutRegInfo[Reg].IsValid = false;
+ }
+
+ /// setArgumentFrameIndex - Record frame index for the byval
+ /// argument.
+ void setArgumentFrameIndex(const Argument *A, int FI);
+
+ /// getArgumentFrameIndex - Get frame index for the byval argument.
+ int getArgumentFrameIndex(const Argument *A);
+
+ unsigned getCatchPadExceptionPointerVReg(const Value *CPI,
+ const TargetRegisterClass *RC);
+
+private:
+ void addSEHHandlersForLPads(ArrayRef<const LandingPadInst *> LPads);
+
+ /// LiveOutRegInfo - Information about live out vregs.
+ IndexedMap<LiveOutInfo, VirtReg2IndexFunctor> LiveOutRegInfo;
+};
+
+/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
+/// being passed to this variadic function, and set the MachineModuleInfo's
+/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
+/// reference to _fltused on Windows, which will link in MSVCRT's
+/// floating-point support.
+void ComputeUsesVAFloatArgument(const CallInst &I, MachineModuleInfo *MMI);
+
+/// AddLandingPadInfo - Extract the exception handling information from the
+/// landingpad instruction and add them to the specified machine module info.
+void AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
+ MachineBasicBlock *MBB);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadata.h b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
new file mode 100644
index 0000000..163117b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadata.h
@@ -0,0 +1,206 @@
+//===-- GCMetadata.h - Garbage collector metadata ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
+// used as a communication channel from the target code generator to the target
+// garbage collectors. This interface allows code generators and garbage
+// collectors to be developed independently.
+//
+// The GCFunctionInfo class logs the data necessary to build a type-accurate
+// stack map. The code generator outputs:
+//
+// - Safe points as specified by the GCStrategy's NeededSafePoints.
+// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
+//
+// As a refinement, liveness analysis calculates the set of live roots at each
+// safe point. Liveness analysis is not presently performed by the code
+// generator, so all roots are assumed live.
+//
+// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
+// they are compiled. This accretion is necessary for collectors which must emit
+// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
+// outlives the MachineFunction from which it is derived and must not refer to
+// any code generator data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATA_H
+#define LLVM_CODEGEN_GCMETADATA_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Pass.h"
+#include <memory>
+
+namespace llvm {
+class AsmPrinter;
+class Constant;
+class MCSymbol;
+
+/// GCPoint - Metadata for a collector-safe point in machine code.
+///
+struct GCPoint {
+ GC::PointKind Kind; ///< The kind of the safe point.
+ MCSymbol *Label; ///< A label.
+ DebugLoc Loc;
+
+ GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
+ : Kind(K), Label(L), Loc(DL) {}
+};
+
+/// GCRoot - Metadata for a pointer to an object managed by the garbage
+/// collector.
+struct GCRoot {
+ int Num; ///< Usually a frame index.
+ int StackOffset; ///< Offset from the stack pointer.
+ const Constant *Metadata; ///< Metadata straight from the call
+ ///< to llvm.gcroot.
+
+ GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
+};
+
+/// Garbage collection metadata for a single function. Currently, this
+/// information only applies to GCStrategies which use GCRoot.
+class GCFunctionInfo {
+public:
+ typedef std::vector<GCPoint>::iterator iterator;
+ typedef std::vector<GCRoot>::iterator roots_iterator;
+ typedef std::vector<GCRoot>::const_iterator live_iterator;
+
+private:
+ const Function &F;
+ GCStrategy &S;
+ uint64_t FrameSize;
+ std::vector<GCRoot> Roots;
+ std::vector<GCPoint> SafePoints;
+
+ // FIXME: Liveness. A 2D BitVector, perhaps?
+ //
+ // BitVector Liveness;
+ //
+ // bool islive(int point, int root) =
+ // Liveness[point * SafePoints.size() + root]
+ //
+ // The bit vector is the more compact representation where >3.2% of roots
+ // are live per safe point (1.5% on 64-bit hosts).
+
+public:
+ GCFunctionInfo(const Function &F, GCStrategy &S);
+ ~GCFunctionInfo();
+
+ /// getFunction - Return the function to which this metadata applies.
+ ///
+ const Function &getFunction() const { return F; }
+
+ /// getStrategy - Return the GC strategy for the function.
+ ///
+ GCStrategy &getStrategy() { return S; }
+
+ /// addStackRoot - Registers a root that lives on the stack. Num is the
+ /// stack object ID for the alloca (if the code generator is
+  /// using MachineFrameInfo).
+ void addStackRoot(int Num, const Constant *Metadata) {
+ Roots.push_back(GCRoot(Num, Metadata));
+ }
+
+ /// removeStackRoot - Removes a root.
+ roots_iterator removeStackRoot(roots_iterator position) {
+ return Roots.erase(position);
+ }
+
+ /// addSafePoint - Notes the existence of a safe point. Num is the ID of the
+ /// label just prior to the safe point (if the code generator is using
+ /// MachineModuleInfo).
+ void addSafePoint(GC::PointKind Kind, MCSymbol *Label, DebugLoc DL) {
+ SafePoints.emplace_back(Kind, Label, DL);
+ }
+
+ /// getFrameSize/setFrameSize - Records the function's frame size.
+ ///
+ uint64_t getFrameSize() const { return FrameSize; }
+ void setFrameSize(uint64_t S) { FrameSize = S; }
+
+ /// begin/end - Iterators for safe points.
+ ///
+ iterator begin() { return SafePoints.begin(); }
+ iterator end() { return SafePoints.end(); }
+ size_t size() const { return SafePoints.size(); }
+
+ /// roots_begin/roots_end - Iterators for all roots in the function.
+ ///
+ roots_iterator roots_begin() { return Roots.begin(); }
+ roots_iterator roots_end() { return Roots.end(); }
+ size_t roots_size() const { return Roots.size(); }
+
+ /// live_begin/live_end - Iterators for live roots at a given safe point.
+ ///
+ live_iterator live_begin(const iterator &p) { return roots_begin(); }
+ live_iterator live_end(const iterator &p) { return roots_end(); }
+ size_t live_size(const iterator &p) const { return roots_size(); }
+};
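+
+// Illustrative sketch (hypothetical printer code): walking the recorded safe
+// points of a GCFunctionInfo &FI, and the roots reported live at each one:
+//   for (GCFunctionInfo::iterator PI = FI.begin(), PE = FI.end(); PI != PE;
+//        ++PI)
+//     for (GCFunctionInfo::live_iterator LI = FI.live_begin(PI),
+//          LE = FI.live_end(PI); LI != LE; ++LI)
+//       emitRootEntry(LI->StackOffset); // emitRootEntry is hypothetical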
+
+/// An analysis pass which caches information about the entire Module.
+/// Records both the function level information used by GCRoots and a
+/// cache of the 'active' gc strategy objects for the current Module.
+class GCModuleInfo : public ImmutablePass {
+ /// An owning list of all GCStrategies which have been created
+ SmallVector<std::unique_ptr<GCStrategy>, 1> GCStrategyList;
+ /// A helper map to speedup lookups into the above list
+ StringMap<GCStrategy*> GCStrategyMap;
+
+public:
+  /// Look up the GCStrategy object associated with the given gc name.
+  /// Objects are owned internally; no caller should attempt to delete the
+ /// returned objects.
+ GCStrategy *getGCStrategy(const StringRef Name);
+
+  /// List of per-function info objects. In theory, each of these
+ /// may be associated with a different GC.
+ typedef std::vector<std::unique_ptr<GCFunctionInfo>> FuncInfoVec;
+
+ FuncInfoVec::iterator funcinfo_begin() { return Functions.begin(); }
+ FuncInfoVec::iterator funcinfo_end() { return Functions.end(); }
+
+private:
+ /// Owning list of all GCFunctionInfos associated with this Module
+ FuncInfoVec Functions;
+
+ /// Non-owning map to bypass linear search when finding the GCFunctionInfo
+ /// associated with a particular Function.
+ typedef DenseMap<const Function *, GCFunctionInfo *> finfo_map_type;
+ finfo_map_type FInfoMap;
+
+public:
+ typedef SmallVector<std::unique_ptr<GCStrategy>,1>::const_iterator iterator;
+
+ static char ID;
+
+ GCModuleInfo();
+
+  /// clear - Resets the pass. Any pass which uses GCModuleInfo should
+ /// call it in doFinalization().
+ ///
+ void clear();
+
+ /// begin/end - Iterators for used strategies.
+ ///
+ iterator begin() const { return GCStrategyList.begin(); }
+ iterator end() const { return GCStrategyList.end(); }
+
+  /// get - Look up function metadata. This is currently assumed to
+ /// have the side effect of initializing the associated GCStrategy. That
+ /// will soon change.
+ GCFunctionInfo &getFunctionInfo(const Function &F);
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
new file mode 100644
index 0000000..2208470
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -0,0 +1,64 @@
+//===-- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The abstract base class GCMetadataPrinter supports writing GC metadata tables
+// as assembly code. This is a separate class from GCStrategy in order to allow
+// users of the LLVM JIT to avoid linking with the AsmWriter.
+//
+// Subclasses of GCMetadataPrinter must be registered using the
+// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
+// because these subclasses are logically plugins for the AsmWriter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
+#define LLVM_CODEGEN_GCMETADATAPRINTER_H
+
+#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/Support/Registry.h"
+
+namespace llvm {
+
+class GCMetadataPrinter;
+
+/// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
+/// defaults from Registry.
+typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
+
+/// GCMetadataPrinter - Emits GC metadata as assembly code. Instances are
+/// created, managed, and owned by the AsmPrinter.
+class GCMetadataPrinter {
+private:
+ GCStrategy *S;
+ friend class AsmPrinter;
+
+protected:
+ // May only be subclassed.
+ GCMetadataPrinter();
+
+private:
+ GCMetadataPrinter(const GCMetadataPrinter &) = delete;
+ GCMetadataPrinter &operator=(const GCMetadataPrinter &) = delete;
+
+public:
+ GCStrategy &getStrategy() { return *S; }
+
+ /// Called before the assembly for the module is generated by
+  /// the AsmPrinter (but after target specific hooks).
+ virtual void beginAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+ /// Called after the assembly for the module is generated by
+ /// the AsmPrinter (but before target specific hooks)
+ virtual void finishAssembly(Module &M, GCModuleInfo &Info, AsmPrinter &AP) {}
+
+ virtual ~GCMetadataPrinter();
+};
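+
+// Illustrative sketch (hypothetical names): a plugin pairs a strategy with a
+// printer by registering a GCMetadataPrinter subclass, e.g.
+//   class MyGCPrinter : public GCMetadataPrinter {
+//     void finishAssembly(Module &M, GCModuleInfo &Info,
+//                         AsmPrinter &AP) override { /* emit tables */ }
+//   };
+//   static GCMetadataPrinterRegistry::Add<MyGCPrinter>
+//       X("mygc", "illustrative metadata printer");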
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GCStrategy.h b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
new file mode 100644
index 0000000..3088a86
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GCStrategy.h
@@ -0,0 +1,177 @@
+//===-- llvm/CodeGen/GCStrategy.h - Garbage collection ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCStrategy coordinates code generation algorithms and implements some itself
+// in order to generate code compatible with a target code generator as
+// specified in a function's 'gc' attribute. Algorithms are enabled by setting
+// flags in a subclass's constructor, and some virtual methods can be
+// overridden.
+//
+// GCStrategy is relevant for implementations using either gc.root or
+// gc.statepoint based lowering strategies, but is currently focused mostly on
+// options for gc.root. This will change over time.
+//
+// When requested by a subclass of GCStrategy, the gc.root implementation will
+// populate GCModuleInfo and GCFunctionInfo with information about each Function in
+// the Module that opts in to garbage collection. Specifically:
+//
+// - Safe points
+// Garbage collection is generally only possible at certain points in code.
+//   GCStrategy can request that safe points be inserted:
+//
+// - At and after any call to a subroutine
+// - Before returning from the current function
+// - Before backwards branches (loops)
+//
+// - Roots
+// When a reference to a GC-allocated object exists on the stack, it must be
+//   stored in an alloca registered with llvm.gcroot.
+//
+// This information can be used to emit the metadata tables which are required by
+// the target garbage collector runtime.
+//
+// When used with gc.statepoint, information about safepoint and roots can be
+// found in the binary StackMap section after code generation. Safepoint
+// placement is currently the responsibility of the frontend, though late
+// insertion support is planned. gc.statepoint does not currently support
+// custom stack map formats; such formats can be generated by parsing the
+// standard stack map section if desired.
+//
+// The read and write barrier support can be used with either implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GCSTRATEGY_H
+#define LLVM_IR_GCSTRATEGY_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Registry.h"
+#include <string>
+
+namespace llvm {
+namespace GC {
+/// PointKind - Used to indicate whether the address of the call instruction
+/// or the address after the call instruction is listed in the stackmap. For
+/// most runtimes, PostCall safepoints are appropriate.
+///
+enum PointKind {
+ PreCall, ///< Instr is a call instruction.
+ PostCall ///< Instr is the return address of a call.
+};
+}
+
+/// GCStrategy describes a garbage collector algorithm's code generation
+/// requirements, and provides overridable hooks for those needs which cannot
+/// be abstractly described. GCStrategy objects must be looked up through
+/// the Function. The objects themselves are owned by the Context and must
+/// be immutable.
+class GCStrategy {
+private:
+ std::string Name;
+ friend class GCModuleInfo;
+
+protected:
+  bool UseStatepoints; ///< Uses gc.statepoints as opposed to gc.roots;
+                       ///< if set, none of the other options can be
+                       ///< anything but their default values.
+
+ unsigned NeededSafePoints; ///< Bitmask of required safe points.
+ bool CustomReadBarriers; ///< Default is to insert loads.
+ bool CustomWriteBarriers; ///< Default is to insert stores.
+ bool CustomRoots; ///< Default is to pass through to backend.
+ bool InitRoots; ///< If set, roots are nulled during lowering.
+ bool UsesMetadata; ///< If set, backend must emit metadata tables.
+
+public:
+ GCStrategy();
+ virtual ~GCStrategy() {}
+
+ /// Return the name of the GC strategy. This is the value of the collector
+ /// name string specified on functions which use this strategy.
+ const std::string &getName() const { return Name; }
+
+ /// By default, write barriers are replaced with simple store
+ /// instructions. If true, you must provide a custom pass to lower
+ /// calls to @llvm.gcwrite.
+ bool customWriteBarrier() const { return CustomWriteBarriers; }
+
+ /// By default, read barriers are replaced with simple load
+ /// instructions. If true, you must provide a custom pass to lower
+ /// calls to @llvm.gcread.
+ bool customReadBarrier() const { return CustomReadBarriers; }
+
+ /// Returns true if this strategy is expecting the use of gc.statepoints,
+ /// and false otherwise.
+ bool useStatepoints() const { return UseStatepoints; }
+
+ /** @name Statepoint Specific Properties */
+ ///@{
+
+ /// If the type specified can be reliably distinguished, returns true for
+ /// pointers to GC managed locations and false for pointers to non-GC
+  /// managed locations. Note that a GCStrategy can always return 'None' (i.e.
+  /// an empty optional) indicating it can't reliably distinguish.
+ virtual Optional<bool> isGCManagedPointer(const Type *Ty) const {
+ return None;
+ }
+ ///@}
+
+ /** @name GCRoot Specific Properties
+ * These properties and overrides only apply to collector strategies using
+ * GCRoot.
+ */
+ ///@{
+
+ /// True if safe points of any kind are required. By default, none are
+ /// recorded.
+ bool needsSafePoints() const { return NeededSafePoints != 0; }
+
+ /// True if the given kind of safe point is required. By default, none are
+ /// recorded.
+ bool needsSafePoint(GC::PointKind Kind) const {
+ return (NeededSafePoints & 1 << Kind) != 0;
+ }
+
+ /// By default, roots are left for the code generator so it can generate a
+ /// stack map. If true, you must provide a custom pass to lower
+ /// calls to @llvm.gcroot.
+ bool customRoots() const { return CustomRoots; }
+
+ /// If set, gcroot intrinsics should initialize their allocas to null
+ /// before the first use. This is necessary for most GCs and is enabled by
+ /// default.
+ bool initializeRoots() const { return InitRoots; }
+
+ /// If set, appropriate metadata tables must be emitted by the back-end
+ /// (assembler, JIT, or otherwise). For statepoint, this method is
+ /// currently unsupported. The stackmap information can be found in the
+ /// StackMap section as described in the documentation.
+ bool usesMetadata() const { return UsesMetadata; }
+
+ ///@}
+};
+
+/// Subclasses of GCStrategy are made available for use during compilation by
+/// adding them to the global GCRegistry. This can be done either within the
+/// LLVM source tree or via a loadable plugin. An example registration
+/// would be:
+/// static GCRegistry::Add<CustomGC> X("custom-name",
+///            "my custom super fancy gc strategy");
+///
+/// Note that to use a custom GCMetadataPrinter w/gc.roots, you must also
+/// register your GCMetadataPrinter subclass with the
+/// GCMetadataPrinterRegistry as well.
+typedef Registry<GCStrategy> GCRegistry;
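+
+// An illustrative (hypothetical) subclass that enables the gc.root machinery:
+//   class MyGC : public GCStrategy {
+//   public:
+//     MyGC() {
+//       NeededSafePoints = 1 << GC::PostCall;
+//       InitRoots = true;
+//       UsesMetadata = true;
+//     }
+//   };
+//   static GCRegistry::Add<MyGC> Y("mygc", "illustrative example collector");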
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/GCs.h b/contrib/llvm/include/llvm/CodeGen/GCs.h
new file mode 100644
index 0000000..5207f80
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/GCs.h
@@ -0,0 +1,46 @@
+//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains hack functions to force linking in the GC components.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCS_H
+#define LLVM_CODEGEN_GCS_H
+
+namespace llvm {
+class GCStrategy;
+class GCMetadataPrinter;
+
+/// FIXME: Collector instances are not useful on their own. These no longer
+/// serve any purpose except to link in the plugins.
+
+/// Creates a CoreCLR-compatible garbage collector.
+void linkCoreCLRGC();
+
+/// Creates an ocaml-compatible garbage collector.
+void linkOcamlGC();
+
+/// Creates an ocaml-compatible metadata printer.
+void linkOcamlGCPrinter();
+
+/// Creates an erlang-compatible garbage collector.
+void linkErlangGC();
+
+/// Creates an erlang-compatible metadata printer.
+void linkErlangGCPrinter();
+
+/// Creates a shadow stack garbage collector. This collector requires no code
+/// generator support.
+void linkShadowStackGC();
+
+void linkStatepointExampleGC();
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
new file mode 100644
index 0000000..158ff3c
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -0,0 +1,939 @@
+//===-- llvm/CodeGen/ISDOpcodes.h - CodeGen opcodes -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares codegen opcodes and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ISDOPCODES_H
+#define LLVM_CODEGEN_ISDOPCODES_H
+
+namespace llvm {
+
+/// ISD namespace - This namespace contains an enum which represents all of the
+/// SelectionDAG node types and value types.
+///
+namespace ISD {
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::NodeType enum - This enum defines the target-independent operators
+ /// for a SelectionDAG.
+ ///
+ /// Targets may also define target-dependent operator codes for SDNodes. For
+ /// example, on x86, these are the enum values in the X86ISD namespace.
+ /// Targets should aim to use target-independent operators to model their
+ /// instruction sets as much as possible, and only use target-dependent
+ /// operators when they have special requirements.
+ ///
+  /// Finally, during and after selection proper, SDNodes may use special
+ /// operator codes that correspond directly with MachineInstr opcodes. These
+ /// are used to represent selected instructions. See the isMachineOpcode()
+ /// and getMachineOpcode() member functions of SDNode.
+ ///
+ enum NodeType {
+ /// DELETED_NODE - This is an illegal value that is used to catch
+ /// errors. This opcode is not a legal opcode for any node.
+ DELETED_NODE,
+
+ /// EntryToken - This is the marker used to indicate the start of a region.
+ EntryToken,
+
+ /// TokenFactor - This node takes multiple tokens as input and produces a
+ /// single token result. This is used to represent the fact that the operand
+ /// operators are independent of each other.
+ TokenFactor,
+
+ /// AssertSext, AssertZext - These nodes record if a register contains a
+ /// value that has already been zero or sign extended from a narrower type.
+ /// These nodes take two operands. The first is the node that has already
+ /// been extended, and the second is a value type node indicating the width
+    /// of the extension.
+ AssertSext, AssertZext,
+
+ /// Various leaf nodes.
+ BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
+ Constant, ConstantFP,
+ GlobalAddress, GlobalTLSAddress, FrameIndex,
+ JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
+
+ /// The address of the GOT
+ GLOBAL_OFFSET_TABLE,
+
+ /// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+ /// llvm.returnaddress on the DAG. These nodes take one operand, the index
+ /// of the frame or return address to return. An index of zero corresponds
+ /// to the current function's frame or return address, an index of one to
+ /// the parent's frame or return address, and so on.
+ FRAMEADDR, RETURNADDR,
+
+ /// LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
+ /// Materializes the offset from the local object pointer of another
+ /// function to a particular local object passed to llvm.localescape. The
+ /// operand is the MCSymbol label used to represent this offset, since
+ /// typically the offset is not known until after code generation of the
+ /// parent.
+ LOCAL_RECOVER,
+
+    /// READ_REGISTER, WRITE_REGISTER - These nodes represent the
+    /// llvm.read_register and llvm.write_register intrinsics on the DAG,
+    /// which implement the named register global variables extension.
+ READ_REGISTER,
+ WRITE_REGISTER,
+
+ /// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+ /// first (possible) on-stack argument. This is needed for correct stack
+ /// adjustment during unwind.
+ FRAME_TO_ARGS_OFFSET,
+
+ /// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+ /// 'eh_return' gcc dwarf builtin, which is used to return from
+ /// exception. The general meaning is: adjust stack by OFFSET and pass
+ /// execution to HANDLER. Many platform-related details also :)
+ EH_RETURN,
+
+ /// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
+ /// This corresponds to the eh.sjlj.setjmp intrinsic.
+ /// It takes an input chain and a pointer to the jump buffer as inputs
+ /// and returns an outchain.
+ EH_SJLJ_SETJMP,
+
+ /// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
+ /// This corresponds to the eh.sjlj.longjmp intrinsic.
+ /// It takes an input chain and a pointer to the jump buffer as inputs
+ /// and returns an outchain.
+ EH_SJLJ_LONGJMP,
+
+ /// OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN)
+ /// The target initializes the dispatch table here.
+ EH_SJLJ_SETUP_DISPATCH,
+
+ /// TargetConstant* - Like Constant*, but the DAG does not do any folding,
+ /// simplification, or lowering of the constant. They are used for constants
+ /// which are known to fit in the immediate fields of their users, or for
+ /// carrying magic numbers which are not values which need to be
+ /// materialized in registers.
+ TargetConstant,
+ TargetConstantFP,
+
+ /// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+ /// anything else with this node, and this is valid in the target-specific
+ /// dag, turning into a GlobalAddress operand.
+ TargetGlobalAddress,
+ TargetGlobalTLSAddress,
+ TargetFrameIndex,
+ TargetJumpTable,
+ TargetConstantPool,
+ TargetExternalSymbol,
+ TargetBlockAddress,
+
+ MCSymbol,
+
+ /// TargetIndex - Like a constant pool entry, but with completely
+ /// target-dependent semantics. Holds target flags, a 32-bit index, and a
+ /// 64-bit index. Targets can use this however they like.
+ TargetIndex,
+
+ /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with no side effects.
+ /// The first operand is the ID number of the intrinsic from the
+ /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
+ /// node returns the result of the intrinsic.
+ INTRINSIC_WO_CHAIN,
+
+ /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// returns a result. The first operand is a chain pointer. The second is
+ /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
+ /// operands to the intrinsic follow. The node has two results, the result
+ /// of the intrinsic and an output chain.
+ INTRINSIC_W_CHAIN,
+
+ /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// does not return a result. The first operand is a chain pointer. The
+ /// second is the ID number of the intrinsic from the llvm::Intrinsic
+ /// namespace. The operands to the intrinsic follow.
+ INTRINSIC_VOID,
+
+ /// CopyToReg - This node has three operands: a chain, a register number to
+ /// set to this value, and a value.
+ CopyToReg,
+
+ /// CopyFromReg - This node indicates that the input value is a virtual or
+ /// physical register that is defined outside of the scope of this
+ /// SelectionDAG. The register is available from the RegisterSDNode object.
+ CopyFromReg,
+
+ /// UNDEF - An undefined node.
+ UNDEF,
+
+ /// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+ /// a Constant, which is required to be operand #1) half of the integer or
+ /// float value specified as operand #0. This is only for use before
+ /// legalization, for values that will be broken into multiple registers.
+ EXTRACT_ELEMENT,
+
+ /// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
+ /// Given two values of the same integer value type, this produces a value
+ /// twice as big. Like EXTRACT_ELEMENT, this can only be used before
+ /// legalization.
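+    /// For example (illustrative), before legalization an i64 value X can be
+    /// split as Lo = EXTRACT_ELEMENT(X, 0) and Hi = EXTRACT_ELEMENT(X, 1),
+    /// with BUILD_PAIR(Lo, Hi) reconstituting the original i64.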
+ BUILD_PAIR,
+
+ /// MERGE_VALUES - This node takes multiple discrete operands and returns
+    /// them all as its individual results. This node has exactly the same
+ /// number of inputs and outputs. This node is useful for some pieces of the
+ /// code generator that want to think about a single node with multiple
+ /// results, not multiple nodes.
+ MERGE_VALUES,
+
+ /// Simple integer binary arithmetic operators.
+ ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
+
+ /// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+ /// a signed/unsigned value of type i[2*N], and return the full value as
+ /// two results, each of type iN.
+ SMUL_LOHI, UMUL_LOHI,
+
+ /// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+ /// remainder result.
+ SDIVREM, UDIVREM,
+
+ /// CARRY_FALSE - This node is used when folding other nodes,
+ /// like ADDC/SUBC, which indicate the carry result is always false.
+ CARRY_FALSE,
+
+ /// Carry-setting nodes for multiple precision addition and subtraction.
+ /// These nodes take two operands of the same value type, and produce two
+ /// results. The first result is the normal add or sub result, the second
+ /// result is the carry flag result.
+ ADDC, SUBC,
+
+ /// Carry-using nodes for multiple precision addition and subtraction. These
+ /// nodes take three operands: The first two are the normal lhs and rhs to
+ /// the add or sub, and the third is the input carry flag. These nodes
+    /// produce two results: the normal result of the add or sub, and the
+    /// output carry flag. These nodes both read and write a carry flag,
+    /// allowing them to be chained together for add and sub of arbitrarily
+ /// values.
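+    /// For example (illustrative), an add of two values split into N-bit
+    /// halves can be expanded as:
+    ///   Lo, Carry = ADDC LHS.lo, RHS.lo
+    ///   Hi        = ADDE LHS.hi, RHS.hi, Carry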
+ ADDE, SUBE,
+
+ /// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+ /// These nodes take two operands: the normal LHS and RHS to the add. They
+ /// produce two results: the normal result of the add, and a boolean that
+    /// indicates if an overflow occurred (*not* a flag, because it may be stored
+ /// to memory, etc.). If the type of the boolean is not i1 then the high
+ /// bits conform to getBooleanContents.
+ /// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
+ SADDO, UADDO,
+
+ /// Same for subtraction.
+ SSUBO, USUBO,
+
+ /// Same for multiplication.
+ SMULO, UMULO,
+
+ /// Simple binary floating point operators.
+ FADD, FSUB, FMUL, FDIV, FREM,
+
+ /// FMA - Perform a * b + c with no intermediate rounding step.
+ FMA,
+
+ /// FMAD - Perform a * b + c, while getting the same result as the
+ /// separately rounded operations.
+ FMAD,
+
+ /// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
+ /// DAG node does not require that X and Y have the same type, just that
+ /// they are both floating point. X and the result must have the same type.
+ /// FCOPYSIGN(f32, f64) is allowed.
+ FCOPYSIGN,
+
+ /// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+ /// value as an integer 0/1 value.
+ FGETSIGN,
+
+ /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
+ /// specified, possibly variable, elements. The number of elements is
+ /// required to be a power of two. The types of the operands must all be
+ /// the same and must match the vector element type, except that integer
+ /// types are allowed to be larger than the element type, in which case
+ /// the operands are implicitly truncated.
+ BUILD_VECTOR,
+
+ /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
+ /// at IDX replaced with VAL. If the type of VAL is larger than the vector
+ /// element type then VAL is truncated before replacement.
+ INSERT_VECTOR_ELT,
+
+ /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
+ /// identified by the (potentially variable) element number IDX. If the
+ /// return type is an integer type larger than the element type of the
+ /// vector, the result is extended to the width of the return type.
+ EXTRACT_VECTOR_ELT,
+
+ /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
+ /// vector type with the same length and element type, this produces a
+ /// concatenated vector result value, with length equal to the sum of the
+ /// lengths of the input vectors.
+ CONCAT_VECTORS,
+
+ /// INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector
+ /// with VECTOR2 inserted into VECTOR1 at the (potentially
+ /// variable) element number IDX, which must be a multiple of the
+ /// VECTOR2 vector length. The elements of VECTOR1 starting at
+ /// IDX are overwritten with VECTOR2. Elements IDX through
+ /// vector_length(VECTOR2) must be valid VECTOR1 indices.
+ INSERT_SUBVECTOR,
+
+    /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
+ /// vector value) starting with the element number IDX, which must be a
+ /// constant multiple of the result vector length.
+ EXTRACT_SUBVECTOR,
+
+ /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+ /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
+ /// values that indicate which value (or undef) each result element will
+ /// get. These constant ints are accessible through the
+ /// ShuffleVectorSDNode class. This is quite similar to the Altivec
+ /// 'vperm' instruction, except that the indices must be constants and are
+ /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
+ VECTOR_SHUFFLE,
+
+ /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
+ /// scalar value into element 0 of the resultant vector type. The top
+ /// elements 1 to N-1 of the N-element vector are undefined. The type
+ /// of the operand must match the vector element type, except when they
+ /// are integer types. In this case the operand is allowed to be wider
+ /// than the vector element type, and is implicitly truncated to it.
+ SCALAR_TO_VECTOR,
+
+ /// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
+ /// producing an unsigned/signed value of type i[2*N], then return the top
+ /// part.
+ MULHU, MULHS,
+
+    /// [US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned
+ /// integers.
+ SMIN, SMAX, UMIN, UMAX,
+
+ /// Bitwise operators - logical and, logical or, logical xor.
+ AND, OR, XOR,
+
+ /// Shift and rotation operations. After legalization, the type of the
+ /// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
+ /// the shift amount can be any type, but care must be taken to ensure it is
+ /// large enough. TLI.getShiftAmountTy() is i8 on some targets, but before
+ /// legalization, types like i1024 can occur and i8 doesn't have enough bits
+ /// to represent the shift amount.
+ /// When the 1st operand is a vector, the shift amount must be in the same
+ /// type. (TLI.getShiftAmountTy() will return the same type when the input
+ /// type is a vector.)
+ SHL, SRA, SRL, ROTL, ROTR,
+
+ /// Byte Swap and Counting operators.
+ BSWAP, CTTZ, CTLZ, CTPOP, BITREVERSE,
+
+ /// Bit counting operators with an undefined result for zero inputs.
+ CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
+
+ /// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
+ /// i1 then the high bits must conform to getBooleanContents.
+ SELECT,
+
+ /// Select with a vector condition (op #0) and two vector operands (ops #1
+ /// and #2), returning a vector result. All vectors have the same length.
+ /// Much like the scalar select and setcc, each bit in the condition selects
+ /// whether the corresponding result element is taken from op #1 or op #2.
+ /// At first, the VSELECT condition is of vXi1 type. Later, targets may
+ /// change the condition type in order to match the VSELECT node using a
+ /// pattern. The condition follows the BooleanContent format of the target.
+ VSELECT,
+
+ /// Select with condition operator - This selects between a true value and
+ /// a false value (ops #2 and #3) based on the boolean result of comparing
+ /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
+ /// condition code in op #4, a CondCodeSDNode.
+ SELECT_CC,
+
+ /// SetCC operator - This evaluates to a true value iff the condition is
+ /// true. If the result value type is not i1 then the high bits conform
+ /// to getBooleanContents. The operands to this are the left and right
+ /// operands to compare (ops #0, and #1) and the condition code to compare
+ /// them with (op #2) as a CondCodeSDNode. If the operands are vector types
+ /// then the result type must also be a vector type.
+ SETCC,
+
+ /// Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but
+ /// op #2 is a *carry value*. This operator checks the result of
+ /// "LHS - RHS - Carry", and can be used to compare two wide integers:
+ /// (setcce lhshi rhshi (subc lhslo rhslo) cc). Only valid for integers.
+ SETCCE,
+
+ /// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+ /// integer shift operations. The operation ordering is:
+ /// [Lo,Hi] = op [LoLHS,HiLHS], Amt
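+ ///
+ /// A C-level sketch of SHL_PARTS for a 64-bit shift split into 32-bit
+ /// halves (illustrative; it ignores the Amt == 0 and Amt >= 32 corner
+ /// cases, which targets deal with during lowering):
+ ///   Hi = (HiLHS << Amt) | (LoLHS >> (32 - Amt));
+ ///   Lo = LoLHS << Amt;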
+ SHL_PARTS, SRA_PARTS, SRL_PARTS,
+
+ /// Conversion operators. These are all single input single output
+ /// operations. For all of these, the result type must be strictly
+ /// wider or narrower (depending on the operation) than the source
+ /// type.
+
+ /// SIGN_EXTEND - Used for integer types, replicating the sign bit
+ /// into new bits.
+ SIGN_EXTEND,
+
+ /// ZERO_EXTEND - Used for integer types, zeroing the new bits.
+ ZERO_EXTEND,
+
+ /// ANY_EXTEND - Used for integer types. The high bits are undefined.
+ ANY_EXTEND,
+
+ /// TRUNCATE - Completely drop the high bits.
+ TRUNCATE,
+
+ /// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+ /// depends on the first letter) to floating point.
+ SINT_TO_FP,
+ UINT_TO_FP,
+
+ /// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+ /// sign extend a small value in a large integer register (e.g. sign
+ /// extending the low 8 bits of a 32-bit register to fill the top 24 bits
+ /// with the 7th bit). The size of the smaller type is indicated by the
+ /// second operand (operand #1), a ValueType node.
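+ ///
+ /// For example (a sketch), sign extending the low 8 bits of an i32 value
+ /// x in place is equivalent to SRA(SHL(x, 24), 24).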
+ SIGN_EXTEND_INREG,
+
+ /// ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+ /// in-register any-extension of the low lanes of an integer vector. The
+ /// result type must have fewer elements than the operand type, and those
+ /// elements must be larger integer types such that the total size of the
+ /// operand type and the result type match. Each of the low operand
+ /// elements is any-extended into the corresponding, wider result
+ /// elements with the high bits becoming undef.
+ ANY_EXTEND_VECTOR_INREG,
+
+ /// SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+ /// in-register sign-extension of the low lanes of an integer vector. The
+ /// result type must have fewer elements than the operand type, and those
+ /// elements must be larger integer types such that the total size of the
+ /// operand type and the result type match. Each of the low operand
+ /// elements is sign-extended into the corresponding, wider result
+ /// elements.
+ // FIXME: The SIGN_EXTEND_INREG node isn't specifically limited to
+ // scalars, but it also doesn't handle vectors well. Either it should be
+ // restricted to scalars or this node (and its handling) should be merged
+ // into it.
+ SIGN_EXTEND_VECTOR_INREG,
+
+ /// ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an
+ /// in-register zero-extension of the low lanes of an integer vector. The
+ /// result type must have fewer elements than the operand type, and those
+ /// elements must be larger integer types such that the total size of the
+ /// operand type and the result type match. Each of the low operand
+ /// elements is zero-extended into the corresponding, wider result
+ /// elements.
+ ZERO_EXTEND_VECTOR_INREG,
+
+ /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
+ /// integer.
+ FP_TO_SINT,
+ FP_TO_UINT,
+
+ /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
+ /// down to the precision of the destination VT. TRUNC is a flag, which is
+ /// always an integer that is zero or one. If TRUNC is 0, this is a
+ /// normal rounding; if it is 1, this FP_ROUND is known to not change the
+ /// value of Y.
+ ///
+ /// The TRUNC = 1 case is used in cases where we know that the value will
+ /// not be modified by the node, because Y is not using any of the extra
+ /// precision of source type. This allows certain transformations like
+ /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
+ /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
+ FP_ROUND,
+
+ /// FLT_ROUNDS_ - Returns current rounding mode:
+ /// -1 Undefined
+ /// 0 Round to 0
+ /// 1 Round to nearest
+ /// 2 Round to +inf
+ /// 3 Round to -inf
+ FLT_ROUNDS_,
+
+ /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
+ /// rounds it to a floating point value. It then promotes it and returns it
+ /// in a register of the same size. This operation effectively just
+ /// discards excess precision. The type to round down to is specified by
+ /// the VT operand, a VTSDNode.
+ FP_ROUND_INREG,
+
+ /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
+ FP_EXTEND,
+
+ /// BITCAST - This operator converts between integer, vector and FP
+ /// values, as if the value was stored to memory with one type and loaded
+ /// from the same address with the other type (or equivalently for vector
+ /// format conversions, etc). The source and result are required to have
+ /// the same bit size (e.g. f32 <-> i32). This can also be used for
+ /// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
+ /// getNode().
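+ ///
+ /// For example (arithmetic sketch): BITCAST of the f32 value 1.0 to i32
+ /// yields 0x3F800000, the IEEE-754 bit pattern of 1.0f.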
+ BITCAST,
+
+ /// ADDRSPACECAST - This operator converts between pointers of different
+ /// address spaces.
+ ADDRSPACECAST,
+
+ /// CONVERT_RNDSAT - This operator is used to support various conversions
+ /// between various types (float, signed, unsigned and vectors of those
+ /// types) with rounding and saturation. NOTE: Avoid using this operator as
+ /// most targets don't support it and the operator might be removed in the
+ /// future. It takes the following arguments:
+ /// 0) value
+ /// 1) dest type (type to convert to)
+ /// 2) src type (type to convert from)
+ /// 3) rounding imm
+ /// 4) saturation imm
+ /// 5) ISD::CvtCode indicating the type of conversion to do
+ CONVERT_RNDSAT,
+
+ /// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
+ /// and truncation for half-precision (16 bit) floating point numbers. These nodes
+ /// form a semi-softened interface for dealing with f16 (as an i16), which
+ /// is often a storage-only type but has native conversions.
+ FP16_TO_FP, FP_TO_FP16,
+
+ /// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ /// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ /// FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary
+ /// floating point operations. These are inspired by libm.
+ FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR,
+ /// FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two
+ /// values.
+ /// In the case where a single input is NaN, the non-NaN input is returned.
+ ///
+ /// The return value of (FMINNUM 0.0, -0.0) could be either 0.0 or -0.0.
+ FMINNUM, FMAXNUM,
+ /// FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that
+ /// when a single input is NaN, NaN is returned.
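+ ///
+ /// For example (illustrative): FMINNUM(NaN, 2.0) returns 2.0, whereas
+ /// FMINNAN(NaN, 2.0) returns NaN.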
+ FMINNAN, FMAXNAN,
+
+ /// FSINCOS - Compute both fsin and fcos as a single operation.
+ FSINCOS,
+
+ /// LOAD and STORE have token chains as their first operand, then the same
+ /// operands as an LLVM load/store instruction, then an offset node that
+ /// is added / subtracted from the base pointer to form the address (for
+ /// indexed memory ops).
+ LOAD, STORE,
+
+ /// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+ /// to a specified boundary. This node always has two return values: a new
+ /// stack pointer value and a chain. The first operand is the token chain,
+ /// the second is the number of bytes to allocate, and the third is the
+ /// alignment boundary. The size is guaranteed to be a multiple of the
+ /// stack alignment, and the alignment is guaranteed to be bigger than the
+ /// stack alignment (if required) or 0 to get standard stack alignment.
+ DYNAMIC_STACKALLOC,
+
+ /// Control flow instructions. These all have token chains.
+
+ /// BR - Unconditional branch. The first operand is the chain
+ /// operand, the second is the MBB to branch to.
+ BR,
+
+ /// BRIND - Indirect branch. The first operand is the chain, the second
+ /// is the value to branch to, which must be of the same type as the
+ /// target's pointer type.
+ BRIND,
+
+ /// BR_JT - Jumptable branch. The first operand is the chain, the second
+ /// is the jumptable index, the last one is the jumptable entry index.
+ BR_JT,
+
+ /// BRCOND - Conditional branch. The first operand is the chain, the
+ /// second is the condition, the third is the block to branch to if the
+ /// condition is true. If the type of the condition is not i1, then the
+ /// high bits must conform to getBooleanContents.
+ BRCOND,
+
+ /// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
+ /// that the condition is represented as condition code, and two nodes to
+ /// compare, rather than as a combined SetCC node. The operands in order
+ /// are chain, cc, lhs, rhs, block to branch to if condition is true.
+ BR_CC,
+
+ /// INLINEASM - Represents an inline asm block. This node always has two
+ /// return values: a chain and a flag result. The inputs are as follows:
+ /// Operand #0 : Input chain.
+ /// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
+ /// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
+ /// Operand #3 : HasSideEffect, IsAlignStack bits.
+ /// After this, it is followed by a list of operands with this format:
+ /// ConstantSDNode: Flags that encode whether it is a mem or not, the
+ /// number of operands that follow, etc. See InlineAsm.h.
+ /// ... however many operands ...
+ /// Operand #last: Optional, an incoming flag.
+ ///
+ /// The variable width operands are required to represent target addressing
+ /// modes as a single "operand", even though they may have multiple
+ /// SDOperands.
+ INLINEASM,
+
+ /// EH_LABEL - Represents a label in mid basic block used to track
+ /// locations needed for debug and exception handling tables. These nodes
+ /// take a chain as input and return a chain.
+ EH_LABEL,
+
+ /// CATCHPAD - Represents a catchpad instruction.
+ CATCHPAD,
+
+ /// CATCHRET - Represents a return from a catch block funclet. Used for
+ /// MSVC compatible exception handling. Takes a chain operand and a
+ /// destination basic block operand.
+ CATCHRET,
+
+ /// CLEANUPRET - Represents a return from a cleanup block funclet. Used for
+ /// MSVC compatible exception handling. Takes only a chain operand.
+ CLEANUPRET,
+
+ /// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
+ /// value, the same type as the pointer type for the system, and an output
+ /// chain.
+ STACKSAVE,
+
+ /// STACKRESTORE has two operands, an input chain and a pointer to restore
+ /// to. It returns an output chain.
+ STACKRESTORE,
+
+ /// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
+ /// of a call sequence, and carry arbitrary information that target might
+ /// want to know. The first operand is a chain, the rest are specified by
+ /// the target and not touched by the DAG optimizers.
+ /// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+ CALLSEQ_START, // Beginning of a call sequence
+ CALLSEQ_END, // End of a call sequence
+
+ /// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
+ /// and the alignment. It returns a pair of values: the vaarg value and a
+ /// new chain.
+ VAARG,
+
+ /// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
+ /// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+ /// source.
+ VACOPY,
+
+ /// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
+ /// pointer, and a SRCVALUE.
+ VAEND, VASTART,
+
+ /// SRCVALUE - This is a node type that holds a Value* that is used to
+ /// make reference to a value in the LLVM IR.
+ SRCVALUE,
+
+ /// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
+ /// reference metadata in the IR.
+ MDNODE_SDNODE,
+
+ /// PCMARKER - This corresponds to the pcmarker intrinsic.
+ PCMARKER,
+
+ /// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+ /// It produces a chain and one i64 value. The only operand is a chain.
+ /// If i64 is not legal, the result will be expanded into smaller values.
+ /// Still, it returns an i64, so targets should set legality for i64.
+ /// The result is the content of the architecture-specific cycle
+ /// counter-like register (or other high accuracy low latency clock source).
+ READCYCLECOUNTER,
+
+ /// HANDLENODE node - Used as a handle for various purposes.
+ HANDLENODE,
+
+ /// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
+ /// takes as input a token chain, the pointer to the trampoline, the pointer
+ /// to the nested function, the pointer to pass for the 'nest' parameter, a
+ /// SRCVALUE for the trampoline and another for the nested function
+ /// (allowing targets to access the original Function*).
+ /// It produces a token chain as output.
+ INIT_TRAMPOLINE,
+
+ /// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
+ /// It takes a pointer to the trampoline and produces a (possibly) new
+ /// pointer to the same trampoline with platform-specific adjustments
+ /// applied. The pointer it returns points to an executable block of code.
+ ADJUST_TRAMPOLINE,
+
+ /// TRAP - Trapping instruction
+ TRAP,
+
+ /// DEBUGTRAP - Trap intended to get the attention of a debugger.
+ DEBUGTRAP,
+
+ /// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
+ /// is the chain. The other operands are the address to prefetch,
+ /// read / write specifier, locality specifier and instruction / data cache
+ /// specifier.
+ PREFETCH,
+
+ /// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
+ /// This corresponds to the fence instruction. It takes an input chain, and
+ /// two integer constants: an AtomicOrdering and a SynchronizationScope.
+ ATOMIC_FENCE,
+
+ /// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
+ /// This corresponds to "load atomic" instruction.
+ ATOMIC_LOAD,
+
+ /// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
+ /// This corresponds to "store atomic" instruction.
+ ATOMIC_STORE,
+
+ /// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+ /// For double-word atomic operations:
+ /// ValLo, ValHi, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmpLo, cmpHi,
+ /// swapLo, swapHi)
+ /// This corresponds to the cmpxchg instruction.
+ ATOMIC_CMP_SWAP,
+
+ /// Val, Success, OUTCHAIN
+ /// = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap)
+ /// N.b. this is still a strong cmpxchg operation, so
+ /// Success == "Val == cmp".
+ ATOMIC_CMP_SWAP_WITH_SUCCESS,
+
+ /// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+ /// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+ /// For double-word atomic operations:
+ /// ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi)
+ /// ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi)
+ /// These correspond to the atomicrmw instruction.
+ ATOMIC_SWAP,
+ ATOMIC_LOAD_ADD,
+ ATOMIC_LOAD_SUB,
+ ATOMIC_LOAD_AND,
+ ATOMIC_LOAD_OR,
+ ATOMIC_LOAD_XOR,
+ ATOMIC_LOAD_NAND,
+ ATOMIC_LOAD_MIN,
+ ATOMIC_LOAD_MAX,
+ ATOMIC_LOAD_UMIN,
+ ATOMIC_LOAD_UMAX,
+
+ // Masked load and store - consecutive vector load and store operations
+ // with additional mask operand that prevents memory accesses to the
+ // masked-off lanes.
+ MLOAD, MSTORE,
+
+ // Masked gather and scatter - load and store operations for a vector of
+ // random addresses with additional mask operand that prevents memory
+ // accesses to the masked-off lanes.
+ MGATHER, MSCATTER,
+
+ /// This corresponds to the llvm.lifetime.* intrinsics. The first operand
+ /// is the chain and the second operand is the alloca pointer.
+ LIFETIME_START, LIFETIME_END,
+
+ /// GC_TRANSITION_START/GC_TRANSITION_END - These operators mark the
+ /// beginning and end of GC transition sequence, and carry arbitrary
+ /// information that target might need for lowering. The first operand is
+ /// a chain, the rest are specified by the target and not touched by the DAG
+ /// optimizers. GC_TRANSITION_START..GC_TRANSITION_END pairs may not be
+ /// nested.
+ GC_TRANSITION_START,
+ GC_TRANSITION_END,
+
+ /// GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of
+ /// the most recent dynamic alloca. For most targets that would be 0, but
+ /// for some others (e.g. PowerPC, PowerPC64) it is a compile-time known
+ /// nonzero constant. The only operand here is the chain.
+ GET_DYNAMIC_AREA_OFFSET,
+
+ /// BUILTIN_OP_END - This must be the last enum value in this list.
+ /// The target-specific pre-isel opcode values start here.
+ BUILTIN_OP_END
+ };
+
+ /// FIRST_TARGET_MEMORY_OPCODE - Target-specific pre-isel operations
+ /// which do not reference a specific memory location should be less than
+ /// this value. Those that do must not be less than this value, and can
+ /// be used with SelectionDAG::getMemIntrinsicNode.
+ static const int FIRST_TARGET_MEMORY_OPCODE = BUILTIN_OP_END+300;
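+
+ // A sketch of how a backend typically carves up this space (the names
+ // below are hypothetical; only the pattern matters):
+ //   namespace MyTargetISD {
+ //   enum NodeType {
+ //     // Non-memory target nodes start at BUILTIN_OP_END.
+ //     FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ //     RET_FLAG, CALL,
+ //     // Memory-referencing target nodes must start at or above this
+ //     // value so getMemIntrinsicNode can recognize them.
+ //     LOAD_PAIR = ISD::FIRST_TARGET_MEMORY_OPCODE
+ //   };
+ //   }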
+
+ //===--------------------------------------------------------------------===//
+ /// MemIndexedMode enum - This enum defines the load / store indexed
+ /// addressing modes.
+ ///
+ /// UNINDEXED "Normal" load / store. The effective address is already
+ /// computed and is available in the base pointer. The offset
+ /// operand is always undefined. In addition to producing a
+ /// chain, an unindexed load produces one value (result of the
+ /// load); an unindexed store does not produce a value.
+ ///
+ /// PRE_INC Similar to the unindexed mode where the effective address
+ /// PRE_DEC is the value of the base pointer plus / minus the offset.
+ /// It considers the computation as being folded into the load /
+ /// store operation (i.e. the load / store does the address
+ /// computation as well as performing the memory transaction).
+ /// The base operand is always undefined. In addition to
+ /// producing a chain, pre-indexed load produces two values
+ /// (result of the load and the result of the address
+ /// computation); a pre-indexed store produces one value (result
+ /// of the address computation).
+ ///
+ /// POST_INC The effective address is the value of the base pointer. The
+ /// POST_DEC value of the offset operand is then added to / subtracted
+ /// from the base after the memory transaction. In addition to
+ /// producing a chain, post-indexed load produces two values
+ /// (the result of the load and the result of the base +/- offset
+ /// computation); a post-indexed store produces one value (the
+ /// result of the base +/- offset computation).
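+ ///
+ /// For example (illustrative), a PRE_INC load with base p and offset 4
+ /// yields both the value loaded from p + 4 and the updated pointer
+ /// p + 4, matching addressing forms such as ARM's "ldr r0, [r1, #4]!".
+ /// The POST_INC variant instead loads from p and yields p + 4.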
+ enum MemIndexedMode {
+ UNINDEXED = 0,
+ PRE_INC,
+ PRE_DEC,
+ POST_INC,
+ POST_DEC,
+ LAST_INDEXED_MODE
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// LoadExtType enum - This enum defines the three variants of LOADEXT
+ /// (load with extension).
+ ///
+ /// SEXTLOAD loads the integer operand and sign extends it to a larger
+ /// integer result type.
+ /// ZEXTLOAD loads the integer operand and zero extends it to a larger
+ /// integer result type.
+ /// EXTLOAD is used for two things: floating point extending loads and
+ /// integer extending loads [the top bits are undefined].
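+ ///
+ /// For example (a sketch in IR terms), an i8 SEXTLOAD producing i32
+ /// corresponds to the sequence:
+ ///   %b = load i8, i8* %p
+ ///   %w = sext i8 %b to i32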
+ enum LoadExtType {
+ NON_EXTLOAD = 0,
+ EXTLOAD,
+ SEXTLOAD,
+ ZEXTLOAD,
+ LAST_LOADEXT_TYPE
+ };
+
+ NodeType getExtForLoadExtType(bool IsFP, LoadExtType);
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::CondCode enum - These are ordered carefully to make the bitfields
+ /// below work out, when considering SETFALSE (something that never exists
+ /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
+ /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
+ /// to. If the "N" column is 1, the result of the comparison is undefined if
+ /// the input is a NAN.
+ ///
+ /// All of these (except for the 'always folded ops') should be handled for
+ /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
+ /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
+ ///
+ /// Note that these are laid out in a specific order to allow bit-twiddling
+ /// to transform conditions.
+ enum CondCode {
+ // Opcode N U L G E Intuitive operation
+ SETFALSE, // 0 0 0 0 Always false (always folded)
+ SETOEQ, // 0 0 0 1 True if ordered and equal
+ SETOGT, // 0 0 1 0 True if ordered and greater than
+ SETOGE, // 0 0 1 1 True if ordered and greater than or equal
+ SETOLT, // 0 1 0 0 True if ordered and less than
+ SETOLE, // 0 1 0 1 True if ordered and less than or equal
+ SETONE, // 0 1 1 0 True if ordered and operands are unequal
+ SETO, // 0 1 1 1 True if ordered (no nans)
+ SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
+ SETUEQ, // 1 0 0 1 True if unordered or equal
+ SETUGT, // 1 0 1 0 True if unordered or greater than
+ SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
+ SETULT, // 1 1 0 0 True if unordered or less than
+ SETULE, // 1 1 0 1 True if unordered, less than, or equal
+ SETUNE, // 1 1 1 0 True if unordered or not equal
+ SETTRUE, // 1 1 1 1 Always true (always folded)
+ // Don't care operations: undefined if the input is a nan.
+ SETFALSE2, // 1 X 0 0 0 Always false (always folded)
+ SETEQ, // 1 X 0 0 1 True if equal
+ SETGT, // 1 X 0 1 0 True if greater than
+ SETGE, // 1 X 0 1 1 True if greater than or equal
+ SETLT, // 1 X 1 0 0 True if less than
+ SETLE, // 1 X 1 0 1 True if less than or equal
+ SETNE, // 1 X 1 1 0 True if not equal
+ SETTRUE2, // 1 X 1 1 1 Always true (always folded)
+
+ SETCC_INVALID // Marker value.
+ };
+
+ /// isSignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs a signed comparison when used with integer operands.
+ inline bool isSignedIntSetCC(CondCode Code) {
+ return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
+ }
+
+ /// isUnsignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs an unsigned comparison when used with integer operands.
+ inline bool isUnsignedIntSetCC(CondCode Code) {
+ return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
+ }
+
+ /// isTrueWhenEqual - Return true if the specified condition returns true if
+ /// the two operands to the condition are equal. Note that if one of the two
+ /// operands is a NaN, this value is meaningless.
+ inline bool isTrueWhenEqual(CondCode Cond) {
+ return ((int)Cond & 1) != 0;
+ }
+
+ /// getUnorderedFlavor - This function returns 0 if the condition is always
+ /// false if an operand is a NaN, 1 if the condition is always true if the
+ /// operand is a NaN, and 2 if the condition is undefined if the operand is a
+ /// NaN.
+ inline unsigned getUnorderedFlavor(CondCode Cond) {
+ return ((int)Cond >> 3) & 3;
+ }
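+
+ // Worked examples of the bit layout above (illustrative): SETOLT is 4,
+ // so getUnorderedFlavor(SETOLT) == 0 (false on NaN); SETULT is 12,
+ // giving 1 (true on NaN); SETLT is 20, giving 2 (undefined on NaN).
+ // Likewise isTrueWhenEqual tests the E bit: SETOEQ (1) and SETULE (13)
+ // are odd, hence true when equal; SETNE (22) is even, hence false.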
+
+ /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
+ /// 'op' is a valid SetCC operation.
+ CondCode getSetCCInverse(CondCode Operation, bool isInteger);
+
+ /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
+ /// when given the operation for (X op Y).
+ CondCode getSetCCSwappedOperands(CondCode Operation);
+
+ /// getSetCCOrOperation - Return the result of a logical OR between different
+ /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+ /// getSetCCAndOperation - Return the result of a logical AND between
+ /// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+ //===--------------------------------------------------------------------===//
+ /// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT
+ /// supports.
+ enum CvtCode {
+ CVT_FF, /// Float from Float
+ CVT_FS, /// Float from Signed
+ CVT_FU, /// Float from Unsigned
+ CVT_SF, /// Signed from Float
+ CVT_UF, /// Unsigned from Float
+ CVT_SS, /// Signed from Signed
+ CVT_SU, /// Signed from Unsigned
+ CVT_US, /// Unsigned from Signed
+ CVT_UU, /// Unsigned from Unsigned
+ CVT_INVALID /// Marker - Invalid opcode
+ };
+
+} // end llvm::ISD namespace
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
new file mode 100644
index 0000000..a404b9b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/IntrinsicLowering.h
@@ -0,0 +1,58 @@
+//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IntrinsicLowering interface. This interface allows
+// addition of domain-specific or front-end specific intrinsics to LLVM without
+// having to modify all of the C backend or interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
+#define LLVM_CODEGEN_INTRINSICLOWERING_H
+
+#include "llvm/IR/Intrinsics.h"
+
+namespace llvm {
+class CallInst;
+class Module;
+class DataLayout;
+
+class IntrinsicLowering {
+ const DataLayout &DL;
+
+ bool Warned;
+
+public:
+ explicit IntrinsicLowering(const DataLayout &DL) : DL(DL), Warned(false) {}
+
+ /// AddPrototypes - This method, if called, causes all of the prototypes
+ /// that might be needed by an intrinsic lowering implementation to be
+ /// inserted into the module specified.
+ void AddPrototypes(Module &M);
+
+ /// LowerIntrinsicCall - This method replaces a call with the LLVM function
+ /// which should be used to implement the specified intrinsic function call.
+ /// If an intrinsic function must be implemented by the code generator
+ /// (such as va_start), this function should print a message and abort.
+ ///
+ /// Otherwise, if an intrinsic function call can be lowered, the code to
+ /// implement it (often a call to a non-intrinsic function) is inserted
+ /// _after_ the call instruction and the call is deleted. The caller must
+ /// be capable of handling this kind of change.
+ ///
+ void LowerIntrinsicCall(CallInst *CI);
+
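+ // A minimal usage sketch (illustrative; assumes a Module &M and a
+ // CallInst *CI that calls an intrinsic the code generator must expand):
+ //   IntrinsicLowering IL(M.getDataLayout());
+ //   IL.AddPrototypes(M);
+ //   IL.LowerIntrinsicCall(CI); // CI is erased; callers must expect that.
+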
+ /// LowerToByteSwap - Replace a call instruction with a call to the bswap
+ /// intrinsic. Return false if it has determined the call is not a
+ /// simple integer bswap.
+ static bool LowerToByteSwap(CallInst *CI);
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
new file mode 100644
index 0000000..f347f66
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -0,0 +1,98 @@
+//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LatencyPriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using latency information to
+// reduce the length of the critical path through the basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+#define LLVM_CODEGEN_LATENCYPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/ScheduleDAG.h"
+
+namespace llvm {
+ class LatencyPriorityQueue;
+
+ /// Sorting functions for the Available queue.
+ struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ LatencyPriorityQueue *PQ;
+ explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ class LatencyPriorityQueue : public SchedulingPriorityQueue {
+ // SUnits - The SUnits for the current graph.
+ std::vector<SUnit> *SUnits;
+
+ /// NumNodesSolelyBlocking - This vector contains, for every node in the
+ /// Queue, the number of nodes that the node is the sole unscheduled
+ /// predecessor for. This is used as a tie-breaker heuristic for better
+ /// mobility.
+ std::vector<unsigned> NumNodesSolelyBlocking;
+
+ /// Queue - The queue.
+ std::vector<SUnit*> Queue;
+ latency_sort Picker;
+
+ public:
+ LatencyPriorityQueue() : Picker(this) {
+ }
+
+ bool isBottomUp() const override { return false; }
+
+ void initNodes(std::vector<SUnit> &sunits) override {
+ SUnits = &sunits;
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void addNode(const SUnit *SU) override {
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void updateNode(const SUnit *SU) override {
+ }
+
+ void releaseState() override {
+ SUnits = nullptr;
+ }
+
+ unsigned getLatency(unsigned NodeNum) const {
+ assert(NodeNum < (*SUnits).size());
+ return (*SUnits)[NodeNum].getHeight();
+ }
+
+ unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+ assert(NodeNum < NumNodesSolelyBlocking.size());
+ return NumNodesSolelyBlocking[NodeNum];
+ }
+
+ bool empty() const override { return Queue.empty(); }
+
+ void push(SUnit *U) override;
+
+ SUnit *pop() override;
+
+ void remove(SUnit *SU) override;
+
+ // scheduledNode - As nodes are scheduled, we look to see if there are any
+ // successor nodes that have a single unscheduled predecessor. If so, that
+ // single predecessor has a higher priority, since scheduling it will make
+ // the node available.
+ void scheduledNode(SUnit *Node) override;
+
+private:
+ void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
+ SUnit *getSingleUnscheduledPred(SUnit *SU);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
new file mode 100644
index 0000000..7d7e48a
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LexicalScopes.h
@@ -0,0 +1,257 @@
+//===- LexicalScopes.h - Collecting lexical scope info ---*- C++ -*--------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements LexicalScopes analysis.
+//
+// This pass collects lexical scope information and maps machine instructions
+// to respective lexical scopes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LEXICALSCOPES_H
+#define LLVM_CODEGEN_LEXICALSCOPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include <unordered_map>
+#include <utility>
+namespace llvm {
+
+class MachineInstr;
+class MachineBasicBlock;
+class MachineFunction;
+
+//===----------------------------------------------------------------------===//
+/// InsnRange - This is used to track range of instructions with identical
+/// lexical scope.
+///
+typedef std::pair<const MachineInstr *, const MachineInstr *> InsnRange;
+
+//===----------------------------------------------------------------------===//
+/// LexicalScope - This class is used to track scope information.
+///
+class LexicalScope {
+
+public:
+ LexicalScope(LexicalScope *P, const DILocalScope *D, const DILocation *I,
+ bool A)
+ : Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
+ LastInsn(nullptr), FirstInsn(nullptr), DFSIn(0), DFSOut(0) {
+ assert((!D || D->isResolved()) && "Expected resolved node");
+ assert((!I || I->isResolved()) && "Expected resolved node");
+ if (Parent)
+ Parent->addChild(this);
+ }
+
+ // Accessors.
+ LexicalScope *getParent() const { return Parent; }
+ const MDNode *getDesc() const { return Desc; }
+ const DILocation *getInlinedAt() const { return InlinedAtLocation; }
+ const DILocalScope *getScopeNode() const { return Desc; }
+ bool isAbstractScope() const { return AbstractScope; }
+ SmallVectorImpl<LexicalScope *> &getChildren() { return Children; }
+ SmallVectorImpl<InsnRange> &getRanges() { return Ranges; }
+
+ /// addChild - Add a child scope.
+ void addChild(LexicalScope *S) { Children.push_back(S); }
+
+ /// openInsnRange - This scope covers instruction range starting from MI.
+ void openInsnRange(const MachineInstr *MI) {
+ if (!FirstInsn)
+ FirstInsn = MI;
+
+ if (Parent)
+ Parent->openInsnRange(MI);
+ }
+
+ /// extendInsnRange - Extend the current instruction range covered by
+ /// this scope.
+ void extendInsnRange(const MachineInstr *MI) {
+ assert(FirstInsn && "MI Range is not open!");
+ LastInsn = MI;
+ if (Parent)
+ Parent->extendInsnRange(MI);
+ }
+
+ /// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
+ /// until now. This is used when a new scope is encountered while walking
+ /// machine instructions.
+ void closeInsnRange(LexicalScope *NewScope = nullptr) {
+ assert(LastInsn && "Last insn missing!");
+ Ranges.push_back(InsnRange(FirstInsn, LastInsn));
+ FirstInsn = nullptr;
+ LastInsn = nullptr;
+ // If Parent dominates NewScope then do not close Parent's instruction
+ // range.
+ if (Parent && (!NewScope || !Parent->dominates(NewScope)))
+ Parent->closeInsnRange(NewScope);
+ }
+
+ /// dominates - Return true if current scope dominates given lexical scope.
+ bool dominates(const LexicalScope *S) const {
+ if (S == this)
+ return true;
+ if (DFSIn < S->getDFSIn() && DFSOut > S->getDFSOut())
+ return true;
+ return false;
+ }
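+
+ // (Illustrative: with DFS entry/exit numbering, an enclosing scope is
+ // entered before and exited after every scope it dominates, e.g. a
+ // parent with (DFSIn, DFSOut) = (1, 8) dominates a child with (2, 3).)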
+
+ // Depth First Search support to walk and manipulate LexicalScope hierarchy.
+ unsigned getDFSOut() const { return DFSOut; }
+ void setDFSOut(unsigned O) { DFSOut = O; }
+ unsigned getDFSIn() const { return DFSIn; }
+ void setDFSIn(unsigned I) { DFSIn = I; }
+
+ /// dump - print lexical scope.
+ void dump(unsigned Indent = 0) const;
+
+private:
+ LexicalScope *Parent; // Parent to this scope.
+ const DILocalScope *Desc; // Debug info descriptor.
+ const DILocation *InlinedAtLocation; // Location at which this
+ // scope is inlined.
+ bool AbstractScope; // Abstract Scope
+ SmallVector<LexicalScope *, 4> Children; // Scopes defined in scope.
+ // Contents not owned.
+ SmallVector<InsnRange, 4> Ranges;
+
+ const MachineInstr *LastInsn; // Last instruction of this scope.
+ const MachineInstr *FirstInsn; // First instruction of this scope.
+ unsigned DFSIn, DFSOut; // In & Out Depth use to determine
+ // scope nesting.
+};
+
+//===----------------------------------------------------------------------===//
+/// LexicalScopes - This class provides an interface to collect and use
+/// lexical scoping information from machine instructions.
+///
+class LexicalScopes {
+public:
+ LexicalScopes() : MF(nullptr), CurrentFnLexicalScope(nullptr) {}
+
+ /// initialize - Scan machine function and construct lexical scope nest;
+ /// resets the instance if necessary.
+ void initialize(const MachineFunction &);
+
+ /// reset - Release memory and reset the instance.
+ void reset();
+
+ /// empty - Return true if there is any lexical scope information available.
+ bool empty() { return CurrentFnLexicalScope == nullptr; }
+
+ /// getCurrentFunctionScope - Return lexical scope for the current function.
+ LexicalScope *getCurrentFunctionScope() const {
+ return CurrentFnLexicalScope;
+ }
+
+ /// getMachineBasicBlocks - Populate the given set with the machine basic
+ /// blocks that contain machine instructions belonging to the lexical
+ /// scope identified by DebugLoc.
+ void getMachineBasicBlocks(const DILocation *DL,
+ SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);
+
+ /// dominates - Return true if DebugLoc's lexical scope dominates at least one
+ /// machine instruction's lexical scope in a given machine basic block.
+ bool dominates(const DILocation *DL, MachineBasicBlock *MBB);
+
+ /// findLexicalScope - Find lexical scope, either regular or inlined, for the
+ /// given DebugLoc. Return NULL if not found.
+ LexicalScope *findLexicalScope(const DILocation *DL);
+
+ /// getAbstractScopesList - Return a reference to list of abstract scopes.
+ ArrayRef<LexicalScope *> getAbstractScopesList() const {
+ return AbstractScopesList;
+ }
+
+ /// findAbstractScope - Find an abstract scope or return null.
+ LexicalScope *findAbstractScope(const DILocalScope *N) {
+ auto I = AbstractScopeMap.find(N);
+ return I != AbstractScopeMap.end() ? &I->second : nullptr;
+ }
+
+ /// findInlinedScope - Find an inlined scope for the given scope/inlined-at.
+ LexicalScope *findInlinedScope(const DILocalScope *N, const DILocation *IA) {
+ auto I = InlinedLexicalScopeMap.find(std::make_pair(N, IA));
+ return I != InlinedLexicalScopeMap.end() ? &I->second : nullptr;
+ }
+
+ /// findLexicalScope - Find regular lexical scope or return null.
+ LexicalScope *findLexicalScope(const DILocalScope *N) {
+ auto I = LexicalScopeMap.find(N);
+ return I != LexicalScopeMap.end() ? &I->second : nullptr;
+ }
+
+ /// dump - Print data structures to dbgs().
+ void dump();
+
+ /// getOrCreateAbstractScope - Find or create an abstract lexical scope.
+ LexicalScope *getOrCreateAbstractScope(const DILocalScope *Scope);
+
+private:
+ /// getOrCreateLexicalScope - Find lexical scope for the given Scope/IA. If
+ /// not available then create new lexical scope.
+ LexicalScope *getOrCreateLexicalScope(const DILocalScope *Scope,
+ const DILocation *IA = nullptr);
+ LexicalScope *getOrCreateLexicalScope(const DILocation *DL) {
+ return DL ? getOrCreateLexicalScope(DL->getScope(), DL->getInlinedAt())
+ : nullptr;
+ }
+
+ /// getOrCreateRegularScope - Find or create a regular lexical scope.
+ LexicalScope *getOrCreateRegularScope(const DILocalScope *Scope);
+
+ /// getOrCreateInlinedScope - Find or create an inlined lexical scope.
+ LexicalScope *getOrCreateInlinedScope(const DILocalScope *Scope,
+ const DILocation *InlinedAt);
+
+ /// extractLexicalScopes - Extract instruction ranges for each lexical
+ /// scope in the given machine function.
+ void extractLexicalScopes(SmallVectorImpl<InsnRange> &MIRanges,
+ DenseMap<const MachineInstr *, LexicalScope *> &M);
+ void constructScopeNest(LexicalScope *Scope);
+ void
+ assignInstructionRanges(SmallVectorImpl<InsnRange> &MIRanges,
+ DenseMap<const MachineInstr *, LexicalScope *> &M);
+
+private:
+ const MachineFunction *MF;
+
+ /// LexicalScopeMap - Tracks the scopes in the current function.
+ // Use an unordered_map to ensure value pointer validity over insertion.
+ std::unordered_map<const DILocalScope *, LexicalScope> LexicalScopeMap;
+
+ /// InlinedLexicalScopeMap - Tracks inlined function scopes in current
+ /// function.
+ std::unordered_map<std::pair<const DILocalScope *, const DILocation *>,
+ LexicalScope,
+ pair_hash<const DILocalScope *, const DILocation *>>
+ InlinedLexicalScopeMap;
+
+ /// AbstractScopeMap - These scopes are not included in LexicalScopeMap.
+ // Use an unordered_map to ensure value pointer validity over insertion.
+ std::unordered_map<const DILocalScope *, LexicalScope> AbstractScopeMap;
+
+ /// AbstractScopesList - Tracks abstract scopes constructed while processing
+ /// a function.
+ SmallVector<LexicalScope *, 4> AbstractScopesList;
+
+ /// CurrentFnLexicalScope - Top level scope for the current function.
+ ///
+ LexicalScope *CurrentFnLexicalScope;
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
new file mode 100644
index 0000000..c3046da
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -0,0 +1,38 @@
+//===- llvm/CodeGen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all assembler writer related passes for tools like
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include <cstdlib>
+
+namespace {
+ struct ForceAsmWriterLinking {
+ ForceAsmWriterLinking() {
+ // We must reference the plug-ins in such a way that compilers will not
+ // delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough
+ // to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ llvm::linkOcamlGCPrinter();
+ llvm::linkErlangGCPrinter();
+
+ }
+ } ForceAsmWriterLinking; // Force link by creating a global definition.
+}
+
+#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
diff --git a/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
new file mode 100644
index 0000000..fee131e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -0,0 +1,59 @@
+//===- llvm/CodeGen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all codegen related passes for tools like lli and
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cstdlib>
+
+namespace {
+ struct ForceCodegenLinking {
+ ForceCodegenLinking() {
+ // We must reference the passes in such a way that compilers will not
+ // delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough
+ // to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ (void) llvm::createFastRegisterAllocator();
+ (void) llvm::createBasicRegisterAllocator();
+ (void) llvm::createGreedyRegisterAllocator();
+ (void) llvm::createDefaultPBQPRegisterAllocator();
+
+ llvm::linkCoreCLRGC();
+ llvm::linkOcamlGC();
+ llvm::linkErlangGC();
+ llvm::linkShadowStackGC();
+ llvm::linkStatepointExampleGC();
+
+ (void) llvm::createBURRListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createSourceListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createHybridListDAGScheduler(nullptr,
+ llvm::CodeGenOpt::Default);
+ (void) llvm::createFastDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+ (void) llvm::createDefaultScheduler(nullptr, llvm::CodeGenOpt::Default);
+ (void) llvm::createVLIWDAGScheduler(nullptr, llvm::CodeGenOpt::Default);
+
+ }
+ } ForceCodegenLinking; // Force link by creating a global definition.
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveInterval.h b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
new file mode 100644
index 0000000..0157bf9
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveInterval.h
@@ -0,0 +1,868 @@
+//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveRange and LiveInterval classes. Given some
+// numbering of each of the machine instructions, an interval [i, j) is said to be a
+// live range for register v if there is no instruction with number j' >= j
+// such that v is live at j' and there is no instruction with number i' < i such
+// that v is live at i'. In this implementation ranges can have holes,
+// i.e. a range might look like [1,20), [50,65), [1000,1001). Each
+// individual segment is represented as an instance of LiveRange::Segment,
+// and the whole range is represented as an instance of LiveRange.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
+#define LLVM_CODEGEN_LIVEINTERVAL_H
+
+#include "llvm/ADT/IntEqClasses.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <cassert>
+#include <climits>
+#include <set>
+
+namespace llvm {
+ class CoalescerPair;
+ class LiveIntervals;
+ class MachineInstr;
+ class MachineRegisterInfo;
+ class TargetRegisterInfo;
+ class raw_ostream;
+ template <typename T, unsigned Small> class SmallPtrSet;
+
+ /// VNInfo - Value Number Information.
+ /// This class holds information about a machine level value, including
+ /// definition and use points.
+ ///
+ class VNInfo {
+ public:
+ typedef BumpPtrAllocator Allocator;
+
+ /// The ID number of this value.
+ unsigned id;
+
+ /// The index of the defining instruction.
+ SlotIndex def;
+
+ /// VNInfo constructor.
+ VNInfo(unsigned i, SlotIndex d)
+ : id(i), def(d)
+ { }
+
+ /// VNInfo constructor; copies values from orig, except for the value number.
+ VNInfo(unsigned i, const VNInfo &orig)
+ : id(i), def(orig.def)
+ { }
+
+ /// Copy from the parameter into this VNInfo.
+ void copyFrom(VNInfo &src) {
+ def = src.def;
+ }
+
+ /// Returns true if this value is defined by a PHI instruction (or was,
+ /// PHI instructions may have been eliminated).
+ /// PHI-defs begin at a block boundary, all other defs begin at register or
+ /// EC slots.
+ bool isPHIDef() const { return def.isBlock(); }
+
+ /// Returns true if this value is unused.
+ bool isUnused() const { return !def.isValid(); }
+
+ /// Mark this value as unused.
+ void markUnused() { def = SlotIndex(); }
+ };
+
+ /// Result of a LiveRange query. This class hides the implementation details
+ /// of live ranges, and it should be used as the primary interface for
+ /// examining live ranges around instructions.
+ class LiveQueryResult {
+ VNInfo *const EarlyVal;
+ VNInfo *const LateVal;
+ const SlotIndex EndPoint;
+ const bool Kill;
+
+ public:
+ LiveQueryResult(VNInfo *EarlyVal, VNInfo *LateVal, SlotIndex EndPoint,
+ bool Kill)
+ : EarlyVal(EarlyVal), LateVal(LateVal), EndPoint(EndPoint), Kill(Kill)
+ {}
+
+ /// Return the value that is live-in to the instruction. This is the value
+ /// that will be read by the instruction's use operands. Return NULL if no
+ /// value is live-in.
+ VNInfo *valueIn() const {
+ return EarlyVal;
+ }
+
+ /// Return true if the live-in value is killed by this instruction. This
+ /// means that either the live range ends at the instruction, or it changes
+ /// value.
+ bool isKill() const {
+ return Kill;
+ }
+
+ /// Return true if this instruction has a dead def.
+ bool isDeadDef() const {
+ return EndPoint.isDead();
+ }
+
+ /// Return the value leaving the instruction, if any. This can be a
+ /// live-through value, or a live def. A dead def returns NULL.
+ VNInfo *valueOut() const {
+ return isDeadDef() ? nullptr : LateVal;
+ }
+
+ /// Returns the value alive at the end of the instruction, if any. This can
+ /// be a live-through value, a live def or a dead def.
+ VNInfo *valueOutOrDead() const {
+ return LateVal;
+ }
+
+ /// Return the value defined by this instruction, if any. This includes
+ /// dead defs, it is the value created by the instruction's def operands.
+ VNInfo *valueDefined() const {
+ return EarlyVal == LateVal ? nullptr : LateVal;
+ }
+
+ /// Return the end point of the last live range segment to interact with
+ /// the instruction, if any.
+ ///
+ /// The end point is an invalid SlotIndex only if the live range doesn't
+ /// intersect the instruction at all.
+ ///
+ /// The end point may be at or past the end of the instruction's basic
+ /// block. That means the value was live out of the block.
+ SlotIndex endPoint() const {
+ return EndPoint;
+ }
+ };
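+
+ // A typical query sketch (illustrative; it assumes the
+ // LiveRange::Query(SlotIndex) helper declared further down this header):
+ //   LiveQueryResult LRQ = LI.Query(Idx);
+ //   if (VNInfo *VNI = LRQ.valueIn())
+ //     ...; // value read by the instruction at Idx
+ //   if (LRQ.isKill())
+ //     ...; // the live-in value dies at this instruction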
+
+ /// This class represents the liveness of a register, stack slot, etc.
+ /// It manages an ordered list of Segment objects.
+ /// The Segments are organized in a static single assignment form: At places
+ /// where a new value is defined or different values reach a CFG join a new
+ /// segment with a new value number is used.
+ class LiveRange {
+ public:
+
+ /// This represents a simple continuous liveness interval for a value.
+ /// The start point is inclusive, the end point exclusive. These intervals
+ /// are rendered as [start,end).
+ struct Segment {
+ SlotIndex start; // Start point of the interval (inclusive)
+ SlotIndex end; // End point of the interval (exclusive)
+ VNInfo *valno; // identifier for the value contained in this segment.
+
+ Segment() : valno(nullptr) {}
+
+ Segment(SlotIndex S, SlotIndex E, VNInfo *V)
+ : start(S), end(E), valno(V) {
+ assert(S < E && "Cannot create empty or backwards segment");
+ }
+
+ /// Return true if the index is covered by this segment.
+ bool contains(SlotIndex I) const {
+ return start <= I && I < end;
+ }
+
+ /// Return true if the given interval, [S, E), is covered by this segment.
+ bool containsInterval(SlotIndex S, SlotIndex E) const {
+ assert((S < E) && "Backwards interval?");
+ return (start <= S && S < end) && (start < E && E <= end);
+ }
+
+ bool operator<(const Segment &Other) const {
+ return std::tie(start, end) < std::tie(Other.start, Other.end);
+ }
+ bool operator==(const Segment &Other) const {
+ return start == Other.start && end == Other.end;
+ }
+
+ void dump() const;
+ };
+
+ typedef SmallVector<Segment,4> Segments;
+ typedef SmallVector<VNInfo*,4> VNInfoList;
+
+ Segments segments; // the liveness segments
+ VNInfoList valnos; // value#'s
+
+ // The segment set is used temporarily to accelerate initial computation
+ // of live ranges of physical registers in computeRegUnitRange.
+ // After that the set is flushed to the segment vector and deleted.
+ typedef std::set<Segment> SegmentSet;
+ std::unique_ptr<SegmentSet> segmentSet;
+
+ typedef Segments::iterator iterator;
+ iterator begin() { return segments.begin(); }
+ iterator end() { return segments.end(); }
+
+ typedef Segments::const_iterator const_iterator;
+ const_iterator begin() const { return segments.begin(); }
+ const_iterator end() const { return segments.end(); }
+
+ typedef VNInfoList::iterator vni_iterator;
+ vni_iterator vni_begin() { return valnos.begin(); }
+ vni_iterator vni_end() { return valnos.end(); }
+
+ typedef VNInfoList::const_iterator const_vni_iterator;
+ const_vni_iterator vni_begin() const { return valnos.begin(); }
+ const_vni_iterator vni_end() const { return valnos.end(); }
+
+ /// Constructs a new LiveRange object.
+ LiveRange(bool UseSegmentSet = false)
+ : segmentSet(UseSegmentSet ? llvm::make_unique<SegmentSet>()
+ : nullptr) {}
+
+ /// Constructs a new LiveRange object by copying segments and valnos from
+ /// another LiveRange.
+ LiveRange(const LiveRange &Other, BumpPtrAllocator &Allocator) {
+ assert(Other.segmentSet == nullptr &&
+ "Copying of LiveRanges with active SegmentSets is not supported");
+
+ // Duplicate valnos.
+ for (const VNInfo *VNI : Other.valnos) {
+ createValueCopy(VNI, Allocator);
+ }
+ // Now we can copy segments and remap their valnos.
+ for (const Segment &S : Other.segments) {
+ segments.push_back(Segment(S.start, S.end, valnos[S.valno->id]));
+ }
+ }
+
+ /// advanceTo - Advance the specified iterator to point to the Segment
+ /// containing the specified position, or end() if the position is past the
+ /// end of the range. If no Segment contains this position, but the
+ /// position is in a hole, this method returns an iterator pointing to the
+ /// Segment immediately after the hole.
+ iterator advanceTo(iterator I, SlotIndex Pos) {
+ assert(I != end());
+ if (Pos >= endIndex())
+ return end();
+ while (I->end <= Pos) ++I;
+ return I;
+ }
+
+ const_iterator advanceTo(const_iterator I, SlotIndex Pos) const {
+ assert(I != end());
+ if (Pos >= endIndex())
+ return end();
+ while (I->end <= Pos) ++I;
+ return I;
+ }
+
+ /// find - Return an iterator pointing to the first segment that ends after
+ /// Pos, or end(). This is the same as advanceTo(begin(), Pos), but faster
+ /// when searching large ranges.
+ ///
+ /// If Pos is contained in a Segment, that segment is returned.
+ /// If Pos is in a hole, the following Segment is returned.
+ /// If Pos is beyond endIndex, end() is returned.
+ iterator find(SlotIndex Pos);
+
+ const_iterator find(SlotIndex Pos) const {
+ return const_cast<LiveRange*>(this)->find(Pos);
+ }
+
+ void clear() {
+ valnos.clear();
+ segments.clear();
+ }
+
+ size_t size() const {
+ return segments.size();
+ }
+
+ bool hasAtLeastOneValue() const { return !valnos.empty(); }
+
+ bool containsOneValue() const { return valnos.size() == 1; }
+
+ unsigned getNumValNums() const { return (unsigned)valnos.size(); }
+
+ /// getValNumInfo - Returns pointer to the specified val#.
+ ///
+ inline VNInfo *getValNumInfo(unsigned ValNo) {
+ return valnos[ValNo];
+ }
+ inline const VNInfo *getValNumInfo(unsigned ValNo) const {
+ return valnos[ValNo];
+ }
+
+ /// containsValue - Returns true if VNI belongs to this range.
+ bool containsValue(const VNInfo *VNI) const {
+ return VNI && VNI->id < getNumValNums() && VNI == getValNumInfo(VNI->id);
+ }
+
+ /// getNextValue - Create a new value number and return it. Def specifies
+ /// the instruction that defines the value number.
+ VNInfo *getNextValue(SlotIndex def, VNInfo::Allocator &VNInfoAllocator) {
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), def);
+ valnos.push_back(VNI);
+ return VNI;
+ }
+
+ /// createDeadDef - Make sure the range has a value defined at Def.
+ /// If one already exists, return it. Otherwise allocate a new value and
+ /// add liveness for a dead def.
+ VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
+
+ /// Create a copy of the given value. The new value will be identical except
+ /// for the Value number.
+ VNInfo *createValueCopy(const VNInfo *orig,
+ VNInfo::Allocator &VNInfoAllocator) {
+ VNInfo *VNI =
+ new (VNInfoAllocator) VNInfo((unsigned)valnos.size(), *orig);
+ valnos.push_back(VNI);
+ return VNI;
+ }
+
+ /// RenumberValues - Renumber all values in order of appearance and remove
+ /// unused values.
+ void RenumberValues();
+
+ /// MergeValueNumberInto - This method is called when two value numbers
+ /// are found to be equivalent. This eliminates V1, replacing all
+ /// segments with the V1 value number with the V2 value number. This can
+ /// cause merging of V1/V2 values numbers and compaction of the value space.
+ VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
+
+ /// Merge all of the live segments of a specific val# in RHS into this live
+ /// range as the specified value number. The segments in RHS are allowed
+ /// to overlap with segments in the current range; it will replace the
+ /// value numbers of the overlapped live segments with the specified value
+ /// number.
+ void MergeSegmentsInAsValue(const LiveRange &RHS, VNInfo *LHSValNo);
+
+ /// MergeValueInAsValue - Merge all of the segments of a specific val#
+ /// in RHS into this live range as the specified value number.
+ /// The segments in RHS are allowed to overlap with segments in the
+ /// current range, but only if the overlapping segments have the
+ /// specified value number.
+ void MergeValueInAsValue(const LiveRange &RHS,
+ const VNInfo *RHSValNo, VNInfo *LHSValNo);
+
+ bool empty() const { return segments.empty(); }
+
+ /// beginIndex - Return the lowest numbered slot covered.
+ SlotIndex beginIndex() const {
+ assert(!empty() && "Call to beginIndex() on empty range.");
+ return segments.front().start;
+ }
+
+ /// endIndex - Return the maximum point of the range of the whole,
+ /// exclusive.
+ SlotIndex endIndex() const {
+ assert(!empty() && "Call to endIndex() on empty range.");
+ return segments.back().end;
+ }
+
+ bool expiredAt(SlotIndex index) const {
+ return index >= endIndex();
+ }
+
+ bool liveAt(SlotIndex index) const {
+ const_iterator r = find(index);
+ return r != end() && r->start <= index;
+ }
+
+ /// Return the segment that contains the specified index, or null if there
+ /// is none.
+ const Segment *getSegmentContaining(SlotIndex Idx) const {
+ const_iterator I = FindSegmentContaining(Idx);
+ return I == end() ? nullptr : &*I;
+ }
+
+ /// Return the live segment that contains the specified index, or null if
+ /// there is none.
+ Segment *getSegmentContaining(SlotIndex Idx) {
+ iterator I = FindSegmentContaining(Idx);
+ return I == end() ? nullptr : &*I;
+ }
+
+ /// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
+ VNInfo *getVNInfoAt(SlotIndex Idx) const {
+ const_iterator I = FindSegmentContaining(Idx);
+ return I == end() ? nullptr : I->valno;
+ }
+
+ /// getVNInfoBefore - Return the VNInfo that is live up to but not
+ /// necessarily including Idx, or NULL. Use this to find the reaching def
+ /// used by an instruction at this SlotIndex position.
+ VNInfo *getVNInfoBefore(SlotIndex Idx) const {
+ const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
+ return I == end() ? nullptr : I->valno;
+ }
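+
+ /// For example, the reaching def at a use instruction can be found like
+ /// this (a sketch; LR, LIS and MI stand in for the caller's live range,
+ /// LiveIntervals analysis, and use instruction):
+ ///
+ ///   VNInfo *Reaching = LR.getVNInfoBefore(LIS.getInstructionIndex(MI));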
+
+ /// Return an iterator to the segment that contains the specified index, or
+ /// end() if there is none.
+ iterator FindSegmentContaining(SlotIndex Idx) {
+ iterator I = find(Idx);
+ return I != end() && I->start <= Idx ? I : end();
+ }
+
+ const_iterator FindSegmentContaining(SlotIndex Idx) const {
+ const_iterator I = find(Idx);
+ return I != end() && I->start <= Idx ? I : end();
+ }
+
+ /// overlaps - Return true if the intersection of the two live ranges is
+ /// not empty.
+ bool overlaps(const LiveRange &other) const {
+ if (other.empty())
+ return false;
+ return overlapsFrom(other, other.begin());
+ }
+
+ /// overlaps - Return true if the two ranges have overlapping segments
+ /// that are not coalescable according to CP.
+ ///
+ /// Overlapping segments where one range is defined by a coalescable
+ /// copy are allowed.
+ bool overlaps(const LiveRange &Other, const CoalescerPair &CP,
+ const SlotIndexes&) const;
+
+ /// overlaps - Return true if the live range overlaps an interval specified
+ /// by [Start, End).
+ bool overlaps(SlotIndex Start, SlotIndex End) const;
+
+ /// overlapsFrom - Return true if the intersection of the two live ranges
+ /// is not empty. The specified iterator is a hint that we can begin
+ /// scanning the Other range starting at I.
+ bool overlapsFrom(const LiveRange &Other, const_iterator I) const;
+
+ /// Returns true if all segments of the @p Other live range are completely
+ /// covered by this live range.
+ /// Adjacent live ranges do not affect the covering: the live range
+ /// [1,5](5,10] covers (3,7].
+ bool covers(const LiveRange &Other) const;
+
+ /// Add the specified Segment to this range, merging segments as
+ /// appropriate. This returns an iterator to the inserted segment (which
+ /// may have grown since it was inserted).
+ iterator addSegment(Segment S);
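+
+ /// A minimal sketch of building a range by hand, assuming DefIdx, KillIdx
+ /// and VNIAlloc are a def slot, an end slot, and a VNInfo::Allocator
+ /// available in the caller:
+ ///
+ ///   VNInfo *VNI = LR.getNextValue(DefIdx, VNIAlloc);
+ ///   LR.addSegment(LiveRange::Segment(DefIdx, KillIdx, VNI));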
+
+ /// If this range is live before @p Use in the basic block that starts at
+ /// @p StartIdx, extend it to be live up to @p Use, and return the value. If
+ /// there is no segment before @p Use, return nullptr.
+ VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use);
+
+ /// join - Join two live ranges (this, and other) together. This applies
+ /// mappings to the value numbers in the LHS/RHS ranges as specified. If
+ /// the ranges are not joinable, this aborts.
+ void join(LiveRange &Other,
+ const int *ValNoAssignments,
+ const int *RHSValNoAssignments,
+ SmallVectorImpl<VNInfo *> &NewVNInfo);
+
+ /// True iff this live range is a single segment that lies between the
+ /// specified boundaries, exclusively. Vregs live across a backedge are not
+ /// considered local. The boundaries are expected to lie within an extended
+ /// basic block, so vregs that are not live out should contain no holes.
+ bool isLocal(SlotIndex Start, SlotIndex End) const {
+ return beginIndex() > Start.getBaseIndex() &&
+ endIndex() < End.getBoundaryIndex();
+ }
+
+ /// Remove the specified segment from this range. Note that the removed
+ /// region must lie entirely within a single existing Segment.
+ void removeSegment(SlotIndex Start, SlotIndex End,
+ bool RemoveDeadValNo = false);
+
+ void removeSegment(Segment S, bool RemoveDeadValNo = false) {
+ removeSegment(S.start, S.end, RemoveDeadValNo);
+ }
+
+ /// Remove segment pointed to by iterator @p I from this range. This does
+ /// not remove dead value numbers.
+ iterator removeSegment(iterator I) {
+ return segments.erase(I);
+ }
+
+ /// Query Liveness at Idx.
+ /// The sub-instruction slot of Idx doesn't matter, only the instruction
+ /// it refers to is considered.
+ LiveQueryResult Query(SlotIndex Idx) const {
+ // Find the segment that enters the instruction.
+ const_iterator I = find(Idx.getBaseIndex());
+ const_iterator E = end();
+ if (I == E)
+ return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
+
+ // Is this an instruction live-in segment?
+ // If Idx is the start index of a basic block, include live-in segments
+ // that start at Idx.getBaseIndex().
+ VNInfo *EarlyVal = nullptr;
+ VNInfo *LateVal = nullptr;
+ SlotIndex EndPoint;
+ bool Kill = false;
+ if (I->start <= Idx.getBaseIndex()) {
+ EarlyVal = I->valno;
+ EndPoint = I->end;
+ // Move to the potentially live-out segment.
+ if (SlotIndex::isSameInstr(Idx, I->end)) {
+ Kill = true;
+ if (++I == E)
+ return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+ }
+ // Special case: A PHIDef value can have its def in the middle of a
+ // segment if the value happens to be live out of the layout
+ // predecessor.
+ // Such a value is not live-in.
+ if (EarlyVal->def == Idx.getBaseIndex())
+ EarlyVal = nullptr;
+ }
+ // I now points to the segment that may be live-through, or defined by
+ // this instr. Ignore segments starting after the current instr.
+ if (!SlotIndex::isEarlierInstr(Idx, I->start)) {
+ LateVal = I->valno;
+ EndPoint = I->end;
+ }
+ return LiveQueryResult(EarlyVal, LateVal, EndPoint, Kill);
+ }
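+
+ /// A sketch of a typical liveness query, assuming Idx is the SlotIndex of
+ /// an instruction of interest:
+ ///
+ ///   LiveQueryResult LRQ = LR.Query(Idx);
+ ///   if (VNInfo *VNI = LRQ.valueIn()) {
+ ///     // LR is live-in at the instruction; VNI is the reaching value.
+ ///   }
+ ///   if (LRQ.isKill()) {
+ ///     // The live range ends at this instruction.
+ ///   }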
+
+ /// removeValNo - Remove all the segments defined by the specified value#.
+ /// Also remove the value# from value# list.
+ void removeValNo(VNInfo *ValNo);
+
+ /// Returns true if the live range is zero length, i.e. no live segments
+ /// span instructions. It doesn't pay to spill such a range.
+ bool isZeroLength(SlotIndexes *Indexes) const {
+ for (const Segment &S : segments)
+ if (Indexes->getNextNonNullIndex(S.start).getBaseIndex() <
+ S.end.getBaseIndex())
+ return false;
+ return true;
+ }
+
+ bool operator<(const LiveRange& other) const {
+ const SlotIndex &thisIndex = beginIndex();
+ const SlotIndex &otherIndex = other.beginIndex();
+ return thisIndex < otherIndex;
+ }
+
+ /// Flush segment set into the regular segment vector.
+ /// This method should be called after the live range has been created, if
+ /// use of the segment set was enabled in the constructor of the live range.
+ void flushSegmentSet();
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Walk the range and assert if any invariants fail to hold.
+ ///
+ /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+ void verify() const {}
+#else
+ void verify() const;
+#endif
+
+ protected:
+ /// Append a segment to the list of segments.
+ void append(const LiveRange::Segment S);
+
+ private:
+ friend class LiveRangeUpdater;
+ void addSegmentToSet(Segment S);
+ void markValNoForDeletion(VNInfo *V);
+
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const LiveRange &LR) {
+ LR.print(OS);
+ return OS;
+ }
+
+ /// LiveInterval - This class represents the liveness of a register
+ /// or stack slot.
+ class LiveInterval : public LiveRange {
+ public:
+ typedef LiveRange super;
+
+ /// A live range for subregisters. The LaneMask specifies which parts of the
+ /// super register are covered by the interval.
+ /// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
+ class SubRange : public LiveRange {
+ public:
+ SubRange *Next;
+ LaneBitmask LaneMask;
+
+ /// Constructs a new SubRange object.
+ SubRange(LaneBitmask LaneMask)
+ : Next(nullptr), LaneMask(LaneMask) {
+ }
+
+ /// Constructs a new SubRange object by copying liveness from @p Other.
+ SubRange(LaneBitmask LaneMask, const LiveRange &Other,
+ BumpPtrAllocator &Allocator)
+ : LiveRange(Other, Allocator), Next(nullptr), LaneMask(LaneMask) {
+ }
+ };
+
+ private:
+ SubRange *SubRanges; ///< Single linked list of subregister live ranges.
+
+ public:
+ const unsigned reg; // the register or stack slot of this interval.
+ float weight; // weight of this interval
+
+ LiveInterval(unsigned Reg, float Weight)
+ : SubRanges(nullptr), reg(Reg), weight(Weight) {}
+
+ ~LiveInterval() {
+ clearSubRanges();
+ }
+
+ template<typename T>
+ class SingleLinkedListIterator {
+ T *P;
+ public:
+ SingleLinkedListIterator<T>(T *P) : P(P) {}
+ SingleLinkedListIterator<T> &operator++() {
+ P = P->Next;
+ return *this;
+ }
+ SingleLinkedListIterator<T> operator++(int) {
+ SingleLinkedListIterator res = *this;
+ ++*this;
+ return res;
+ }
+ bool operator!=(const SingleLinkedListIterator<T> &Other) {
+ return P != Other.operator->();
+ }
+ bool operator==(const SingleLinkedListIterator<T> &Other) {
+ return P == Other.operator->();
+ }
+ T &operator*() const {
+ return *P;
+ }
+ T *operator->() const {
+ return P;
+ }
+ };
+
+ typedef SingleLinkedListIterator<SubRange> subrange_iterator;
+ subrange_iterator subrange_begin() {
+ return subrange_iterator(SubRanges);
+ }
+ subrange_iterator subrange_end() {
+ return subrange_iterator(nullptr);
+ }
+
+ typedef SingleLinkedListIterator<const SubRange> const_subrange_iterator;
+ const_subrange_iterator subrange_begin() const {
+ return const_subrange_iterator(SubRanges);
+ }
+ const_subrange_iterator subrange_end() const {
+ return const_subrange_iterator(nullptr);
+ }
+
+ iterator_range<subrange_iterator> subranges() {
+ return make_range(subrange_begin(), subrange_end());
+ }
+
+ iterator_range<const_subrange_iterator> subranges() const {
+ return make_range(subrange_begin(), subrange_end());
+ }
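+
+ /// Subregister liveness is typically inspected with the range-based loop
+ /// below (a sketch; LI is an interval with subregister liveness):
+ ///
+ ///   for (const LiveInterval::SubRange &S : LI.subranges()) {
+ ///     // S.LaneMask identifies the lanes of the register that S covers.
+ ///   }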
+
+ /// Creates a new empty subregister live range. The range is added at the
+ /// beginning of the subrange list; subrange iterators stay valid.
+ SubRange *createSubRange(BumpPtrAllocator &Allocator,
+ LaneBitmask LaneMask) {
+ SubRange *Range = new (Allocator) SubRange(LaneMask);
+ appendSubRange(Range);
+ return Range;
+ }
+
+ /// Like createSubRange() but the new range is filled with a copy of the
+ /// liveness information in @p CopyFrom.
+ SubRange *createSubRangeFrom(BumpPtrAllocator &Allocator,
+ LaneBitmask LaneMask,
+ const LiveRange &CopyFrom) {
+ SubRange *Range = new (Allocator) SubRange(LaneMask, CopyFrom, Allocator);
+ appendSubRange(Range);
+ return Range;
+ }
+
+ /// Returns true if subregister liveness information is available.
+ bool hasSubRanges() const {
+ return SubRanges != nullptr;
+ }
+
+ /// Removes all subregister liveness information.
+ void clearSubRanges();
+
+ /// Removes all subranges without any segments (subranges without segments
+ /// are not considered valid and should only exist temporarily).
+ void removeEmptySubRanges();
+
+ /// Construct the main live range by merging this interval's SubRanges.
+ void constructMainRangeFromSubranges(const SlotIndexes &Indexes,
+ VNInfo::Allocator &VNIAllocator);
+
+ /// getSize - Returns the sum of sizes of all segments in the LiveRange.
+ ///
+ unsigned getSize() const;
+
+ /// isSpillable - Can this interval be spilled?
+ bool isSpillable() const {
+ return weight != llvm::huge_valf;
+ }
+
+ /// markNotSpillable - Mark interval as not spillable
+ void markNotSpillable() {
+ weight = llvm::huge_valf;
+ }
+
+ bool operator<(const LiveInterval& other) const {
+ const SlotIndex &thisIndex = beginIndex();
+ const SlotIndex &otherIndex = other.beginIndex();
+ return std::tie(thisIndex, reg) < std::tie(otherIndex, other.reg);
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Walks the interval and asserts if any invariants fail to hold.
+ ///
+ /// Note that this is a no-op when asserts are disabled.
+#ifdef NDEBUG
+ void verify(const MachineRegisterInfo *MRI = nullptr) const {}
+#else
+ void verify(const MachineRegisterInfo *MRI = nullptr) const;
+#endif
+
+ private:
+ /// Appends @p Range to SubRanges list.
+ void appendSubRange(SubRange *Range) {
+ Range->Next = SubRanges;
+ SubRanges = Range;
+ }
+
+ /// Free memory held by SubRange.
+ void freeSubRange(SubRange *S);
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const LiveInterval &LI) {
+ LI.print(OS);
+ return OS;
+ }
+
+ raw_ostream &operator<<(raw_ostream &OS, const LiveRange::Segment &S);
+
+ inline bool operator<(SlotIndex V, const LiveRange::Segment &S) {
+ return V < S.start;
+ }
+
+ inline bool operator<(const LiveRange::Segment &S, SlotIndex V) {
+ return S.start < V;
+ }
+
+ /// Helper class for performant LiveRange bulk updates.
+ ///
+ /// Calling LiveRange::addSegment() repeatedly can be expensive on large
+ /// live ranges because segments after the insertion point may need to be
+ /// shifted. The LiveRangeUpdater class can defer the shifting when adding
+ /// many segments in order.
+ ///
+ /// The LiveRange will be in an invalid state until flush() is called.
+ class LiveRangeUpdater {
+ LiveRange *LR;
+ SlotIndex LastStart;
+ LiveRange::iterator WriteI;
+ LiveRange::iterator ReadI;
+ SmallVector<LiveRange::Segment, 16> Spills;
+ void mergeSpills();
+
+ public:
+ /// Create a LiveRangeUpdater for adding segments to LR.
+ /// LR will temporarily be in an invalid state until flush() is called.
+ LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
+
+ ~LiveRangeUpdater() { flush(); }
+
+ /// Add a segment to LR and coalesce when possible, just like
+ /// LR.addSegment(). Segments should be added in increasing start order for
+ /// best performance.
+ void add(LiveRange::Segment);
+
+ void add(SlotIndex Start, SlotIndex End, VNInfo *VNI) {
+ add(LiveRange::Segment(Start, End, VNI));
+ }
+
+ /// Return true if the LR is currently in an invalid state, and flush()
+ /// needs to be called.
+ bool isDirty() const { return LastStart.isValid(); }
+
+ /// Flush the updater state to LR so it is valid and contains all added
+ /// segments.
+ void flush();
+
+ /// Select a different destination live range.
+ void setDest(LiveRange *lr) {
+ if (LR != lr && isDirty())
+ flush();
+ LR = lr;
+ }
+
+ /// Get the current destination live range.
+ LiveRange *getDest() const { return LR; }
+
+ void dump() const;
+ void print(raw_ostream&) const;
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const LiveRangeUpdater &X) {
+ X.print(OS);
+ return OS;
+ }
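+
+ /// A sketch of the intended bulk-update pattern, assuming NewSegments is a
+ /// container of segments sorted by increasing start index:
+ ///
+ ///   LiveRangeUpdater Updater(&LR);
+ ///   for (const LiveRange::Segment &S : NewSegments)
+ ///     Updater.add(S);
+ ///   Updater.flush(); // the destructor would also flush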
+
+ /// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
+ /// LiveInterval into equivalence classes of connected components. A
+ /// LiveInterval that has multiple connected components can be broken into
+ /// multiple LiveIntervals.
+ ///
+ /// Given a LiveInterval that may have multiple connected components, run:
+ ///
+ /// unsigned numComps = ConEQ.Classify(LI);
+ /// if (numComps > 1) {
+ /// // allocate numComps-1 new LiveIntervals into LIS[1..]
+ /// ConEQ.Distribute(LIS);
+ /// }
+
+ class ConnectedVNInfoEqClasses {
+ LiveIntervals &LIS;
+ IntEqClasses EqClass;
+
+ public:
+ explicit ConnectedVNInfoEqClasses(LiveIntervals &lis) : LIS(lis) {}
+
+ /// Classify - Classify the values in LI into connected components.
+ /// Return the number of connected components.
+ unsigned Classify(const LiveInterval *LI);
+
+ /// getEqClass - Classify creates equivalence classes numbered 0..N. Return
+ /// the equivalence class assigned the VNI.
+ unsigned getEqClass(const VNInfo *VNI) const { return EqClass[VNI->id]; }
+
+ /// Distribute values in \p LI into separate LiveIntervals, one for each
+ /// connected component. LIV must have an empty LiveInterval for each
+ /// additional connected component. The first connected component is left
+ /// in \p LI.
+ void Distribute(LiveInterval &LI, LiveInterval *LIV[],
+ MachineRegisterInfo &MRI);
+ };
+
+}
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
new file mode 100644
index 0000000..87421e2
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -0,0 +1,449 @@
+//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveInterval analysis pass. Given some numbering
+// of each of the machine instructions (in this implementation, depth-first
+// order) an
+// interval [i, j) is said to be a live interval for register v if there is no
+// instruction with number j' > j such that v is live at j' and there is no
+// instruction with number i' < i such that v is live at i'. In this
+// implementation intervals can have holes, i.e. an interval might look like
+// [1,20), [50,65), [1000,1001).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
+#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
+
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <cmath>
+#include <iterator>
+
+namespace llvm {
+
+extern cl::opt<bool> UseSegmentSetForPhysRegs;
+
+ class BitVector;
+ class BlockFrequency;
+ class LiveRangeCalc;
+ class LiveVariables;
+ class MachineDominatorTree;
+ class MachineLoopInfo;
+ class TargetRegisterInfo;
+ class MachineRegisterInfo;
+ class TargetInstrInfo;
+ class TargetRegisterClass;
+ class VirtRegMap;
+ class MachineBlockFrequencyInfo;
+
+ class LiveIntervals : public MachineFunctionPass {
+ MachineFunction* MF;
+ MachineRegisterInfo* MRI;
+ const TargetRegisterInfo* TRI;
+ const TargetInstrInfo* TII;
+ AliasAnalysis *AA;
+ SlotIndexes* Indexes;
+ MachineDominatorTree *DomTree;
+ LiveRangeCalc *LRCalc;
+
+ /// Special pool allocator for VNInfo's (LiveInterval val#).
+ ///
+ VNInfo::Allocator VNInfoAllocator;
+
+ /// Live interval pointers for all the virtual registers.
+ IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
+
+ /// RegMaskSlots - Sorted list of instructions with register mask operands.
+ /// Always use the 'r' slot, RegMasks are normal clobbers, not early
+ /// clobbers.
+ SmallVector<SlotIndex, 8> RegMaskSlots;
+
+ /// RegMaskBits - This vector is parallel to RegMaskSlots, it holds a
+ /// pointer to the corresponding register mask. This pointer can be
+ /// recomputed as:
+ ///
+ /// MI = Indexes->getInstructionFromIndex(RegMaskSlot[N]);
+ /// unsigned OpNum = findRegMaskOperand(MI);
+ /// RegMaskBits[N] = MI->getOperand(OpNum).getRegMask();
+ ///
+ /// This is kept in a separate vector partly because some standard
+ /// libraries don't support lower_bound() with mixed objects, partly to
+ /// improve locality when searching in RegMaskSlots.
+ /// Also see the comment in LiveInterval::find().
+ SmallVector<const uint32_t*, 8> RegMaskBits;
+
+ /// For each basic block number, keep (begin, size) pairs indexing into the
+ /// RegMaskSlots and RegMaskBits arrays.
+ /// Note that basic block numbers may not be layout contiguous; that's why
+ /// we can't just keep track of the first register mask in each basic
+ /// block.
+ SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
+
+ /// Keeps a live range set for each register unit to track fixed physreg
+ /// interference.
+ SmallVector<LiveRange*, 0> RegUnitRanges;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveIntervals();
+ ~LiveIntervals() override;
+
+ // Calculate the spill weight to assign to a single instruction.
+ static float getSpillWeight(bool isDef, bool isUse,
+ const MachineBlockFrequencyInfo *MBFI,
+ const MachineInstr *Instr);
+
+ LiveInterval &getInterval(unsigned Reg) {
+ if (hasInterval(Reg))
+ return *VirtRegIntervals[Reg];
+ else
+ return createAndComputeVirtRegInterval(Reg);
+ }
+
+ const LiveInterval &getInterval(unsigned Reg) const {
+ return const_cast<LiveIntervals*>(this)->getInterval(Reg);
+ }
+
+ bool hasInterval(unsigned Reg) const {
+ return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
+ }
+
+ // Interval creation.
+ LiveInterval &createEmptyInterval(unsigned Reg) {
+ assert(!hasInterval(Reg) && "Interval already exists!");
+ VirtRegIntervals.grow(Reg);
+ VirtRegIntervals[Reg] = createInterval(Reg);
+ return *VirtRegIntervals[Reg];
+ }
+
+ LiveInterval &createAndComputeVirtRegInterval(unsigned Reg) {
+ LiveInterval &LI = createEmptyInterval(Reg);
+ computeVirtRegInterval(LI);
+ return LI;
+ }
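+
+ /// A sketch of typical use from inside a machine function pass, assuming
+ /// Reg is a virtual register:
+ ///
+ ///   LiveIntervals &LIS = getAnalysis<LiveIntervals>();
+ ///   LiveInterval &LI = LIS.getInterval(Reg);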
+
+ // Interval removal.
+ void removeInterval(unsigned Reg) {
+ delete VirtRegIntervals[Reg];
+ VirtRegIntervals[Reg] = nullptr;
+ }
+
+ /// Given a register and an instruction, adds a live segment from that
+ /// instruction to the end of its MBB.
+ LiveInterval::Segment addSegmentToEndOfBlock(unsigned reg,
+ MachineInstr* startInst);
+
+ /// After removing some uses of a register, shrink its live range to just
+ /// the remaining uses. This method does not compute reaching defs for new
+ /// uses, and it doesn't remove dead defs.
+ /// Dead PHIDef values are marked as unused. New dead machine instructions
+ /// are added to the dead vector. Returns true if the interval may have been
+ /// separated into multiple connected components.
+ bool shrinkToUses(LiveInterval *li,
+ SmallVectorImpl<MachineInstr*> *dead = nullptr);
+
+ /// Specialized version of
+ /// shrinkToUses(LiveInterval *li, SmallVectorImpl<MachineInstr*> *dead)
+ /// that works on a subregister live range and only looks at uses matching
+ /// the lane mask of the subregister range.
+ /// This may leave the subrange empty, which needs to be cleaned up with
+ /// LiveInterval::removeEmptySubRanges() afterwards.
+ void shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg);
+
+ /// extendToIndices - Extend the live range @p LR to reach all points in
+ /// @p Indices. The points in the Indices array must be jointly dominated
+ /// by existing defs in LR. PHI-defs are added as needed to maintain SSA
+ /// form.
+ ///
+ /// If a SlotIndex in Indices is the end index of a basic block, LR will be
+ /// extended to be live out of the basic block.
+ ///
+ /// See also LiveRangeCalc::extend().
+ void extendToIndices(LiveRange &LR, ArrayRef<SlotIndex> Indices);
+
+ /// If @p LR has a live value at @p Kill, prune its live range by removing
+ /// any liveness reachable from Kill. Add live range end points to
+ /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the
+ /// value's live range.
+ ///
+ /// Calling pruneValue() and extendToIndices() can be used to reconstruct
+ /// SSA form after adding defs to a virtual register.
+ void pruneValue(LiveRange &LR, SlotIndex Kill,
+ SmallVectorImpl<SlotIndex> *EndPoints);
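+
+ /// A sketch of the reconstruction idiom described above, assuming Kill is
+ /// the SlotIndex where a new def is being inserted into LR:
+ ///
+ ///   SmallVector<SlotIndex, 8> EndPoints;
+ ///   LIS.pruneValue(LR, Kill, &EndPoints);
+ ///   // ... insert the new def at Kill ...
+ ///   LIS.extendToIndices(LR, EndPoints);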
+
+ SlotIndexes *getSlotIndexes() const {
+ return Indexes;
+ }
+
+ AliasAnalysis *getAliasAnalysis() const {
+ return AA;
+ }
+
+ /// isNotInMIMap - returns true if the specified machine instr has been
+ /// removed or was never entered in the map.
+ bool isNotInMIMap(const MachineInstr* Instr) const {
+ return !Indexes->hasIndex(Instr);
+ }
+
+ /// Returns the base index of the given instruction.
+ SlotIndex getInstructionIndex(const MachineInstr *instr) const {
+ return Indexes->getInstructionIndex(instr);
+ }
+
+ /// Returns the instruction associated with the given index.
+ MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+ return Indexes->getInstructionFromIndex(index);
+ }
+
+ /// Return the first index in the given basic block.
+ SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+ return Indexes->getMBBStartIdx(mbb);
+ }
+
+ /// Return the last index in the given basic block.
+ SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+ return Indexes->getMBBEndIdx(mbb);
+ }
+
+ bool isLiveInToMBB(const LiveRange &LR,
+ const MachineBasicBlock *mbb) const {
+ return LR.liveAt(getMBBStartIdx(mbb));
+ }
+
+ bool isLiveOutOfMBB(const LiveRange &LR,
+ const MachineBasicBlock *mbb) const {
+ return LR.liveAt(getMBBEndIdx(mbb).getPrevSlot());
+ }
+
+ MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+ return Indexes->getMBBFromIndex(index);
+ }
+
+ void insertMBBInMaps(MachineBasicBlock *MBB) {
+ Indexes->insertMBBInMaps(MBB);
+ assert(unsigned(MBB->getNumber()) == RegMaskBlocks.size() &&
+ "Blocks must be added in order.");
+ RegMaskBlocks.push_back(std::make_pair(RegMaskSlots.size(), 0));
+ }
+
+ SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
+ return Indexes->insertMachineInstrInMaps(MI);
+ }
+
+ void InsertMachineInstrRangeInMaps(MachineBasicBlock::iterator B,
+ MachineBasicBlock::iterator E) {
+ for (MachineBasicBlock::iterator I = B; I != E; ++I)
+ Indexes->insertMachineInstrInMaps(I);
+ }
+
+ void RemoveMachineInstrFromMaps(MachineInstr *MI) {
+ Indexes->removeMachineInstrFromMaps(MI);
+ }
+
+ void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
+ Indexes->replaceMachineInstrInMaps(MI, NewMI);
+ }
+
+ VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+
+ /// runOnMachineFunction - pass entry point
+ bool runOnMachineFunction(MachineFunction&) override;
+
+ /// print - Implement the dump method.
+ void print(raw_ostream &O, const Module* = nullptr) const override;
+
+ /// intervalIsInOneMBB - If LI is confined to a single basic block, return
+ /// a pointer to that block. If LI is live in to or out of any block,
+ /// return NULL.
+ MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
+
+ /// Returns true if VNI is killed by any PHI-def values in LI.
+ /// This may conservatively return true to avoid expensive computations.
+ bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
+
+ /// addKillFlags - Add kill flags to any instruction that kills a virtual
+ /// register.
+ void addKillFlags(const VirtRegMap*);
+
+ /// handleMove - call this method to notify LiveIntervals that
+ /// instruction 'mi' has been moved within a basic block. This will update
+ /// the live intervals for all operands of mi. Moves between basic blocks
+ /// are not supported.
+ ///
+ /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+ void handleMove(MachineInstr* MI, bool UpdateFlags = false);
+
+ /// moveIntoBundle - Update intervals for operands of MI so that they
+ /// begin/end on the SlotIndex for BundleStart.
+ ///
+ /// \param UpdateFlags Update live intervals for nonallocatable physregs.
+ ///
+ /// Requires MI and BundleStart to have SlotIndexes, and assumes
+ /// existing liveness is accurate. BundleStart should be the first
+ /// instruction in the Bundle.
+ void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart,
+ bool UpdateFlags = false);
+
+ /// repairIntervalsInRange - Update live intervals for instructions in a
+ /// range of iterators. It is intended for use after target hooks that may
+ /// insert or remove instructions, and is only efficient for a small number
+ /// of instructions.
+ ///
+ /// OrigRegs is a vector of registers that were originally used by the
+ /// instructions in the range between the two iterators.
+ ///
+ /// Currently, the only changes that are supported are simple removal
+ /// and addition of uses.
+ void repairIntervalsInRange(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ ArrayRef<unsigned> OrigRegs);
+
+ // Register mask functions.
+ //
+ // Machine instructions may use a register mask operand to indicate that a
+ // large number of registers are clobbered by the instruction. This is
+ // typically used for calls.
+ //
+ // For compile time performance reasons, these clobbers are not recorded in
+ // the live intervals for individual physical registers. Instead,
+ // LiveIntervalAnalysis maintains a sorted list of instructions with
+ // register mask operands.
+
+ /// getRegMaskSlots - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands.
+ ArrayRef<SlotIndex> getRegMaskSlots() const { return RegMaskSlots; }
+
+ /// getRegMaskSlotsInBlock - Returns a sorted array of slot indices of all
+ /// instructions with register mask operands in the basic block numbered
+ /// MBBNum.
+ ArrayRef<SlotIndex> getRegMaskSlotsInBlock(unsigned MBBNum) const {
+ std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+ return getRegMaskSlots().slice(P.first, P.second);
+ }
+
+ /// getRegMaskBits() - Returns an array of register mask pointers
+ /// corresponding to getRegMaskSlots().
+ ArrayRef<const uint32_t*> getRegMaskBits() const { return RegMaskBits; }
+
+ /// getRegMaskBitsInBlock - Returns an array of mask pointers corresponding
+ /// to getRegMaskSlotsInBlock(MBBNum).
+ ArrayRef<const uint32_t*> getRegMaskBitsInBlock(unsigned MBBNum) const {
+ std::pair<unsigned, unsigned> P = RegMaskBlocks[MBBNum];
+ return getRegMaskBits().slice(P.first, P.second);
+ }
+
+ /// checkRegMaskInterference - Test if LI is live across any register mask
+ /// instructions, and compute a bit mask of physical registers that are not
+ /// clobbered by any of them.
+ ///
+ /// Returns false if LI doesn't cross any register mask instructions. In
+ /// that case, the bit vector is not filled in.
+ bool checkRegMaskInterference(LiveInterval &LI,
+ BitVector &UsableRegs);
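+
+ /// For example (a sketch), an allocator can filter candidate physregs:
+ ///
+ ///   BitVector Usable;
+ ///   if (LIS.checkRegMaskInterference(LI, Usable)) {
+ ///     // LI crosses at least one regmask; only registers set in Usable
+ ///     // are safe to assign.
+ ///   }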
+
+ // Register unit functions.
+ //
+ // Fixed interference occurs when MachineInstrs use physregs directly
+ // instead of virtual registers. This typically happens when passing
+ // arguments to a function call, or when instructions require operands in
+ // fixed registers.
+ //
+ // Each physreg has one or more register units, see MCRegisterInfo. We
+ // track liveness per register unit to handle aliasing registers more
+ // efficiently.
+
+ /// getRegUnit - Return the live range for Unit.
+ /// It will be computed if it doesn't exist.
+ LiveRange &getRegUnit(unsigned Unit) {
+ LiveRange *LR = RegUnitRanges[Unit];
+ if (!LR) {
+ // Compute missing ranges on demand.
+ // Use segment set to speed-up initial computation of the live range.
+ RegUnitRanges[Unit] = LR = new LiveRange(UseSegmentSetForPhysRegs);
+ computeRegUnitRange(*LR, Unit);
+ }
+ return *LR;
+ }
+
+ /// getCachedRegUnit - Return the live range for Unit if it has already
+ /// been computed, or NULL if it hasn't been computed yet.
+ LiveRange *getCachedRegUnit(unsigned Unit) {
+ return RegUnitRanges[Unit];
+ }
+
+ const LiveRange *getCachedRegUnit(unsigned Unit) const {
+ return RegUnitRanges[Unit];
+ }
+
+ /// Remove value numbers and related live segments starting at position
+ /// @p Pos that are part of any live range of physical register @p Reg or
+ /// one of its subregisters.
+ void removePhysRegDefAt(unsigned Reg, SlotIndex Pos);
+
+ /// Remove value number and related live segments of @p LI and its subranges
+ /// that start at position @p Pos.
+ void removeVRegDefAt(LiveInterval &LI, SlotIndex Pos);
+
+ /// Split separate components in LiveInterval \p LI into separate intervals.
+ void splitSeparateComponents(LiveInterval &LI,
+ SmallVectorImpl<LiveInterval*> &SplitLIs);
+
+ private:
+ /// Compute live intervals for all virtual registers.
+ void computeVirtRegs();
+
+ /// Compute RegMaskSlots and RegMaskBits.
+ void computeRegMasks();
+
+ /// Walk the values in @p LI and check for dead values:
+ /// - Dead PHIDef values are marked as unused.
+ /// - Dead operands are marked as such.
+ /// - Completely dead machine instructions are added to the @p dead vector
+ /// if it is not nullptr.
+ /// Returns true if any PHI value numbers have been removed which may
+ /// have separated the interval into multiple connected components.
+ bool computeDeadValues(LiveInterval &LI,
+ SmallVectorImpl<MachineInstr*> *dead);
+
+ static LiveInterval* createInterval(unsigned Reg);
+
+ void printInstrs(raw_ostream &O) const;
+ void dumpInstrs() const;
+
+ void computeLiveInRegUnits();
+ void computeRegUnitRange(LiveRange&, unsigned Unit);
+ void computeVirtRegInterval(LiveInterval&);
+
+ /// Helper function for repairIntervalsInRange(), walks backwards and
+ /// creates/modifies live segments in @p LR to match the operands found.
+ /// Only full operands or operands with subregisters matching @p LaneMask
+ /// are considered.
+ void repairOldRegInRange(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ const SlotIndex endIdx, LiveRange &LR,
+ unsigned Reg, LaneBitmask LaneMask = ~0u);
+
+ class HMEditor;
+ };
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveIntervalUnion.h b/contrib/llvm/include/llvm/CodeGen/LiveIntervalUnion.h
new file mode 100644
index 0000000..f0f1637
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveIntervalUnion.h
@@ -0,0 +1,216 @@
+//===-- LiveIntervalUnion.h - Live interval union data struct --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// LiveIntervalUnion is a union of live segments across multiple live virtual
+// registers. This may be used during coalescing to represent a congruence
+// class, or during register allocation to model liveness of a physical
+// register.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVALUNION_H
+#define LLVM_CODEGEN_LIVEINTERVALUNION_H
+
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/CodeGen/LiveInterval.h"
+
+namespace llvm {
+
+class TargetRegisterInfo;
+
+#ifndef NDEBUG
+// forward declaration
+template <unsigned Element> class SparseBitVector;
+typedef SparseBitVector<128> LiveVirtRegBitSet;
+#endif
+
+/// Compare a live virtual register segment to a LiveIntervalUnion segment.
+inline bool
+overlap(const LiveInterval::Segment &VRSeg,
+ const IntervalMap<SlotIndex, LiveInterval*>::const_iterator &LUSeg) {
+ return VRSeg.start < LUSeg.stop() && LUSeg.start() < VRSeg.end;
+}
+
+/// Union of live intervals that are strong candidates for coalescing into a
+/// single register (either physical or virtual depending on the context). We
+/// expect the constituent live intervals to be disjoint, although we may
+/// eventually make exceptions to handle value-based interference.
+class LiveIntervalUnion {
+ // A set of live virtual register segments that supports fast insertion,
+ // intersection, and removal.
+ // Mapping SlotIndex intervals to virtual register numbers.
+ typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
+
+public:
+ // SegmentIter can advance to the next segment ordered by starting position,
+ // which may belong to a different live virtual register. We must also be
+ // able to reach the current segment's containing virtual register.
+ typedef LiveSegments::iterator SegmentIter;
+
+ // LiveIntervalUnions share an external allocator.
+ typedef LiveSegments::Allocator Allocator;
+
+ class Query;
+
+private:
+ unsigned Tag; // unique tag for current contents.
+ LiveSegments Segments; // union of virtual reg segments
+
+public:
+ explicit LiveIntervalUnion(Allocator &a) : Tag(0), Segments(a) {}
+
+ // Iterate over all segments in the union of live virtual registers ordered
+ // by their starting position.
+ SegmentIter begin() { return Segments.begin(); }
+ SegmentIter end() { return Segments.end(); }
+ SegmentIter find(SlotIndex x) { return Segments.find(x); }
+ bool empty() const { return Segments.empty(); }
+ SlotIndex startIndex() const { return Segments.start(); }
+
+ // Provide public access to the underlying map to allow overlap iteration.
+ typedef LiveSegments Map;
+ const Map &getMap() { return Segments; }
+
+ /// getTag - Return an opaque tag representing the current state of the union.
+ unsigned getTag() const { return Tag; }
+
+ /// changedSince - Return true if the union has changed since getTag returned
+ /// tag.
+ bool changedSince(unsigned tag) const { return tag != Tag; }
+
+ // Add a live virtual register to this union and merge its segments.
+ void unify(LiveInterval &VirtReg, const LiveRange &Range);
+ void unify(LiveInterval &VirtReg) {
+ unify(VirtReg, VirtReg);
+ }
+
+ // Remove a live virtual register's segments from this union.
+ void extract(LiveInterval &VirtReg, const LiveRange &Range);
+ void extract(LiveInterval &VirtReg) {
+ extract(VirtReg, VirtReg);
+ }
+
+ // Remove all inserted virtual registers.
+ void clear() { Segments.clear(); ++Tag; }
+
+ // Print union, using TRI to translate register names
+ void print(raw_ostream &OS, const TargetRegisterInfo *TRI) const;
+
+#ifndef NDEBUG
+ // Verify the live intervals in this union and add them to the visited set.
+ void verify(LiveVirtRegBitSet& VisitedVRegs);
+#endif
+
+ /// Query interferences between a single live virtual register and a live
+ /// interval union.
+ class Query {
+ LiveIntervalUnion *LiveUnion;
+ LiveInterval *VirtReg;
+ LiveInterval::iterator VirtRegI; // current position in VirtReg
+ SegmentIter LiveUnionI; // current position in LiveUnion
+ SmallVector<LiveInterval*,4> InterferingVRegs;
+ bool CheckedFirstInterference;
+ bool SeenAllInterferences;
+ bool SeenUnspillableVReg;
+ unsigned Tag, UserTag;
+
+ public:
+ Query(): LiveUnion(), VirtReg(), CheckedFirstInterference(false),
+ SeenAllInterferences(false), SeenUnspillableVReg(false),
+ Tag(0), UserTag(0) {}
+
+ Query(LiveInterval *VReg, LiveIntervalUnion *LIU):
+ LiveUnion(LIU), VirtReg(VReg), CheckedFirstInterference(false),
+ SeenAllInterferences(false), SeenUnspillableVReg(false),
+ Tag(0), UserTag(0)
+ {}
+
+ void clear() {
+ LiveUnion = nullptr;
+ VirtReg = nullptr;
+ InterferingVRegs.clear();
+ CheckedFirstInterference = false;
+ SeenAllInterferences = false;
+ SeenUnspillableVReg = false;
+ Tag = 0;
+ UserTag = 0;
+ }
+
+ void init(unsigned UTag, LiveInterval *VReg, LiveIntervalUnion *LIU) {
+ assert(VReg && LIU && "Invalid arguments");
+ if (UserTag == UTag && VirtReg == VReg &&
+ LiveUnion == LIU && !LIU->changedSince(Tag)) {
+ // Retain cached results, e.g. firstInterference.
+ return;
+ }
+ clear();
+ LiveUnion = LIU;
+ VirtReg = VReg;
+ Tag = LIU->getTag();
+ UserTag = UTag;
+ }
+
+ LiveInterval &virtReg() const {
+ assert(VirtReg && "uninitialized");
+ return *VirtReg;
+ }
+
+ // Does this live virtual register interfere with the union?
+ bool checkInterference() { return collectInterferingVRegs(1); }
+
+ // Count the virtual registers in this union that interfere with this
+ // query's live virtual register, up to maxInterferingRegs.
+ unsigned collectInterferingVRegs(unsigned MaxInterferingRegs = UINT_MAX);
+
+ // Was this virtual register visited during collectInterferingVRegs?
+ bool isSeenInterference(LiveInterval *VReg) const;
+
+ // Did collectInterferingVRegs collect all interferences?
+ bool seenAllInterferences() const { return SeenAllInterferences; }
+
+ // Did collectInterferingVRegs encounter an unspillable vreg?
+ bool seenUnspillableVReg() const { return SeenUnspillableVReg; }
+
+ // Vector generated by collectInterferingVRegs.
+ const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
+ return InterferingVRegs;
+ }
+
+ private:
+ Query(const Query&) = delete;
+ void operator=(const Query&) = delete;
+ };
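+
+ // A sketch of how an allocator drives a Query; UTag, VirtReg and LIU stand
+ // in for the caller's user tag, candidate interval, and regunit union:
+ //
+ //   Query Q;
+ //   Q.init(UTag, &VirtReg, &LIU);
+ //   if (Q.checkInterference()) {
+ //     Q.collectInterferingVRegs();
+ //     for (LiveInterval *Intf : Q.interferingVRegs())
+ //       ; // consider evicting Intf
+ //   }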
+
+ // Array of LiveIntervalUnions.
+ class Array {
+ unsigned Size;
+ LiveIntervalUnion *LIUs;
+ public:
+ Array() : Size(0), LIUs(nullptr) {}
+ ~Array() { clear(); }
+
+ // Initialize the array to have Size entries.
+ // Reuse an existing allocation if the size matches.
+ void init(LiveIntervalUnion::Allocator&, unsigned Size);
+
+ unsigned size() const { return Size; }
+
+ void clear();
+
+ LiveIntervalUnion& operator[](unsigned idx) {
+ assert(idx < Size && "idx out of bounds");
+ return LIUs[idx];
+ }
+
+ const LiveIntervalUnion& operator[](unsigned Idx) const {
+ assert(Idx < Size && "Idx out of bounds");
+ return LIUs[Idx];
+ }
+ };
+};
+
+} // end namespace llvm
+
+#endif // !defined(LLVM_CODEGEN_LIVEINTERVALUNION_H)
diff --git a/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h b/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h
new file mode 100644
index 0000000..3bdf5ae
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LivePhysRegs.h
@@ -0,0 +1,147 @@
+//===- llvm/CodeGen/LivePhysRegs.h - Live Physical Register Set -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LivePhysRegs utility for tracking liveness of
+// physical registers. This can be used for ad-hoc liveness tracking after
+// register allocation. You can start with the live-ins/live-outs at the
+// beginning/end of a block and update the information while walking the
+// instructions inside the block. This implementation tracks the liveness on a
+// sub-register granularity.
+//
+// We assume that the high bits of a physical super-register are not preserved
+// unless the instruction has an implicit-use operand reading the super-
+// register.
+//
+// X86 Example:
+// %YMM0<def> = ...
+// %XMM0<def> = ... (Kills %XMM0, all of %XMM0's sub-registers, and %YMM0)
+//
+// %YMM0<def> = ...
+// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
+#define LLVM_CODEGEN_LIVEPHYSREGS_H
+
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <cassert>
+
+namespace llvm {
+
+class MachineInstr;
+
+/// \brief A set of live physical registers with functions to track liveness
+/// when walking backward/forward through a basic block.
+class LivePhysRegs {
+ const TargetRegisterInfo *TRI;
+ SparseSet<unsigned> LiveRegs;
+
+ LivePhysRegs(const LivePhysRegs&) = delete;
+ LivePhysRegs &operator=(const LivePhysRegs&) = delete;
+public:
+ /// \brief Constructs a new empty LivePhysRegs set.
+ LivePhysRegs() : TRI(nullptr), LiveRegs() {}
+
+ /// \brief Constructs and initializes an empty LivePhysRegs set.
+ LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
+ assert(TRI && "Invalid TargetRegisterInfo pointer.");
+ LiveRegs.setUniverse(TRI->getNumRegs());
+ }
+
+ /// \brief Clear and initialize the LivePhysRegs set.
+ void init(const TargetRegisterInfo *TRI) {
+ assert(TRI && "Invalid TargetRegisterInfo pointer.");
+ this->TRI = TRI;
+ LiveRegs.clear();
+ LiveRegs.setUniverse(TRI->getNumRegs());
+ }
+
+ /// \brief Clears the LivePhysRegs set.
+ void clear() { LiveRegs.clear(); }
+
+ /// \brief Returns true if the set is empty.
+ bool empty() const { return LiveRegs.empty(); }
+
+ /// \brief Adds a physical register and all its sub-registers to the set.
+ void addReg(unsigned Reg) {
+ assert(TRI && "LivePhysRegs is not initialized.");
+ assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs)
+ LiveRegs.insert(*SubRegs);
+ }
+
+ /// \brief Removes a physical register, all its sub-registers, and all its
+ /// super-registers from the set.
+ void removeReg(unsigned Reg) {
+ assert(TRI && "LivePhysRegs is not initialized.");
+ assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
+ for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+ SubRegs.isValid(); ++SubRegs)
+ LiveRegs.erase(*SubRegs);
+ for (MCSuperRegIterator SuperRegs(Reg, TRI, /*IncludeSelf=*/false);
+ SuperRegs.isValid(); ++SuperRegs)
+ LiveRegs.erase(*SuperRegs);
+ }
+
+ /// \brief Removes physical registers clobbered by the regmask operand @p MO.
+ void removeRegsInMask(const MachineOperand &MO,
+ SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers);
+
+ /// \brief Returns true if register @p Reg is contained in the set. This also
+ /// works if only the super register of @p Reg has been defined, because we
+ /// always also add all sub-registers to the set.
+ bool contains(unsigned Reg) const { return LiveRegs.count(Reg); }
+
+ /// \brief Simulates liveness when stepping backwards over an
+ /// instruction (bundle): Remove defs, add uses. This is the recommended way
+ /// of calculating liveness.
+ void stepBackward(const MachineInstr &MI);
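+
+ /// A sketch of the recommended backward walk over a block (MBB and TRI are
+ /// assumed to be available in the caller):
+ ///
+ ///   LivePhysRegs LiveRegs(TRI);
+ ///   LiveRegs.addLiveOuts(&MBB);
+ ///   for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I)
+ ///     LiveRegs.stepBackward(*I);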
+
+ /// \brief Simulates liveness when stepping forward over an
+ /// instruction (bundle): Remove killed-uses, add defs. This is not the
+ /// recommended way, because it depends on accurate kill flags. If possible,
+ /// use stepBackward() instead of this function.
+ /// The clobbers set will be the list of registers either defined or clobbered
+ /// by a regmask. The operand will identify whether this is a regmask or
+ /// register operand.
+ void stepForward(const MachineInstr &MI,
+ SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
+
+ /// \brief Adds all live-in registers of basic block @p MBB. After prologue/
+ /// epilogue insertion, \p AddPristines should be set to true to insert the
+ /// pristine registers.
+ void addLiveIns(const MachineBasicBlock *MBB, bool AddPristines = false);
+
+ /// \brief Adds all live-out registers of basic block @p MBB. After prologue/
+ /// epilogue insertion, \p AddPristinesAndCSRs should be set to true.
+ void addLiveOuts(const MachineBasicBlock *MBB,
+ bool AddPristinesAndCSRs = false);
+
+ typedef SparseSet<unsigned>::const_iterator const_iterator;
+ const_iterator begin() const { return LiveRegs.begin(); }
+ const_iterator end() const { return LiveRegs.end(); }
+
+ /// \brief Prints the currently live registers to @p OS.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Dumps the currently live registers to the debug output.
+ void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
+ LR.print(OS);
+ return OS;
+}
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
new file mode 100644
index 0000000..2271e33
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveRangeEdit.h
@@ -0,0 +1,233 @@
+//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRangeEdit class represents changes done to a virtual register when it
+// is spilled or split.
+//
+// The parent register is never changed. Instead, a number of new virtual
+// registers are created and added to the newRegs vector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVERANGEEDIT_H
+#define LLVM_CODEGEN_LIVERANGEEDIT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+namespace llvm {
+
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineLoopInfo;
+class VirtRegMap;
+
+class LiveRangeEdit : private MachineRegisterInfo::Delegate {
+public:
+ /// Callback methods for LiveRangeEdit owners.
+ class Delegate {
+ virtual void anchor();
+ public:
+ /// Called immediately before erasing a dead machine instruction.
+ virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
+
+ /// Called when a virtual register is no longer used. Return false to defer
+ /// its deletion from LiveIntervals.
+ virtual bool LRE_CanEraseVirtReg(unsigned) { return true; }
+
+ /// Called before shrinking the live range of a virtual register.
+ virtual void LRE_WillShrinkVirtReg(unsigned) {}
+
+ /// Called after cloning a virtual register.
+ /// This is used for new registers representing connected components of Old.
+ virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
+
+ virtual ~Delegate() {}
+ };
+
+private:
+ LiveInterval *Parent;
+ SmallVectorImpl<unsigned> &NewRegs;
+ MachineRegisterInfo &MRI;
+ LiveIntervals &LIS;
+ VirtRegMap *VRM;
+ const TargetInstrInfo &TII;
+ Delegate *const TheDelegate;
+
+ /// FirstNew - Index of the first register added to NewRegs.
+ const unsigned FirstNew;
+
+ /// ScannedRemattable - true when remattable values have been identified.
+ bool ScannedRemattable;
+
+ /// Remattable - Values defined by remattable instructions as identified by
+ /// tii.isTriviallyReMaterializable().
+ SmallPtrSet<const VNInfo*,4> Remattable;
+
+ /// Rematted - Values that were actually rematted, and so need to have their
+ /// live range trimmed or entirely removed.
+ SmallPtrSet<const VNInfo*,4> Rematted;
+
+ /// scanRemattable - Identify the Parent values that may rematerialize.
+ void scanRemattable(AliasAnalysis *aa);
+
+ /// allUsesAvailableAt - Return true if all registers used by OrigMI at
+ /// OrigIdx are also available with the same value at UseIdx.
+ bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
+ SlotIndex UseIdx) const;
+
+ /// foldAsLoad - If LI has a single use and a single def that can be folded as
+ /// a load, eliminate the register by folding the def into the use.
+ bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
+
+ typedef SetVector<LiveInterval*,
+ SmallVector<LiveInterval*, 8>,
+ SmallPtrSet<LiveInterval*, 8> > ToShrinkSet;
+ /// Helper for eliminateDeadDefs.
+ void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink);
+
+ /// MachineRegisterInfo callback to notify when new virtual
+ /// registers are created.
+ void MRI_NoteNewVirtualRegister(unsigned VReg) override;
+
+ /// \brief Check if MachineOperand \p MO is a last use/kill either in the
+ /// main live range of \p LI or in one of the matching subregister ranges.
+ bool useIsKill(const LiveInterval &LI, const MachineOperand &MO) const;
+
+public:
+ /// Create a LiveRangeEdit for breaking down parent into smaller pieces.
+ /// @param parent The register being spilled or split.
+ /// @param newRegs List to receive any new registers created. This needn't
+ /// be empty initially; any existing registers are ignored.
+ /// @param MF The MachineFunction the live range edit is taking place in.
+ /// @param lis The collection of all live intervals in this function.
+ /// @param vrm Map of virtual registers to physical registers for this
+ /// function. If NULL, no virtual register map updates will
+ /// be done. This could be the case if called before Regalloc.
+ LiveRangeEdit(LiveInterval *parent, SmallVectorImpl<unsigned> &newRegs,
+ MachineFunction &MF, LiveIntervals &lis, VirtRegMap *vrm,
+ Delegate *delegate = nullptr)
+ : Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
+ VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()),
+ TheDelegate(delegate), FirstNew(newRegs.size()),
+ ScannedRemattable(false) {
+ MRI.setDelegate(this);
+ }
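+
+ /// A sketch of typical construction during spilling; LI, MF, LIS and VRM
+ /// stand in for the caller's state, and the caller implements Delegate:
+ ///
+ ///   SmallVector<unsigned, 4> NewVRegs;
+ ///   LiveRangeEdit LRE(&LI, NewVRegs, MF, LIS, &VRM, this);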
+
+ ~LiveRangeEdit() override { MRI.resetDelegate(this); }
+
+ LiveInterval &getParent() const {
+ assert(Parent && "No parent LiveInterval");
+ return *Parent;
+ }
+ unsigned getReg() const { return getParent().reg; }
+
+ /// Iterator for accessing the new registers added by this edit.
+ typedef SmallVectorImpl<unsigned>::const_iterator iterator;
+ iterator begin() const { return NewRegs.begin()+FirstNew; }
+ iterator end() const { return NewRegs.end(); }
+ unsigned size() const { return NewRegs.size()-FirstNew; }
+ bool empty() const { return size() == 0; }
+ unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
+
+ ArrayRef<unsigned> regs() const {
+ return makeArrayRef(NewRegs).slice(FirstNew);
+ }
+
+ /// createEmptyIntervalFrom - Create a new empty interval based on OldReg.
+ LiveInterval &createEmptyIntervalFrom(unsigned OldReg);
+
+ /// createFrom - Create a new virtual register based on OldReg.
+ unsigned createFrom(unsigned OldReg);
+
+ /// create - Create a new register with the same class and original slot as
+ /// parent.
+ LiveInterval &createEmptyInterval() {
+ return createEmptyIntervalFrom(getReg());
+ }
+
+ unsigned create() {
+ return createFrom(getReg());
+ }
+
+ /// anyRematerializable - Return true if any parent values may be
+ /// rematerializable.
+ /// This function must be called before any rematerialization is attempted.
+ bool anyRematerializable(AliasAnalysis*);
+
+ /// checkRematerializable - Manually add VNI to the list of rematerializable
+ /// values if DefMI may be rematerializable.
+ bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
+ AliasAnalysis*);
+
+ /// Remat - Information needed to rematerialize at a specific location.
+ struct Remat {
+ VNInfo *ParentVNI; // parent_'s value at the remat location.
+ MachineInstr *OrigMI; // Instruction defining ParentVNI.
+ explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
+ };
+
+ /// canRematerializeAt - Determine if ParentVNI can be rematerialized at
+ /// UseIdx. It is assumed that parent_.getVNInfoAt(UseIdx) == ParentVNI.
+ /// When cheapAsAMove is set, only cheap remats are allowed.
+ bool canRematerializeAt(Remat &RM,
+ SlotIndex UseIdx,
+ bool cheapAsAMove);
+
+ /// rematerializeAt - Rematerialize RM.ParentVNI into DestReg by inserting an
+ /// instruction into MBB before MI. The new instruction is mapped, but
+ /// liveness is not updated.
+ /// Return the SlotIndex of the new instruction.
+ SlotIndex rematerializeAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg,
+ const Remat &RM,
+ const TargetRegisterInfo&,
+ bool Late = false);
+
+ /// markRematerialized - explicitly mark a value as rematerialized after doing
+ /// it manually.
+ void markRematerialized(const VNInfo *ParentVNI) {
+ Rematted.insert(ParentVNI);
+ }
+
+ /// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
+ bool didRematerialize(const VNInfo *ParentVNI) const {
+ return Rematted.count(ParentVNI);
+ }
+
+ /// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try
+ /// to erase it from LIS.
+ void eraseVirtReg(unsigned Reg);
+
+ /// eliminateDeadDefs - Try to delete machine instructions that are now dead
+ /// (allDefsAreDead returns true). This may cause live intervals to be trimmed
+ /// and further dead defs to be eliminated.
+ /// RegsBeingSpilled lists registers currently being spilled by the register
+ /// allocator. These registers should not be split into new intervals
+ /// as currently those new intervals are not guaranteed to spill.
+ void eliminateDeadDefs(SmallVectorImpl<MachineInstr*> &Dead,
+ ArrayRef<unsigned> RegsBeingSpilled = None);
+
+ /// calculateRegClassAndHint - Recompute register class and hint for each new
+ /// register.
+ void calculateRegClassAndHint(MachineFunction&,
+ const MachineLoopInfo&,
+ const MachineBlockFrequencyInfo&);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveRegMatrix.h b/contrib/llvm/include/llvm/CodeGen/LiveRegMatrix.h
new file mode 100644
index 0000000..e169058
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveRegMatrix.h
@@ -0,0 +1,148 @@
+//===-- LiveRegMatrix.h - Track register interference ---------*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The LiveRegMatrix analysis pass keeps track of virtual register interference
+// along two dimensions: Slot indexes and register units. The matrix is used by
+// register allocators to ensure that no interfering virtual registers get
+// assigned to overlapping physical registers.
+//
+// Register units are defined in MCRegisterInfo.h, they represent the smallest
+// unit of interference when dealing with overlapping physical registers. The
+// LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When
+// a virtual register is assigned to a physical register, the live range for
+// the virtual register is inserted into the LiveIntervalUnion for each regunit
+// in the physreg.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEREGMATRIX_H
+#define LLVM_CODEGEN_LIVEREGMATRIX_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class LiveInterval;
+class LiveIntervals;
+class TargetRegisterInfo;
+class VirtRegMap;
+
+class LiveRegMatrix : public MachineFunctionPass {
+ const TargetRegisterInfo *TRI;
+ LiveIntervals *LIS;
+ VirtRegMap *VRM;
+
+ // UserTag changes whenever virtual registers have been modified.
+ unsigned UserTag;
+
+ // The matrix is represented as a LiveIntervalUnion per register unit.
+ LiveIntervalUnion::Allocator LIUAlloc;
+ LiveIntervalUnion::Array Matrix;
+
+ // Cached queries per register unit.
+ std::unique_ptr<LiveIntervalUnion::Query[]> Queries;
+
+ // Cached register mask interference info.
+ unsigned RegMaskTag;
+ unsigned RegMaskVirtReg;
+ BitVector RegMaskUsable;
+
+ // MachineFunctionPass boilerplate.
+ void getAnalysisUsage(AnalysisUsage&) const override;
+ bool runOnMachineFunction(MachineFunction&) override;
+ void releaseMemory() override;
+public:
+ static char ID;
+ LiveRegMatrix();
+
+ //===--------------------------------------------------------------------===//
+ // High-level interface.
+ //===--------------------------------------------------------------------===//
+ //
+ // Check for interference before assigning virtual registers to physical
+ // registers.
+ //
+
+ /// Invalidate cached interference queries after modifying virtual register
+ /// live ranges. Interference checks may return stale information unless
+ /// caches are invalidated.
+ void invalidateVirtRegs() { ++UserTag; }
+
+ enum InterferenceKind {
+ /// No interference, go ahead and assign.
+ IK_Free = 0,
+
+ /// Virtual register interference. There are interfering virtual registers
+ /// assigned to PhysReg or its aliases. This interference could be resolved
+ /// by unassigning those other virtual registers.
+ IK_VirtReg,
+
+ /// Register unit interference. A fixed live range is in the way, typically
+ /// argument registers for a call. This can't be resolved by unassigning
+ /// other virtual registers.
+ IK_RegUnit,
+
+ /// RegMask interference. The live range is crossing an instruction with a
+ /// regmask operand that doesn't preserve PhysReg. This typically means
+ /// VirtReg is live across a call, and PhysReg isn't call-preserved.
+ IK_RegMask
+ };
+
+ /// Check for interference before assigning VirtReg to PhysReg.
+ /// If this function returns IK_Free, it is legal to assign(VirtReg, PhysReg).
+ /// When there is more than one kind of interference, the InterferenceKind
+ /// with the highest enum value is returned.
+ InterferenceKind checkInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Assign VirtReg to PhysReg.
+ /// This will mark VirtReg's live range as occupied in the LiveRegMatrix and
+ /// update VirtRegMap. The live range is expected to be available in PhysReg.
+ void assign(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Unassign VirtReg from its PhysReg.
+ /// Assuming that VirtReg was previously assigned to a PhysReg, this undoes
+ /// the assignment and updates VirtRegMap accordingly.
+ void unassign(LiveInterval &VirtReg);
+
+ /// Returns true if the given \p PhysReg has any live intervals assigned.
+ bool isPhysRegUsed(unsigned PhysReg) const;
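+
+ // An illustrative allocator loop (not part of this header) built on the
+ // interface above, assuming `Matrix` is a LiveRegMatrix* and `Order` is
+ // some range of candidate physical registers:
+ //
+ //   for (unsigned PhysReg : Order) {
+ //     if (Matrix->checkInterference(VirtReg, PhysReg) ==
+ //         LiveRegMatrix::IK_Free) {
+ //       Matrix->assign(VirtReg, PhysReg);
+ //       break;
+ //     }
+ //   }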
+
+ //===--------------------------------------------------------------------===//
+ // Low-level interface.
+ //===--------------------------------------------------------------------===//
+ //
+ // Provide access to the underlying LiveIntervalUnions.
+ //
+
+ /// Check for regmask interference only.
+ /// Return true if VirtReg crosses a regmask operand that clobbers PhysReg.
+ /// If PhysReg is null, check if VirtReg crosses any regmask operands.
+ bool checkRegMaskInterference(LiveInterval &VirtReg, unsigned PhysReg = 0);
+
+ /// Check for regunit interference only.
+ /// Return true if VirtReg overlaps a fixed assignment of one of PhysRegs's
+ /// register units.
+ bool checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg);
+
+ /// Query a line of the assigned virtual register matrix directly.
+ /// Use MCRegUnitIterator to enumerate all regunits in the desired PhysReg.
+ /// This returns a reference to an internal Query data structure that is only
+ /// valid until the next query() call.
+ LiveIntervalUnion::Query &query(LiveInterval &VirtReg, unsigned RegUnit);
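+
+ // For example (an illustrative sketch), the regunit check above can be
+ // expressed in terms of query(), assuming `TRI` points to the current
+ // TargetRegisterInfo:
+ //
+ //   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
+ //     if (query(VirtReg, *Units).checkInterference())
+ //       return true;
+ //   return false;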
+
+ /// Directly access the live interval unions per regunit.
+ /// This returns an array indexed by the regunit number.
+ LiveIntervalUnion *getLiveUnions() { return &Matrix[0]; }
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_LIVEREGMATRIX_H
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h b/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
new file mode 100644
index 0000000..3ffbe3d
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -0,0 +1,98 @@
+//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the live stack slot analysis pass. It is analogous to
+// live interval analysis except it's analyzing liveness of stack slots rather
+// than registers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVESTACKANALYSIS_H
+#define LLVM_CODEGEN_LIVESTACKANALYSIS_H
+
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include <map>
+#include <unordered_map>
+
+namespace llvm {
+
+class LiveStacks : public MachineFunctionPass {
+ const TargetRegisterInfo *TRI;
+
+ /// Special pool allocator for VNInfo's (LiveInterval val#).
+ ///
+ VNInfo::Allocator VNInfoAllocator;
+
+ /// S2IMap - Stack slot indices to live interval mapping.
+ ///
+ typedef std::unordered_map<int, LiveInterval> SS2IntervalMap;
+ SS2IntervalMap S2IMap;
+
+ /// S2RCMap - Stack slot indices to register class mapping.
+ std::map<int, const TargetRegisterClass *> S2RCMap;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveStacks() : MachineFunctionPass(ID) {
+ initializeLiveStacksPass(*PassRegistry::getPassRegistry());
+ }
+
+ typedef SS2IntervalMap::iterator iterator;
+ typedef SS2IntervalMap::const_iterator const_iterator;
+ const_iterator begin() const { return S2IMap.begin(); }
+ const_iterator end() const { return S2IMap.end(); }
+ iterator begin() { return S2IMap.begin(); }
+ iterator end() { return S2IMap.end(); }
+
+ unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
+
+ LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC);
+
+ LiveInterval &getInterval(int Slot) {
+ assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::iterator I = S2IMap.find(Slot);
+ assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+ return I->second;
+ }
+
+ const LiveInterval &getInterval(int Slot) const {
+ assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
+ assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+ return I->second;
+ }
+
+ bool hasInterval(int Slot) const { return S2IMap.count(Slot); }
+
+ const TargetRegisterClass *getIntervalRegClass(int Slot) const {
+ assert(Slot >= 0 && "Spill slot index must be >= 0");
+ std::map<int, const TargetRegisterClass *>::const_iterator I =
+ S2RCMap.find(Slot);
+ assert(I != S2RCMap.end() &&
+ "Register class info does not exist for stack slot");
+ return I->second;
+ }
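+
+ // An illustrative sketch (not part of this header), e.g. from a stack
+ // slot coloring pass, assuming `LS` is a LiveStacks& and `Slot` is a
+ // frame index:
+ //
+ //   if (LS.hasInterval(Slot)) {
+ //     LiveInterval &LI = LS.getInterval(Slot);
+ //     const TargetRegisterClass *RC = LS.getIntervalRegClass(Slot);
+ //     // ... check LI for overlaps with intervals of other slots ...
+ //   }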
+
+ VNInfo::Allocator &getVNInfoAllocator() { return VNInfoAllocator; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void releaseMemory() override;
+
+ /// runOnMachineFunction - pass entry point
+ bool runOnMachineFunction(MachineFunction &) override;
+
+ /// print - Implement the dump method.
+ void print(raw_ostream &O, const Module * = nullptr) const override;
+};
+}
+
+#endif /* LLVM_CODEGEN_LIVESTACKANALYSIS_H */
diff --git a/contrib/llvm/include/llvm/CodeGen/LiveVariables.h b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
new file mode 100644
index 0000000..55b97dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/LiveVariables.h
@@ -0,0 +1,311 @@
+//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveVariables analysis pass. For each machine
+// instruction in the function, this pass calculates the set of registers that
+// are immediately dead after the instruction (i.e., the instruction calculates
+// the value, but it is never used) and the set of registers that are used by
+// the instruction, but are never used after the instruction (i.e., they are
+// killed).
+//
+// This class computes live variables using a sparse implementation based on
+// the machine code SSA form. This class computes live variable information for
+// each virtual and _register allocatable_ physical register in a function. It
+// uses the dominance properties of SSA form to efficiently compute live
+// variables for virtual registers, and assumes that physical registers are only
+// live within a single basic block (allowing it to do a single local analysis
+// to resolve physical register lifetimes in each basic block). If a physical
+// register is not register allocatable, it is not tracked. This is useful for
+// things like the stack pointer and condition codes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
+#define LLVM_CODEGEN_LIVEVARIABLES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineRegisterInfo;
+
+class LiveVariables : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveVariables() : MachineFunctionPass(ID) {
+ initializeLiveVariablesPass(*PassRegistry::getPassRegistry());
+ }
+
+ /// VarInfo - This represents the regions where a virtual register is live in
+ /// the program. We represent this with three different pieces of
+ /// information: the set of blocks in which the instruction is live
+ /// throughout, the set of blocks in which the instruction is actually used,
+ /// and the set of non-phi instructions that are the last users of the value.
+ ///
+ /// In the common case where a value is defined and killed in the same block,
+ /// there is one killing instruction, and AliveBlocks is empty.
+ ///
+ /// Otherwise, the value is live out of the block. If the value is live
+ /// throughout any blocks, these blocks are listed in AliveBlocks. Blocks
+ /// where the liveness range ends are not included in AliveBlocks, instead
+ /// being captured by the Kills set. In these blocks, the value is live into
+ /// the block (unless the value is defined and killed in the same block) and
+ /// lives until the specified instruction. Note that there cannot ever be a
+ /// value whose Kills set contains two instructions from the same basic block.
+ ///
+ /// PHI nodes complicate things a bit. If a PHI node is the last user of a
+ /// value in one of its predecessor blocks, it is not listed in the kills set,
+ /// but does include the predecessor block in the AliveBlocks set (unless that
+ /// block also defines the value). This leads to the (perfectly sensible)
+ /// situation where a value is defined in a block, and the last use is a phi
+ /// node in the successor. In this case, AliveBlocks is empty (the value is
+ /// not live across any blocks) and Kills is empty (phi nodes are not
+ /// included). This is sensible because the value must be live to the end of
+ /// the block, but is not live in any successor blocks.
+ struct VarInfo {
+ /// AliveBlocks - Set of blocks in which this value is alive completely
+ /// through. This is a bit set which uses the basic block number as an
+ /// index.
+ ///
+ SparseBitVector<> AliveBlocks;
+
+ /// Kills - List of MachineInstruction's which are the last use of this
+ /// virtual register (kill it) in their basic block.
+ ///
+ std::vector<MachineInstr*> Kills;
+
+ /// removeKill - Delete a kill corresponding to the specified
+ /// machine instruction. Returns true if there was a kill
+ /// corresponding to this instruction, false otherwise.
+ bool removeKill(MachineInstr *MI) {
+ std::vector<MachineInstr*>::iterator
+ I = std::find(Kills.begin(), Kills.end(), MI);
+ if (I == Kills.end())
+ return false;
+ Kills.erase(I);
+ return true;
+ }
+
+ /// findKill - Find a kill instruction in MBB. Return NULL if none is found.
+ MachineInstr *findKill(const MachineBasicBlock *MBB) const;
+
+ /// isLiveIn - Is Reg live in to MBB? This means that Reg is live through
+ /// MBB, or it is killed in MBB. If Reg is only used by PHI instructions in
+ /// MBB, it is not considered live in.
+ bool isLiveIn(const MachineBasicBlock &MBB,
+ unsigned Reg,
+ MachineRegisterInfo &MRI);
+
+ void dump() const;
+ };
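+
+ // A worked example of the invariants above (illustrative): if %v is
+ // defined in BB0, lives through BB1, and is last used by a non-phi
+ // instruction I in BB2, then AliveBlocks contains only BB1's number
+ // (BB0 defines the value and BB2 is captured by the kill), and
+ // Kills == { I }.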
+
+private:
+ /// VirtRegInfo - This list is a mapping from virtual register number to
+ /// variable information.
+ ///
+ IndexedMap<VarInfo, VirtReg2IndexFunctor> VirtRegInfo;
+
+ /// PHIJoins - list of virtual registers that are PHI joins. These registers
+ /// may have multiple definitions, and they require special handling when
+ /// building live intervals.
+ SparseBitVector<> PHIJoins;
+
+private: // Intermediate data structures
+ MachineFunction *MF;
+
+ MachineRegisterInfo* MRI;
+
+ const TargetRegisterInfo *TRI;
+
+ // PhysRegDef - Keep track of which instruction was the last def of a
+ // physical register. This is a purely local property, because all physical
+ // register references are presumed dead across basic blocks.
+ std::vector<MachineInstr *> PhysRegDef;
+
+ // PhysRegUse - Keep track of which instruction was the last use of a
+ // physical register. This is a purely local property, because all physical
+ // register references are presumed dead across basic blocks.
+ std::vector<MachineInstr *> PhysRegUse;
+
+ // PHIVarInfo - PHIVarInfo[b.getNumber()] holds the virtual registers that
+ // come into PHI nodes of b's successors from block b; see analyzePHINodes.
+ std::vector<SmallVector<unsigned, 4>> PHIVarInfo;
+
+ // DistanceMap - Keep track of the distance of each MI from the start of the
+ // current basic block.
+ DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+ /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
+ /// uses. Pay special attention to the sub-register uses which may come below
+ /// the last use of the whole register.
+ bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
+
+ /// HandleRegMask - Call HandlePhysRegKill for all registers clobbered by Mask.
+ void HandleRegMask(const MachineOperand&);
+
+ void HandlePhysRegUse(unsigned Reg, MachineInstr *MI);
+ void HandlePhysRegDef(unsigned Reg, MachineInstr *MI,
+ SmallVectorImpl<unsigned> &Defs);
+ void UpdatePhysRegDefs(MachineInstr *MI, SmallVectorImpl<unsigned> &Defs);
+
+ /// FindLastRefOrPartRef - Return the last reference or partial reference of
+ /// the specified register.
+ MachineInstr *FindLastRefOrPartRef(unsigned Reg);
+
+ /// FindLastPartialDef - Return the last partial def of the specified
+ /// register. Also returns the sub-registers that are defined by the
+ /// instruction.
+ MachineInstr *FindLastPartialDef(unsigned Reg,
+ SmallSet<unsigned,4> &PartDefRegs);
+
+ /// analyzePHINodes - Gather information about the PHI nodes in here. In
+ /// particular, we want to map the variable information of a virtual
+ /// register which is used in a PHI node. We map that to the BB the vreg
+ /// is coming from.
+ void analyzePHINodes(const MachineFunction& Fn);
+
+ void runOnInstr(MachineInstr *MI, SmallVectorImpl<unsigned> &Defs);
+
+ void runOnBlock(MachineBasicBlock *MBB, unsigned NumRegs);
+public:
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ /// RegisterDefIsDead - Return true if the specified instruction defines the
+ /// specified register, but that definition is dead.
+ bool RegisterDefIsDead(MachineInstr *MI, unsigned Reg) const;
+
+ //===--------------------------------------------------------------------===//
+ // API to update live variable information
+
+ /// replaceKillInstruction - Update register kill info by replacing a kill
+ /// instruction with a new one.
+ void replaceKillInstruction(unsigned Reg, MachineInstr *OldMI,
+ MachineInstr *NewMI);
+
+ /// addVirtualRegisterKilled - Add information about the fact that the
+ /// specified register is killed after being used by the specified
+ /// instruction. If AddIfNotFound is true, add an implicit operand if it's
+ /// not found.
+ void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
+ bool AddIfNotFound = false) {
+ if (MI->addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
+ getVarInfo(IncomingReg).Kills.push_back(MI);
+ }
+
+ /// removeVirtualRegisterKilled - Remove the specified kill of the virtual
+ /// register from the live variable information. Returns true if the
+ /// variable was marked as killed by the specified instruction,
+ /// false otherwise.
+ bool removeVirtualRegisterKilled(unsigned reg, MachineInstr *MI) {
+ if (!getVarInfo(reg).removeKill(MI))
+ return false;
+
+ bool Removed = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
+ MO.setIsKill(false);
+ Removed = true;
+ break;
+ }
+ }
+
+ assert(Removed && "Register is not used by this instruction!");
+ (void)Removed;
+ return true;
+ }
+
+ /// removeVirtualRegistersKilled - Remove all killed info for the specified
+ /// instruction.
+ void removeVirtualRegistersKilled(MachineInstr *MI);
+
+ /// addVirtualRegisterDead - Add information about the fact that the specified
+ /// register is dead after being used by the specified instruction. If
+ /// AddIfNotFound is true, add an implicit operand if it's not found.
+ void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI,
+ bool AddIfNotFound = false) {
+ if (MI->addRegisterDead(IncomingReg, TRI, AddIfNotFound))
+ getVarInfo(IncomingReg).Kills.push_back(MI);
+ }
+
+ /// removeVirtualRegisterDead - Remove the specified kill of the virtual
+ /// register from the live variable information. Returns true if the
+ /// variable was marked dead at the specified instruction, false
+ /// otherwise.
+ bool removeVirtualRegisterDead(unsigned reg, MachineInstr *MI) {
+ if (!getVarInfo(reg).removeKill(MI))
+ return false;
+
+ bool Removed = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
+ MO.setIsDead(false);
+ Removed = true;
+ break;
+ }
+ }
+ assert(Removed && "Register is not defined by this instruction!");
+ (void)Removed;
+ return true;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ void releaseMemory() override {
+ VirtRegInfo.clear();
+ }
+
+ /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
+ /// register.
+ VarInfo &getVarInfo(unsigned RegIdx);
+
+ void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+ MachineBasicBlock *BB);
+ void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+ MachineBasicBlock *BB,
+ std::vector<MachineBasicBlock*> &WorkList);
+ void HandleVirtRegDef(unsigned reg, MachineInstr *MI);
+ void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
+ MachineInstr *MI);
+
+ bool isLiveIn(unsigned Reg, const MachineBasicBlock &MBB) {
+ return getVarInfo(Reg).isLiveIn(MBB, Reg, *MRI);
+ }
+
+ /// isLiveOut - Determine if Reg is live out from MBB, when not considering
+ /// PHI nodes. This means that Reg is either killed by a successor block or
+ /// passed through one.
+ bool isLiveOut(unsigned Reg, const MachineBasicBlock &MBB);
+
+ /// addNewBlock - Add a new basic block BB between DomBB and SuccBB. All
+ /// variables that are live out of DomBB and live into SuccBB will be marked
+ /// as passing live through BB. This method assumes that the machine code is
+ /// still in SSA form.
+ void addNewBlock(MachineBasicBlock *BB,
+ MachineBasicBlock *DomBB,
+ MachineBasicBlock *SuccBB);
+
+ /// isPHIJoin - Return true if Reg is a phi join register.
+ bool isPHIJoin(unsigned Reg) { return PHIJoins.test(Reg); }
+
+ /// setPHIJoin - Mark Reg as a phi join register.
+ void setPHIJoin(unsigned Reg) { PHIJoins.set(Reg); }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MIRParser/MIRParser.h b/contrib/llvm/include/llvm/CodeGen/MIRParser/MIRParser.h
new file mode 100644
index 0000000..a569d5e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MIRParser/MIRParser.h
@@ -0,0 +1,81 @@
+//===- MIRParser.h - MIR serialization format parser ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This MIR serialization library is currently a work in progress. It can't
+// serialize machine functions at this time.
+//
+// This file declares the functions that parse the MIR serialization format
+// files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+#define LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineFunctionInitializer.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <memory>
+
+namespace llvm {
+
+class MIRParserImpl;
+class SMDiagnostic;
+
+/// This class initializes machine functions by applying the state loaded from
+/// a MIR file.
+class MIRParser : public MachineFunctionInitializer {
+ std::unique_ptr<MIRParserImpl> Impl;
+
+public:
+ MIRParser(std::unique_ptr<MIRParserImpl> Impl);
+ MIRParser(const MIRParser &) = delete;
+ ~MIRParser() override;
+
+ /// Parse the optional LLVM IR module that's embedded in the MIR file.
+ ///
+ /// A new, empty module is created if the LLVM IR isn't present.
+ /// Returns null if a parsing error occurred.
+ std::unique_ptr<Module> parseLLVMModule();
+
+ /// Initialize the machine function to the state that's described in the MIR
+ /// file.
+ ///
+ /// Return true if an error occurred.
+ bool initializeMachineFunction(MachineFunction &MF) override;
+};
+
+/// This function is the main interface to the MIR serialization format parser.
+///
+/// It reads in a MIR file and returns a MIR parser that can parse the embedded
+/// LLVM IR module and initialize the machine functions by parsing the machine
+/// function's state.
+///
+/// \param Filename - The name of the file to parse.
+/// \param Error - Error result info.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser> createMIRParserFromFile(StringRef Filename,
+ SMDiagnostic &Error,
+ LLVMContext &Context);
+
+/// This function is another interface to the MIR serialization format parser.
+///
+/// It returns a MIR parser that works with the given memory buffer and that can
+/// parse the embedded LLVM IR module and initialize the machine functions by
+/// parsing the machine function's state.
+///
+/// \param Contents - The MemoryBuffer containing the machine level IR.
+/// \param Context - Context which will be used for the parsed LLVM IR module.
+std::unique_ptr<MIRParser>
+createMIRParser(std::unique_ptr<MemoryBuffer> Contents, LLVMContext &Context);
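+
+// Illustrative usage (not part of this header):
+//
+//   LLVMContext Context;
+//   SMDiagnostic Error;
+//   std::unique_ptr<MIRParser> P =
+//       createMIRParserFromFile("input.mir", Error, Context);
+//   std::unique_ptr<Module> M = P ? P->parseLLVMModule() : nullptr;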
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_MIRPARSER_MIRPARSER_H
diff --git a/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h b/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h
new file mode 100644
index 0000000..14d3744
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MIRYamlMapping.h
@@ -0,0 +1,430 @@
+//===- MIRYamlMapping.h - Describes the mapping between MIR and YAML -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MIR serialization library is currently a work in progress. It can't
+// serialize machine functions at this time.
+//
+// This file implements the mapping between various MIR data structures and
+// their corresponding YAML representation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_CODEGEN_MIRYAMLMAPPING_H
+#define LLVM_LIB_CODEGEN_MIRYAMLMAPPING_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <vector>
+
+namespace llvm {
+namespace yaml {
+
+/// A wrapper around std::string which contains a source range that's being
+/// set during parsing.
+struct StringValue {
+ std::string Value;
+ SMRange SourceRange;
+
+ StringValue() {}
+ StringValue(std::string Value) : Value(std::move(Value)) {}
+
+ bool operator==(const StringValue &Other) const {
+ return Value == Other.Value;
+ }
+};
+
+template <> struct ScalarTraits<StringValue> {
+ static void output(const StringValue &S, void *, llvm::raw_ostream &OS) {
+ OS << S.Value;
+ }
+
+ static StringRef input(StringRef Scalar, void *Ctx, StringValue &S) {
+ S.Value = Scalar.str();
+ if (const auto *Node =
+ reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+ S.SourceRange = Node->getSourceRange();
+ return "";
+ }
+
+ static bool mustQuote(StringRef Scalar) { return needsQuotes(Scalar); }
+};
+
+struct FlowStringValue : StringValue {
+ FlowStringValue() {}
+ FlowStringValue(std::string Value) : StringValue(Value) {}
+};
+
+template <> struct ScalarTraits<FlowStringValue> {
+ static void output(const FlowStringValue &S, void *, llvm::raw_ostream &OS) {
+ return ScalarTraits<StringValue>::output(S, nullptr, OS);
+ }
+
+ static StringRef input(StringRef Scalar, void *Ctx, FlowStringValue &S) {
+ return ScalarTraits<StringValue>::input(Scalar, Ctx, S);
+ }
+
+ static bool mustQuote(StringRef Scalar) { return needsQuotes(Scalar); }
+};
+
+struct BlockStringValue {
+ StringValue Value;
+};
+
+template <> struct BlockScalarTraits<BlockStringValue> {
+ static void output(const BlockStringValue &S, void *Ctx, raw_ostream &OS) {
+ return ScalarTraits<StringValue>::output(S.Value, Ctx, OS);
+ }
+
+ static StringRef input(StringRef Scalar, void *Ctx, BlockStringValue &S) {
+ return ScalarTraits<StringValue>::input(Scalar, Ctx, S.Value);
+ }
+};
+
+/// A wrapper around unsigned which contains a source range that's being set
+/// during parsing.
+struct UnsignedValue {
+ unsigned Value;
+ SMRange SourceRange;
+
+ UnsignedValue() : Value(0) {}
+ UnsignedValue(unsigned Value) : Value(Value) {}
+
+ bool operator==(const UnsignedValue &Other) const {
+ return Value == Other.Value;
+ }
+};
+
+template <> struct ScalarTraits<UnsignedValue> {
+ static void output(const UnsignedValue &Value, void *Ctx, raw_ostream &OS) {
+ return ScalarTraits<unsigned>::output(Value.Value, Ctx, OS);
+ }
+
+ static StringRef input(StringRef Scalar, void *Ctx, UnsignedValue &Value) {
+ if (const auto *Node =
+ reinterpret_cast<yaml::Input *>(Ctx)->getCurrentNode())
+ Value.SourceRange = Node->getSourceRange();
+ return ScalarTraits<unsigned>::input(Scalar, Ctx, Value.Value);
+ }
+
+ static bool mustQuote(StringRef Scalar) {
+ return ScalarTraits<unsigned>::mustQuote(Scalar);
+ }
+};
+
+template <> struct ScalarEnumerationTraits<MachineJumpTableInfo::JTEntryKind> {
+ static void enumeration(yaml::IO &IO,
+ MachineJumpTableInfo::JTEntryKind &EntryKind) {
+ IO.enumCase(EntryKind, "block-address",
+ MachineJumpTableInfo::EK_BlockAddress);
+ IO.enumCase(EntryKind, "gp-rel64-block-address",
+ MachineJumpTableInfo::EK_GPRel64BlockAddress);
+ IO.enumCase(EntryKind, "gp-rel32-block-address",
+ MachineJumpTableInfo::EK_GPRel32BlockAddress);
+ IO.enumCase(EntryKind, "label-difference32",
+ MachineJumpTableInfo::EK_LabelDifference32);
+ IO.enumCase(EntryKind, "inline", MachineJumpTableInfo::EK_Inline);
+ IO.enumCase(EntryKind, "custom32", MachineJumpTableInfo::EK_Custom32);
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::StringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::FlowStringValue)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::yaml::UnsignedValue)
+
+namespace llvm {
+namespace yaml {
+
+struct VirtualRegisterDefinition {
+ UnsignedValue ID;
+ StringValue Class;
+ StringValue PreferredRegister;
+ // TODO: Serialize the target specific register hints.
+};
+
+template <> struct MappingTraits<VirtualRegisterDefinition> {
+ static void mapping(IO &YamlIO, VirtualRegisterDefinition &Reg) {
+ YamlIO.mapRequired("id", Reg.ID);
+ YamlIO.mapRequired("class", Reg.Class);
+ YamlIO.mapOptional("preferred-register", Reg.PreferredRegister,
+ StringValue()); // Don't print out when it's empty.
+ }
+
+ static const bool flow = true;
+};
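+
+// With the flow mapping above, each register definition serializes on a
+// single line, e.g. (illustrative):
+//
+//   registers:
+//     - { id: 0, class: gr32 }
+//     - { id: 1, class: gr32, preferred-register: '%eax' }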
+
+struct MachineFunctionLiveIn {
+ StringValue Register;
+ StringValue VirtualRegister;
+};
+
+template <> struct MappingTraits<MachineFunctionLiveIn> {
+ static void mapping(IO &YamlIO, MachineFunctionLiveIn &LiveIn) {
+ YamlIO.mapRequired("reg", LiveIn.Register);
+ YamlIO.mapOptional(
+ "virtual-reg", LiveIn.VirtualRegister,
+ StringValue()); // Don't print the virtual register when it's empty.
+ }
+
+ static const bool flow = true;
+};
+
+/// Serializable representation of a stack object from the MachineFrameInfo
+/// class.
+///
+/// The flags 'isImmutable' and 'isAliased' aren't serialized, as they are
+/// determined by the object's type and frame information flags.
+/// Dead stack objects aren't serialized.
+///
+/// The 'isPreallocated' flag is determined by the local offset.
+struct MachineStackObject {
+ enum ObjectType { DefaultType, SpillSlot, VariableSized };
+ UnsignedValue ID;
+ StringValue Name;
+ // TODO: Serialize unnamed LLVM alloca reference.
+ ObjectType Type = DefaultType;
+ int64_t Offset = 0;
+ uint64_t Size = 0;
+ unsigned Alignment = 0;
+ StringValue CalleeSavedRegister;
+ Optional<int64_t> LocalOffset;
+ StringValue DebugVar;
+ StringValue DebugExpr;
+ StringValue DebugLoc;
+};
+
+template <> struct ScalarEnumerationTraits<MachineStackObject::ObjectType> {
+ static void enumeration(yaml::IO &IO, MachineStackObject::ObjectType &Type) {
+ IO.enumCase(Type, "default", MachineStackObject::DefaultType);
+ IO.enumCase(Type, "spill-slot", MachineStackObject::SpillSlot);
+ IO.enumCase(Type, "variable-sized", MachineStackObject::VariableSized);
+ }
+};
+
+template <> struct MappingTraits<MachineStackObject> {
+ static void mapping(yaml::IO &YamlIO, MachineStackObject &Object) {
+ YamlIO.mapRequired("id", Object.ID);
+ YamlIO.mapOptional("name", Object.Name,
+ StringValue()); // Don't print out an empty name.
+ YamlIO.mapOptional(
+ "type", Object.Type,
+ MachineStackObject::DefaultType); // Don't print the default type.
+ YamlIO.mapOptional("offset", Object.Offset);
+ if (Object.Type != MachineStackObject::VariableSized)
+ YamlIO.mapRequired("size", Object.Size);
+ YamlIO.mapOptional("alignment", Object.Alignment);
+ YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("local-offset", Object.LocalOffset);
+ YamlIO.mapOptional("di-variable", Object.DebugVar,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("di-expression", Object.DebugExpr,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("di-location", Object.DebugLoc,
+ StringValue()); // Don't print it out when it's empty.
+ }
+
+ static const bool flow = true;
+};
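+
+// An illustrative stack object produced by the mapping above:
+//
+//   stack:
+//     - { id: 0, name: x, type: spill-slot, offset: -12, size: 4,
+//         alignment: 4 }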
+
+/// Serializable representation of a fixed stack object from the
+/// MachineFrameInfo class.
+struct FixedMachineStackObject {
+ enum ObjectType { DefaultType, SpillSlot };
+ UnsignedValue ID;
+ ObjectType Type = DefaultType;
+ int64_t Offset = 0;
+ uint64_t Size = 0;
+ unsigned Alignment = 0;
+ bool IsImmutable = false;
+ bool IsAliased = false;
+ StringValue CalleeSavedRegister;
+};
+
+template <>
+struct ScalarEnumerationTraits<FixedMachineStackObject::ObjectType> {
+ static void enumeration(yaml::IO &IO,
+ FixedMachineStackObject::ObjectType &Type) {
+ IO.enumCase(Type, "default", FixedMachineStackObject::DefaultType);
+ IO.enumCase(Type, "spill-slot", FixedMachineStackObject::SpillSlot);
+ }
+};
+
+template <> struct MappingTraits<FixedMachineStackObject> {
+ static void mapping(yaml::IO &YamlIO, FixedMachineStackObject &Object) {
+ YamlIO.mapRequired("id", Object.ID);
+ YamlIO.mapOptional(
+ "type", Object.Type,
+ FixedMachineStackObject::DefaultType); // Don't print the default type.
+ YamlIO.mapOptional("offset", Object.Offset);
+ YamlIO.mapOptional("size", Object.Size);
+ YamlIO.mapOptional("alignment", Object.Alignment);
+ if (Object.Type != FixedMachineStackObject::SpillSlot) {
+ YamlIO.mapOptional("isImmutable", Object.IsImmutable);
+ YamlIO.mapOptional("isAliased", Object.IsAliased);
+ }
+ YamlIO.mapOptional("callee-saved-register", Object.CalleeSavedRegister,
+ StringValue()); // Don't print it out when it's empty.
+ }
+
+ static const bool flow = true;
+};
+
+struct MachineConstantPoolValue {
+ UnsignedValue ID;
+ StringValue Value;
+ unsigned Alignment = 0;
+};
+
+template <> struct MappingTraits<MachineConstantPoolValue> {
+ static void mapping(IO &YamlIO, MachineConstantPoolValue &Constant) {
+ YamlIO.mapRequired("id", Constant.ID);
+ YamlIO.mapOptional("value", Constant.Value);
+ YamlIO.mapOptional("alignment", Constant.Alignment);
+ }
+};
+
+struct MachineJumpTable {
+ struct Entry {
+ UnsignedValue ID;
+ std::vector<FlowStringValue> Blocks;
+ };
+
+ MachineJumpTableInfo::JTEntryKind Kind = MachineJumpTableInfo::EK_Custom32;
+ std::vector<Entry> Entries;
+};
+
+template <> struct MappingTraits<MachineJumpTable::Entry> {
+ static void mapping(IO &YamlIO, MachineJumpTable::Entry &Entry) {
+ YamlIO.mapRequired("id", Entry.ID);
+ YamlIO.mapOptional("blocks", Entry.Blocks);
+ }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineFunctionLiveIn)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::VirtualRegisterDefinition)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::FixedMachineStackObject)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineConstantPoolValue)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::MachineJumpTable::Entry)
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<MachineJumpTable> {
+ static void mapping(IO &YamlIO, MachineJumpTable &JT) {
+ YamlIO.mapRequired("kind", JT.Kind);
+ YamlIO.mapOptional("entries", JT.Entries);
+ }
+};
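+
+// An illustrative serialized jump table using the mappings above:
+//
+//   jumpTable:
+//     kind:            label-difference32
+//     entries:
+//       - id:          0
+//         blocks:      [ '%bb.1', '%bb.2' ]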
+
+/// Serializable representation of MachineFrameInfo.
+///
+/// Doesn't serialize attributes like 'StackAlignment', 'IsStackRealignable' and
+/// 'RealignOption' as they are determined by the target and LLVM function
+/// attributes.
+/// It also doesn't serialize attributes like 'NumFixedObject' and
+/// 'HasVarSizedObjects' as they are determined by the frame objects themselves.
+struct MachineFrameInfo {
+ bool IsFrameAddressTaken = false;
+ bool IsReturnAddressTaken = false;
+ bool HasStackMap = false;
+ bool HasPatchPoint = false;
+ uint64_t StackSize = 0;
+ int OffsetAdjustment = 0;
+ unsigned MaxAlignment = 0;
+ bool AdjustsStack = false;
+ bool HasCalls = false;
+ StringValue StackProtector;
+ // TODO: Serialize FunctionContextIdx
+ unsigned MaxCallFrameSize = 0;
+ bool HasOpaqueSPAdjustment = false;
+ bool HasVAStart = false;
+ bool HasMustTailInVarArgFunc = false;
+ StringValue SavePoint;
+ StringValue RestorePoint;
+};
+
+template <> struct MappingTraits<MachineFrameInfo> {
+ static void mapping(IO &YamlIO, MachineFrameInfo &MFI) {
+ YamlIO.mapOptional("isFrameAddressTaken", MFI.IsFrameAddressTaken);
+ YamlIO.mapOptional("isReturnAddressTaken", MFI.IsReturnAddressTaken);
+ YamlIO.mapOptional("hasStackMap", MFI.HasStackMap);
+ YamlIO.mapOptional("hasPatchPoint", MFI.HasPatchPoint);
+ YamlIO.mapOptional("stackSize", MFI.StackSize);
+ YamlIO.mapOptional("offsetAdjustment", MFI.OffsetAdjustment);
+ YamlIO.mapOptional("maxAlignment", MFI.MaxAlignment);
+ YamlIO.mapOptional("adjustsStack", MFI.AdjustsStack);
+ YamlIO.mapOptional("hasCalls", MFI.HasCalls);
+ YamlIO.mapOptional("stackProtector", MFI.StackProtector,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize);
+ YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment);
+ YamlIO.mapOptional("hasVAStart", MFI.HasVAStart);
+ YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc);
+ YamlIO.mapOptional("savePoint", MFI.SavePoint,
+ StringValue()); // Don't print it out when it's empty.
+ YamlIO.mapOptional("restorePoint", MFI.RestorePoint,
+ StringValue()); // Don't print it out when it's empty.
+ }
+};
+
+struct MachineFunction {
+ StringRef Name;
+ unsigned Alignment = 0;
+ bool ExposesReturnsTwice = false;
+ bool HasInlineAsm = false;
+ // Register information
+ bool IsSSA = false;
+ bool TracksRegLiveness = false;
+ bool TracksSubRegLiveness = false;
+ std::vector<VirtualRegisterDefinition> VirtualRegisters;
+ std::vector<MachineFunctionLiveIn> LiveIns;
+ Optional<std::vector<FlowStringValue>> CalleeSavedRegisters;
+ // TODO: Serialize the various register masks.
+ // Frame information
+ MachineFrameInfo FrameInfo;
+ std::vector<FixedMachineStackObject> FixedStackObjects;
+ std::vector<MachineStackObject> StackObjects;
+ std::vector<MachineConstantPoolValue> Constants; /// Constant pool.
+ MachineJumpTable JumpTableInfo;
+ BlockStringValue Body;
+};
+
+template <> struct MappingTraits<MachineFunction> {
+ static void mapping(IO &YamlIO, MachineFunction &MF) {
+ YamlIO.mapRequired("name", MF.Name);
+ YamlIO.mapOptional("alignment", MF.Alignment);
+ YamlIO.mapOptional("exposesReturnsTwice", MF.ExposesReturnsTwice);
+ YamlIO.mapOptional("hasInlineAsm", MF.HasInlineAsm);
+ YamlIO.mapOptional("isSSA", MF.IsSSA);
+ YamlIO.mapOptional("tracksRegLiveness", MF.TracksRegLiveness);
+ YamlIO.mapOptional("tracksSubRegLiveness", MF.TracksSubRegLiveness);
+ YamlIO.mapOptional("registers", MF.VirtualRegisters);
+ YamlIO.mapOptional("liveins", MF.LiveIns);
+ YamlIO.mapOptional("calleeSavedRegisters", MF.CalleeSavedRegisters);
+ YamlIO.mapOptional("frameInfo", MF.FrameInfo);
+ YamlIO.mapOptional("fixedStack", MF.FixedStackObjects);
+ YamlIO.mapOptional("stack", MF.StackObjects);
+ YamlIO.mapOptional("constants", MF.Constants);
+ if (!YamlIO.outputting() || !MF.JumpTableInfo.Entries.empty())
+ YamlIO.mapOptional("jumpTable", MF.JumpTableInfo);
+ YamlIO.mapOptional("body", MF.Body);
+ }
+};
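+
+// An illustrative MIR document matching this mapping (the machine basic
+// block syntax inside 'body' is target-specific and shown here only as an
+// example):
+//
+//   ---
+//   name:            foo
+//   alignment:       4
+//   tracksRegLiveness: true
+//   frameInfo:
+//     stackSize:     8
+//   body: |
+//     bb.0:
+//       RETQ
+//   ...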
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachORelocation.h b/contrib/llvm/include/llvm/CodeGen/MachORelocation.h
new file mode 100644
index 0000000..8c9b7a8
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachORelocation.h
@@ -0,0 +1,56 @@
+//===-- MachORelocation.h - Mach-O Relocation Info --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachORelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHORELOCATION_H
+#define LLVM_CODEGEN_MACHORELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+ /// MachORelocation - This struct contains information about each relocation
+ /// that needs to be emitted to the file.
+ /// see <mach-o/reloc.h>
+ class MachORelocation {
+ uint32_t r_address; // offset in the section to what is being relocated
+ uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
+ bool r_pcrel; // was relocated pc-relative already
+ uint8_t r_length; // length = 2 ^ r_length
+ bool r_extern; // 1 = r_symbolnum is a symbol index, 0 = a section index
+ uint8_t r_type; // if not 0, machine-specific relocation type.
+ bool r_scattered; // 1 = scattered, 0 = non-scattered
+ int32_t r_value; // the value the item to be relocated is referring
+ // to.
+ public:
+ uint32_t getPackedFields() const {
+ if (r_scattered)
+ return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
+ ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
+ else
+ return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
+ (r_extern << 4) | (r_type & 15);
+ }
+ uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
+ uint32_t getRawAddress() const { return r_address; }
+
+ MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
+ bool ext, uint8_t type, bool scattered = false,
+ int32_t value = 0) :
+ r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
+ r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
+ };
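+
+ // A worked example (illustrative, not part of this header): a pc-relative,
+ // external, 4-byte (r_length == 2) relocation at section offset 0x10
+ // against symbol index 5 packs, via the non-scattered branch above, to:
+ //
+ //   MachORelocation R(0x10, 5, /*pcrel=*/true, /*len=*/2, /*ext=*/true,
+ //                     /*type=*/0);
+ //   uint32_t Packed = R.getPackedFields();
+ //   // Packed == (5 << 8) | (1 << 7) | (2 << 5) | (1 << 4) == 0x5D0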
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_MACHORELOCATION_H
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
new file mode 100644
index 0000000..3d58c49
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBasicBlock.h
@@ -0,0 +1,883 @@
+//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect the sequence of machine instructions for a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
+#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include <functional>
+
+namespace llvm {
+
+class Pass;
+class BasicBlock;
+class MachineFunction;
+class MCSymbol;
+class MIPrinter;
+class SlotIndexes;
+class StringRef;
+class raw_ostream;
+class MachineBranchProbabilityInfo;
+
+// Forward declaration to avoid circular include problem with TargetRegisterInfo
+typedef unsigned LaneBitmask;
+
+template <>
+struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> {
+private:
+ mutable ilist_half_node<MachineInstr> Sentinel;
+
+ // This is only set by the MachineBasicBlock that owns the instruction list.
+ friend class MachineBasicBlock;
+ MachineBasicBlock* Parent;
+
+public:
+ MachineInstr *createSentinel() const {
+ return static_cast<MachineInstr*>(&Sentinel);
+ }
+ void destroySentinel(MachineInstr *) const {}
+
+ MachineInstr *provideInitialHead() const { return createSentinel(); }
+ MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); }
+ static void noteHead(MachineInstr*, MachineInstr*) {}
+
+ void addNodeToList(MachineInstr* N);
+ void removeNodeFromList(MachineInstr* N);
+ void transferNodesFromList(ilist_traits &SrcTraits,
+ ilist_iterator<MachineInstr> First,
+ ilist_iterator<MachineInstr> Last);
+ void deleteNode(MachineInstr *N);
+private:
+ void createNode(const MachineInstr &);
+};
+
+class MachineBasicBlock
+ : public ilist_node_with_parent<MachineBasicBlock, MachineFunction> {
+public:
+ /// Pair of physical register and lane mask.
+ /// This is not simply a std::pair typedef because the members should be named
+ /// clearly as they both have an integer type.
+ struct RegisterMaskPair {
+ public:
+ MCPhysReg PhysReg;
+ LaneBitmask LaneMask;
+
+ RegisterMaskPair(MCPhysReg PhysReg, LaneBitmask LaneMask)
+ : PhysReg(PhysReg), LaneMask(LaneMask) {}
+ };
+
+private:
+ typedef ilist<MachineInstr> Instructions;
+ Instructions Insts;
+ const BasicBlock *BB;
+ int Number;
+ MachineFunction *xParent;
+
+ /// Keep track of the predecessor / successor basic blocks.
+ std::vector<MachineBasicBlock *> Predecessors;
+ std::vector<MachineBasicBlock *> Successors;
+
+ /// Keep track of the probabilities to the successors. This vector has the
+ /// same order as Successors, or it is empty if we don't use it (disable
+ /// optimization).
+ std::vector<BranchProbability> Probs;
+ typedef std::vector<BranchProbability>::iterator probability_iterator;
+ typedef std::vector<BranchProbability>::const_iterator
+ const_probability_iterator;
+
+ /// Keep track of the physical registers that are live in to the basic block.
+ typedef std::vector<RegisterMaskPair> LiveInVector;
+ LiveInVector LiveIns;
+
+ /// Alignment of the basic block. Zero if the basic block does not need to be
+ /// aligned. The alignment is specified as log2(bytes).
+ unsigned Alignment = 0;
+
+ /// Indicate that this basic block is entered via an exception handler.
+ bool IsEHPad = false;
+
+ /// Indicate that this basic block is potentially the target of an indirect
+ /// branch.
+ bool AddressTaken = false;
+
+ /// Indicate that this basic block is the entry block of an EH funclet.
+ bool IsEHFuncletEntry = false;
+
+ /// Indicate that this basic block is the entry block of a cleanup funclet.
+ bool IsCleanupFuncletEntry = false;
+
+ /// \brief Since getSymbol is a relatively heavy-weight operation, the symbol
+ /// is only computed once and is cached.
+ mutable MCSymbol *CachedMCSymbol = nullptr;
+
+ // Intrusive list support
+ MachineBasicBlock() {}
+
+ explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
+
+ ~MachineBasicBlock();
+
+ // MachineBasicBlocks are allocated and owned by MachineFunction.
+ friend class MachineFunction;
+
+public:
+ /// Return the LLVM basic block that this instance corresponded to originally.
+ /// Note that this may be NULL if this instance does not correspond directly
+ /// to an LLVM basic block.
+ const BasicBlock *getBasicBlock() const { return BB; }
+
+ /// Return the name of the corresponding LLVM basic block, or "(null)".
+ StringRef getName() const;
+
+ /// Return a formatted string to identify this block and its parent function.
+ std::string getFullName() const;
+
+ /// Test whether this block is potentially the target of an indirect branch.
+ bool hasAddressTaken() const { return AddressTaken; }
+
+ /// Set this block to reflect that it potentially is the target of an indirect
+ /// branch.
+ void setHasAddressTaken() { AddressTaken = true; }
+
+ /// Return the MachineFunction containing this basic block.
+ const MachineFunction *getParent() const { return xParent; }
+ MachineFunction *getParent() { return xParent; }
+
+ /// MachineBasicBlock iterator that automatically skips over MIs that are
+ /// inside bundles (i.e., it walks top-level MIs only).
+ template<typename Ty, typename IterTy>
+ class bundle_iterator
+ : public std::iterator<std::bidirectional_iterator_tag, Ty, ptrdiff_t> {
+ IterTy MII;
+
+ public:
+ bundle_iterator(IterTy MI) : MII(MI) {}
+
+ bundle_iterator(Ty &MI) : MII(MI) {
+ assert(!MI.isBundledWithPred() &&
+ "It's not legal to initialize bundle_iterator with a bundled MI");
+ }
+ bundle_iterator(Ty *MI) : MII(MI) {
+ assert((!MI || !MI->isBundledWithPred()) &&
+ "It's not legal to initialize bundle_iterator with a bundled MI");
+ }
+ // Template allows conversion from const to nonconst.
+ template<class OtherTy, class OtherIterTy>
+ bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
+ : MII(I.getInstrIterator()) {}
+ bundle_iterator() : MII(nullptr) {}
+
+ Ty &operator*() const { return *MII; }
+ Ty *operator->() const { return &operator*(); }
+
+ operator Ty *() const { return MII.getNodePtrUnchecked(); }
+
+ bool operator==(const bundle_iterator &X) const {
+ return MII == X.MII;
+ }
+ bool operator!=(const bundle_iterator &X) const {
+ return !operator==(X);
+ }
+
+ // Increment and decrement operators...
+ bundle_iterator &operator--() { // predecrement - Back up
+ do --MII;
+ while (MII->isBundledWithPred());
+ return *this;
+ }
+ bundle_iterator &operator++() { // preincrement - Advance
+ while (MII->isBundledWithSucc())
+ ++MII;
+ ++MII;
+ return *this;
+ }
+ bundle_iterator operator--(int) { // postdecrement operators...
+ bundle_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+ bundle_iterator operator++(int) { // postincrement operators...
+ bundle_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ IterTy getInstrIterator() const {
+ return MII;
+ }
+ };
+
+ typedef Instructions::iterator instr_iterator;
+ typedef Instructions::const_iterator const_instr_iterator;
+ typedef std::reverse_iterator<instr_iterator> reverse_instr_iterator;
+ typedef
+ std::reverse_iterator<const_instr_iterator> const_reverse_instr_iterator;
+
+ typedef
+ bundle_iterator<MachineInstr,instr_iterator> iterator;
+ typedef
+ bundle_iterator<const MachineInstr,const_instr_iterator> const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+
+ unsigned size() const { return (unsigned)Insts.size(); }
+ bool empty() const { return Insts.empty(); }
+
+ MachineInstr &instr_front() { return Insts.front(); }
+ MachineInstr &instr_back() { return Insts.back(); }
+ const MachineInstr &instr_front() const { return Insts.front(); }
+ const MachineInstr &instr_back() const { return Insts.back(); }
+
+ MachineInstr &front() { return Insts.front(); }
+ MachineInstr &back() { return *--end(); }
+ const MachineInstr &front() const { return Insts.front(); }
+ const MachineInstr &back() const { return *--end(); }
+
+ instr_iterator instr_begin() { return Insts.begin(); }
+ const_instr_iterator instr_begin() const { return Insts.begin(); }
+ instr_iterator instr_end() { return Insts.end(); }
+ const_instr_iterator instr_end() const { return Insts.end(); }
+ reverse_instr_iterator instr_rbegin() { return Insts.rbegin(); }
+ const_reverse_instr_iterator instr_rbegin() const { return Insts.rbegin(); }
+ reverse_instr_iterator instr_rend () { return Insts.rend(); }
+ const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
+
+ iterator begin() { return instr_begin(); }
+ const_iterator begin() const { return instr_begin(); }
+ iterator end () { return instr_end(); }
+ const_iterator end () const { return instr_end(); }
+ reverse_iterator rbegin() { return instr_rbegin(); }
+ const_reverse_iterator rbegin() const { return instr_rbegin(); }
+ reverse_iterator rend () { return instr_rend(); }
+ const_reverse_iterator rend () const { return instr_rend(); }
+
+ /// Support for MachineInstr::getNextNode().
+ static Instructions MachineBasicBlock::*getSublistAccess(MachineInstr *) {
+ return &MachineBasicBlock::Insts;
+ }
+
+ inline iterator_range<iterator> terminators() {
+ return make_range(getFirstTerminator(), end());
+ }
+ inline iterator_range<const_iterator> terminators() const {
+ return make_range(getFirstTerminator(), end());
+ }
+
+ // Machine-CFG iterators
+ typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
+ typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
+ typedef std::vector<MachineBasicBlock *>::reverse_iterator
+ pred_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
+ const_pred_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::reverse_iterator
+ succ_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
+ const_succ_reverse_iterator;
+ pred_iterator pred_begin() { return Predecessors.begin(); }
+ const_pred_iterator pred_begin() const { return Predecessors.begin(); }
+ pred_iterator pred_end() { return Predecessors.end(); }
+ const_pred_iterator pred_end() const { return Predecessors.end(); }
+ pred_reverse_iterator pred_rbegin()
+ { return Predecessors.rbegin();}
+ const_pred_reverse_iterator pred_rbegin() const
+ { return Predecessors.rbegin();}
+ pred_reverse_iterator pred_rend()
+ { return Predecessors.rend(); }
+ const_pred_reverse_iterator pred_rend() const
+ { return Predecessors.rend(); }
+ unsigned pred_size() const {
+ return (unsigned)Predecessors.size();
+ }
+ bool pred_empty() const { return Predecessors.empty(); }
+ succ_iterator succ_begin() { return Successors.begin(); }
+ const_succ_iterator succ_begin() const { return Successors.begin(); }
+ succ_iterator succ_end() { return Successors.end(); }
+ const_succ_iterator succ_end() const { return Successors.end(); }
+ succ_reverse_iterator succ_rbegin()
+ { return Successors.rbegin(); }
+ const_succ_reverse_iterator succ_rbegin() const
+ { return Successors.rbegin(); }
+ succ_reverse_iterator succ_rend()
+ { return Successors.rend(); }
+ const_succ_reverse_iterator succ_rend() const
+ { return Successors.rend(); }
+ unsigned succ_size() const {
+ return (unsigned)Successors.size();
+ }
+ bool succ_empty() const { return Successors.empty(); }
+
+ inline iterator_range<pred_iterator> predecessors() {
+ return make_range(pred_begin(), pred_end());
+ }
+ inline iterator_range<const_pred_iterator> predecessors() const {
+ return make_range(pred_begin(), pred_end());
+ }
+ inline iterator_range<succ_iterator> successors() {
+ return make_range(succ_begin(), succ_end());
+ }
+ inline iterator_range<const_succ_iterator> successors() const {
+ return make_range(succ_begin(), succ_end());
+ }
+
+ // LiveIn management methods.
+
+ /// Adds the specified register as a live in. Note that it is an error to add
+ /// the same register to the same set more than once unless the intention is
+ /// to call sortUniqueLiveIns after all registers are added.
+ void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask = ~0u) {
+ LiveIns.push_back(RegisterMaskPair(PhysReg, LaneMask));
+ }
+ void addLiveIn(const RegisterMaskPair &RegMaskPair) {
+ LiveIns.push_back(RegMaskPair);
+ }
+
+ /// Sorts and uniques the LiveIns vector. It can be significantly faster to do
+ /// this than repeatedly calling isLiveIn before calling addLiveIn for every
+ /// LiveIn insertion.
+ void sortUniqueLiveIns();
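+
+ // An illustrative sketch of the intended pattern (not part of this
+ // header), assuming `MBB` is a MachineBasicBlock* and `CSRegs` is some
+ // list of physical registers:
+ //
+ //   for (MCPhysReg Reg : CSRegs)
+ //     MBB->addLiveIn(Reg);
+ //   MBB->sortUniqueLiveIns();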
+
+ /// Add PhysReg as live in to this block, and ensure that there is a copy of
+ /// PhysReg to a virtual register of class RC. Return the virtual register
+ /// that is a copy of the live in PhysReg.
+ unsigned addLiveIn(MCPhysReg PhysReg, const TargetRegisterClass *RC);
+
+ /// Remove the specified register from the live in set.
+ void removeLiveIn(MCPhysReg Reg, LaneBitmask LaneMask = ~0u);
+
+ /// Return true if the specified register is in the live in set.
+ bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask = ~0u) const;
+
+ // Iteration support for live in sets. These sets are kept in sorted
+ // order by their register number.
+ typedef LiveInVector::const_iterator livein_iterator;
+ livein_iterator livein_begin() const { return LiveIns.begin(); }
+ livein_iterator livein_end() const { return LiveIns.end(); }
+ bool livein_empty() const { return LiveIns.empty(); }
+ iterator_range<livein_iterator> liveins() const {
+ return make_range(livein_begin(), livein_end());
+ }
+
+ /// Get the clobber mask for the start of this basic block. Funclets use this
+ /// to prevent register allocation across funclet transitions.
+ const uint32_t *getBeginClobberMask(const TargetRegisterInfo *TRI) const;
+
+ /// Get the clobber mask for the end of the basic block.
+ /// \see getBeginClobberMask()
+ const uint32_t *getEndClobberMask(const TargetRegisterInfo *TRI) const;
+
+ /// Return alignment of the basic block. The alignment is specified as
+ /// log2(bytes).
+ unsigned getAlignment() const { return Alignment; }
+
+ /// Set alignment of the basic block. The alignment is specified as
+ /// log2(bytes).
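+  /// For example, setAlignment(4) requests 2^4 = 16-byte alignment.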
+ void setAlignment(unsigned Align) { Alignment = Align; }
+
+  /// Returns true if the block is a landing pad. That is, this basic block is
+  /// entered via an exception handler.
+ bool isEHPad() const { return IsEHPad; }
+
+  /// Indicates the block is a landing pad. That is, this basic block is entered
+ /// via an exception handler.
+ void setIsEHPad(bool V = true) { IsEHPad = V; }
+
+ /// If this block has a successor that is a landing pad, return it. Otherwise
+ /// return NULL.
+ const MachineBasicBlock *getLandingPadSuccessor() const;
+
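+  /// Returns true if this block has a successor that is an EH pad.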
+ bool hasEHPadSuccessor() const;
+
+ /// Returns true if this is the entry block of an EH funclet.
+ bool isEHFuncletEntry() const { return IsEHFuncletEntry; }
+
+ /// Indicates if this is the entry block of an EH funclet.
+ void setIsEHFuncletEntry(bool V = true) { IsEHFuncletEntry = V; }
+
+ /// Returns true if this is the entry block of a cleanup funclet.
+ bool isCleanupFuncletEntry() const { return IsCleanupFuncletEntry; }
+
+ /// Indicates if this is the entry block of a cleanup funclet.
+ void setIsCleanupFuncletEntry(bool V = true) { IsCleanupFuncletEntry = V; }
+
+ // Code Layout methods.
+
+ /// Move 'this' block before or after the specified block. This only moves
+  /// the block; it does not modify the CFG or adjust potential fall-throughs at
+ /// the end of the block.
+ void moveBefore(MachineBasicBlock *NewAfter);
+ void moveAfter(MachineBasicBlock *NewBefore);
+
+ /// Update the terminator instructions in block to account for changes to the
+ /// layout. If the block previously used a fallthrough, it may now need a
+ /// branch, and if it previously used branching it may now be able to use a
+ /// fallthrough.
+ void updateTerminator();
+
+ // Machine-CFG mutators
+
+ /// Add Succ as a successor of this MachineBasicBlock. The Predecessors list
+  /// of Succ is automatically updated. The Prob parameter is stored in the
+  /// probabilities list. The default probability is unknown. Mixing known and
+  /// unknown probabilities in the successor list is not allowed. When all
+  /// successors have unknown probabilities, 1/N is returned as the
+  /// probability for each successor, where N is the number of successors.
+ ///
+ /// Note that duplicate Machine CFG edges are not allowed.
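+  ///
+  /// An illustrative sketch (assumes successor blocks IfTrue and IfFalse):
+  /// \code
+  ///   MBB.addSuccessor(IfTrue, BranchProbability(3, 4));
+  ///   MBB.addSuccessor(IfFalse, BranchProbability(1, 4));
+  /// \endcode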
+ void addSuccessor(MachineBasicBlock *Succ,
+ BranchProbability Prob = BranchProbability::getUnknown());
+
+ /// Add Succ as a successor of this MachineBasicBlock. The Predecessors list
+ /// of Succ is automatically updated. The probability is not provided because
+ /// BPI is not available (e.g. -O0 is used), in which case edge probabilities
+ /// won't be used. Using this interface can save some space.
+ void addSuccessorWithoutProb(MachineBasicBlock *Succ);
+
+ /// Set successor probability of a given iterator.
+ void setSuccProbability(succ_iterator I, BranchProbability Prob);
+
+ /// Normalize probabilities of all successors so that the sum of them becomes
+ /// one. This is usually done when the current update on this MBB is done, and
+ /// the sum of its successors' probabilities is not guaranteed to be one. The
+ /// user is responsible for the correct use of this function.
+ /// MBB::removeSuccessor() has an option to do this automatically.
+ void normalizeSuccProbs() {
+ BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
+ }
+
+  /// Validate successors' probabilities and check that their sum is
+  /// approximately one. This only works in DEBUG mode.
+ void validateSuccProbs() const;
+
+ /// Remove successor from the successors list of this MachineBasicBlock. The
+ /// Predecessors list of Succ is automatically updated.
+ /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+ /// after the successor is removed.
+ void removeSuccessor(MachineBasicBlock *Succ,
+ bool NormalizeSuccProbs = false);
+
+ /// Remove specified successor from the successors list of this
+ /// MachineBasicBlock. The Predecessors list of Succ is automatically updated.
+ /// If NormalizeSuccProbs is true, then normalize successors' probabilities
+ /// after the successor is removed.
+ /// Return the iterator to the element after the one removed.
+ succ_iterator removeSuccessor(succ_iterator I,
+ bool NormalizeSuccProbs = false);
+
+ /// Replace successor OLD with NEW and update probability info.
+ void replaceSuccessor(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+  /// Transfers all the successors from FromMBB to this machine basic block
+  /// (i.e., copies all the successors of FromMBB and removes them from
+  /// FromMBB).
+ void transferSuccessors(MachineBasicBlock *FromMBB);
+
+  /// Transfers all the successors, as in transferSuccessors, and updates PHI
+ /// operands in the successor blocks which refer to FromMBB to refer to this.
+ void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB);
+
+ /// Return true if any of the successors have probabilities attached to them.
+ bool hasSuccessorProbabilities() const { return !Probs.empty(); }
+
+ /// Return true if the specified MBB is a predecessor of this block.
+ bool isPredecessor(const MachineBasicBlock *MBB) const;
+
+ /// Return true if the specified MBB is a successor of this block.
+ bool isSuccessor(const MachineBasicBlock *MBB) const;
+
+ /// Return true if the specified MBB will be emitted immediately after this
+ /// block, such that if this block exits by falling through, control will
+ /// transfer to the specified MBB. Note that MBB need not be a successor at
+ /// all, for example if this block ends with an unconditional branch to some
+ /// other block.
+ bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
+
+ /// Return true if the block can implicitly transfer control to the block
+ /// after it by falling off the end of it. This should return false if it can
+ /// reach the block after it, but it uses an explicit branch to do so (e.g., a
+ /// table jump). True is a conservative answer.
+ bool canFallThrough();
+
+ /// Returns a pointer to the first instruction in this block that is not a
+ /// PHINode instruction. When adding instructions to the beginning of the
+ /// basic block, they should be added before the returned value, not before
+  /// the first instruction, which might be a PHI.
+  /// Returns end() if there's no non-PHI instruction.
+ iterator getFirstNonPHI();
+
+ /// Return the first instruction in MBB after I that is not a PHI or a label.
+ /// This is the correct point to insert copies at the beginning of a basic
+ /// block.
+ iterator SkipPHIsAndLabels(iterator I);
+
+ /// Returns an iterator to the first terminator instruction of this basic
+ /// block. If a terminator does not exist, it returns end().
+ iterator getFirstTerminator();
+ const_iterator getFirstTerminator() const {
+ return const_cast<MachineBasicBlock *>(this)->getFirstTerminator();
+ }
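+
+  // Illustrative sketch (assumes TII, DL, and an opcode Opc are in scope):
+  // code that must execute before the block's branch is typically inserted
+  // at getFirstTerminator(), e.g.
+  //
+  //   BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(Opc));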
+
+  /// Same as getFirstTerminator but it ignores bundles and returns an
+ /// instr_iterator instead.
+ instr_iterator getFirstInstrTerminator();
+
+ /// Returns an iterator to the first non-debug instruction in the basic block,
+ /// or end().
+ iterator getFirstNonDebugInstr();
+ const_iterator getFirstNonDebugInstr() const {
+ return const_cast<MachineBasicBlock *>(this)->getFirstNonDebugInstr();
+ }
+
+ /// Returns an iterator to the last non-debug instruction in the basic block,
+ /// or end().
+ iterator getLastNonDebugInstr();
+ const_iterator getLastNonDebugInstr() const {
+ return const_cast<MachineBasicBlock *>(this)->getLastNonDebugInstr();
+ }
+
+ /// Convenience function that returns true if the block ends in a return
+ /// instruction.
+ bool isReturnBlock() const {
+ return !empty() && back().isReturn();
+ }
+
+ /// Split the critical edge from this block to the given successor block, and
+ /// return the newly created block, or null if splitting is not possible.
+ ///
+ /// This function updates LiveVariables, MachineDominatorTree, and
+ /// MachineLoopInfo, as applicable.
+ MachineBasicBlock *SplitCriticalEdge(MachineBasicBlock *Succ, Pass *P);
+
+ void pop_front() { Insts.pop_front(); }
+ void pop_back() { Insts.pop_back(); }
+ void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+
+ /// Insert MI into the instruction list before I, possibly inside a bundle.
+ ///
+ /// If the insertion point is inside a bundle, MI will be added to the bundle,
+ /// otherwise MI will not be added to any bundle. That means this function
+ /// alone can't be used to prepend or append instructions to bundles. See
+ /// MIBundleBuilder::insert() for a more reliable way of doing that.
+ instr_iterator insert(instr_iterator I, MachineInstr *M);
+
+ /// Insert a range of instructions into the instruction list before I.
+ template<typename IT>
+ void insert(iterator I, IT S, IT E) {
+ assert((I == end() || I->getParent() == this) &&
+ "iterator points outside of basic block");
+ Insts.insert(I.getInstrIterator(), S, E);
+ }
+
+ /// Insert MI into the instruction list before I.
+ iterator insert(iterator I, MachineInstr *MI) {
+ assert((I == end() || I->getParent() == this) &&
+ "iterator points outside of basic block");
+ assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+ "Cannot insert instruction with bundle flags");
+ return Insts.insert(I.getInstrIterator(), MI);
+ }
+
+ /// Insert MI into the instruction list after I.
+ iterator insertAfter(iterator I, MachineInstr *MI) {
+ assert((I == end() || I->getParent() == this) &&
+ "iterator points outside of basic block");
+ assert(!MI->isBundledWithPred() && !MI->isBundledWithSucc() &&
+ "Cannot insert instruction with bundle flags");
+ return Insts.insertAfter(I.getInstrIterator(), MI);
+ }
+
+ /// Remove an instruction from the instruction list and delete it.
+ ///
+ /// If the instruction is part of a bundle, the other instructions in the
+ /// bundle will still be bundled after removing the single instruction.
+ instr_iterator erase(instr_iterator I);
+
+ /// Remove an instruction from the instruction list and delete it.
+ ///
+ /// If the instruction is part of a bundle, the other instructions in the
+ /// bundle will still be bundled after removing the single instruction.
+ instr_iterator erase_instr(MachineInstr *I) {
+ return erase(instr_iterator(I));
+ }
+
+ /// Remove a range of instructions from the instruction list and delete them.
+ iterator erase(iterator I, iterator E) {
+ return Insts.erase(I.getInstrIterator(), E.getInstrIterator());
+ }
+
+ /// Remove an instruction or bundle from the instruction list and delete it.
+ ///
+ /// If I points to a bundle of instructions, they are all erased.
+ iterator erase(iterator I) {
+ return erase(I, std::next(I));
+ }
+
+ /// Remove an instruction from the instruction list and delete it.
+ ///
+ /// If I is the head of a bundle of instructions, the whole bundle will be
+ /// erased.
+ iterator erase(MachineInstr *I) {
+ return erase(iterator(I));
+ }
+
+ /// Remove the unbundled instruction from the instruction list without
+ /// deleting it.
+ ///
+  /// This function cannot be used to remove bundled instructions; use
+  /// remove_instr to remove individual instructions from a bundle.
+ MachineInstr *remove(MachineInstr *I) {
+ assert(!I->isBundled() && "Cannot remove bundled instructions");
+ return Insts.remove(instr_iterator(I));
+ }
+
+ /// Remove the possibly bundled instruction from the instruction list
+ /// without deleting it.
+ ///
+ /// If the instruction is part of a bundle, the other instructions in the
+ /// bundle will still be bundled after removing the single instruction.
+ MachineInstr *remove_instr(MachineInstr *I);
+
+ void clear() {
+ Insts.clear();
+ }
+
+ /// Take an instruction from MBB 'Other' at the position From, and insert it
+ /// into this MBB right before 'Where'.
+ ///
+ /// If From points to a bundle of instructions, the whole bundle is moved.
+ void splice(iterator Where, MachineBasicBlock *Other, iterator From) {
+ // The range splice() doesn't allow noop moves, but this one does.
+ if (Where != From)
+ splice(Where, Other, From, std::next(From));
+ }
+
+ /// Take a block of instructions from MBB 'Other' in the range [From, To),
+ /// and insert them into this MBB right before 'Where'.
+ ///
+ /// The instruction at 'Where' must not be included in the range of
+ /// instructions to move.
+ void splice(iterator Where, MachineBasicBlock *Other,
+ iterator From, iterator To) {
+ Insts.splice(Where.getInstrIterator(), Other->Insts,
+ From.getInstrIterator(), To.getInstrIterator());
+ }
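+
+  // Illustrative sketch (assumes MachineInstr *MI lives in FromMBB): hoist a
+  // single instruction to the end of ToMBB, just before its terminator.
+  //
+  //   ToMBB.splice(ToMBB.getFirstTerminator(), &FromMBB,
+  //                MachineBasicBlock::iterator(MI));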
+
+ /// This method unlinks 'this' from the containing function, and returns it,
+ /// but does not delete it.
+ MachineBasicBlock *removeFromParent();
+
+ /// This method unlinks 'this' from the containing function and deletes it.
+ void eraseFromParent();
+
+ /// Given a machine basic block that branched to 'Old', change the code and
+ /// CFG so that it branches to 'New' instead.
+ void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+ /// Various pieces of code can cause excess edges in the CFG to be inserted.
+ /// If we have proven that MBB can only branch to DestA and DestB, remove any
+ /// other MBB successors from the CFG. DestA and DestB can be null. Besides
+ /// DestA and DestB, retain other edges leading to LandingPads (currently
+ /// there can be only one; we don't check or require that here). Note it is
+ /// possible that DestA and/or DestB are LandingPads.
+ bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
+ MachineBasicBlock *DestB,
+ bool IsCond);
+
+ /// Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE
+ /// instructions. Return UnknownLoc if there is none.
+ DebugLoc findDebugLoc(instr_iterator MBBI);
+ DebugLoc findDebugLoc(iterator MBBI) {
+ return findDebugLoc(MBBI.getInstrIterator());
+ }
+
+ /// Possible outcome of a register liveness query to computeRegisterLiveness()
+ enum LivenessQueryResult {
+ LQR_Live, ///< Register is known to be (at least partially) live.
+ LQR_Dead, ///< Register is known to be fully dead.
+ LQR_Unknown ///< Register liveness not decidable from local neighborhood.
+ };
+
+ /// Return whether (physical) register \p Reg has been <def>ined and not
+ /// <kill>ed as of just before \p Before.
+ ///
+  /// Search is localized to a neighborhood of \p Neighborhood instructions
+ /// before (searching for defs or kills) and \p Neighborhood instructions
+ /// after (searching just for defs) \p Before.
+ ///
+ /// \p Reg must be a physical register.
+ LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
+ unsigned Reg,
+ const_iterator Before,
+ unsigned Neighborhood=10) const;
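+
+  // Illustrative query sketch (assumes TRI and a physical register Reg):
+  //
+  //   switch (MBB.computeRegisterLiveness(TRI, Reg, MBB.getFirstTerminator())) {
+  //   case MachineBasicBlock::LQR_Dead:
+  //     // Reg can safely be clobbered here.
+  //     break;
+  //   default:
+  //     // LQR_Live or LQR_Unknown: conservatively treat Reg as live.
+  //     break;
+  //   }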
+
+ // Debugging methods.
+ void dump() const;
+ void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
+ void print(raw_ostream &OS, ModuleSlotTracker &MST,
+ SlotIndexes * = nullptr) const;
+
+ // Printing method used by LoopInfo.
+ void printAsOperand(raw_ostream &OS, bool PrintType = true) const;
+
+ /// MachineBasicBlocks are uniquely numbered at the function level, unless
+ /// they're not in a MachineFunction yet, in which case this will return -1.
+ int getNumber() const { return Number; }
+ void setNumber(int N) { Number = N; }
+
+ /// Return the MCSymbol for this basic block.
+ MCSymbol *getSymbol() const;
+
+private:
+ /// Return probability iterator corresponding to the I successor iterator.
+ probability_iterator getProbabilityIterator(succ_iterator I);
+ const_probability_iterator
+ getProbabilityIterator(const_succ_iterator I) const;
+
+ friend class MachineBranchProbabilityInfo;
+ friend class MIPrinter;
+
+ /// Return probability of the edge from this block to MBB. This method should
+  /// NOT be called directly, but through the getEdgeProbability method of the
+ /// MachineBranchProbabilityInfo class.
+ BranchProbability getSuccProbability(const_succ_iterator Succ) const;
+
+ // Methods used to maintain doubly linked list of blocks...
+ friend struct ilist_traits<MachineBasicBlock>;
+
+ // Machine-CFG mutators
+
+  /// Add Pred as a predecessor of this MachineBasicBlock. Don't do this
+ /// unless you know what you're doing, because it doesn't update Pred's
+ /// successors list. Use Pred->addSuccessor instead.
+ void addPredecessor(MachineBasicBlock *Pred);
+
+ /// Remove Pred as a predecessor of this MachineBasicBlock. Don't do this
+ /// unless you know what you're doing, because it doesn't update Pred's
+ /// successors list. Use Pred->removeSuccessor instead.
+ void removePredecessor(MachineBasicBlock *Pred);
+};
+
+raw_ostream& operator<<(raw_ostream &OS, const MachineBasicBlock &MBB);
+
+// This is useful when building IndexedMaps keyed on basic block pointers.
+struct MBB2NumberFunctor :
+ public std::unary_function<const MachineBasicBlock*, unsigned> {
+ unsigned operator()(const MachineBasicBlock *MBB) const {
+ return MBB->getNumber();
+ }
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for machine basic block graphs (machine-CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks.
+//
+
+template <> struct GraphTraits<MachineBasicBlock *> {
+ typedef MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->succ_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->succ_end();
+ }
+};
+
+template <> struct GraphTraits<const MachineBasicBlock *> {
+ typedef const MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->succ_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->succ_end();
+ }
+};
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks and to walk it
+// in inverse order. Inverse order for a function is considered
+// to be when traversing the predecessor edges of an MBB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
+ typedef MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->pred_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->pred_end();
+ }
+};
+
+template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
+ typedef const MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->pred_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->pred_end();
+ }
+};
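+
+// Illustrative sketch (assumes llvm/ADT/DepthFirstIterator.h is included and
+// a MachineFunction &MF is in scope): these specializations let the generic
+// graph iterators walk the machine CFG in both directions, e.g.
+//
+//   for (MachineBasicBlock *MBB : depth_first(&MF.front()))
+//     visit(MBB);  // hypothetical visitor; forward walk from the entry block
+//   for (auto It = idf_begin(&Exit), E = idf_end(&Exit); It != E; ++It)
+//     visit(*It);  // backward walk over predecessor edges from a block Exit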
+
+/// MachineInstrSpan provides an interface to get an iteration range
+/// containing the instruction it was initialized with, along with all
+/// those instructions inserted prior to or following that instruction
+/// at some point after the MachineInstrSpan is constructed.
+class MachineInstrSpan {
+ MachineBasicBlock &MBB;
+ MachineBasicBlock::iterator I, B, E;
+public:
+ MachineInstrSpan(MachineBasicBlock::iterator I)
+ : MBB(*I->getParent()),
+ I(I),
+ B(I == MBB.begin() ? MBB.end() : std::prev(I)),
+ E(std::next(I)) {}
+
+ MachineBasicBlock::iterator begin() {
+ return B == MBB.end() ? MBB.begin() : std::next(B);
+ }
+ MachineBasicBlock::iterator end() { return E; }
+ bool empty() { return begin() == end(); }
+
+ MachineBasicBlock::iterator getInitial() { return I; }
+};
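+
+// Illustrative sketch (assumes an insertion point MI and a hypothetical
+// helper expandPseudo() that inserts instructions around it):
+//
+//   MachineInstrSpan MIS(MI);
+//   expandPseudo(MI);             // may insert before and after MI
+//   for (MachineInstr &NewMI : MIS)
+//     visit(NewMI);               // sees MI plus everything just inserted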
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
new file mode 100644
index 0000000..feb394e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBlockFrequencyInfo.h
@@ -0,0 +1,71 @@
+//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loops should be simplified before this analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+#define LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/BlockFrequency.h"
+#include <climits>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineBranchProbabilityInfo;
+template <class BlockT> class BlockFrequencyInfoImpl;
+
+/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
+/// to estimate machine basic block frequencies.
+class MachineBlockFrequencyInfo : public MachineFunctionPass {
+ typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType;
+ std::unique_ptr<ImplType> MBFI;
+
+public:
+ static char ID;
+
+ MachineBlockFrequencyInfo();
+
+ ~MachineBlockFrequencyInfo() override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ void releaseMemory() override;
+
+  /// getBlockFreq - Return block frequency. Return 0 if we don't have the
+  /// information. Please note that the initial frequency is equal to 1024. It
+  /// means that we should not rely on the value itself, but only on the
+  /// comparison to the other block frequencies. We do this to avoid using
+  /// floating point.
+  ///
+ BlockFrequency getBlockFreq(const MachineBasicBlock *MBB) const;
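+
+  // Illustrative sketch (assumes MBFI is this analysis and A, B are blocks):
+  // frequencies are only meaningful relative to one another, e.g.
+  //
+  //   bool AIsHotter = MBFI.getBlockFreq(A).getFrequency() >
+  //                    MBFI.getBlockFreq(B).getFrequency();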
+
+ const MachineFunction *getFunction() const;
+ void view() const;
+
+  // Print the block frequency Freq to OS using the current function's entry
+  // frequency to convert Freq into a relative decimal form.
+ raw_ostream &printBlockFreq(raw_ostream &OS, const BlockFrequency Freq) const;
+
+ // Convenience method that attempts to look up the frequency associated with
+ // BB and print it to OS.
+ raw_ostream &printBlockFreq(raw_ostream &OS,
+ const MachineBasicBlock *MBB) const;
+
+ uint64_t getEntryFreq() const;
+
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
new file mode 100644
index 0000000..81b0524
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -0,0 +1,77 @@
+//=- MachineBranchProbabilityInfo.h - Branch Probability Analysis -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to evaluate branch probabilities on machine basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+#define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
+#include <climits>
+#include <numeric>
+
+namespace llvm {
+
+class MachineBranchProbabilityInfo : public ImmutablePass {
+ virtual void anchor();
+
+ // Default weight value. Used when we don't have information about the edge.
+ // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of
+ // the successors have a weight yet. But it doesn't make sense when providing
+ // weight to an edge that may have siblings with non-zero weights. This can
+ // be handled various ways, but it's probably fine for an edge with unknown
+ // weight to just "inherit" the non-zero weight of an adjacent successor.
+ static const uint32_t DEFAULT_WEIGHT = 16;
+
+public:
+ static char ID;
+
+ MachineBranchProbabilityInfo() : ImmutablePass(ID) {
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeMachineBranchProbabilityInfoPass(Registry);
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ // Return edge probability.
+ BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
+
+ // Same as above, but using a const_succ_iterator from Src. This is faster
+ // when the iterator is already available.
+ BranchProbability
+ getEdgeProbability(const MachineBasicBlock *Src,
+ MachineBasicBlock::const_succ_iterator Dst) const;
+
+  // A 'Hot' edge is an edge whose probability is >= 80%.
+ bool isEdgeHot(const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
+
+  // Return a hot successor for the block MBB or null if there isn't one.
+  // NB: This routine's complexity is linear in the number of successors.
+ MachineBasicBlock *getHotSucc(MachineBasicBlock *MBB) const;
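+
+  // Illustrative sketch (assumes MBPI is this analysis and MBB is a block):
+  //
+  //   if (MachineBasicBlock *Hot = MBPI.getHotSucc(MBB))
+  //     layoutAfter(MBB, Hot); // hypothetical helper placing Hot after MBB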
+
+  // Print a value between 0 (0% probability) and 1 (100% probability);
+  // however, the value is never equal to 0, and can be 1 only if the Src
+  // block has a single successor.
+ raw_ostream &printEdgeProbability(raw_ostream &OS,
+ const MachineBasicBlock *Src,
+ const MachineBasicBlock *Dst) const;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineCombinerPattern.h b/contrib/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
new file mode 100644
index 0000000..f389122
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineCombinerPattern.h
@@ -0,0 +1,46 @@
+//===-- llvm/CodeGen/MachineCombinerPattern.h - Combiner patterns --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the instruction patterns supported by the machine combiner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+#define LLVM_CODEGEN_MACHINECOMBINERPATTERN_H
+
+namespace llvm {
+
+/// These are instruction patterns matched by the machine combiner pass.
+enum class MachineCombinerPattern {
+ // These are commutative variants for reassociating a computation chain. See
+ // the comments before getMachineCombinerPatterns() in TargetInstrInfo.cpp.
+ REASSOC_AX_BY,
+ REASSOC_AX_YB,
+ REASSOC_XA_BY,
+ REASSOC_XA_YB,
+
+ // These are multiply-add patterns matched by the AArch64 machine combiner.
+ MULADDW_OP1,
+ MULADDW_OP2,
+ MULSUBW_OP1,
+ MULSUBW_OP2,
+ MULADDWI_OP1,
+ MULSUBWI_OP1,
+ MULADDX_OP1,
+ MULADDX_OP2,
+ MULSUBX_OP1,
+ MULSUBX_OP2,
+ MULADDXI_OP1,
+ MULSUBXI_OP1
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
new file mode 100644
index 0000000..d2036c4
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineConstantPool.h
@@ -0,0 +1,165 @@
+//===-- CodeGen/MachineConstantPool.h - Abstract Constant Pool --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file declares the MachineConstantPool class which is an abstract
+/// constant pool to keep track of constants referenced by a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/MC/SectionKind.h"
+#include <cassert>
+#include <climits>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class FoldingSetNodeID;
+class DataLayout;
+class TargetMachine;
+class Type;
+class MachineConstantPool;
+class raw_ostream;
+
+/// Abstract base class for all machine-specific constant pool value subclasses.
+///
+class MachineConstantPoolValue {
+ virtual void anchor();
+ Type *Ty;
+
+public:
+ explicit MachineConstantPoolValue(Type *ty) : Ty(ty) {}
+ virtual ~MachineConstantPoolValue() {}
+
+ /// getType - get type of this MachineConstantPoolValue.
+ ///
+ Type *getType() const { return Ty; }
+
+ virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) = 0;
+
+ virtual void addSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
+
+ /// print - Implement operator<<
+ virtual void print(raw_ostream &O) const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineConstantPoolValue &V) {
+ V.print(OS);
+ return OS;
+}
+
+/// This class is a data container for one entry in a MachineConstantPool.
+/// It contains a pointer to the value and an offset from the start of
+/// the constant pool.
+/// @brief An entry in a MachineConstantPool
+class MachineConstantPoolEntry {
+public:
+ /// The constant itself.
+ union {
+ const Constant *ConstVal;
+ MachineConstantPoolValue *MachineCPVal;
+ } Val;
+
+ /// The required alignment for this entry. The top bit is set when Val is
+ /// a target specific MachineConstantPoolValue.
+ unsigned Alignment;
+
+ MachineConstantPoolEntry(const Constant *V, unsigned A)
+ : Alignment(A) {
+ Val.ConstVal = V;
+ }
+ MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
+ : Alignment(A) {
+ Val.MachineCPVal = V;
+ Alignment |= 1U << (sizeof(unsigned) * CHAR_BIT - 1);
+ }
+
+ /// isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry
+  /// is indeed a target-specific constant pool entry, not a wrapper over a
+ /// Constant.
+ bool isMachineConstantPoolEntry() const {
+ return (int)Alignment < 0;
+ }
+
+ int getAlignment() const {
+ return Alignment & ~(1 << (sizeof(unsigned) * CHAR_BIT - 1));
+ }
+
+ Type *getType() const;
+
+ /// This method classifies the entry according to whether or not it may
+ /// generate a relocation entry. This must be conservative, so if it might
+ /// codegen to a relocatable entry, it should say so.
+ bool needsRelocation() const;
+
+ SectionKind getSectionKind(const DataLayout *DL) const;
+};
+
+/// The MachineConstantPool class keeps track of constants referenced by a
+/// function which must be spilled to memory. This is used for constants which
+/// cannot be used directly as instruction operands, which typically
+/// include floating point and large integer constants.
+///
+/// Instructions reference the address of these constant pool constants through
+/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine
+/// code, these virtual address references are converted to refer to the
+/// address of the function constant pool values.
+/// @brief The machine constant pool.
+class MachineConstantPool {
+ unsigned PoolAlignment; ///< The alignment for the pool.
+ std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
+ /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
+ DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
+ const DataLayout &DL;
+
+ const DataLayout &getDataLayout() const { return DL; }
+
+public:
+ /// @brief The only constructor.
+ explicit MachineConstantPool(const DataLayout &DL)
+ : PoolAlignment(1), DL(DL) {}
+ ~MachineConstantPool();
+
+ /// getConstantPoolAlignment - Return the alignment required by
+ /// the whole constant pool, of which the first element must be aligned.
+ unsigned getConstantPoolAlignment() const { return PoolAlignment; }
+
+ /// getConstantPoolIndex - Create a new entry in the constant pool or return
+ /// an existing one. User must specify the minimum required alignment for
+ /// the object.
+ unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment);
+ unsigned getConstantPoolIndex(MachineConstantPoolValue *V,
+ unsigned Alignment);
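+
+  // Illustrative sketch (assumes a Constant *C and a DataLayout &DL): spill a
+  // constant to the pool, then reference it via a constant-pool operand.
+  //
+  //   unsigned Idx =
+  //       MCP.getConstantPoolIndex(C, DL.getPrefTypeAlignment(C->getType()));
+  //   // ... later, on a MachineInstrBuilder: MIB.addConstantPoolIndex(Idx);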
+
+ /// isEmpty - Return true if this constant pool contains no constants.
+ bool isEmpty() const { return Constants.empty(); }
+
+ const std::vector<MachineConstantPoolEntry> &getConstants() const {
+ return Constants;
+ }
+
+ /// print - Used by the MachineFunction printer to print information about
+ /// constant pool objects. Implemented in MachineFunction.cpp
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// dump - Call print(cerr) to be called from the debugger.
+ void dump() const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineDominanceFrontier.h b/contrib/llvm/include/llvm/CodeGen/MachineDominanceFrontier.h
new file mode 100644
index 0000000..4131194
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineDominanceFrontier.h
@@ -0,0 +1,109 @@
+//===- llvm/CodeGen/MachineDominanceFrontier.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
+
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+class MachineDominanceFrontier : public MachineFunctionPass {
+ ForwardDominanceFrontierBase<MachineBasicBlock> Base;
+public:
+ typedef DominatorTreeBase<MachineBasicBlock> DomTreeT;
+ typedef DomTreeNodeBase<MachineBasicBlock> DomTreeNodeT;
+ typedef DominanceFrontierBase<MachineBasicBlock>::DomSetType DomSetType;
+ typedef DominanceFrontierBase<MachineBasicBlock>::iterator iterator;
+ typedef DominanceFrontierBase<MachineBasicBlock>::const_iterator const_iterator;
+
+ void operator=(const MachineDominanceFrontier &) = delete;
+ MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
+
+ static char ID;
+
+ MachineDominanceFrontier();
+
+ DominanceFrontierBase<MachineBasicBlock> &getBase() {
+ return Base;
+ }
+
+ inline const std::vector<MachineBasicBlock*> &getRoots() const {
+ return Base.getRoots();
+ }
+
+ MachineBasicBlock *getRoot() const {
+ return Base.getRoot();
+ }
+
+ bool isPostDominator() const {
+ return Base.isPostDominator();
+ }
+
+ iterator begin() {
+ return Base.begin();
+ }
+
+ const_iterator begin() const {
+ return Base.begin();
+ }
+
+ iterator end() {
+ return Base.end();
+ }
+
+ const_iterator end() const {
+ return Base.end();
+ }
+
+ iterator find(MachineBasicBlock *B) {
+ return Base.find(B);
+ }
+
+ const_iterator find(MachineBasicBlock *B) const {
+ return Base.find(B);
+ }
+
+ iterator addBasicBlock(MachineBasicBlock *BB, const DomSetType &frontier) {
+ return Base.addBasicBlock(BB, frontier);
+ }
+
+ void removeBlock(MachineBasicBlock *BB) {
+ return Base.removeBlock(BB);
+ }
+
+ void addToFrontier(iterator I, MachineBasicBlock *Node) {
+ return Base.addToFrontier(I, Node);
+ }
+
+ void removeFromFrontier(iterator I, MachineBasicBlock *Node) {
+ return Base.removeFromFrontier(I, Node);
+ }
+
+ bool compareDomSet(DomSetType &DS1, const DomSetType &DS2) const {
+ return Base.compareDomSet(DS1, DS2);
+ }
+
+ bool compare(DominanceFrontierBase<MachineBasicBlock> &Other) const {
+ return Base.compare(Other);
+ }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ void releaseMemory() override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineDominators.h b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
new file mode 100644
index 0000000..a69936f
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineDominators.h
@@ -0,0 +1,283 @@
+//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
+// but for target-specific code rather than target-independent IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEDOMINATORS_H
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/GenericDomTreeConstruction.h"
+
+namespace llvm {
+
+template<>
+inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) {
+ this->Roots.push_back(MBB);
+}
+
+extern template class DomTreeNodeBase<MachineBasicBlock>;
+extern template class DominatorTreeBase<MachineBasicBlock>;
+
+typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
+
+//===-------------------------------------
+/// MachineDominatorTree - Concrete subclass of DominatorTreeBase that is used
+/// to compute a normal dominator tree on machine basic blocks.
+///
+class MachineDominatorTree : public MachineFunctionPass {
+ /// \brief Helper structure used to hold all the basic blocks
+ /// involved in the split of a critical edge.
+ struct CriticalEdge {
+ MachineBasicBlock *FromBB;
+ MachineBasicBlock *ToBB;
+ MachineBasicBlock *NewBB;
+ };
+
+ /// \brief Pile up all the critical edges to be split.
+ /// The splitting of a critical edge is local and thus, it is possible
+ /// to apply several of those changes at the same time.
+ mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
+ /// \brief Remember all the basic blocks that are inserted during
+ /// edge splitting.
+ /// Invariant: NewBBs == all the basic blocks contained in the NewBB
+ /// field of all the elements of CriticalEdgesToSplit.
+  /// I.e., for all elt in CriticalEdgesToSplit, there exists BB in NewBBs
+  /// such that BB == elt.NewBB.
+ mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
+
+ /// \brief Apply all the recorded critical edges to the DT.
+ /// This updates the underlying DT information in a way that uses
+ /// the fast query path of DT as much as possible.
+ ///
+ /// \post CriticalEdgesToSplit.empty().
+ void applySplitCriticalEdges() const;
+
+public:
+ static char ID; // Pass ID, replacement for typeid
+ DominatorTreeBase<MachineBasicBlock>* DT;
+
+ MachineDominatorTree();
+
+ ~MachineDominatorTree() override;
+
+ DominatorTreeBase<MachineBasicBlock> &getBase() {
+ applySplitCriticalEdges();
+ return *DT;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// getRoots - Return the root blocks of the current CFG. This may include
+ /// multiple blocks if we are computing post dominators. For forward
+ /// dominators, this will always be a single block (the entry node).
+ ///
+ inline const std::vector<MachineBasicBlock*> &getRoots() const {
+ applySplitCriticalEdges();
+ return DT->getRoots();
+ }
+
+ inline MachineBasicBlock *getRoot() const {
+ applySplitCriticalEdges();
+ return DT->getRoot();
+ }
+
+ inline MachineDomTreeNode *getRootNode() const {
+ applySplitCriticalEdges();
+ return DT->getRootNode();
+ }
+
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ inline bool dominates(const MachineDomTreeNode* A,
+ const MachineDomTreeNode* B) const {
+ applySplitCriticalEdges();
+ return DT->dominates(A, B);
+ }
+
+ inline bool dominates(const MachineBasicBlock* A,
+ const MachineBasicBlock* B) const {
+ applySplitCriticalEdges();
+ return DT->dominates(A, B);
+ }
+
+ // dominates - Return true if A dominates B. This performs the
+ // special checks necessary if A and B are in the same basic block.
+ bool dominates(const MachineInstr *A, const MachineInstr *B) const {
+ applySplitCriticalEdges();
+ const MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
+ if (BBA != BBB) return DT->dominates(BBA, BBB);
+
+ // Loop through the basic block until we find A or B.
+ MachineBasicBlock::const_iterator I = BBA->begin();
+ for (; &*I != A && &*I != B; ++I)
+ /*empty*/ ;
+
+ //if(!DT.IsPostDominators) {
+ // A dominates B if it is found first in the basic block.
+ return &*I == A;
+ //} else {
+ // // A post-dominates B if B is found first in the basic block.
+ // return &*I == B;
+ //}
+ }
+
+ inline bool properlyDominates(const MachineDomTreeNode* A,
+ const MachineDomTreeNode* B) const {
+ applySplitCriticalEdges();
+ return DT->properlyDominates(A, B);
+ }
+
+ inline bool properlyDominates(const MachineBasicBlock* A,
+ const MachineBasicBlock* B) const {
+ applySplitCriticalEdges();
+ return DT->properlyDominates(A, B);
+ }
+
+ /// findNearestCommonDominator - Find nearest common dominator basic block
+  /// for basic blocks A and B. If there is no such block then return NULL.
+ inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+ MachineBasicBlock *B) {
+ applySplitCriticalEdges();
+ return DT->findNearestCommonDominator(A, B);
+ }
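+
+  // Illustrative sketch (assumes A and B are blocks of the same function):
+  //
+  //   bool ADomB = MDT.dominates(A, B); // every path to B passes through A
+  //   MachineBasicBlock *NCD = MDT.findNearestCommonDominator(A, B);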
+
+ inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+ applySplitCriticalEdges();
+ return DT->getNode(BB);
+ }
+
+ /// getNode - return the (Post)DominatorTree node for the specified basic
+ /// block. This is the same as using operator[] on this class.
+ ///
+ inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+ applySplitCriticalEdges();
+ return DT->getNode(BB);
+ }
+
+ /// addNewBlock - Add a new node to the dominator tree information. This
+  /// creates a new node as a child of the DomBB dominator node, linking it into
+ /// the children list of the immediate dominator.
+ inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
+ MachineBasicBlock *DomBB) {
+ applySplitCriticalEdges();
+ return DT->addNewBlock(BB, DomBB);
+ }
+
+ /// changeImmediateDominator - This method is used to update the dominator
+ /// tree information when a node's immediate dominator changes.
+ ///
+ inline void changeImmediateDominator(MachineBasicBlock *N,
+ MachineBasicBlock* NewIDom) {
+ applySplitCriticalEdges();
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ inline void changeImmediateDominator(MachineDomTreeNode *N,
+ MachineDomTreeNode* NewIDom) {
+ applySplitCriticalEdges();
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ /// eraseNode - Removes a node from the dominator tree. Block must not
+ /// dominate any other blocks. Removes node from its immediate dominator's
+ /// children list. Deletes dominator node associated with basic block BB.
+ inline void eraseNode(MachineBasicBlock *BB) {
+ applySplitCriticalEdges();
+ DT->eraseNode(BB);
+ }
+
+ /// splitBlock - BB is split and now it has one successor. Update dominator
+ /// tree to reflect this change.
+ inline void splitBlock(MachineBasicBlock* NewBB) {
+ applySplitCriticalEdges();
+ DT->splitBlock(NewBB);
+ }
+
+ /// isReachableFromEntry - Return true if A is dominated by the entry
+ /// block of the function containing it.
+ bool isReachableFromEntry(const MachineBasicBlock *A) {
+ applySplitCriticalEdges();
+ return DT->isReachableFromEntry(A);
+ }
+
+ void releaseMemory() override;
+
+ void print(raw_ostream &OS, const Module*) const override;
+
+ /// \brief Record that the critical edge (FromBB, ToBB) has been
+ /// split with NewBB.
+  /// It is best to use this method instead of directly updating the
+  /// underlying information, because it helps reduce the number of
+  /// times the DT information is invalidated.
+ ///
+ /// \note Do not use this method with regular edges.
+ ///
+  /// \note To benefit from the compile time improvement provided by this
+  /// method, the users of this method have to limit the queries to the DT
+  /// interface between two edge splits. In other words, they have to
+ /// pack the splitting of critical edges as much as possible.
+ void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
+ MachineBasicBlock *ToBB,
+ MachineBasicBlock *NewBB) {
+ bool Inserted = NewBBs.insert(NewBB).second;
+ (void)Inserted;
+ assert(Inserted &&
+ "A basic block inserted via edge splitting cannot appear twice");
+ CriticalEdgesToSplit.push_back({FromBB, ToBB, NewBB});
+ }
+};
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterated by generic graph iterators.
+///
+
+template <class Node, class ChildIterator>
+struct MachineDomTreeGraphTraitsBase {
+ typedef Node NodeType;
+ typedef ChildIterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) { return N->end(); }
+};
+
+template <class T> struct GraphTraits;
+
+template <>
+struct GraphTraits<MachineDomTreeNode *>
+ : public MachineDomTreeGraphTraitsBase<MachineDomTreeNode,
+ MachineDomTreeNode::iterator> {};
+
+template <>
+struct GraphTraits<const MachineDomTreeNode *>
+ : public MachineDomTreeGraphTraitsBase<const MachineDomTreeNode,
+ MachineDomTreeNode::const_iterator> {
+};
+
+template <> struct GraphTraits<MachineDominatorTree*>
+ : public GraphTraits<MachineDomTreeNode *> {
+ static NodeType *getEntryNode(MachineDominatorTree *DT) {
+ return DT->getRootNode();
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
new file mode 100644
index 0000000..48e8ca7
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFrameInfo.h
@@ -0,0 +1,647 @@
+//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineFrameInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
+#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+class raw_ostream;
+class DataLayout;
+class TargetRegisterClass;
+class Type;
+class MachineFunction;
+class MachineBasicBlock;
+class TargetFrameLowering;
+class TargetMachine;
+class BitVector;
+class Value;
+class AllocaInst;
+
+/// The CalleeSavedInfo class tracks the information needed to locate where a
+/// callee saved register is in the current frame.
+class CalleeSavedInfo {
+ unsigned Reg;
+ int FrameIdx;
+
+public:
+ explicit CalleeSavedInfo(unsigned R, int FI = 0)
+ : Reg(R), FrameIdx(FI) {}
+
+ // Accessors.
+ unsigned getReg() const { return Reg; }
+ int getFrameIdx() const { return FrameIdx; }
+ void setFrameIdx(int FI) { FrameIdx = FI; }
+};
+
+/// The MachineFrameInfo class represents an abstract stack frame until
+/// prolog/epilog code is inserted. This class is key to allowing stack frame
+/// representation optimizations, such as frame pointer elimination. It also
+/// allows more mundane (but still important) optimizations, such as reordering
+/// of abstract objects on the stack frame.
+///
+/// To support this, the class assigns unique integer identifiers to stack
+/// objects requested by clients. These identifiers are negative integers for
+/// fixed stack objects (such as arguments passed on the stack) or nonnegative
+/// for objects that may be reordered. Instructions which refer to stack
+/// objects use a special MO_FrameIndex operand to represent these frame
+/// indexes.
+///
+/// Because this class keeps track of all references to the stack frame, it
+/// knows when a variable sized object is allocated on the stack. This is the
+/// sole condition which prevents frame pointer elimination, which is an
+/// important optimization on register-poor architectures. Because original
+/// variable sized alloca's in the source program are the only source of
+/// variable sized stack objects, it is safe to decide whether there will be
+/// any variable sized objects before all stack objects are known (for
+/// example, register allocator spill code never needs variable sized
+/// objects).
+///
+/// When prolog/epilog code emission is performed, the final stack frame is
+/// built and the machine instructions are modified to refer to the actual
+/// stack offsets of the object, eliminating all MO_FrameIndex operands from
+/// the program.
+///
+/// @brief Abstract Stack Frame Information
+class MachineFrameInfo {
+
+ // Represent a single object allocated on the stack.
+ struct StackObject {
+ // The offset of this object from the stack pointer on entry to
+ // the function. This field has no meaning for a variable sized element.
+ int64_t SPOffset;
+
+ // The size of this object on the stack. 0 means a variable sized object,
+ // ~0ULL means a dead object.
+ uint64_t Size;
+
+ // The required alignment of this stack slot.
+ unsigned Alignment;
+
+ // If true, the value of the stack object is set before
+ // entering the function and is not modified inside the function. By
+ // default, fixed objects are immutable unless marked otherwise.
+ bool isImmutable;
+
+ // If true the stack object is used as spill slot. It
+ // cannot alias any other memory objects.
+ bool isSpillSlot;
+
+ /// If true, this stack slot is used to spill a value (could be deopt
+ /// and/or GC related) over a statepoint. We know that the address of the
+    /// slot can't alias any LLVM IR value. This is very similar to a spill
+    /// slot, but is created by statepoint lowering in SelectionDAG, not the
+ /// register allocator.
+ bool isStatepointSpillSlot;
+
+    /// If this stack object originated from an Alloca instruction,
+    /// this value saves the original IR allocation. Can be null.
+ const AllocaInst *Alloca;
+
+ // If true, the object was mapped into the local frame
+ // block and doesn't need additional handling for allocation beyond that.
+ bool PreAllocated;
+
+ // If true, an LLVM IR value might point to this object.
+ // Normally, spill slots and fixed-offset objects don't alias IR-accessible
+ // objects, but there are exceptions (on PowerPC, for example, some byval
+ // arguments have ABI-prescribed offsets).
+ bool isAliased;
+
+ StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
+ bool isSS, const AllocaInst *Val, bool A)
+ : SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
+ isSpillSlot(isSS), isStatepointSpillSlot(false), Alloca(Val),
+ PreAllocated(false), isAliased(A) {}
+ };
+
+ /// The alignment of the stack.
+ unsigned StackAlignment;
+
+ /// Can the stack be realigned.
+ /// Targets that set this to false don't have the ability to overalign
+ /// their stack frame, and thus, overaligned allocas are all treated
+ /// as dynamic allocations and the target must handle them as part
+ /// of DYNAMIC_STACKALLOC lowering.
+ /// FIXME: There is room for improvement in this case, in terms of
+ /// grouping overaligned allocas into a "secondary stack frame" and
+ /// then only use a single alloca to allocate this frame and only a
+ /// single virtual register to access it. Currently, without such an
+  /// optimization, each such alloca gets its own dynamic
+ /// realignment.
+ bool StackRealignable;
+
+ /// The list of stack objects allocated.
+ std::vector<StackObject> Objects;
+
+ /// This contains the number of fixed objects contained on
+ /// the stack. Because fixed objects are stored at a negative index in the
+ /// Objects list, this is also the index to the 0th object in the list.
+ unsigned NumFixedObjects;
+
+ /// This boolean keeps track of whether any variable
+ /// sized objects have been allocated yet.
+ bool HasVarSizedObjects;
+
+ /// This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.frameaddress.
+ bool FrameAddressTaken;
+
+ /// This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.returnaddress.
+ bool ReturnAddressTaken;
+
+ /// This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.experimental.stackmap.
+ bool HasStackMap;
+
+ /// This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.experimental.patchpoint.
+ bool HasPatchPoint;
+
+ /// The prolog/epilog code inserter calculates the final stack
+ /// offsets for all of the fixed size objects, updating the Objects list
+ /// above. It then updates StackSize to contain the number of bytes that need
+ /// to be allocated on entry to the function.
+ uint64_t StackSize;
+
+ /// The amount that a frame offset needs to be adjusted to
+ /// have the actual offset from the stack/frame pointer. The exact usage of
+ /// this is target-dependent, but it is typically used to adjust between
+  /// SP-relative and FP-relative offsets. E.g., if objects are accessed via
+ /// SP then OffsetAdjustment is zero; if FP is used, OffsetAdjustment is set
+ /// to the distance between the initial SP and the value in FP. For many
+ /// targets, this value is only used when generating debug info (via
+ /// TargetRegisterInfo::getFrameIndexReference); when generating code, the
+ /// corresponding adjustments are performed directly.
+ int OffsetAdjustment;
+
+ /// The prolog/epilog code inserter may process objects that require greater
+ /// alignment than the default alignment the target provides.
+ /// To handle this, MaxAlignment is set to the maximum alignment
+ /// needed by the objects on the current frame. If this is greater than the
+ /// native alignment maintained by the compiler, dynamic alignment code will
+ /// be needed.
+ ///
+ unsigned MaxAlignment;
+
+ /// Set to true if this function adjusts the stack -- e.g.,
+ /// when calling another function. This is only valid during and after
+ /// prolog/epilog code insertion.
+ bool AdjustsStack;
+
+ /// Set to true if this function has any function calls.
+ bool HasCalls;
+
+ /// The frame index for the stack protector.
+ int StackProtectorIdx;
+
+ /// The frame index for the function context. Used for SjLj exceptions.
+ int FunctionContextIdx;
+
+ /// This contains the size of the largest call frame if the target uses frame
+ /// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
+ /// class). This information is important for frame pointer elimination.
+ /// It is only valid during and after prolog/epilog code insertion.
+ unsigned MaxCallFrameSize;
+
+ /// The prolog/epilog code inserter fills in this vector with each
+ /// callee saved register saved in the frame. Beyond its use by the prolog/
+  /// epilog code inserter, this data is used for debug info and exception
+ /// handling.
+ std::vector<CalleeSavedInfo> CSInfo;
+
+ /// Has CSInfo been set yet?
+ bool CSIValid;
+
+ /// References to frame indices which are mapped
+ /// into the local frame allocation block. <FrameIdx, LocalOffset>
+ SmallVector<std::pair<int, int64_t>, 32> LocalFrameObjects;
+
+ /// Size of the pre-allocated local frame block.
+ int64_t LocalFrameSize;
+
+ /// Required alignment of the local object blob, which is the strictest
+ /// alignment of any object in it.
+ unsigned LocalFrameMaxAlign;
+
+ /// Whether the local object blob needs to be allocated together. If not,
+ /// PEI should ignore the isPreAllocated flags on the stack objects and
+ /// just allocate them normally.
+ bool UseLocalStackAllocationBlock;
+
+ /// Whether the "realign-stack" option is on.
+ bool RealignOption;
+
+ /// True if the function dynamically adjusts the stack pointer through some
+ /// opaque mechanism like inline assembly or Win32 EH.
+ bool HasOpaqueSPAdjustment;
+
+ /// True if the function contains a call to the llvm.vastart intrinsic.
+ bool HasVAStart;
+
+ /// True if this is a varargs function that contains a musttail call.
+ bool HasMustTailInVarArgFunc;
+
+  /// True if this function contains a tail call. If so, immutable objects
+  /// like function arguments are no longer so, because a tail call *can*
+  /// overwrite fixed stack objects like arguments, so we can't treat them
+  /// as immutable.
+ bool HasTailCall;
+
+ /// Not null, if shrink-wrapping found a better place for the prologue.
+ MachineBasicBlock *Save;
+ /// Not null, if shrink-wrapping found a better place for the epilogue.
+ MachineBasicBlock *Restore;
+
+public:
+ explicit MachineFrameInfo(unsigned StackAlign, bool isStackRealign,
+ bool RealignOpt)
+ : StackAlignment(StackAlign), StackRealignable(isStackRealign),
+ RealignOption(RealignOpt) {
+ StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
+ HasVarSizedObjects = false;
+ FrameAddressTaken = false;
+ ReturnAddressTaken = false;
+ HasStackMap = false;
+ HasPatchPoint = false;
+ AdjustsStack = false;
+ HasCalls = false;
+ StackProtectorIdx = -1;
+ FunctionContextIdx = -1;
+ MaxCallFrameSize = 0;
+ CSIValid = false;
+ LocalFrameSize = 0;
+ LocalFrameMaxAlign = 0;
+ UseLocalStackAllocationBlock = false;
+ HasOpaqueSPAdjustment = false;
+ HasVAStart = false;
+ HasMustTailInVarArgFunc = false;
+ Save = nullptr;
+ Restore = nullptr;
+ HasTailCall = false;
+ }
+
+ /// Return true if there are any stack objects in this function.
+ bool hasStackObjects() const { return !Objects.empty(); }
+
+ /// This method may be called any time after instruction
+ /// selection is complete to determine if the stack frame for this function
+ /// contains any variable sized objects.
+ bool hasVarSizedObjects() const { return HasVarSizedObjects; }
+
+ /// Return the index for the stack protector object.
+ int getStackProtectorIndex() const { return StackProtectorIdx; }
+ void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
+ bool hasStackProtectorIndex() const { return StackProtectorIdx != -1; }
+
+ /// Return the index for the function context object.
+ /// This object is used for SjLj exceptions.
+ int getFunctionContextIndex() const { return FunctionContextIdx; }
+ void setFunctionContextIndex(int I) { FunctionContextIdx = I; }
+
+ /// This method may be called any time after instruction
+ /// selection is complete to determine if there is a call to
+ /// \@llvm.frameaddress in this function.
+ bool isFrameAddressTaken() const { return FrameAddressTaken; }
+ void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
+
+ /// This method may be called any time after
+ /// instruction selection is complete to determine if there is a call to
+ /// \@llvm.returnaddress in this function.
+ bool isReturnAddressTaken() const { return ReturnAddressTaken; }
+ void setReturnAddressIsTaken(bool s) { ReturnAddressTaken = s; }
+
+ /// This method may be called any time after instruction
+ /// selection is complete to determine if there is a call to builtin
+ /// \@llvm.experimental.stackmap.
+ bool hasStackMap() const { return HasStackMap; }
+ void setHasStackMap(bool s = true) { HasStackMap = s; }
+
+ /// This method may be called any time after instruction
+ /// selection is complete to determine if there is a call to builtin
+ /// \@llvm.experimental.patchpoint.
+ bool hasPatchPoint() const { return HasPatchPoint; }
+ void setHasPatchPoint(bool s = true) { HasPatchPoint = s; }
+
+ /// Return the minimum frame object index.
+ int getObjectIndexBegin() const { return -NumFixedObjects; }
+
+ /// Return one past the maximum frame object index.
+ int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
+
+ /// Return the number of fixed objects.
+ unsigned getNumFixedObjects() const { return NumFixedObjects; }
+
+ /// Return the number of objects.
+ unsigned getNumObjects() const { return Objects.size(); }
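+
+  // Illustrative index layout: with two fixed objects and three ordinary
+  // objects, getObjectIndexBegin() returns -2, getObjectIndexEnd() returns 3,
+  // and the valid frame indices are -2, -1, 0, 1, and 2.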
+
+  /// Map a frame index into the local object block.
+ void mapLocalFrameObject(int ObjectIndex, int64_t Offset) {
+ LocalFrameObjects.push_back(std::pair<int, int64_t>(ObjectIndex, Offset));
+ Objects[ObjectIndex + NumFixedObjects].PreAllocated = true;
+ }
+
+  /// Get the local offset mapping for an object.
+  std::pair<int, int64_t> getLocalFrameObjectMap(int i) const {
+    assert(i >= 0 && (unsigned)i < LocalFrameObjects.size() &&
+           "Invalid local object reference!");
+ return LocalFrameObjects[i];
+ }
+
+ /// Return the number of objects allocated into the local object block.
+ int64_t getLocalFrameObjectCount() const { return LocalFrameObjects.size(); }
+
+ /// Set the size of the local object blob.
+ void setLocalFrameSize(int64_t sz) { LocalFrameSize = sz; }
+
+ /// Get the size of the local object blob.
+ int64_t getLocalFrameSize() const { return LocalFrameSize; }
+
+  /// Set the required alignment of the local object blob, which is the
+  /// strictest alignment of any object in it.
+ void setLocalFrameMaxAlign(unsigned Align) { LocalFrameMaxAlign = Align; }
+
+ /// Return the required alignment of the local object blob.
+ unsigned getLocalFrameMaxAlign() const { return LocalFrameMaxAlign; }
+
+  /// Return whether the local allocation blob should be allocated as a single
+  /// block, or whether PEI should allocate the locals in it directly.
+ bool getUseLocalStackAllocationBlock() const {
+ return UseLocalStackAllocationBlock;
+ }
+
+  /// Set whether the local allocation blob should be allocated together or
+  /// let PEI allocate the locals in it directly.
+ void setUseLocalStackAllocationBlock(bool v) {
+ UseLocalStackAllocationBlock = v;
+ }
+
+ /// Return true if the object was pre-allocated into the local block.
+ bool isObjectPreAllocated(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].PreAllocated;
+ }
+
+ /// Return the size of the specified object.
+ int64_t getObjectSize(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Size;
+ }
+
+ /// Change the size of the specified stack object.
+ void setObjectSize(int ObjectIdx, int64_t Size) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].Size = Size;
+ }
+
+ /// Return the alignment of the specified stack object.
+ unsigned getObjectAlignment(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Alignment;
+ }
+
+  /// Change the alignment of the specified stack object.
+ void setObjectAlignment(int ObjectIdx, unsigned Align) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
+ ensureMaxAlignment(Align);
+ }
+
+  /// Return the underlying Alloca of the specified stack object, if it
+  /// exists. Returns nullptr if none exists.
+ const AllocaInst* getObjectAllocation(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Alloca;
+ }
+
+ /// Return the assigned stack offset of the specified object
+ /// from the incoming stack pointer.
+ int64_t getObjectOffset(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ assert(!isDeadObjectIndex(ObjectIdx) &&
+ "Getting frame offset for a dead object?");
+ return Objects[ObjectIdx+NumFixedObjects].SPOffset;
+ }
+
+ /// Set the stack frame offset of the specified object. The
+ /// offset is relative to the stack pointer on entry to the function.
+ void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ assert(!isDeadObjectIndex(ObjectIdx) &&
+ "Setting frame offset for a dead object?");
+ Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
+ }
+
+ /// Return the number of bytes that must be allocated to hold
+ /// all of the fixed size frame objects. This is only valid after
+ /// Prolog/Epilog code insertion has finalized the stack frame layout.
+ uint64_t getStackSize() const { return StackSize; }
+
+ /// Set the size of the stack.
+ void setStackSize(uint64_t Size) { StackSize = Size; }
+
+ /// Estimate and return the size of the stack frame.
+ unsigned estimateStackSize(const MachineFunction &MF) const;
+
+ /// Return the correction for frame offsets.
+ int getOffsetAdjustment() const { return OffsetAdjustment; }
+
+ /// Set the correction for frame offsets.
+ void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
+
+  /// Return the alignment in bytes that this function must be aligned to,
+  /// which may be greater than the default stack alignment provided by the
+  /// target.
+ unsigned getMaxAlignment() const { return MaxAlignment; }
+
+ /// Make sure the function is at least Align bytes aligned.
+ void ensureMaxAlignment(unsigned Align);
+
+ /// Return true if this function adjusts the stack -- e.g.,
+ /// when calling another function. This is only valid during and after
+ /// prolog/epilog code insertion.
+ bool adjustsStack() const { return AdjustsStack; }
+ void setAdjustsStack(bool V) { AdjustsStack = V; }
+
+ /// Return true if the current function has any function calls.
+ bool hasCalls() const { return HasCalls; }
+ void setHasCalls(bool V) { HasCalls = V; }
+
+ /// Returns true if the function contains opaque dynamic stack adjustments.
+ bool hasOpaqueSPAdjustment() const { return HasOpaqueSPAdjustment; }
+ void setHasOpaqueSPAdjustment(bool B) { HasOpaqueSPAdjustment = B; }
+
+ /// Returns true if the function calls the llvm.va_start intrinsic.
+ bool hasVAStart() const { return HasVAStart; }
+ void setHasVAStart(bool B) { HasVAStart = B; }
+
+ /// Returns true if the function is variadic and contains a musttail call.
+ bool hasMustTailInVarArgFunc() const { return HasMustTailInVarArgFunc; }
+ void setHasMustTailInVarArgFunc(bool B) { HasMustTailInVarArgFunc = B; }
+
+ /// Returns true if the function contains a tail call.
+ bool hasTailCall() const { return HasTailCall; }
+ void setHasTailCall() { HasTailCall = true; }
+
+ /// Return the maximum size of a call frame that must be
+ /// allocated for an outgoing function call. This is only available if
+ /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
+ /// then only during or after prolog/epilog code insertion.
+ ///
+ unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
+ void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+
+ /// Create a new object at a fixed location on the stack.
+ /// All fixed objects should be created before other objects are created for
+ /// efficiency. By default, fixed objects are not pointed to by LLVM IR
+ /// values. This returns an index with a negative value.
+ int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable,
+ bool isAliased = false);
+
+ /// Create a spill slot at a fixed location on the stack.
+ /// Returns an index with a negative value.
+ int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset);
+
+ /// Returns true if the specified index corresponds to a fixed stack object.
+ bool isFixedObjectIndex(int ObjectIdx) const {
+ return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
+ }
+
+ /// Returns true if the specified index corresponds
+ /// to an object that might be pointed to by an LLVM IR value.
+ bool isAliasedObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isAliased;
+ }
+
+  /// Returns true if the specified index corresponds to an immutable object.
+ bool isImmutableObjectIndex(int ObjectIdx) const {
+ // Tail calling functions can clobber their function arguments.
+ if (HasTailCall)
+ return false;
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isImmutable;
+ }
+
+ /// Returns true if the specified index corresponds to a spill slot.
+ bool isSpillSlotObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isSpillSlot;
+ }
+
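+  /// Returns true if the specified index corresponds to a statepoint spill
+  /// slot.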
+ bool isStatepointSpillSlotObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot;
+ }
+
+ /// Returns true if the specified index corresponds to a dead object.
+ bool isDeadObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
+ }
+
+ /// Returns true if the specified index corresponds to a variable sized
+ /// object.
+ bool isVariableSizedObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx + NumFixedObjects].Size == 0;
+ }
+
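+  /// Mark the object at the specified index as a statepoint spill slot.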
+ void markAsStatepointSpillSlotObjectIndex(int ObjectIdx) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].isStatepointSpillSlot = true;
+ assert(isStatepointSpillSlotObjectIndex(ObjectIdx) && "inconsistent");
+ }
+
+ /// Create a new statically sized stack object, returning
+ /// a nonnegative identifier to represent it.
+ int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
+ const AllocaInst *Alloca = nullptr);
+
+ /// Create a new statically sized stack object that represents a spill slot,
+ /// returning a nonnegative identifier to represent it.
+ int CreateSpillStackObject(uint64_t Size, unsigned Alignment);
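+
+  // A typical creation sequence (a sketch only; the size, alignment, and
+  // offset below are illustrative, and MFI is assumed to point at this
+  // MachineFrameInfo):
+  //
+  //   int FI = MFI->CreateStackObject(/*Size=*/8, /*Alignment=*/8,
+  //                                   /*isSS=*/false);
+  //   MFI->setObjectOffset(FI, -16);        // normally done by PEI
+  //   assert(MFI->getObjectSize(FI) == 8);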
+
+ /// Remove or mark dead a statically sized stack object.
+ void RemoveStackObject(int ObjectIdx) {
+ // Mark it dead.
+ Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
+ }
+
+ /// Notify the MachineFrameInfo object that a variable sized object has been
+  /// created. This must be called whenever a variable sized object is
+ /// created, whether or not the index returned is actually used.
+ int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca);
+
+  /// Returns a reference to the callee saved info vector for the current
+  /// function.
+ const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
+ return CSInfo;
+ }
+
+ /// Used by prolog/epilog inserter to set the function's callee saved
+ /// information.
+ void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
+ CSInfo = CSI;
+ }
+
+ /// Has the callee saved info been calculated yet?
+ bool isCalleeSavedInfoValid() const { return CSIValid; }
+
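+  /// Set whether the callee saved info has been calculated.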
+ void setCalleeSavedInfoValid(bool v) { CSIValid = v; }
+
+ MachineBasicBlock *getSavePoint() const { return Save; }
+ void setSavePoint(MachineBasicBlock *NewSave) { Save = NewSave; }
+ MachineBasicBlock *getRestorePoint() const { return Restore; }
+ void setRestorePoint(MachineBasicBlock *NewRestore) { Restore = NewRestore; }
+
+ /// Return a set of physical registers that are pristine.
+ ///
+ /// Pristine registers hold a value that is useless to the current function,
+ /// but that must be preserved - they are callee saved registers that are not
+ /// saved.
+ ///
+ /// Before the PrologueEpilogueInserter has placed the CSR spill code, this
+ /// method always returns an empty set.
+ BitVector getPristineRegs(const MachineFunction &MF) const;
+
+ /// Used by the MachineFunction printer to print information about
+ /// stack objects. Implemented in MachineFunction.cpp.
+ void print(const MachineFunction &MF, raw_ostream &OS) const;
+
+ /// dump - Print the function to stderr.
+ void dump(const MachineFunction &MF) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
new file mode 100644
index 0000000..82c30d3
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h
@@ -0,0 +1,585 @@
+//===-- llvm/CodeGen/MachineFunction.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect native machine code for a function. This class contains a list of
+// MachineBasicBlock instances that make up the current compiled function.
+//
+// This class also contains pointers to various classes which hold
+// target-specific information about the generated code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
+#define LLVM_CODEGEN_MACHINEFUNCTION_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Support/Recycler.h"
+
+namespace llvm {
+
+class Value;
+class Function;
+class GCModuleInfo;
+class MachineRegisterInfo;
+class MachineFrameInfo;
+class MachineConstantPool;
+class MachineJumpTableInfo;
+class MachineModuleInfo;
+class MCContext;
+class Pass;
+class PseudoSourceValueManager;
+class TargetMachine;
+class TargetSubtargetInfo;
+class TargetRegisterClass;
+struct MachinePointerInfo;
+struct WinEHFuncInfo;
+
+template <>
+struct ilist_traits<MachineBasicBlock>
+ : public ilist_default_traits<MachineBasicBlock> {
+ mutable ilist_half_node<MachineBasicBlock> Sentinel;
+public:
+ MachineBasicBlock *createSentinel() const {
+ return static_cast<MachineBasicBlock*>(&Sentinel);
+ }
+ void destroySentinel(MachineBasicBlock *) const {}
+
+ MachineBasicBlock *provideInitialHead() const { return createSentinel(); }
+ MachineBasicBlock *ensureHead(MachineBasicBlock*) const {
+ return createSentinel();
+ }
+ static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {}
+
+ void addNodeToList(MachineBasicBlock* MBB);
+ void removeNodeFromList(MachineBasicBlock* MBB);
+ void deleteNode(MachineBasicBlock *MBB);
+private:
+ void createNode(const MachineBasicBlock &);
+};
+
+/// MachineFunctionInfo - This class can be derived from and used by targets to
+/// hold private target-specific information for each MachineFunction. Objects
+/// of this type are accessed/created with MF::getInfo and destroyed when the
+/// MachineFunction is destroyed.
+struct MachineFunctionInfo {
+ virtual ~MachineFunctionInfo();
+
+ /// \brief Factory function: default behavior is to call new using the
+ /// supplied allocator.
+ ///
+  /// This function can be overridden in a derived class.
+ template<typename Ty>
+ static Ty *create(BumpPtrAllocator &Allocator, MachineFunction &MF) {
+ return new (Allocator.Allocate<Ty>()) Ty(MF);
+ }
+};
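+
+// A minimal target-side subclass sketch (the XYZ target and its field are
+// hypothetical); MachineFunction::getInfo<XYZFunctionInfo>() constructs it
+// lazily via the create<> factory above:
+//
+//   struct XYZFunctionInfo : MachineFunctionInfo {
+//     int VarArgsFrameIndex = 0;
+//     explicit XYZFunctionInfo(MachineFunction &) {}
+//   };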
+
+class MachineFunction {
+ const Function *Fn;
+ const TargetMachine &Target;
+ const TargetSubtargetInfo *STI;
+ MCContext &Ctx;
+ MachineModuleInfo &MMI;
+
+ // RegInfo - Information about each register in use in the function.
+ MachineRegisterInfo *RegInfo;
+
+ // Used to keep track of target-specific per-machine function information for
+ // the target implementation.
+ MachineFunctionInfo *MFInfo;
+
+ // Keep track of objects allocated on the stack.
+ MachineFrameInfo *FrameInfo;
+
+ // Keep track of constants which are spilled to memory
+ MachineConstantPool *ConstantPool;
+
+ // Keep track of jump tables for switch instructions
+ MachineJumpTableInfo *JumpTableInfo;
+
+ // Keeps track of Windows exception handling related data. This will be null
+ // for functions that aren't using a funclet-based EH personality.
+ WinEHFuncInfo *WinEHInfo = nullptr;
+
+ // Function-level unique numbering for MachineBasicBlocks. When a
+  // MachineBasicBlock is inserted into a MachineFunction, it is automatically
+  // numbered and this vector keeps track of the mapping from IDs to MBBs.
+ std::vector<MachineBasicBlock*> MBBNumbering;
+
+ // Pool-allocate MachineFunction-lifetime and IR objects.
+ BumpPtrAllocator Allocator;
+
+ // Allocation management for instructions in function.
+ Recycler<MachineInstr> InstructionRecycler;
+
+ // Allocation management for operand arrays on instructions.
+ ArrayRecycler<MachineOperand> OperandRecycler;
+
+ // Allocation management for basic blocks in function.
+ Recycler<MachineBasicBlock> BasicBlockRecycler;
+
+ // List of machine basic blocks in function
+ typedef ilist<MachineBasicBlock> BasicBlockListType;
+ BasicBlockListType BasicBlocks;
+
+ /// FunctionNumber - This provides a unique ID for each function emitted in
+ /// this translation unit.
+ ///
+ unsigned FunctionNumber;
+
+ /// Alignment - The alignment of the function.
+ unsigned Alignment;
+
+ /// ExposesReturnsTwice - True if the function calls setjmp or related
+ /// functions with attribute "returns twice", but doesn't have
+ /// the attribute itself.
+ /// This is used to limit optimizations which cannot reason
+ /// about the control flow of such functions.
+ bool ExposesReturnsTwice;
+
+ /// True if the function includes any inline assembly.
+ bool HasInlineAsm;
+
+ // Allocation management for pseudo source values.
+ std::unique_ptr<PseudoSourceValueManager> PSVManager;
+
+ MachineFunction(const MachineFunction &) = delete;
+ void operator=(const MachineFunction&) = delete;
+public:
+ MachineFunction(const Function *Fn, const TargetMachine &TM,
+ unsigned FunctionNum, MachineModuleInfo &MMI);
+ ~MachineFunction();
+
+ MachineModuleInfo &getMMI() const { return MMI; }
+ MCContext &getContext() const { return Ctx; }
+
+ PseudoSourceValueManager &getPSVManager() const { return *PSVManager; }
+
+  /// Return the DataLayout attached to the Module associated with this MF.
+ const DataLayout &getDataLayout() const;
+
+ /// getFunction - Return the LLVM function that this machine code represents
+ ///
+ const Function *getFunction() const { return Fn; }
+
+ /// getName - Return the name of the corresponding LLVM function.
+ ///
+ StringRef getName() const;
+
+ /// getFunctionNumber - Return a unique ID for the current function.
+ ///
+ unsigned getFunctionNumber() const { return FunctionNumber; }
+
+ /// getTarget - Return the target machine this machine code is compiled with
+ ///
+ const TargetMachine &getTarget() const { return Target; }
+
+ /// getSubtarget - Return the subtarget for which this machine code is being
+ /// compiled.
+ const TargetSubtargetInfo &getSubtarget() const { return *STI; }
+ void setSubtarget(const TargetSubtargetInfo *ST) { STI = ST; }
+
+  /// getSubtarget - This method returns a reference to the specified type of
+ /// TargetSubtargetInfo. In debug builds, it verifies that the object being
+ /// returned is of the correct type.
+ template<typename STC> const STC &getSubtarget() const {
+ return *static_cast<const STC *>(STI);
+ }
+
+ /// getRegInfo - Return information about the registers currently in use.
+ ///
+ MachineRegisterInfo &getRegInfo() { return *RegInfo; }
+ const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
+
+ /// getFrameInfo - Return the frame info object for the current function.
+ /// This object contains information about objects allocated on the stack
+ /// frame of the current function in an abstract way.
+ ///
+ MachineFrameInfo *getFrameInfo() { return FrameInfo; }
+ const MachineFrameInfo *getFrameInfo() const { return FrameInfo; }
+
+ /// getJumpTableInfo - Return the jump table info object for the current
+ /// function. This object contains information about jump tables in the
+ /// current function. If the current function has no jump tables, this will
+ /// return null.
+ const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
+ MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
+
+ /// getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it
+  /// does not already exist, allocate one.
+ MachineJumpTableInfo *getOrCreateJumpTableInfo(unsigned JTEntryKind);
+
+ /// getConstantPool - Return the constant pool object for the current
+ /// function.
+ ///
+ MachineConstantPool *getConstantPool() { return ConstantPool; }
+ const MachineConstantPool *getConstantPool() const { return ConstantPool; }
+
+ /// getWinEHFuncInfo - Return information about how the current function uses
+ /// Windows exception handling. Returns null for functions that don't use
+ /// funclets for exception handling.
+ const WinEHFuncInfo *getWinEHFuncInfo() const { return WinEHInfo; }
+ WinEHFuncInfo *getWinEHFuncInfo() { return WinEHInfo; }
+
+ /// getAlignment - Return the alignment (log2, not bytes) of the function.
+ ///
+ unsigned getAlignment() const { return Alignment; }
+
+ /// setAlignment - Set the alignment (log2, not bytes) of the function.
+ ///
+ void setAlignment(unsigned A) { Alignment = A; }
+
+ /// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
+ void ensureAlignment(unsigned A) {
+ if (Alignment < A) Alignment = A;
+ }
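+
+  // Note the log2 encoding: setAlignment(4) requests 1 << 4 == 16-byte
+  // alignment, and ensureAlignment(4) is a no-op if the current alignment is
+  // already 4 or greater.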
+
+ /// exposesReturnsTwice - Returns true if the function calls setjmp or
+ /// any other similar functions with attribute "returns twice" without
+ /// having the attribute itself.
+ bool exposesReturnsTwice() const {
+ return ExposesReturnsTwice;
+ }
+
+  /// setExposesReturnsTwice - Set a flag that indicates if there's a call to
+  /// a "returns twice" function.
+ void setExposesReturnsTwice(bool B) {
+ ExposesReturnsTwice = B;
+ }
+
+ /// Returns true if the function contains any inline assembly.
+ bool hasInlineAsm() const {
+ return HasInlineAsm;
+ }
+
+ /// Set a flag that indicates that the function contains inline assembly.
+ void setHasInlineAsm(bool B) {
+ HasInlineAsm = B;
+ }
+
+ /// getInfo - Keep track of various per-function pieces of information for
+ /// backends that would like to do so.
+ ///
+ template<typename Ty>
+ Ty *getInfo() {
+ if (!MFInfo)
+ MFInfo = Ty::template create<Ty>(Allocator, *this);
+ return static_cast<Ty*>(MFInfo);
+ }
+
+ template<typename Ty>
+ const Ty *getInfo() const {
+ return const_cast<MachineFunction*>(this)->getInfo<Ty>();
+ }
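+
+  // Typical backend usage, given a MachineFunction &MF (XYZFunctionInfo is
+  // the hypothetical subclass sketched above; FrameIdx is assumed to be in
+  // scope):
+  //
+  //   auto *FI = MF.getInfo<XYZFunctionInfo>();
+  //   FI->VarArgsFrameIndex = FrameIdx;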
+
+ /// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
+ /// are inserted into the machine function. The block number for a machine
+  /// basic block can be found by using the MBB::getBlockNumber method; this
+ /// method provides the inverse mapping.
+ ///
+ MachineBasicBlock *getBlockNumbered(unsigned N) const {
+ assert(N < MBBNumbering.size() && "Illegal block number");
+ assert(MBBNumbering[N] && "Block was removed from the machine function!");
+ return MBBNumbering[N];
+ }
+
+  /// Return true if segmented stack code should be emitted for this function.
+ bool shouldSplitStack();
+
+ /// getNumBlockIDs - Return the number of MBB ID's allocated.
+ ///
+ unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
+
+ /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
+ /// recomputes them. This guarantees that the MBB numbers are sequential,
+ /// dense, and match the ordering of the blocks within the function. If a
+ /// specific MachineBasicBlock is specified, only that block and those after
+ /// it are renumbered.
+ void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
+
+ /// print - Print out the MachineFunction in a format suitable for debugging
+ /// to the specified stream.
+ ///
+ void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
+
+ /// viewCFG - This function is meant for use from the debugger. You can just
+ /// say 'call F->viewCFG()' and a ghostview window should pop up from the
+ /// program, displaying the CFG of the current function with the code for each
+ /// basic block inside. This depends on there being a 'dot' and 'gv' program
+ /// in your path.
+ ///
+ void viewCFG() const;
+
+ /// viewCFGOnly - This function is meant for use from the debugger. It works
+ /// just like viewCFG, but it does not include the contents of basic blocks
+ /// into the nodes, just the label. If you are only interested in the CFG
+ /// this can make the graph smaller.
+ ///
+ void viewCFGOnly() const;
+
+ /// dump - Print the current MachineFunction to cerr, useful for debugger use.
+ ///
+ void dump() const;
+
+ /// verify - Run the current MachineFunction through the machine code
+ /// verifier, useful for debugger use.
+ void verify(Pass *p = nullptr, const char *Banner = nullptr) const;
+
+ // Provide accessors for the MachineBasicBlock list...
+ typedef BasicBlockListType::iterator iterator;
+ typedef BasicBlockListType::const_iterator const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ /// Support for MachineBasicBlock::getNextNode().
+ static BasicBlockListType MachineFunction::*
+ getSublistAccess(MachineBasicBlock *) {
+ return &MachineFunction::BasicBlocks;
+ }
+
+ /// addLiveIn - Add the specified physical register as a live-in value and
+ /// create a corresponding virtual register for it.
+ unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
+
+ //===--------------------------------------------------------------------===//
+ // BasicBlock accessor functions.
+ //
+ iterator begin() { return BasicBlocks.begin(); }
+ const_iterator begin() const { return BasicBlocks.begin(); }
+ iterator end () { return BasicBlocks.end(); }
+ const_iterator end () const { return BasicBlocks.end(); }
+
+ reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
+ const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
+ reverse_iterator rend () { return BasicBlocks.rend(); }
+ const_reverse_iterator rend () const { return BasicBlocks.rend(); }
+
+ unsigned size() const { return (unsigned)BasicBlocks.size();}
+ bool empty() const { return BasicBlocks.empty(); }
+ const MachineBasicBlock &front() const { return BasicBlocks.front(); }
+ MachineBasicBlock &front() { return BasicBlocks.front(); }
+ const MachineBasicBlock & back() const { return BasicBlocks.back(); }
+ MachineBasicBlock & back() { return BasicBlocks.back(); }
+
+ void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
+ void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
+ void insert(iterator MBBI, MachineBasicBlock *MBB) {
+ BasicBlocks.insert(MBBI, MBB);
+ }
+ void splice(iterator InsertPt, iterator MBBI) {
+ BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
+ }
+ void splice(iterator InsertPt, MachineBasicBlock *MBB) {
+ BasicBlocks.splice(InsertPt, BasicBlocks, MBB);
+ }
+ void splice(iterator InsertPt, iterator MBBI, iterator MBBE) {
+ BasicBlocks.splice(InsertPt, BasicBlocks, MBBI, MBBE);
+ }
+
+ void remove(iterator MBBI) { BasicBlocks.remove(MBBI); }
+ void remove(MachineBasicBlock *MBBI) { BasicBlocks.remove(MBBI); }
+ void erase(iterator MBBI) { BasicBlocks.erase(MBBI); }
+ void erase(MachineBasicBlock *MBBI) { BasicBlocks.erase(MBBI); }
+
+ template <typename Comp>
+ void sort(Comp comp) {
+ BasicBlocks.sort(comp);
+ }
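+
+  // With the accessors above, a MachineFunction behaves as a range of basic
+  // blocks; the canonical traversal is therefore (a sketch, with MF assumed
+  // in scope):
+  //
+  //   for (MachineBasicBlock &MBB : MF)
+  //     for (MachineInstr &MI : MBB)
+  //       (void)MI; // visit each instruction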
+
+ //===--------------------------------------------------------------------===//
+ // Internal functions used to automatically number MachineBasicBlocks
+ //
+
+ /// \brief Adds the MBB to the internal numbering. Returns the unique number
+ /// assigned to the MBB.
+ ///
+ unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
+ MBBNumbering.push_back(MBB);
+ return (unsigned)MBBNumbering.size()-1;
+ }
+
+  /// removeFromMBBNumbering - Remove the specified machine basic block from
+  /// our tracker; this is only really to be used by the MachineBasicBlock
+  /// implementation.
+ void removeFromMBBNumbering(unsigned N) {
+ assert(N < MBBNumbering.size() && "Illegal basic block #");
+ MBBNumbering[N] = nullptr;
+ }
+
+ /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
+ /// of `new MachineInstr'.
+ ///
+ MachineInstr *CreateMachineInstr(const MCInstrDesc &MCID,
+ DebugLoc DL,
+ bool NoImp = false);
+
+ /// CloneMachineInstr - Create a new MachineInstr which is a copy of the
+ /// 'Orig' instruction, identical in all ways except the instruction
+ /// has no parent, prev, or next.
+ ///
+ /// See also TargetInstrInfo::duplicate() for target-specific fixes to cloned
+ /// instructions.
+ MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
+
+ /// DeleteMachineInstr - Delete the given MachineInstr.
+ ///
+ void DeleteMachineInstr(MachineInstr *MI);
+
+ /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
+ /// instead of `new MachineBasicBlock'.
+ ///
+ MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
+
+ /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
+ ///
+ void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
+
+ /// getMachineMemOperand - Allocate a new MachineMemOperand.
+ /// MachineMemOperands are owned by the MachineFunction and need not be
+ /// explicitly deallocated.
+ MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
+ unsigned f, uint64_t s,
+ unsigned base_alignment,
+ const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr);
+
+ /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
+ /// an existing one, adjusting by an offset and using the given size.
+ /// MachineMemOperands are owned by the MachineFunction and need not be
+ /// explicitly deallocated.
+ MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+ int64_t Offset, uint64_t Size);
+
+ typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity;
+
+ /// Allocate an array of MachineOperands. This is only intended for use by
+ /// internal MachineInstr functions.
+ MachineOperand *allocateOperandArray(OperandCapacity Cap) {
+ return OperandRecycler.allocate(Cap, Allocator);
+ }
+
+  /// Deallocate an array of MachineOperands and recycle the memory. This is
+ /// only intended for use by internal MachineInstr functions.
+ /// Cap must be the same capacity that was used to allocate the array.
+ void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array) {
+ OperandRecycler.deallocate(Cap, Array);
+ }
+
+ /// \brief Allocate and initialize a register mask with @p NumRegister bits.
+ uint32_t *allocateRegisterMask(unsigned NumRegister) {
+ unsigned Size = (NumRegister + 31) / 32;
+ uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
+ for (unsigned i = 0; i != Size; ++i)
+ Mask[i] = 0;
+ return Mask;
+ }
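+
+  // E.g., allocateRegisterMask(100) allocates (100 + 31) / 32 == 4
+  // zero-initialized 32-bit words, one bit per register.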
+
+ /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
+ /// pointers. This array is owned by the MachineFunction.
+ MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
+
+ /// extractLoadMemRefs - Allocate an array and populate it with just the
+ /// load information from the given MachineMemOperand sequence.
+ std::pair<MachineInstr::mmo_iterator,
+ MachineInstr::mmo_iterator>
+ extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
+ MachineInstr::mmo_iterator End);
+
+ /// extractStoreMemRefs - Allocate an array and populate it with just the
+ /// store information from the given MachineMemOperand sequence.
+ std::pair<MachineInstr::mmo_iterator,
+ MachineInstr::mmo_iterator>
+ extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
+ MachineInstr::mmo_iterator End);
+
+ /// Allocate a string and populate it with the given external symbol name.
+ const char *createExternalSymbolName(StringRef Name);
+
+ //===--------------------------------------------------------------------===//
+ // Label Manipulation.
+ //
+
+ /// getJTISymbol - Return the MCSymbol for the specified non-empty jump table.
+ /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
+ /// normal 'L' label is returned.
+ MCSymbol *getJTISymbol(unsigned JTI, MCContext &Ctx,
+ bool isLinkerPrivate = false) const;
+
+ /// getPICBaseSymbol - Return a function-local symbol to represent the PIC
+ /// base.
+ MCSymbol *getPICBaseSymbol() const;
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// machine function as a graph of machine basic blocks... these are
+// the same as the machine basic block iterators, except that the root
+// node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<MachineFunction*> :
+ public GraphTraits<MachineBasicBlock*> {
+ static NodeType *getEntryNode(MachineFunction *F) {
+ return &F->front();
+ }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef MachineFunction::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
+ static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
+ static unsigned size (MachineFunction *F) { return F->size(); }
+};
+template <> struct GraphTraits<const MachineFunction*> :
+ public GraphTraits<const MachineBasicBlock*> {
+ static NodeType *getEntryNode(const MachineFunction *F) {
+ return &F->front();
+ }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef MachineFunction::const_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(const MachineFunction *F) {
+ return F->begin();
+ }
+ static nodes_iterator nodes_end (const MachineFunction *F) {
+ return F->end();
+ }
+ static unsigned size (const MachineFunction *F) {
+ return F->size();
+ }
+};
+
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order. Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineFunction*> > :
+ public GraphTraits<Inverse<MachineBasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<MachineFunction*> G) {
+ return &G.Graph->front();
+ }
+};
+template <> struct GraphTraits<Inverse<const MachineFunction*> > :
+ public GraphTraits<Inverse<const MachineBasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<const MachineFunction *> G) {
+ return &G.Graph->front();
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
new file mode 100644
index 0000000..4c0f5e6
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionAnalysis.h
@@ -0,0 +1,55 @@
+//===-- MachineFunctionAnalysis.h - Owner of MachineFunctions ----*-C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachineFunctionAnalysis class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
+#define LLVM_CODEGEN_MACHINEFUNCTIONANALYSIS_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class MachineFunction;
+class MachineFunctionInitializer;
+class TargetMachine;
+
+/// MachineFunctionAnalysis - This class is a Pass that manages a
+/// MachineFunction object.
+struct MachineFunctionAnalysis : public FunctionPass {
+private:
+ const TargetMachine &TM;
+ MachineFunction *MF;
+ unsigned NextFnNum;
+ MachineFunctionInitializer *MFInitializer;
+
+public:
+ static char ID;
+ explicit MachineFunctionAnalysis(const TargetMachine &tm,
+ MachineFunctionInitializer *MFInitializer);
+ ~MachineFunctionAnalysis() override;
+
+ MachineFunction &getMF() const { return *MF; }
+
+ const char* getPassName() const override {
+ return "Machine Function Analysis";
+ }
+
+private:
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionInitializer.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionInitializer.h
new file mode 100644
index 0000000..ff4c29c
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionInitializer.h
@@ -0,0 +1,38 @@
+//===- MachineFunctionInitializer.h - machine function initializer --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares an interface that allows custom machine function
+// initialization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTIONINITIALIZER_H
+#define LLVM_CODEGEN_MACHINEFUNCTIONINITIALIZER_H
+
+namespace llvm {
+
+class MachineFunction;
+
+/// This interface provides a way to initialize machine functions after they are
+/// created by the machine function analysis pass.
+class MachineFunctionInitializer {
+ virtual void anchor();
+
+public:
+ virtual ~MachineFunctionInitializer() {}
+
+ /// Initialize the machine function.
+ ///
+  /// Return true if an error occurred.
+ virtual bool initializeMachineFunction(MachineFunction &MF) = 0;
+};
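+
+// A minimal implementation sketch (MyInitializer and its behavior are
+// hypothetical):
+//
+//   class MyInitializer : public MachineFunctionInitializer {
+//   public:
+//     bool initializeMachineFunction(MachineFunction &MF) override {
+//       // Populate MF from an external description, then report success.
+//       return false;
+//     }
+//   };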
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h b/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h
new file mode 100644
index 0000000..50a1f6e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineFunctionPass.h
@@ -0,0 +1,59 @@
+//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineFunctionPass class. MachineFunctionPasses are
+// just FunctionPasses, except they operate on machine code as part of a code
+// generator. Because they operate on machine code, not the LLVM
+// representation, MachineFunctionPasses are not allowed to modify the LLVM
+// representation. Due to this limitation, the MachineFunctionPass class takes
+// care of declaring that no LLVM passes are invalidated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+#define LLVM_CODEGEN_MACHINEFUNCTIONPASS_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+class MachineFunction;
+
+/// MachineFunctionPass - This class adapts the FunctionPass interface to
+/// allow convenient creation of passes that operate on the MachineFunction
+/// representation. Instead of overriding runOnFunction, subclasses
+/// override runOnMachineFunction.
+class MachineFunctionPass : public FunctionPass {
+protected:
+ explicit MachineFunctionPass(char &ID) : FunctionPass(ID) {}
+
+  /// runOnMachineFunction - This method must be overridden to perform the
+ /// desired machine code transformation or analysis.
+ ///
+ virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
+
+ /// getAnalysisUsage - Subclasses that override getAnalysisUsage
+ /// must call this.
+ ///
+ /// For MachineFunctionPasses, calling AU.preservesCFG() indicates that
+ /// the pass does not modify the MachineBasicBlock CFG.
+ ///
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+private:
+ /// createPrinterPass - Get a machine function printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+ bool runOnFunction(Function &F) override;
+};
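+
+// A minimal pass sketch (CountInstrsPass is hypothetical; registration
+// boilerplate is omitted):
+//
+//   struct CountInstrsPass : MachineFunctionPass {
+//     static char ID;
+//     CountInstrsPass() : MachineFunctionPass(ID) {}
+//     bool runOnMachineFunction(MachineFunction &MF) override {
+//       unsigned N = 0;
+//       for (MachineBasicBlock &MBB : MF)
+//         N += MBB.size();
+//       (void)N;
+//       return false; // analysis only; the function is not modified
+//     }
+//   };
+//   char CountInstrsPass::ID = 0;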
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
new file mode 100644
index 0000000..978864e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -0,0 +1,1270 @@
+//===-- llvm/CodeGen/MachineInstr.h - MachineInstr class --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineInstr class, which is the
+// basic representation for all target dependent machine instructions used by
+// the back end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTR_H
+#define LLVM_CODEGEN_MACHINEINSTR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/Support/ArrayRecycler.h"
+#include "llvm/Target/TargetOpcodes.h"
+
+namespace llvm {
+
+template <typename T> class SmallVectorImpl;
+class TargetInstrInfo;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class MachineFunction;
+class MachineMemOperand;
+
+//===----------------------------------------------------------------------===//
+/// Representation of each machine instruction.
+///
+/// This class isn't a POD type, but it must have a trivial destructor. When a
+/// MachineFunction is deleted, all the contained MachineInstrs are deallocated
+/// without having their destructor called.
+///
+class MachineInstr
+ : public ilist_node_with_parent<MachineInstr, MachineBasicBlock> {
+public:
+ typedef MachineMemOperand **mmo_iterator;
+
+ /// Flags to specify different kinds of comments to output in
+ /// assembly code. These flags carry semantic information not
+ /// otherwise easily derivable from the IR text.
+ ///
+ enum CommentFlag {
+ ReloadReuse = 0x1
+ };
+
+ enum MIFlag {
+ NoFlags = 0,
+ FrameSetup = 1 << 0, // Instruction is used as a part of
+ // function frame setup code.
+ FrameDestroy = 1 << 1, // Instruction is used as a part of
+ // function frame destruction code.
+ BundledPred = 1 << 2, // Instruction has bundled predecessors.
+ BundledSucc = 1 << 3 // Instruction has bundled successors.
+ };
+private:
+ const MCInstrDesc *MCID; // Instruction descriptor.
+ MachineBasicBlock *Parent; // Pointer to the owning basic block.
+
+ // Operands are allocated by an ArrayRecycler.
+ MachineOperand *Operands; // Pointer to the first operand.
+ unsigned NumOperands; // Number of operands on instruction.
+ typedef ArrayRecycler<MachineOperand>::Capacity OperandCapacity;
+ OperandCapacity CapOperands; // Capacity of the Operands array.
+
+ uint8_t Flags; // Various bits of additional
+ // information about machine
+ // instruction.
+
+ uint8_t AsmPrinterFlags; // Various bits of information used by
+ // the AsmPrinter to emit helpful
+ // comments. This is *not* semantic
+ // information. Do not use this for
+ // anything other than to convey comment
+ // information to AsmPrinter.
+
+ uint8_t NumMemRefs; // Information on memory references.
+  // Note that MemRefs == nullptr means 'don't know', not 'no memory access'.
+ // Calling code must treat missing information conservatively. If the number
+ // of memory operands required to be precise exceeds the maximum value of
+ // NumMemRefs - currently 256 - we remove the operands entirely. Note also
+ // that this is a non-owning reference to a shared copy on write buffer owned
+ // by the MachineFunction and created via MF.allocateMemRefsArray.
+ mmo_iterator MemRefs;
+
+ DebugLoc debugLoc; // Source line information.
+
+ MachineInstr(const MachineInstr&) = delete;
+ void operator=(const MachineInstr&) = delete;
+ // Use MachineFunction::DeleteMachineInstr() instead.
+ ~MachineInstr() = delete;
+
+ // Intrusive list support
+ friend struct ilist_traits<MachineInstr>;
+ friend struct ilist_traits<MachineBasicBlock>;
+ void setParent(MachineBasicBlock *P) { Parent = P; }
+
+ /// This constructor creates a copy of the given
+ /// MachineInstr in the given MachineFunction.
+ MachineInstr(MachineFunction &, const MachineInstr &);
+
+  /// This constructor creates a MachineInstr and adds the implicit operands.
+  /// It reserves space for the number of operands specified by the
+  /// MCInstrDesc. An explicit DebugLoc is supplied.
+ MachineInstr(MachineFunction &, const MCInstrDesc &MCID, DebugLoc dl,
+ bool NoImp = false);
+
+ // MachineInstrs are pool-allocated and owned by MachineFunction.
+ friend class MachineFunction;
+
+public:
+ const MachineBasicBlock* getParent() const { return Parent; }
+ MachineBasicBlock* getParent() { return Parent; }
+
+ /// Return the asm printer flags bitvector.
+ uint8_t getAsmPrinterFlags() const { return AsmPrinterFlags; }
+
+ /// Clear the AsmPrinter bitvector.
+ void clearAsmPrinterFlags() { AsmPrinterFlags = 0; }
+
+ /// Return whether an AsmPrinter flag is set.
+ bool getAsmPrinterFlag(CommentFlag Flag) const {
+ return AsmPrinterFlags & Flag;
+ }
+
+ /// Set a flag for the AsmPrinter.
+ void setAsmPrinterFlag(CommentFlag Flag) {
+ AsmPrinterFlags |= (uint8_t)Flag;
+ }
+
+ /// Clear specific AsmPrinter flags.
+ void clearAsmPrinterFlag(CommentFlag Flag) {
+ AsmPrinterFlags &= ~Flag;
+ }
+
+ /// Return the MI flags bitvector.
+ uint8_t getFlags() const {
+ return Flags;
+ }
+
+ /// Return whether an MI flag is set.
+ bool getFlag(MIFlag Flag) const {
+ return Flags & Flag;
+ }
+
+ /// Set a MI flag.
+ void setFlag(MIFlag Flag) {
+ Flags |= (uint8_t)Flag;
+ }
+
+ void setFlags(unsigned flags) {
+ // Filter out the automatically maintained flags.
+ unsigned Mask = BundledPred | BundledSucc;
+ Flags = (Flags & Mask) | (flags & ~Mask);
+ }
+
+ /// clearFlag - Clear a MI flag.
+ void clearFlag(MIFlag Flag) {
+ Flags &= ~((uint8_t)Flag);
+ }
+
+ /// Return true if MI is in a bundle (but not the first MI in a bundle).
+ ///
+ /// A bundle looks like this before it's finalized:
+ /// ----------------
+ /// | MI |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+  /// In this case, the first MI starts a bundle but is not inside a bundle;
+  /// the next 2 MIs are considered "inside" the bundle.
+ ///
+ /// After a bundle is finalized, it looks like this:
+ /// ----------------
+ /// | Bundle |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// |
+ /// ----------------
+ /// | MI * |
+ /// ----------------
+ /// The first instruction has the special opcode "BUNDLE". It's not "inside"
+ /// a bundle, but the next three MIs are.
+ bool isInsideBundle() const {
+ return getFlag(BundledPred);
+ }
+
+  /// Return true if this instruction is part of a bundle. This is true if
+  /// either this instruction or its following instruction is marked
+  /// "InsideBundle".
+ bool isBundled() const {
+ return isBundledWithPred() || isBundledWithSucc();
+ }
+
+ /// Return true if this instruction is part of a bundle, and it is not the
+ /// first instruction in the bundle.
+ bool isBundledWithPred() const { return getFlag(BundledPred); }
+
+ /// Return true if this instruction is part of a bundle, and it is not the
+ /// last instruction in the bundle.
+ bool isBundledWithSucc() const { return getFlag(BundledSucc); }
+
+ /// Bundle this instruction with its predecessor. This can be an unbundled
+ /// instruction, or it can be the first instruction in a bundle.
+ void bundleWithPred();
+
+ /// Bundle this instruction with its successor. This can be an unbundled
+ /// instruction, or it can be the last instruction in a bundle.
+ void bundleWithSucc();
+
+ /// Break bundle above this instruction.
+ void unbundleFromPred();
+
+ /// Break bundle below this instruction.
+ void unbundleFromSucc();
+
+  /// Returns the debug location of this MachineInstr.
+ const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+ /// Return the debug variable referenced by
+ /// this DBG_VALUE instruction.
+ const DILocalVariable *getDebugVariable() const {
+ assert(isDebugValue() && "not a DBG_VALUE");
+ return cast<DILocalVariable>(getOperand(2).getMetadata());
+ }
+
+ /// Return the complex address expression referenced by
+ /// this DBG_VALUE instruction.
+ const DIExpression *getDebugExpression() const {
+ assert(isDebugValue() && "not a DBG_VALUE");
+ return cast<DIExpression>(getOperand(3).getMetadata());
+ }
+
+ /// Emit an error referring to the source location of this instruction.
+ /// This should only be used for inline assembly that is somehow
+ /// impossible to compile. Other errors should have been handled much
+ /// earlier.
+ ///
+ /// If this method returns, the caller should try to recover from the error.
+ ///
+ void emitError(StringRef Msg) const;
+
+ /// Returns the target instruction descriptor of this MachineInstr.
+ const MCInstrDesc &getDesc() const { return *MCID; }
+
+ /// Returns the opcode of this MachineInstr.
+ unsigned getOpcode() const { return MCID->Opcode; }
+
+ /// Access to explicit operands of the instruction.
+ ///
+ unsigned getNumOperands() const { return NumOperands; }
+
+ const MachineOperand& getOperand(unsigned i) const {
+ assert(i < getNumOperands() && "getOperand() out of range!");
+ return Operands[i];
+ }
+ MachineOperand& getOperand(unsigned i) {
+ assert(i < getNumOperands() && "getOperand() out of range!");
+ return Operands[i];
+ }
+
+ /// Returns the number of non-implicit operands.
+ unsigned getNumExplicitOperands() const;
+
+ /// iterator/begin/end - Iterate over all operands of a machine instruction.
+ typedef MachineOperand *mop_iterator;
+ typedef const MachineOperand *const_mop_iterator;
+
+ mop_iterator operands_begin() { return Operands; }
+ mop_iterator operands_end() { return Operands + NumOperands; }
+
+ const_mop_iterator operands_begin() const { return Operands; }
+ const_mop_iterator operands_end() const { return Operands + NumOperands; }
+
+ iterator_range<mop_iterator> operands() {
+ return make_range(operands_begin(), operands_end());
+ }
+ iterator_range<const_mop_iterator> operands() const {
+ return make_range(operands_begin(), operands_end());
+ }
+ iterator_range<mop_iterator> explicit_operands() {
+ return make_range(operands_begin(),
+ operands_begin() + getNumExplicitOperands());
+ }
+ iterator_range<const_mop_iterator> explicit_operands() const {
+ return make_range(operands_begin(),
+ operands_begin() + getNumExplicitOperands());
+ }
+ iterator_range<mop_iterator> implicit_operands() {
+ return make_range(explicit_operands().end(), operands_end());
+ }
+ iterator_range<const_mop_iterator> implicit_operands() const {
+ return make_range(explicit_operands().end(), operands_end());
+ }
+ /// Returns a range over all explicit operands that are register definitions.
+  /// Implicit definitions are not included!
+ iterator_range<mop_iterator> defs() {
+ return make_range(operands_begin(),
+ operands_begin() + getDesc().getNumDefs());
+ }
+ /// \copydoc defs()
+ iterator_range<const_mop_iterator> defs() const {
+ return make_range(operands_begin(),
+ operands_begin() + getDesc().getNumDefs());
+ }
+ /// Returns a range that includes all operands that are register uses.
+ /// This may include unrelated operands which are not register uses.
+ iterator_range<mop_iterator> uses() {
+ return make_range(operands_begin() + getDesc().getNumDefs(),
+ operands_end());
+ }
+ /// \copydoc uses()
+ iterator_range<const_mop_iterator> uses() const {
+ return make_range(operands_begin() + getDesc().getNumDefs(),
+ operands_end());
+ }
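+
+  // These operand ranges compose with range-for; e.g. (a sketch, with MI
+  // assumed to be a MachineInstr in scope):
+  //
+  //   for (const MachineOperand &MO : MI.uses())
+  //     if (MO.isReg() && MO.readsReg())
+  //       (void)MO; // visit each register use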
+
+ /// Returns the number of the operand iterator \p I points to.
+ unsigned getOperandNo(const_mop_iterator I) const {
+ return I - operands_begin();
+ }
+
+ /// Access to memory operands of the instruction
+ mmo_iterator memoperands_begin() const { return MemRefs; }
+ mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
+  /// Return true if we don't have any memory operands which describe the
+  /// memory access performed by this instruction. If this is true, calling
+  /// code must be conservative.
+ bool memoperands_empty() const { return NumMemRefs == 0; }
+
+ iterator_range<mmo_iterator> memoperands() {
+ return make_range(memoperands_begin(), memoperands_end());
+ }
+ iterator_range<mmo_iterator> memoperands() const {
+ return make_range(memoperands_begin(), memoperands_end());
+ }
+
+ /// Return true if this instruction has exactly one MachineMemOperand.
+ bool hasOneMemOperand() const {
+ return NumMemRefs == 1;
+ }
+
+ /// API for querying MachineInstr properties. They are the same as MCInstrDesc
+ /// queries but they are bundle aware.
+
+ enum QueryType {
+ IgnoreBundle, // Ignore bundles
+ AnyInBundle, // Return true if any instruction in bundle has property
+ AllInBundle // Return true if all instructions in bundle have property
+ };
+
+ /// Return true if the instruction (or in the case of a bundle,
+ /// the instructions inside the bundle) has the specified property.
+ /// The first argument is the property being queried.
+ /// The second argument indicates whether the query should look inside
+ /// instruction bundles.
+ bool hasProperty(unsigned MCFlag, QueryType Type = AnyInBundle) const {
+ // Inline the fast path for unbundled or bundle-internal instructions.
+ if (Type == IgnoreBundle || !isBundled() || isBundledWithPred())
+ return getDesc().getFlags() & (1 << MCFlag);
+
+ // If this is the first instruction in a bundle, take the slow path.
+ return hasPropertyInBundle(1 << MCFlag, Type);
+ }
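+
+  // Sketch of a bundle-aware query, assuming an in-scope MachineInstr &MI;
+  // with AnyInBundle the bundle header answers for its whole bundle:
+  //
+  //   if (MI.hasProperty(MCID::Call, MachineInstr::AnyInBundle))
+  //     ...; // some instruction in MI's bundle (or MI itself) is a call.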
+
+ /// Return true if this instruction can have a variable number of operands.
+ /// In this case, the variable operands will be after the normal
+ /// operands but before the implicit definitions and uses (if any are
+ /// present).
+ bool isVariadic(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Variadic, Type);
+ }
+
+  /// Return true if this instruction has an optional definition, e.g.
+  /// ARM instructions which can set condition code if the 's' bit is set.
+ bool hasOptionalDef(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::HasOptionalDef, Type);
+ }
+
+ /// Return true if this is a pseudo instruction that doesn't
+ /// correspond to a real machine instruction.
+ bool isPseudo(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Pseudo, Type);
+ }
+
+ bool isReturn(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Return, Type);
+ }
+
+ bool isCall(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Call, Type);
+ }
+
+ /// Returns true if the specified instruction stops control flow
+ /// from executing the instruction immediately following it. Examples include
+ /// unconditional branches and return instructions.
+ bool isBarrier(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Barrier, Type);
+ }
+
+  /// Returns true if this instruction is part of the terminator for a basic
+  /// block. Typically this is things like return and branch instructions.
+ ///
+ /// Various passes use this to insert code into the bottom of a basic block,
+ /// but before control flow occurs.
+ bool isTerminator(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Terminator, Type);
+ }
+
+ /// Returns true if this is a conditional, unconditional, or indirect branch.
+ /// Predicates below can be used to discriminate between
+ /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+ /// get more information.
+ bool isBranch(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Branch, Type);
+ }
+
+ /// Return true if this is an indirect branch, such as a
+ /// branch through a register.
+ bool isIndirectBranch(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::IndirectBranch, Type);
+ }
+
+ /// Return true if this is a branch which may fall
+ /// through to the next instruction or may transfer control flow to some other
+ /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
+ /// information about this branch.
+ bool isConditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && !isBarrier(Type) && !isIndirectBranch(Type);
+ }
+
+ /// Return true if this is a branch which always
+ /// transfers control flow to some other block. The
+ /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+ /// about this branch.
+ bool isUnconditionalBranch(QueryType Type = AnyInBundle) const {
+    return isBranch(Type) && isBarrier(Type) && !isIndirectBranch(Type);
+ }
+
+ /// Return true if this instruction has a predicate operand that
+ /// controls execution. It may be set to 'always', or may be set to other
+ /// values. There are various methods in TargetInstrInfo that can be used to
+ /// control and modify the predicate in this instruction.
+ bool isPredicable(QueryType Type = AllInBundle) const {
+    // If it's a bundle then all bundled instructions must be predicable for
+    // this to return true.
+ return hasProperty(MCID::Predicable, Type);
+ }
+
+ /// Return true if this instruction is a comparison.
+ bool isCompare(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Compare, Type);
+ }
+
+ /// Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::MoveImm, Type);
+ }
+
+ /// Return true if this instruction is a bitcast instruction.
+ bool isBitcast(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Bitcast, Type);
+ }
+
+ /// Return true if this instruction is a select instruction.
+ bool isSelect(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Select, Type);
+ }
+
+ /// Return true if this instruction cannot be safely duplicated.
+  /// For example, if the instruction has unique labels attached
+ /// to it, duplicating it would cause multiple definition errors.
+ bool isNotDuplicable(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::NotDuplicable, Type);
+ }
+
+ /// Return true if this instruction is convergent.
+ /// Convergent instructions can not be made control-dependent on any
+ /// additional values.
+ bool isConvergent(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::Convergent, Type);
+ }
+
+ /// Returns true if the specified instruction has a delay slot
+ /// which must be filled by the code generator.
+ bool hasDelaySlot(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::DelaySlot, Type);
+ }
+
+ /// Return true for instructions that can be folded as
+ /// memory operands in other instructions. The most common use for this
+ /// is instructions that are simple loads from memory that don't modify
+ /// the loaded value in any way, but it can also be used for instructions
+ /// that can be expressed as constant-pool loads, such as V_SETALLONES
+ /// on x86, to allow them to be folded when it is beneficial.
+ /// This should only be set on instructions that return a value in their
+ /// only virtual register definition.
+ bool canFoldAsLoad(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::FoldableAsLoad, Type);
+ }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic REG_SEQUENCE instructions.
+ /// E.g., on ARM,
+ /// dX VMOVDRR rY, rZ
+ /// is equivalent to
+ /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
+  /// overridden accordingly.
+ bool isRegSequenceLike(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::RegSequence, Type);
+ }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic EXTRACT_SUBREG instructions.
+ /// E.g., on ARM,
+ /// rX, rY VMOVRRD dZ
+ /// is equivalent to two EXTRACT_SUBREG:
+ /// rX = EXTRACT_SUBREG dZ, ssub_0
+ /// rY = EXTRACT_SUBREG dZ, ssub_1
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
+  /// overridden accordingly.
+ bool isExtractSubregLike(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::ExtractSubreg, Type);
+ }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic INSERT_SUBREG instructions.
+ /// E.g., on ARM,
+ /// dX = VSETLNi32 dY, rZ, Imm
+  /// is equivalent to an INSERT_SUBREG:
+ /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
+  /// overridden accordingly.
+ bool isInsertSubregLike(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::InsertSubreg, Type);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Side Effect Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// Return true if this instruction could possibly read memory.
+ /// Instructions with this flag set are not necessarily simple load
+ /// instructions, they may load a value and modify it, for example.
+ bool mayLoad(QueryType Type = AnyInBundle) const {
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_MayLoad)
+ return true;
+ }
+ return hasProperty(MCID::MayLoad, Type);
+ }
+
+ /// Return true if this instruction could possibly modify memory.
+ /// Instructions with this flag set are not necessarily simple store
+ /// instructions, they may store a modified value based on their operands, or
+ /// may not actually modify anything, for example.
+ bool mayStore(QueryType Type = AnyInBundle) const {
+ if (isInlineAsm()) {
+ unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
+ if (ExtraInfo & InlineAsm::Extra_MayStore)
+ return true;
+ }
+ return hasProperty(MCID::MayStore, Type);
+ }
+
+ /// Return true if this instruction could possibly read or modify memory.
+ bool mayLoadOrStore(QueryType Type = AnyInBundle) const {
+ return mayLoad(Type) || mayStore(Type);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Flags that indicate whether an instruction can be modified by a method.
+ //===--------------------------------------------------------------------===//
+
+ /// Return true if this may be a 2- or 3-address
+ /// instruction (of the form "X = op Y, Z, ..."), which produces the same
+ /// result if Y and Z are exchanged. If this flag is set, then the
+ /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+ /// instruction.
+ ///
+ /// Note that this flag may be set on instructions that are only commutable
+ /// sometimes. In these cases, the call to commuteInstruction will fail.
+ /// Also note that some instructions require non-trivial modification to
+ /// commute them.
+ bool isCommutable(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::Commutable, Type);
+ }
+
+ /// Return true if this is a 2-address instruction
+ /// which can be changed into a 3-address instruction if needed. Doing this
+ /// transformation can be profitable in the register allocator, because it
+ /// means that the instruction can use a 2-address form if possible, but
+ /// degrade into a less efficient form if the source and dest register cannot
+ /// be assigned to the same register. For example, this allows the x86
+ /// backend to turn a "shl reg, 3" instruction into an LEA instruction, which
+ /// is the same speed as the shift but has bigger code size.
+ ///
+ /// If this returns true, then the target must implement the
+ /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+ /// is allowed to fail if the transformation isn't valid for this specific
+ /// instruction (e.g. shl reg, 4 on x86).
+ ///
+ bool isConvertibleTo3Addr(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::ConvertibleTo3Addr, Type);
+ }
+
+ /// Return true if this instruction requires
+ /// custom insertion support when the DAG scheduler is inserting it into a
+ /// machine basic block. If this is true for the instruction, it basically
+ /// means that it is a pseudo instruction used at SelectionDAG time that is
+ /// expanded out into magic code by the target when MachineInstrs are formed.
+ ///
+ /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+ /// is used to insert this into the MachineBasicBlock.
+ bool usesCustomInsertionHook(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::UsesCustomInserter, Type);
+ }
+
+ /// Return true if this instruction requires *adjustment*
+ /// after instruction selection by calling a target hook. For example, this
+ /// can be used to fill in ARM 's' optional operand depending on whether
+ /// the conditional flag register is used.
+ bool hasPostISelHook(QueryType Type = IgnoreBundle) const {
+ return hasProperty(MCID::HasPostISelHook, Type);
+ }
+
+ /// Returns true if this instruction is a candidate for remat.
+ /// This flag is deprecated, please don't use it anymore. If this
+ /// flag is set, the isReallyTriviallyReMaterializable() method is called to
+ /// verify the instruction is really rematable.
+ bool isRematerializable(QueryType Type = AllInBundle) const {
+ // It's only possible to re-mat a bundle if all bundled instructions are
+ // re-materializable.
+ return hasProperty(MCID::Rematerializable, Type);
+ }
+
+ /// Returns true if this instruction has the same cost (or less) than a move
+ /// instruction. This is useful during certain types of optimizations
+ /// (e.g., remat during two-address conversion or machine licm)
+ /// where we would like to remat or hoist the instruction, but not if it costs
+ /// more than moving the instruction into the appropriate register. Note, we
+ /// are not marking copies from and to the same register class with this flag.
+ bool isAsCheapAsAMove(QueryType Type = AllInBundle) const {
+ // Only returns true for a bundle if all bundled instructions are cheap.
+ return hasProperty(MCID::CheapAsAMove, Type);
+ }
+
+  /// Returns true if this instruction's source operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::STRD's two source registers must be an
+ /// even / odd pair, ARM::STM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for sources of instructions with this flag.
+ bool hasExtraSrcRegAllocReq(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::ExtraSrcRegAllocReq, Type);
+ }
+
+  /// Returns true if this instruction's def operands
+ /// have special register allocation requirements that are not captured by the
+ /// operand register classes. e.g. ARM::LDRD's two def registers must be an
+ /// even / odd pair, ARM::LDM registers have to be in ascending order.
+ /// Post-register allocation passes should not attempt to change allocations
+ /// for definitions of instructions with this flag.
+ bool hasExtraDefRegAllocReq(QueryType Type = AnyInBundle) const {
+ return hasProperty(MCID::ExtraDefRegAllocReq, Type);
+ }
+
+ enum MICheckType {
+ CheckDefs, // Check all operands for equality
+ CheckKillDead, // Check all operands including kill / dead markers
+ IgnoreDefs, // Ignore all definitions
+ IgnoreVRegDefs // Ignore virtual register definitions
+ };
+
+ /// Return true if this instruction is identical to (same
+ /// opcode and same operands as) the specified instruction.
+ bool isIdenticalTo(const MachineInstr *Other,
+ MICheckType Check = CheckDefs) const;
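+
+  // Sketch of the CSE-style use of this predicate, assuming two in-scope
+  // pointers MI and OtherMI that may define different virtual registers:
+  //
+  //   if (MI->isIdenticalTo(OtherMI, MachineInstr::IgnoreVRegDefs))
+  //     ...; // the instructions compute the same value and can be merged.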
+
+ /// Unlink 'this' from the containing basic block, and return it without
+ /// deleting it.
+ ///
+ /// This function can not be used on bundled instructions, use
+ /// removeFromBundle() to remove individual instructions from a bundle.
+ MachineInstr *removeFromParent();
+
+ /// Unlink this instruction from its basic block and return it without
+ /// deleting it.
+ ///
+ /// If the instruction is part of a bundle, the other instructions in the
+ /// bundle remain bundled.
+ MachineInstr *removeFromBundle();
+
+ /// Unlink 'this' from the containing basic block and delete it.
+ ///
+ /// If this instruction is the header of a bundle, the whole bundle is erased.
+ /// This function can not be used for instructions inside a bundle, use
+ /// eraseFromBundle() to erase individual bundled instructions.
+ void eraseFromParent();
+
+ /// Unlink 'this' from the containing basic block and delete it.
+ ///
+ /// For all definitions mark their uses in DBG_VALUE nodes
+ /// as undefined. Otherwise like eraseFromParent().
+ void eraseFromParentAndMarkDBGValuesForRemoval();
+
+  /// Unlink 'this' from its basic block and delete it.
+ ///
+ /// If the instruction is part of a bundle, the other instructions in the
+ /// bundle remain bundled.
+ void eraseFromBundle();
+
+ bool isEHLabel() const { return getOpcode() == TargetOpcode::EH_LABEL; }
+ bool isGCLabel() const { return getOpcode() == TargetOpcode::GC_LABEL; }
+
+ /// Returns true if the MachineInstr represents a label.
+ bool isLabel() const { return isEHLabel() || isGCLabel(); }
+ bool isCFIInstruction() const {
+ return getOpcode() == TargetOpcode::CFI_INSTRUCTION;
+ }
+
+ // True if the instruction represents a position in the function.
+ bool isPosition() const { return isLabel() || isCFIInstruction(); }
+
+ bool isDebugValue() const { return getOpcode() == TargetOpcode::DBG_VALUE; }
+ /// A DBG_VALUE is indirect iff the first operand is a register and
+ /// the second operand is an immediate.
+ bool isIndirectDebugValue() const {
+ return isDebugValue()
+ && getOperand(0).isReg()
+ && getOperand(1).isImm();
+ }
+
+ bool isPHI() const { return getOpcode() == TargetOpcode::PHI; }
+ bool isKill() const { return getOpcode() == TargetOpcode::KILL; }
+ bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; }
+ bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; }
+ bool isMSInlineAsm() const {
+ return getOpcode() == TargetOpcode::INLINEASM && getInlineAsmDialect();
+ }
+ bool isStackAligningInlineAsm() const;
+ InlineAsm::AsmDialect getInlineAsmDialect() const;
+ bool isInsertSubreg() const {
+ return getOpcode() == TargetOpcode::INSERT_SUBREG;
+ }
+ bool isSubregToReg() const {
+ return getOpcode() == TargetOpcode::SUBREG_TO_REG;
+ }
+ bool isRegSequence() const {
+ return getOpcode() == TargetOpcode::REG_SEQUENCE;
+ }
+ bool isBundle() const {
+ return getOpcode() == TargetOpcode::BUNDLE;
+ }
+ bool isCopy() const {
+ return getOpcode() == TargetOpcode::COPY;
+ }
+ bool isFullCopy() const {
+ return isCopy() && !getOperand(0).getSubReg() && !getOperand(1).getSubReg();
+ }
+ bool isExtractSubreg() const {
+ return getOpcode() == TargetOpcode::EXTRACT_SUBREG;
+ }
+
+ /// Return true if the instruction behaves like a copy.
+ /// This does not include native copy instructions.
+ bool isCopyLike() const {
+ return isCopy() || isSubregToReg();
+ }
+
+  /// Return true if the instruction is an identity copy.
+ bool isIdentityCopy() const {
+ return isCopy() && getOperand(0).getReg() == getOperand(1).getReg() &&
+ getOperand(0).getSubReg() == getOperand(1).getSubReg();
+ }
+
+ /// Return true if this is a transient instruction that is
+ /// either very likely to be eliminated during register allocation (such as
+ /// copy-like instructions), or if this instruction doesn't have an
+ /// execution-time cost.
+ bool isTransient() const {
+ switch(getOpcode()) {
+ default: return false;
+ // Copy-like instructions are usually eliminated during register allocation.
+ case TargetOpcode::PHI:
+ case TargetOpcode::COPY:
+ case TargetOpcode::INSERT_SUBREG:
+ case TargetOpcode::SUBREG_TO_REG:
+ case TargetOpcode::REG_SEQUENCE:
+ // Pseudo-instructions that don't produce any real output.
+ case TargetOpcode::IMPLICIT_DEF:
+ case TargetOpcode::KILL:
+ case TargetOpcode::CFI_INSTRUCTION:
+ case TargetOpcode::EH_LABEL:
+ case TargetOpcode::GC_LABEL:
+ case TargetOpcode::DBG_VALUE:
+ return true;
+ }
+ }
+
+ /// Return the number of instructions inside the MI bundle, excluding the
+ /// bundle header.
+ ///
+ /// This is the number of instructions that MachineBasicBlock::iterator
+ /// skips, 0 for unbundled instructions.
+ unsigned getBundleSize() const;
+
+ /// Return true if the MachineInstr reads the specified register.
+ /// If TargetRegisterInfo is passed, then it also checks if there
+ /// is a read of a super-register.
+ /// This does not count partial redefines of virtual registers as reads:
+ /// %reg1024:6 = OP.
+ bool readsRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
+ return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
+ }
+
+ /// Return true if the MachineInstr reads the specified virtual register.
+ /// Take into account that a partial define is a
+ /// read-modify-write operation.
+ bool readsVirtualRegister(unsigned Reg) const {
+ return readsWritesVirtualRegister(Reg).first;
+ }
+
+ /// Return a pair of bools (reads, writes) indicating if this instruction
+ /// reads or writes Reg. This also considers partial defines.
+ /// If Ops is not null, all operand indices for Reg are added.
+ std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
+ SmallVectorImpl<unsigned> *Ops = nullptr) const;
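+
+  // Sketch, assuming an in-scope MachineInstr &MI and a virtual register Reg:
+  //
+  //   bool Reads, Writes;
+  //   std::tie(Reads, Writes) = MI.readsWritesVirtualRegister(Reg);
+  //   if (Reads && Writes)
+  //     ...; // e.g. a partial redefinition acting as read-modify-write.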
+
+ /// Return true if the MachineInstr kills the specified register.
+ /// If TargetRegisterInfo is passed, then it also checks if there is
+ /// a kill of a super-register.
+ bool killsRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
+ return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
+ }
+
+ /// Return true if the MachineInstr fully defines the specified register.
+ /// If TargetRegisterInfo is passed, then it also checks
+ /// if there is a def of a super-register.
+  /// NOTE: This ignores subreg indices on virtual registers.
+ bool definesRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
+ return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
+ }
+
+ /// Return true if the MachineInstr modifies (fully define or partially
+ /// define) the specified register.
+  /// NOTE: This ignores subreg indices on virtual registers.
+ bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const {
+ return findRegisterDefOperandIdx(Reg, false, true, TRI) != -1;
+ }
+
+ /// Returns true if the register is dead in this machine instruction.
+ /// If TargetRegisterInfo is passed, then it also checks
+ /// if there is a dead def of a super-register.
+ bool registerDefIsDead(unsigned Reg,
+ const TargetRegisterInfo *TRI = nullptr) const {
+ return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
+ }
+
+  /// Returns the operand index that is a use of the specified register or -1
+ /// if it is not found. It further tightens the search criteria to a use
+ /// that kills the register if isKill is true.
+ int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
+ const TargetRegisterInfo *TRI = nullptr) const;
+
+ /// Wrapper for findRegisterUseOperandIdx, it returns
+ /// a pointer to the MachineOperand rather than an index.
+ MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
+ const TargetRegisterInfo *TRI = nullptr) {
+ int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
+ return (Idx == -1) ? nullptr : &getOperand(Idx);
+ }
+
+ const MachineOperand *findRegisterUseOperand(
+ unsigned Reg, bool isKill = false,
+ const TargetRegisterInfo *TRI = nullptr) const {
+ return const_cast<MachineInstr *>(this)->
+ findRegisterUseOperand(Reg, isKill, TRI);
+ }
+
+ /// Returns the operand index that is a def of the specified register or
+ /// -1 if it is not found. If isDead is true, defs that are not dead are
+ /// skipped. If Overlap is true, then it also looks for defs that merely
+ /// overlap the specified register. If TargetRegisterInfo is non-null,
+ /// then it also checks if there is a def of a super-register.
+ /// This may also return a register mask operand when Overlap is true.
+ int findRegisterDefOperandIdx(unsigned Reg,
+ bool isDead = false, bool Overlap = false,
+ const TargetRegisterInfo *TRI = nullptr) const;
+
+ /// Wrapper for findRegisterDefOperandIdx, it returns
+ /// a pointer to the MachineOperand rather than an index.
+ MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
+ const TargetRegisterInfo *TRI = nullptr) {
+ int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
+ return (Idx == -1) ? nullptr : &getOperand(Idx);
+ }
+
+ /// Find the index of the first operand in the
+ /// operand list that is used to represent the predicate. It returns -1 if
+ /// none is found.
+ int findFirstPredOperandIdx() const;
+
+ /// Find the index of the flag word operand that
+ /// corresponds to operand OpIdx on an inline asm instruction. Returns -1 if
+ /// getOperand(OpIdx) does not belong to an inline asm operand group.
+ ///
+ /// If GroupNo is not NULL, it will receive the number of the operand group
+ /// containing OpIdx.
+ ///
+ /// The flag operand is an immediate that can be decoded with methods like
+ /// InlineAsm::hasRegClassConstraint().
+ ///
+ int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
+
+ /// Compute the static register class constraint for operand OpIdx.
+ /// For normal instructions, this is derived from the MCInstrDesc.
+ /// For inline assembly it is derived from the flag words.
+ ///
+ /// Returns NULL if the static register class constraint cannot be
+ /// determined.
+ ///
+ const TargetRegisterClass*
+ getRegClassConstraint(unsigned OpIdx,
+ const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI) const;
+
+ /// \brief Applies the constraints (def/use) implied by this MI on \p Reg to
+ /// the given \p CurRC.
+ /// If \p ExploreBundle is set and MI is part of a bundle, all the
+ /// instructions inside the bundle will be taken into account. In other words,
+ /// this method accumulates all the constraints of the operand of this MI and
+ /// the related bundle if MI is a bundle or inside a bundle.
+ ///
+ /// Returns the register class that satisfies both \p CurRC and the
+ /// constraints set by MI. Returns NULL if such a register class does not
+ /// exist.
+ ///
+ /// \pre CurRC must not be NULL.
+ const TargetRegisterClass *getRegClassConstraintEffectForVReg(
+ unsigned Reg, const TargetRegisterClass *CurRC,
+ const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
+ bool ExploreBundle = false) const;
+
+ /// \brief Applies the constraints (def/use) implied by the \p OpIdx operand
+ /// to the given \p CurRC.
+ ///
+ /// Returns the register class that satisfies both \p CurRC and the
+ /// constraints set by \p OpIdx MI. Returns NULL if such a register class
+ /// does not exist.
+ ///
+ /// \pre CurRC must not be NULL.
+ /// \pre The operand at \p OpIdx must be a register.
+ const TargetRegisterClass *
+ getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC,
+ const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI) const;
+
+ /// Add a tie between the register operands at DefIdx and UseIdx.
+ /// The tie will cause the register allocator to ensure that the two
+ /// operands are assigned the same physical register.
+ ///
+ /// Tied operands are managed automatically for explicit operands in the
+ /// MCInstrDesc. This method is for exceptional cases like inline asm.
+ void tieOperands(unsigned DefIdx, unsigned UseIdx);
+
+ /// Given the index of a tied register operand, find the
+ /// operand it is tied to. Defs are tied to uses and vice versa. Returns the
+ /// index of the tied operand which must exist.
+ unsigned findTiedOperandIdx(unsigned OpIdx) const;
+
+ /// Given the index of a register def operand,
+ /// check if the register def is tied to a source operand, due to either
+ /// two-address elimination or inline assembly constraints. Returns the
+ /// first tied use operand index by reference if UseOpIdx is not null.
+ bool isRegTiedToUseOperand(unsigned DefOpIdx,
+ unsigned *UseOpIdx = nullptr) const {
+ const MachineOperand &MO = getOperand(DefOpIdx);
+ if (!MO.isReg() || !MO.isDef() || !MO.isTied())
+ return false;
+ if (UseOpIdx)
+ *UseOpIdx = findTiedOperandIdx(DefOpIdx);
+ return true;
+ }
+
+ /// Return true if the use operand of the specified index is tied to a def
+ /// operand. It also returns the def operand index by reference if DefOpIdx
+ /// is not null.
+ bool isRegTiedToDefOperand(unsigned UseOpIdx,
+ unsigned *DefOpIdx = nullptr) const {
+ const MachineOperand &MO = getOperand(UseOpIdx);
+ if (!MO.isReg() || !MO.isUse() || !MO.isTied())
+ return false;
+ if (DefOpIdx)
+ *DefOpIdx = findTiedOperandIdx(UseOpIdx);
+ return true;
+ }
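+
+  // Sketch of mapping a tied use back to its def, assuming an in-scope
+  // MachineInstr &MI and an operand index UseIdx known to be a register use:
+  //
+  //   unsigned DefIdx;
+  //   if (MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
+  //     ...; // getOperand(DefIdx) must get the same register as the use.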
+
+ /// Clears kill flags on all operands.
+ void clearKillInfo();
+
+ /// Replace all occurrences of FromReg with ToReg:SubIdx,
+ /// properly composing subreg indices where necessary.
+ void substituteRegister(unsigned FromReg, unsigned ToReg, unsigned SubIdx,
+ const TargetRegisterInfo &RegInfo);
+
+ /// We have determined MI kills a register. Look for the
+ /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
+  /// add an implicit operand if it's not found. Returns true if the operand
+ /// exists / is added.
+ bool addRegisterKilled(unsigned IncomingReg,
+ const TargetRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
+ /// Clear all kill flags affecting Reg. If RegInfo is
+ /// provided, this includes super-register kills.
+ void clearRegisterKills(unsigned Reg, const TargetRegisterInfo *RegInfo);
+
+ /// We have determined MI defined a register without a use.
+ /// Look for the operand that defines it and mark it as IsDead. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
+ /// true if the operand exists / is added.
+ bool addRegisterDead(unsigned Reg, const TargetRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
+ /// Clear all dead flags on operands defining register @p Reg.
+ void clearRegisterDeads(unsigned Reg);
+
+ /// Mark all subregister defs of register @p Reg with the undef flag.
+ /// This function is used when we determined to have a subregister def in an
+ /// otherwise undefined super register.
+ void setRegisterDefReadUndef(unsigned Reg, bool IsUndef = true);
+
+ /// We have determined MI defines a register. Make sure there is an operand
+ /// defining Reg.
+ void addRegisterDefined(unsigned Reg,
+ const TargetRegisterInfo *RegInfo = nullptr);
+
+ /// Mark every physreg used by this instruction as
+ /// dead except those in the UsedRegs list.
+ ///
+ /// On instructions with register mask operands, also add implicit-def
+ /// operands for all registers in UsedRegs.
+ void setPhysRegsDeadExcept(ArrayRef<unsigned> UsedRegs,
+ const TargetRegisterInfo &TRI);
+
+ /// Return true if it is safe to move this instruction. If
+ /// SawStore is set to true, it means that there is a store (or call) between
+ /// the instruction's location and its intended destination.
+ bool isSafeToMove(AliasAnalysis *AA, bool &SawStore) const;
+
+ /// Return true if this instruction may have an ordered
+ /// or volatile memory reference, or if the information describing the memory
+ /// reference is not available. Return false if it is known to have no
+ /// ordered or volatile memory references.
+ bool hasOrderedMemoryRef() const;
+
+ /// Return true if this instruction is loading from a
+ /// location whose value is invariant across the function. For example,
+ /// loading a value from the constant pool or from the argument area of
+  /// a function if it does not change. This should only return true if *all*
+ /// loads the instruction does are invariant (if it does multiple loads).
+ bool isInvariantLoad(AliasAnalysis *AA) const;
+
+ /// If the specified instruction is a PHI that always merges together the
+ /// same virtual register, return the register, otherwise return 0.
+ unsigned isConstantValuePHI() const;
+
+ /// Return true if this instruction has side effects that are not modeled
+ /// by mayLoad / mayStore, etc.
+ /// For all instructions, the property is encoded in MCInstrDesc::Flags
+  /// (see MCInstrDesc::hasUnmodeledSideEffects()). The only exception is the
+  /// INLINEASM instruction, in which case the side effect property is encoded
+ /// in one of its operands (see InlineAsm::Extra_HasSideEffect).
+ ///
+ bool hasUnmodeledSideEffects() const;
+
+ /// Returns true if it is illegal to fold a load across this instruction.
+ bool isLoadFoldBarrier() const;
+
+ /// Return true if all the defs of this instruction are dead.
+ bool allDefsAreDead() const;
+
+ /// Copy implicit register operands from specified
+ /// instruction to this instruction.
+ void copyImplicitOps(MachineFunction &MF, const MachineInstr *MI);
+
+ //
+ // Debugging support
+ //
+ void print(raw_ostream &OS, bool SkipOpers = false) const;
+ void print(raw_ostream &OS, ModuleSlotTracker &MST,
+ bool SkipOpers = false) const;
+ void dump() const;
+
+ //===--------------------------------------------------------------------===//
+ // Accessors used to build up machine instructions.
+
+ /// Add the specified operand to the instruction. If it is an implicit
+ /// operand, it is added to the end of the operand list. If it is an
+ /// explicit operand it is added at the end of the explicit operand list
+ /// (before the first implicit operand).
+ ///
+ /// MF must be the machine function that was used to allocate this
+ /// instruction.
+ ///
+ /// MachineInstrBuilder provides a more convenient interface for creating
+ /// instructions and adding operands.
+ void addOperand(MachineFunction &MF, const MachineOperand &Op);
+
+ /// Add an operand without providing an MF reference. This only works for
+ /// instructions that are inserted in a basic block.
+ ///
+ /// MachineInstrBuilder and the two-argument addOperand(MF, MO) should be
+ /// preferred.
+ void addOperand(const MachineOperand &Op);
+
+ /// Replace the instruction descriptor (thus opcode) of
+ /// the current instruction with a new one.
+ void setDesc(const MCInstrDesc &tid) { MCID = &tid; }
+
+  /// Replace the current debug location with a new one.
+  /// Avoid using this; the constructor argument is preferable.
+ void setDebugLoc(DebugLoc dl) {
+ debugLoc = std::move(dl);
+ assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+ }
+
+ /// Erase an operand from an instruction, leaving it with one
+ /// fewer operand than it started with.
+ void RemoveOperand(unsigned i);
+
+ /// Add a MachineMemOperand to the machine instruction.
+ /// This function should be used only occasionally. The setMemRefs function
+ /// is the primary method for setting up a MachineInstr's MemRefs list.
+ void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
+
+ /// Assign this MachineInstr's memory reference descriptor list.
+ /// This does not transfer ownership.
+ void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+ MemRefs = NewMemRefs;
+ NumMemRefs = uint8_t(NewMemRefsEnd - NewMemRefs);
+ assert(NumMemRefs == NewMemRefsEnd - NewMemRefs && "Too many memrefs");
+ }
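+
+  // Sketch of the common cloning pattern, assuming NewMI was created from MI
+  // within the same MachineFunction, so the memrefs can be shared unchanged:
+  //
+  //   NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());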
+
+ /// Clear this MachineInstr's memory reference descriptor list. This resets
+ /// the memrefs to their most conservative state. This should be used only
+ /// as a last resort since it greatly pessimizes our knowledge of the memory
+ /// access performed by the instruction.
+ void dropMemRefs() {
+ MemRefs = nullptr;
+ NumMemRefs = 0;
+ }
+
+ /// Break any tie involving OpIdx.
+ void untieRegOperand(unsigned OpIdx) {
+ MachineOperand &MO = getOperand(OpIdx);
+ if (MO.isReg() && MO.isTied()) {
+ getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0;
+ MO.TiedTo = 0;
+ }
+ }
+
+ /// Add all implicit def and use operands to this instruction.
+ void addImplicitDefUseOperands(MachineFunction &MF);
+
+private:
+ /// If this instruction is embedded into a MachineFunction, return the
+ /// MachineRegisterInfo object for the current function, otherwise
+ /// return null.
+ MachineRegisterInfo *getRegInfo();
+
+ /// Unlink all of the register operands in this instruction from their
+ /// respective use lists. This requires that the operands already be on their
+ /// use lists.
+ void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
+
+  /// Add all of the register operands in this instruction to their
+ /// respective use lists. This requires that the operands not be on their
+ /// use lists yet.
+ void AddRegOperandsToUseLists(MachineRegisterInfo&);
+
+ /// Slow path for hasProperty when we're dealing with a bundle.
+ bool hasPropertyInBundle(unsigned Mask, QueryType Type) const;
+
+  /// \brief Implements the logic of getRegClassConstraintEffectForVReg for
+  /// this MI and the given operand index \p OpIdx.
+  /// If the related operand does not constrain Reg, this returns CurRC.
+ const TargetRegisterClass *getRegClassConstraintEffectForVRegImpl(
+ unsigned OpIdx, unsigned Reg, const TargetRegisterClass *CurRC,
+ const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const;
+};
+
+/// Special DenseMapInfo traits to compare MachineInstr* by *value* of the
+/// instruction rather than by pointer value.
+/// The hashing and equality testing functions ignore definitions so this is
+/// useful for CSE, etc.
+struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
+ static inline MachineInstr *getEmptyKey() {
+ return nullptr;
+ }
+
+ static inline MachineInstr *getTombstoneKey() {
+ return reinterpret_cast<MachineInstr*>(-1);
+ }
+
+ static unsigned getHashValue(const MachineInstr* const &MI);
+
+ static bool isEqual(const MachineInstr* const &LHS,
+ const MachineInstr* const &RHS) {
+ if (RHS == getEmptyKey() || RHS == getTombstoneKey() ||
+ LHS == getEmptyKey() || LHS == getTombstoneKey())
+ return LHS == RHS;
+ return LHS->isIdenticalTo(RHS, MachineInstr::IgnoreVRegDefs);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
+ MI.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
new file mode 100644
index 0000000..aa5f4b2
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -0,0 +1,497 @@
+//===-- CodeGen/MachineInstBuilder.h - Simplify creation of MIs -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes a function named BuildMI, which is useful for dramatically
+// simplifying how MachineInstr's are created. It allows use of code like this:
+//
+// M = BuildMI(X86::ADDrr8, 2).addReg(argVal1).addReg(argVal2);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class MCInstrDesc;
+class MDNode;
+
+namespace RegState {
+ enum {
+ Define = 0x2,
+ Implicit = 0x4,
+ Kill = 0x8,
+ Dead = 0x10,
+ Undef = 0x20,
+ EarlyClobber = 0x40,
+ Debug = 0x80,
+ InternalRead = 0x100,
+ DefineNoRead = Define | Undef,
+ ImplicitDefine = Implicit | Define,
+ ImplicitKill = Implicit | Kill
+ };
+}
+
+class MachineInstrBuilder {
+ MachineFunction *MF;
+ MachineInstr *MI;
+public:
+ MachineInstrBuilder() : MF(nullptr), MI(nullptr) {}
+
+ /// Create a MachineInstrBuilder for manipulating an existing instruction.
+ /// F must be the machine function that was used to allocate I.
+ MachineInstrBuilder(MachineFunction &F, MachineInstr *I) : MF(&F), MI(I) {}
+
+ /// Allow automatic conversion to the machine instruction we are working on.
+ operator MachineInstr*() const { return MI; }
+ MachineInstr *operator->() const { return MI; }
+ operator MachineBasicBlock::iterator() const { return MI; }
+
+ /// If conversion operators fail, use this method to get the MachineInstr
+ /// explicitly.
+ MachineInstr *getInstr() const { return MI; }
+
+ /// Add a new virtual register operand.
+ const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
+ unsigned SubReg = 0) const {
+ assert((flags & 0x1) == 0 &&
+ "Passing in 'true' to addReg is forbidden! Use enums instead.");
+ MI->addOperand(*MF, MachineOperand::CreateReg(RegNo,
+ flags & RegState::Define,
+ flags & RegState::Implicit,
+ flags & RegState::Kill,
+ flags & RegState::Dead,
+ flags & RegState::Undef,
+ flags & RegState::EarlyClobber,
+ SubReg,
+ flags & RegState::Debug,
+ flags & RegState::InternalRead));
+ return *this;
+ }
+
+ /// Add a new immediate operand.
+ const MachineInstrBuilder &addImm(int64_t Val) const {
+ MI->addOperand(*MF, MachineOperand::CreateImm(Val));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addCImm(const ConstantInt *Val) const {
+ MI->addOperand(*MF, MachineOperand::CreateCImm(Val));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
+ MI->addOperand(*MF, MachineOperand::CreateFPImm(Val));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateMBB(MBB, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addFrameIndex(int Idx) const {
+ MI->addOperand(*MF, MachineOperand::CreateFI(Idx));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
+ int Offset = 0,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateCPI(Idx, Offset, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateTargetIndex(Idx, Offset,
+ TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateJTI(Idx, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addGlobalAddress(const GlobalValue *GV,
+ int64_t Offset = 0,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateGA(GV, Offset, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addExternalSymbol(const char *FnName,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateES(FnName, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addBlockAddress(const BlockAddress *BA,
+ int64_t Offset = 0,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateBA(BA, Offset, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addRegMask(const uint32_t *Mask) const {
+ MI->addOperand(*MF, MachineOperand::CreateRegMask(Mask));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addMemOperand(MachineMemOperand *MMO) const {
+ MI->addMemOperand(*MF, MMO);
+ return *this;
+ }
+
+ const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
+ MachineInstr::mmo_iterator e) const {
+ MI->setMemRefs(b, e);
+ return *this;
+ }
+
+ const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
+ MI->addOperand(*MF, MO);
+ return *this;
+ }
+
+ const MachineInstrBuilder &addMetadata(const MDNode *MD) const {
+ MI->addOperand(*MF, MachineOperand::CreateMetadata(MD));
+ assert((MI->isDebugValue() ? static_cast<bool>(MI->getDebugVariable())
+ : true) &&
+ "first MDNode argument of a DBG_VALUE not a variable");
+ return *this;
+ }
+
+ const MachineInstrBuilder &addCFIIndex(unsigned CFIIndex) const {
+ MI->addOperand(*MF, MachineOperand::CreateCFIIndex(CFIIndex));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addSym(MCSymbol *Sym,
+ unsigned char TargetFlags = 0) const {
+ MI->addOperand(*MF, MachineOperand::CreateMCSymbol(Sym, TargetFlags));
+ return *this;
+ }
+
+ const MachineInstrBuilder &setMIFlags(unsigned Flags) const {
+ MI->setFlags(Flags);
+ return *this;
+ }
+
+ const MachineInstrBuilder &setMIFlag(MachineInstr::MIFlag Flag) const {
+ MI->setFlag(Flag);
+ return *this;
+ }
+
+ // Add a displacement from an existing MachineOperand with an added offset.
+ const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off,
+ unsigned char TargetFlags = 0) const {
+ // If caller specifies new TargetFlags then use it, otherwise the
+ // default behavior is to copy the target flags from the existing
+ // MachineOperand. This means if the caller wants to clear the
+ // target flags it needs to do so explicitly.
+ if (0 == TargetFlags)
+ TargetFlags = Disp.getTargetFlags();
+
+ switch (Disp.getType()) {
+ default:
+ llvm_unreachable("Unhandled operand type in addDisp()");
+ case MachineOperand::MO_Immediate:
+ return addImm(Disp.getImm() + off);
+ case MachineOperand::MO_ConstantPoolIndex:
+ return addConstantPoolIndex(Disp.getIndex(), Disp.getOffset() + off,
+ TargetFlags);
+ case MachineOperand::MO_GlobalAddress:
+ return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off,
+ TargetFlags);
+ }
+ }
+
+ /// Copy all the implicit operands from OtherMI onto this one.
+ const MachineInstrBuilder &
+ copyImplicitOps(const MachineInstr *OtherMI) const {
+ MI->copyImplicitOps(*MF, OtherMI);
+ return *this;
+ }
+};
+
+/// Builder interface. Specify how to create the initial instruction itself.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL));
+}
+
+/// This version of the builder sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ return MachineInstrBuilder(MF, MF.CreateMachineInstr(MCID, DL))
+ .addReg(DestReg, RegState::Define);
+}
+
+/// This version of the builder inserts the newly-built instruction before
+/// the given position in the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ MachineFunction &MF = *BB.getParent();
+ MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::instr_iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ MachineFunction &MF = *BB.getParent();
+ MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MF, MI).addReg(DestReg, RegState::Define);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineInstr *I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ if (I->isInsideBundle()) {
+ MachineBasicBlock::instr_iterator MII(I);
+ return BuildMI(BB, MII, DL, MCID, DestReg);
+ }
+
+ MachineBasicBlock::iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID, DestReg);
+}
+
+/// This version of the builder inserts the newly-built instruction before the
+/// given position in the given MachineBasicBlock, and does NOT take a
+/// destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ MachineFunction &MF = *BB.getParent();
+ MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::instr_iterator I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ MachineFunction &MF = *BB.getParent();
+ MachineInstr *MI = MF.CreateMachineInstr(MCID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MF, MI);
+}
+
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineInstr *I,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ if (I->isInsideBundle()) {
+ MachineBasicBlock::instr_iterator MII(I);
+ return BuildMI(BB, MII, DL, MCID);
+ }
+
+ MachineBasicBlock::iterator MII = I;
+ return BuildMI(BB, MII, DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the end
+/// of the given MachineBasicBlock, and does NOT take a destination register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
+ DebugLoc DL,
+ const MCInstrDesc &MCID) {
+ return BuildMI(*BB, BB->end(), DL, MCID);
+}
+
+/// This version of the builder inserts the newly-built instruction at the
+/// end of the given MachineBasicBlock, and sets up the first operand as a
+/// destination virtual register.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
+ DebugLoc DL,
+ const MCInstrDesc &MCID,
+ unsigned DestReg) {
+ return BuildMI(*BB, BB->end(), DL, MCID, DestReg);
+}
+
+/// This version of the builder builds a DBG_VALUE intrinsic
+/// for either a value in a register or a register-indirect+offset
+/// address. The convention is that a DBG_VALUE is indirect iff the
+/// second operand is an immediate.
+inline MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL,
+ const MCInstrDesc &MCID, bool IsIndirect,
+ unsigned Reg, unsigned Offset,
+ const MDNode *Variable, const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
+ "Expected inlined-at fields to agree");
+ if (IsIndirect)
+ return BuildMI(MF, DL, MCID)
+ .addReg(Reg, RegState::Debug)
+ .addImm(Offset)
+ .addMetadata(Variable)
+ .addMetadata(Expr);
+ else {
+ assert(Offset == 0 && "A direct address cannot have an offset.");
+ return BuildMI(MF, DL, MCID)
+ .addReg(Reg, RegState::Debug)
+ .addReg(0U, RegState::Debug)
+ .addMetadata(Variable)
+ .addMetadata(Expr);
+ }
+}
+
+/// This version of the builder builds a DBG_VALUE intrinsic
+/// for either a value in a register or a register-indirect+offset
+/// address and inserts it at position I.
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ const MCInstrDesc &MCID, bool IsIndirect,
+ unsigned Reg, unsigned Offset,
+ const MDNode *Variable, const MDNode *Expr) {
+ assert(isa<DILocalVariable>(Variable) && "not a variable");
+ assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
+ MachineFunction &MF = *BB.getParent();
+ MachineInstr *MI =
+ BuildMI(MF, DL, MCID, IsIndirect, Reg, Offset, Variable, Expr);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MF, MI);
+}
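+
+// A sketch of the usual call pattern, assuming an in-scope TargetInstrInfo
+// *TII, a DebugLoc DL, an insertion point I in MBB, and purely illustrative
+// opcode and registers:
+//
+//   BuildMI(MBB, I, DL, TII->get(X86::ADD32rr), DestReg)
+//       .addReg(SrcA)
+//       .addReg(SrcB, RegState::Kill);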
+
+inline unsigned getDefRegState(bool B) {
+ return B ? RegState::Define : 0;
+}
+inline unsigned getImplRegState(bool B) {
+ return B ? RegState::Implicit : 0;
+}
+inline unsigned getKillRegState(bool B) {
+ return B ? RegState::Kill : 0;
+}
+inline unsigned getDeadRegState(bool B) {
+ return B ? RegState::Dead : 0;
+}
+inline unsigned getUndefRegState(bool B) {
+ return B ? RegState::Undef : 0;
+}
+inline unsigned getInternalReadRegState(bool B) {
+ return B ? RegState::InternalRead : 0;
+}
+inline unsigned getDebugRegState(bool B) {
+ return B ? RegState::Debug : 0;
+}
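+
+// These helpers keep call sites branch-free when a flag is conditional; a
+// sketch from a copyPhysReg-style hook, assuming TII, MBB, I, DL, DestReg,
+// SrcReg, and bool KillSrc are in scope:
+//
+//   BuildMI(MBB, I, DL, TII->get(TargetOpcode::COPY), DestReg)
+//       .addReg(SrcReg, getKillRegState(KillSrc));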
+
+/// Helper class for constructing bundles of MachineInstrs.
+///
+/// MIBundleBuilder can create a bundle from scratch by inserting new
+/// MachineInstrs one at a time, or it can create a bundle from a sequence of
+/// existing MachineInstrs in a basic block.
+class MIBundleBuilder {
+ MachineBasicBlock &MBB;
+ MachineBasicBlock::instr_iterator Begin;
+ MachineBasicBlock::instr_iterator End;
+
+public:
+ /// Create an MIBundleBuilder that inserts instructions into a new bundle in
+ /// BB above the bundle or instruction at Pos.
+ MIBundleBuilder(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator Pos)
+ : MBB(BB), Begin(Pos.getInstrIterator()), End(Begin) {}
+
+ /// Create a bundle from the sequence of instructions between B and E.
+ MIBundleBuilder(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator B,
+ MachineBasicBlock::iterator E)
+ : MBB(BB), Begin(B.getInstrIterator()), End(E.getInstrIterator()) {
+ assert(B != E && "No instructions to bundle");
+ ++B;
+ while (B != E) {
+ MachineInstr *MI = B;
+ ++B;
+ MI->bundleWithPred();
+ }
+ }
+
+ /// Create an MIBundleBuilder representing an existing instruction or bundle
+ /// that has MI as its head.
+ explicit MIBundleBuilder(MachineInstr *MI)
+ : MBB(*MI->getParent()), Begin(MI), End(getBundleEnd(MI)) {}
+
+ /// Return a reference to the basic block containing this bundle.
+ MachineBasicBlock &getMBB() const { return MBB; }
+
+ /// Return true if no instructions have been inserted in this bundle yet.
+ /// Empty bundles aren't representable in a MachineBasicBlock.
+ bool empty() const { return Begin == End; }
+
+ /// Return an iterator to the first bundled instruction.
+ MachineBasicBlock::instr_iterator begin() const { return Begin; }
+
+ /// Return an iterator beyond the last bundled instruction.
+ MachineBasicBlock::instr_iterator end() const { return End; }
+
+ /// Insert MI into this bundle before I which must point to an instruction in
+ /// the bundle, or end().
+ MIBundleBuilder &insert(MachineBasicBlock::instr_iterator I,
+ MachineInstr *MI) {
+ MBB.insert(I, MI);
+ if (I == Begin) {
+ if (!empty())
+ MI->bundleWithSucc();
+ Begin = MI->getIterator();
+ return *this;
+ }
+ if (I == End) {
+ MI->bundleWithPred();
+ return *this;
+ }
+ // MI was inserted in the middle of the bundle, so its neighbors' flags are
+ // already fine. Update MI's bundle flags manually.
+ MI->setFlag(MachineInstr::BundledPred);
+ MI->setFlag(MachineInstr::BundledSucc);
+ return *this;
+ }
+
+ /// Insert MI into MBB by prepending it to the instructions in the bundle.
+ /// MI will become the first instruction in the bundle.
+ MIBundleBuilder &prepend(MachineInstr *MI) {
+ return insert(begin(), MI);
+ }
+
+ /// Insert MI into MBB by appending it to the instructions in the bundle.
+ /// MI will become the last instruction in the bundle.
+ MIBundleBuilder &append(MachineInstr *MI) {
+ return insert(end(), MI);
+ }
+};
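+
+// A sketch of forming a new bundle in front of InsertPos, assuming in-scope
+// MachineBasicBlock &MBB, an iterator InsertPos, and two freshly created,
+// unbundled instructions FirstMI and SecondMI:
+//
+//   MIBundleBuilder Bundler(MBB, InsertPos);
+//   Bundler.append(FirstMI);
+//   Bundler.append(SecondMI);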
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
new file mode 100644
index 0000000..4fbe206
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineInstrBundle.h
@@ -0,0 +1,257 @@
+//===-- CodeGen/MachineInstBundle.h - MI bundle utilities -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utility functions to manipulate machine instruction
+// bundles.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+#define LLVM_CODEGEN_MACHINEINSTRBUNDLE_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+
+namespace llvm {
+
+/// finalizeBundle - Finalize a machine instruction bundle which includes
+/// a sequence of instructions starting from FirstMI to LastMI (exclusive).
+/// This routine adds a BUNDLE instruction to represent the bundle; it adds
+/// IsInternalRead markers to MachineOperands which are defined inside the
+/// bundle, and it copies externally visible defs and uses to the BUNDLE
+/// instruction.
+void finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI,
+ MachineBasicBlock::instr_iterator LastMI);
+
+/// finalizeBundle - Same functionality as the previous finalizeBundle except
+/// the last instruction in the bundle is not provided as an input. This is
+/// used in cases where bundles are pre-determined by marking instructions
+/// with 'InsideBundle' marker. It returns the MBB instruction iterator that
+/// points to the end of the bundle.
+MachineBasicBlock::instr_iterator finalizeBundle(MachineBasicBlock &MBB,
+ MachineBasicBlock::instr_iterator FirstMI);
+
+/// finalizeBundles - Finalize instruction bundles in the specified
+/// MachineFunction. Return true if any bundles are finalized.
+bool finalizeBundles(MachineFunction &MF);
+
+/// getBundleStart - Returns the first instruction in the bundle containing MI.
+///
+inline MachineInstr *getBundleStart(MachineInstr *MI) {
+ MachineBasicBlock::instr_iterator I(MI);
+ while (I->isBundledWithPred())
+ --I;
+ return &*I;
+}
+
+inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
+ MachineBasicBlock::const_instr_iterator I(MI);
+ while (I->isBundledWithPred())
+ --I;
+ return &*I;
+}
+
+/// Return an iterator pointing beyond the bundle containing MI.
+inline MachineBasicBlock::instr_iterator
+getBundleEnd(MachineInstr *MI) {
+ MachineBasicBlock::instr_iterator I(MI);
+ while (I->isBundledWithSucc())
+ ++I;
+ return ++I;
+}
+
+/// Return an iterator pointing beyond the bundle containing MI.
+inline MachineBasicBlock::const_instr_iterator
+getBundleEnd(const MachineInstr *MI) {
+ MachineBasicBlock::const_instr_iterator I(MI);
+ while (I->isBundledWithSucc())
+ ++I;
+ return ++I;
+}
+
+//===----------------------------------------------------------------------===//
+// MachineOperand iterator
+//
+
+/// MachineOperandIteratorBase - Iterator that can visit all operands on a
+/// MachineInstr, or all operands on a bundle of MachineInstrs. This class is
+/// not intended to be used directly, use one of the sub-classes instead.
+///
+/// Intended use:
+///
+/// for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
+/// if (!MIO->isReg())
+/// continue;
+/// ...
+/// }
+///
+class MachineOperandIteratorBase {
+ MachineBasicBlock::instr_iterator InstrI, InstrE;
+ MachineInstr::mop_iterator OpI, OpE;
+
+ // If the operands on InstrI are exhausted, advance InstrI to the next
+ // bundled instruction with operands.
+ void advance() {
+ while (OpI == OpE) {
+ // Don't advance off the basic block, or into a new bundle.
+ if (++InstrI == InstrE || !InstrI->isInsideBundle())
+ break;
+ OpI = InstrI->operands_begin();
+ OpE = InstrI->operands_end();
+ }
+ }
+
+protected:
+ /// MachineOperandIteratorBase - Create an iterator that visits all operands
+ /// on MI, or all operands on every instruction in the bundle containing MI.
+ ///
+ /// @param MI The instruction to examine.
+ /// @param WholeBundle When true, visit all operands on the entire bundle.
+ ///
+ explicit MachineOperandIteratorBase(MachineInstr *MI, bool WholeBundle) {
+ if (WholeBundle) {
+ InstrI = getBundleStart(MI)->getIterator();
+ InstrE = MI->getParent()->instr_end();
+ } else {
+ InstrI = InstrE = MI->getIterator();
+ ++InstrE;
+ }
+ OpI = InstrI->operands_begin();
+ OpE = InstrI->operands_end();
+ if (WholeBundle)
+ advance();
+ }
+
+ MachineOperand &deref() const { return *OpI; }
+
+public:
+ /// isValid - Returns true until all the operands have been visited.
+ bool isValid() const { return OpI != OpE; }
+
+ /// Preincrement. Move to the next operand.
+ void operator++() {
+ assert(isValid() && "Cannot advance MIOperands beyond the last operand");
+ ++OpI;
+ advance();
+ }
+
+ /// getOperandNo - Returns the number of the current operand relative to its
+ /// instruction.
+ ///
+ unsigned getOperandNo() const {
+ return OpI - InstrI->operands_begin();
+ }
+
+ /// VirtRegInfo - Information about a virtual register used by a set of operands.
+ ///
+ struct VirtRegInfo {
+ /// Reads - One of the operands read the virtual register. This does not
+ /// include <undef> or <internal> use operands, see MO::readsReg().
+ bool Reads;
+
+ /// Writes - One of the operands writes the virtual register.
+ bool Writes;
+
+ /// Tied - Uses and defs must use the same register. This can be because of
+ /// a two-address constraint, or there may be a partial redefinition of a
+ /// sub-register.
+ bool Tied;
+ };
+
+ /// Information about how a physical register Reg is used by a set of
+ /// operands.
+ struct PhysRegInfo {
+ /// There is a regmask operand indicating Reg is clobbered.
+ /// \see MachineOperand::CreateRegMask().
+ bool Clobbered;
+
+ /// Reg or one of its aliases is defined. The definition may only cover
+ /// parts of the register.
+ bool Defined;
+ /// Reg or a super-register is defined. The definition covers the full
+ /// register.
+ bool FullyDefined;
+
+ /// Reg or one of its aliases is read. The register may only be read
+ /// partially.
+ bool Read;
+ /// Reg or a super-register is read. The full register is read.
+ bool FullyRead;
+
+ /// Reg is FullyDefined and all defs of reg or an overlapping register are
+ /// dead.
+ bool DeadDef;
+
+ /// There is a use operand of reg or a super-register with kill flag set.
+ bool Killed;
+ };
+
+ /// analyzeVirtReg - Analyze how the current instruction or bundle uses a
+ /// virtual register. This function should not be called after operator++(),
+ /// it expects a fresh iterator.
+ ///
+ /// @param Reg The virtual register to analyze.
+ /// @param Ops When set, this vector will receive an (MI, OpNum) entry for
+ /// each operand referring to Reg.
+ /// @returns A filled-in VirtRegInfo struct.
+ VirtRegInfo analyzeVirtReg(unsigned Reg,
+ SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
+
+ /// analyzePhysReg - Analyze how the current instruction or bundle uses a
+ /// physical register. This function should not be called after operator++(),
+ /// it expects a fresh iterator.
+ ///
+ /// @param Reg The physical register to analyze.
+ /// @returns A filled-in PhysRegInfo struct.
+ PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI);
+};
+
+/// MIOperands - Iterate over operands of a single instruction.
+///
+class MIOperands : public MachineOperandIteratorBase {
+public:
+ MIOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, false) {}
+ MachineOperand &operator* () const { return deref(); }
+ MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIOperands - Iterate over operands of a single const instruction.
+///
+class ConstMIOperands : public MachineOperandIteratorBase {
+public:
+ ConstMIOperands(const MachineInstr *MI)
+ : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), false) {}
+ const MachineOperand &operator* () const { return deref(); }
+ const MachineOperand *operator->() const { return &deref(); }
+};
+
+/// MIBundleOperands - Iterate over all operands in a bundle of machine
+/// instructions.
+///
+class MIBundleOperands : public MachineOperandIteratorBase {
+public:
+ MIBundleOperands(MachineInstr *MI) : MachineOperandIteratorBase(MI, true) {}
+ MachineOperand &operator* () const { return deref(); }
+ MachineOperand *operator->() const { return &deref(); }
+};
+
+/// ConstMIBundleOperands - Iterate over all operands in a const bundle of
+/// machine instructions.
+///
+class ConstMIBundleOperands : public MachineOperandIteratorBase {
+public:
+ ConstMIBundleOperands(const MachineInstr *MI)
+ : MachineOperandIteratorBase(const_cast<MachineInstr*>(MI), true) {}
+ const MachineOperand &operator* () const { return deref(); }
+ const MachineOperand *operator->() const { return &deref(); }
+};
+
+} // End llvm namespace
+
+#endif
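
(Editorial aside — the "intended use" loop documented above, fleshed out as a small sketch: count how many operands in the bundle around MI read a given register. countBundleReads is an illustrative name, not part of the patch; it uses only the iterator and MachineOperand accessors declared in these headers.)

    static unsigned countBundleReads(MachineInstr *MI, unsigned Reg) {
      unsigned Reads = 0;
      for (MIBundleOperands MIO(MI); MIO.isValid(); ++MIO) {
        if (!MIO->isReg())
          continue;                      // skip immediates, symbols, etc.
        if (MIO->getReg() == Reg && MIO->readsReg())
          ++Reads;                       // a genuine read of Reg's old value
      }
      return Reads;
    }
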
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
new file mode 100644
index 0000000..adcd1d0
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -0,0 +1,130 @@
+//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MachineJumpTableInfo class keeps track of jump tables referenced by
+// lowered switch instructions in the MachineFunction.
+//
+// Instructions reference the address of these jump tables through the use of
+// MO_JumpTableIndex values. When emitting assembly or machine code, these
+// virtual address references are converted to refer to the address of the
+// function jump tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class DataLayout;
+class raw_ostream;
+
+/// MachineJumpTableEntry - One jump table in the jump table info.
+///
+struct MachineJumpTableEntry {
+ /// MBBs - The vector of basic blocks from which to create the jump table.
+ std::vector<MachineBasicBlock*> MBBs;
+
+ explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
+ : MBBs(M) {}
+};
+
+class MachineJumpTableInfo {
+public:
+ /// JTEntryKind - This enum indicates how each entry of the jump table is
+ /// represented and emitted.
+ enum JTEntryKind {
+ /// EK_BlockAddress - Each entry is a plain address of a block, e.g.:
+ /// .word LBB123
+ EK_BlockAddress,
+
+ /// EK_GPRel64BlockAddress - Each entry is an address of a block, encoded
+ /// with a relocation as gp-relative, e.g.:
+ /// .gpdword LBB123
+ EK_GPRel64BlockAddress,
+
+ /// EK_GPRel32BlockAddress - Each entry is an address of a block, encoded
+ /// with a relocation as gp-relative, e.g.:
+ /// .gprel32 LBB123
+ EK_GPRel32BlockAddress,
+
+ /// EK_LabelDifference32 - Each entry is the address of the block minus
+ /// the address of the jump table. This is used for PIC jump tables where
+ /// gprel32 is not supported. e.g.:
+ /// .word LBB123 - LJTI1_2
+ /// If the .set directive is supported, this is emitted as:
+ /// .set L4_5_set_123, LBB123 - LJTI1_2
+ /// .word L4_5_set_123
+ EK_LabelDifference32,
+
+ /// EK_Inline - Jump table entries are emitted inline at their point of
+ /// use. It is the responsibility of the target to emit the entries.
+ EK_Inline,
+
+ /// EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the
+ /// TargetLowering::LowerCustomJumpTableEntry hook.
+ EK_Custom32
+ };
+private:
+ JTEntryKind EntryKind;
+ std::vector<MachineJumpTableEntry> JumpTables;
+public:
+ explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
+
+ JTEntryKind getEntryKind() const { return EntryKind; }
+
+ /// getEntrySize - Return the size of each entry in the jump table.
+ unsigned getEntrySize(const DataLayout &TD) const;
+ /// getEntryAlignment - Return the alignment of each entry in the jump table.
+ unsigned getEntryAlignment(const DataLayout &TD) const;
+
+ /// createJumpTableIndex - Create a new jump table.
+ ///
+ unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
+
+ /// isEmpty - Return true if there are no jump tables.
+ ///
+ bool isEmpty() const { return JumpTables.empty(); }
+
+ const std::vector<MachineJumpTableEntry> &getJumpTables() const {
+ return JumpTables;
+ }
+
+ /// RemoveJumpTable - Mark the specified index as dead. This will
+ /// prevent it from being emitted.
+ void RemoveJumpTable(unsigned Idx) {
+ JumpTables[Idx].MBBs.clear();
+ }
+
+ /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
+ /// the jump tables to branch to New instead.
+ bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+ /// ReplaceMBBInJumpTable - If Old is a target of the jump tables, update
+ /// the jump table to branch to New instead.
+ bool ReplaceMBBInJumpTable(unsigned Idx, MachineBasicBlock *Old,
+ MachineBasicBlock *New);
+
+ /// print - Used by the MachineFunction printer to print information about
+ /// jump tables. Implemented in MachineFunction.cpp
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// dump - Print to stderr.
+ ///
+ void dump() const;
+};
+
+} // End llvm namespace
+
+#endif
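
(Editorial aside — an illustrative use of the interface above while lowering a switch; MF, Dests, OldBB, and NewBB are assumed to come from the surrounding lowering code, and getOrCreateJumpTableInfo() is assumed to be the MachineFunction accessor that owns this object.)

    MachineJumpTableInfo *JTI =
        MF.getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_LabelDifference32);
    // One entry per switch destination; instructions then refer to the table
    // through an MO_JumpTableIndex operand carrying JTIdx.
    unsigned JTIdx = JTI->createJumpTableIndex(Dests);
    // If a destination block is later merged away, repoint the table:
    JTI->ReplaceMBBInJumpTable(JTIdx, OldBB, NewBB);
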
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
new file mode 100644
index 0000000..4868b73
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineLoopInfo.h
@@ -0,0 +1,186 @@
+//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineLoopInfo class that is used to identify natural
+// loops and determine the loop depth of various nodes of the CFG. Note that
+// natural loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function. For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+// * whether there is a preheader for the loop
+// * the number of back edges to the header
+// * whether or not a particular block branches out of the loop
+// * the successor blocks of the loop
+// * the loop depth
+// * the trip count
+// * etc...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINELOOPINFO_H
+#define LLVM_CODEGEN_MACHINELOOPINFO_H
+
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+// Implementation in LoopInfoImpl.h
+class MachineLoop;
+extern template class LoopBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
+public:
+ MachineLoop();
+
+ /// getTopBlock - Return the "top" block in the loop, which is the first
+ /// block in the linear layout, ignoring any parts of the loop not
+ /// contiguous with the part that contains the header.
+ MachineBasicBlock *getTopBlock();
+
+ /// getBottomBlock - Return the "bottom" block in the loop, which is the last
+ /// block in the linear layout, ignoring any parts of the loop not
+ /// contiguous with the part that contains the header.
+ MachineBasicBlock *getBottomBlock();
+
+ void dump() const;
+
+private:
+ friend class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+ explicit MachineLoop(MachineBasicBlock *MBB)
+ : LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
+};
+
+// Implementation in LoopInfoImpl.h
+extern template class LoopInfoBase<MachineBasicBlock, MachineLoop>;
+
+class MachineLoopInfo : public MachineFunctionPass {
+ LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
+ friend class LoopBase<MachineBasicBlock, MachineLoop>;
+
+ void operator=(const MachineLoopInfo &) = delete;
+ MachineLoopInfo(const MachineLoopInfo &) = delete;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ MachineLoopInfo() : MachineFunctionPass(ID) {
+ initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
+ }
+
+ LoopInfoBase<MachineBasicBlock, MachineLoop>& getBase() { return LI; }
+
+ /// iterator/begin/end - The interface to the top-level loops in the current
+ /// function.
+ ///
+ typedef LoopInfoBase<MachineBasicBlock, MachineLoop>::iterator iterator;
+ inline iterator begin() const { return LI.begin(); }
+ inline iterator end() const { return LI.end(); }
+ bool empty() const { return LI.empty(); }
+
+ /// getLoopFor - Return the innermost loop that BB lives in. If a basic
+ /// block is in no loop (for example the entry node), null is returned.
+ ///
+ inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
+ return LI.getLoopFor(BB);
+ }
+
+ /// operator[] - same as getLoopFor...
+ ///
+ inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
+ return LI.getLoopFor(BB);
+ }
+
+ /// getLoopDepth - Return the loop nesting level of the specified block...
+ ///
+ inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
+ return LI.getLoopDepth(BB);
+ }
+
+ // isLoopHeader - True if the block is a loop header node
+ inline bool isLoopHeader(const MachineBasicBlock *BB) const {
+ return LI.isLoopHeader(BB);
+ }
+
+ /// runOnMachineFunction - Calculate the natural loop information.
+ ///
+ bool runOnMachineFunction(MachineFunction &F) override;
+
+ void releaseMemory() override { LI.releaseMemory(); }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ /// removeLoop - This removes the specified top-level loop from this loop info
+ /// object. The loop is not deleted, as it will presumably be inserted into
+ /// another loop.
+ inline MachineLoop *removeLoop(iterator I) { return LI.removeLoop(I); }
+
+ /// changeLoopFor - Change the top-level loop that contains BB to the
+ /// specified loop. This should be used by transformations that restructure
+ /// the loop hierarchy tree.
+ inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
+ LI.changeLoopFor(BB, L);
+ }
+
+ /// changeTopLevelLoop - Replace the specified loop in the top-level loops
+ /// list with the indicated loop.
+ inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
+ LI.changeTopLevelLoop(OldLoop, NewLoop);
+ }
+
+ /// addTopLevelLoop - This adds the specified loop to the collection of
+ /// top-level loops.
+ inline void addTopLevelLoop(MachineLoop *New) {
+ LI.addTopLevelLoop(New);
+ }
+
+ /// removeBlock - This method completely removes BB from all data structures,
+ /// including all of the Loop objects it is nested in and our mapping from
+ /// MachineBasicBlocks to loops.
+ void removeBlock(MachineBasicBlock *BB) {
+ LI.removeBlock(BB);
+ }
+};
+
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const MachineLoop*> {
+ typedef const MachineLoop NodeType;
+ typedef MachineLoopInfo::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const MachineLoop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
+
+template <> struct GraphTraits<MachineLoop*> {
+ typedef MachineLoop NodeType;
+ typedef MachineLoopInfo::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(MachineLoop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
+
+} // End llvm namespace
+
+#endif
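
(Editorial aside — a sketch of typical client code for this analysis: a helper that counts the blocks of MF sitting inside some natural loop. MLI is assumed to come from getAnalysis<MachineLoopInfo>() in a pass that declared AU.addRequired<MachineLoopInfo>() in its getAnalysisUsage(); countLoopBlocks is an illustrative name.)

    static unsigned countLoopBlocks(MachineFunction &MF,
                                    const MachineLoopInfo &MLI) {
      unsigned N = 0;
      for (MachineBasicBlock &MBB : MF)
        if (MLI.getLoopFor(&MBB))   // null for blocks outside every loop
          ++N;
      return N;
    }
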
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
new file mode 100644
index 0000000..1ca0d90
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineMemOperand.h
@@ -0,0 +1,238 @@
+//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineMemOperand class, which is a
+// description of a memory reference. It is used to help track dependencies
+// in the backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
+#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Value.h" // PointerLikeTypeTraits<Value*>
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class FoldingSetNodeID;
+class MDNode;
+class raw_ostream;
+class MachineFunction;
+class ModuleSlotTracker;
+
+/// MachinePointerInfo - This class contains a discriminated union of
+/// information about pointers in memory operands, relating them back to LLVM IR
+/// or to virtual locations (such as frame indices) that are exposed during
+/// codegen.
+struct MachinePointerInfo {
+ /// V - This is the IR pointer value for the access, or it is null if unknown.
+ /// If this is null, then the access is to a pointer in the default address
+ /// space.
+ PointerUnion<const Value *, const PseudoSourceValue *> V;
+
+ /// Offset - This is an offset from the base Value*.
+ int64_t Offset;
+
+ explicit MachinePointerInfo(const Value *v = nullptr, int64_t offset = 0)
+ : V(v), Offset(offset) {}
+
+ explicit MachinePointerInfo(const PseudoSourceValue *v,
+ int64_t offset = 0)
+ : V(v), Offset(offset) {}
+
+ MachinePointerInfo getWithOffset(int64_t O) const {
+ if (V.isNull()) return MachinePointerInfo();
+ if (V.is<const Value*>())
+ return MachinePointerInfo(V.get<const Value*>(), Offset+O);
+ return MachinePointerInfo(V.get<const PseudoSourceValue*>(), Offset+O);
+ }
+
+ /// getAddrSpace - Return the LLVM IR address space number that this pointer
+ /// points into.
+ unsigned getAddrSpace() const;
+
+ /// getConstantPool - Return a MachinePointerInfo record that refers to the
+ /// constant pool.
+ static MachinePointerInfo getConstantPool(MachineFunction &MF);
+
+ /// getFixedStack - Return a MachinePointerInfo record that refers to the
+ /// specified FrameIndex.
+ static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI,
+ int64_t Offset = 0);
+
+ /// getJumpTable - Return a MachinePointerInfo record that refers to a
+ /// jump table entry.
+ static MachinePointerInfo getJumpTable(MachineFunction &MF);
+
+ /// getGOT - Return a MachinePointerInfo record that refers to a
+ /// GOT entry.
+ static MachinePointerInfo getGOT(MachineFunction &MF);
+
+ /// getStack - stack pointer relative access.
+ static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset);
+};
+
+
+//===----------------------------------------------------------------------===//
+/// MachineMemOperand - A description of a memory reference used in the backend.
+/// Instead of holding a StoreInst or LoadInst, this class holds the address
+/// Value of the reference along with a byte size and offset. This allows it
+/// to describe lowered loads and stores. Also, the special PseudoSourceValue
+/// objects can be used to represent loads and stores to memory locations
+/// that aren't explicit in the regular LLVM IR.
+///
+class MachineMemOperand {
+ MachinePointerInfo PtrInfo;
+ uint64_t Size;
+ unsigned Flags;
+ AAMDNodes AAInfo;
+ const MDNode *Ranges;
+
+public:
+ /// Flags values. These may be or'd together.
+ enum MemOperandFlags {
+ /// The memory access reads data.
+ MOLoad = 1,
+ /// The memory access writes data.
+ MOStore = 2,
+ /// The memory access is volatile.
+ MOVolatile = 4,
+ /// The memory access is non-temporal.
+ MONonTemporal = 8,
+ /// The memory access is invariant.
+ MOInvariant = 16,
+ // Target hints allow target passes to annotate memory operations.
+ MOTargetStartBit = 5,
+ MOTargetNumBits = 3,
+ // This is the number of bits we need to represent flags.
+ MOMaxBits = 8
+ };
+
+ /// MachineMemOperand - Construct a MachineMemOperand object with the
+ /// specified PtrInfo, flags, size, and base alignment.
+ MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
+ unsigned base_alignment,
+ const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr);
+
+ const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
+
+ /// getValue - Return the base address of the memory access. This may either
+ /// be a normal LLVM IR Value, or one of the special values used in CodeGen.
+ /// Special values are those obtained via
+ /// PseudoSourceValue::getFixedStack(int), PseudoSourceValue::getStack, and
+ /// other PseudoSourceValue member functions that return objects standing
+ /// for frame/stack pointer relative references and other special references
+ /// which are not representable in the high-level IR.
+ const Value *getValue() const { return PtrInfo.V.dyn_cast<const Value*>(); }
+
+ const PseudoSourceValue *getPseudoValue() const {
+ return PtrInfo.V.dyn_cast<const PseudoSourceValue*>();
+ }
+
+ const void *getOpaqueValue() const { return PtrInfo.V.getOpaqueValue(); }
+
+ /// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
+ unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }
+
+ /// Bitwise OR the current flags with the given flags.
+ void setFlags(unsigned f) { Flags |= (f & ((1 << MOMaxBits) - 1)); }
+
+ /// getOffset - For normal values, this is a byte offset added to the base
+ /// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
+ /// number.
+ int64_t getOffset() const { return PtrInfo.Offset; }
+
+ unsigned getAddrSpace() const { return PtrInfo.getAddrSpace(); }
+
+ /// getSize - Return the size in bytes of the memory reference.
+ uint64_t getSize() const { return Size; }
+
+ /// getAlignment - Return the minimum known alignment in bytes of the
+ /// actual memory reference.
+ uint64_t getAlignment() const;
+
+ /// getBaseAlignment - Return the minimum known alignment in bytes of the
+ /// base address, without the offset.
+ uint64_t getBaseAlignment() const { return (1u << (Flags >> MOMaxBits)) >> 1; }
+
+ /// getAAInfo - Return the AA tags for the memory reference.
+ AAMDNodes getAAInfo() const { return AAInfo; }
+
+ /// getRanges - Return the range tag for the memory reference.
+ const MDNode *getRanges() const { return Ranges; }
+
+ bool isLoad() const { return Flags & MOLoad; }
+ bool isStore() const { return Flags & MOStore; }
+ bool isVolatile() const { return Flags & MOVolatile; }
+ bool isNonTemporal() const { return Flags & MONonTemporal; }
+ bool isInvariant() const { return Flags & MOInvariant; }
+
+ /// isUnordered - Returns true if this memory operation doesn't have any
+ /// ordering constraints other than normal aliasing. Volatile and atomic
+ /// memory operations can't be reordered.
+ ///
+ /// Currently, we don't model the difference between volatile and atomic
+ /// operations. They should retain their ordering relative to all memory
+ /// operations.
+ bool isUnordered() const { return !isVolatile(); }
+
+ /// refineAlignment - Update this MachineMemOperand to reflect the alignment
+ /// of MMO, if it has a greater alignment. This must only be used when the
+ /// new alignment applies to all users of this MachineMemOperand.
+ void refineAlignment(const MachineMemOperand *MMO);
+
+ /// setValue - Change the SourceValue for this MachineMemOperand. This
+ /// should only be used when an object is being relocated and all references
+ /// to it are being updated.
+ void setValue(const Value *NewSV) { PtrInfo.V = NewSV; }
+ void setValue(const PseudoSourceValue *NewSV) { PtrInfo.V = NewSV; }
+ void setOffset(int64_t NewOffset) { PtrInfo.Offset = NewOffset; }
+
+ /// Profile - Gather unique data for the object.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// Support for operator<<.
+ /// @{
+ void print(raw_ostream &OS) const;
+ void print(raw_ostream &OS, ModuleSlotTracker &MST) const;
+ /// @}
+
+ friend bool operator==(const MachineMemOperand &LHS,
+ const MachineMemOperand &RHS) {
+ return LHS.getValue() == RHS.getValue() &&
+ LHS.getPseudoValue() == RHS.getPseudoValue() &&
+ LHS.getSize() == RHS.getSize() &&
+ LHS.getOffset() == RHS.getOffset() &&
+ LHS.getFlags() == RHS.getFlags() &&
+ LHS.getAAInfo() == RHS.getAAInfo() &&
+ LHS.getRanges() == RHS.getRanges() &&
+ LHS.getAlignment() == RHS.getAlignment() &&
+ LHS.getAddrSpace() == RHS.getAddrSpace();
+ }
+
+ friend bool operator!=(const MachineMemOperand &LHS,
+ const MachineMemOperand &RHS) {
+ return !(LHS == RHS);
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MRO) {
+ MRO.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
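
(Editorial aside — a sketch of how a target typically materializes one of these while lowering a fixed-stack load; MF and FI are assumed context, the 4-byte size and alignment are example values, and getMachineMemOperand() is assumed to be the MachineFunction factory that allocates MMOs.)

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI),
        MachineMemOperand::MOLoad, /*Size=*/4, /*BaseAlignment=*/4);
    // Flags may be OR'd together, e.g. MOLoad | MOVolatile for a volatile
    // read; isUnordered() below would then report false.
    assert(MMO->isLoad() && MMO->isUnordered());
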
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
new file mode 100644
index 0000000..7757112
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfo.h
@@ -0,0 +1,433 @@
+//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect meta information for a module. This information should be in a
+// neutral form that can be used by different debugging and exception handling
+// schemes.
+//
+// The organization of information is primarily clustered around the source
+// compile units. The main exception is source line correspondence where
+// inlining may interleave code from various compile units.
+//
+// The following information can be retrieved from the MachineModuleInfo.
+//
+// -- Source directories - Directories are uniqued based on their canonical
+// string and assigned a sequential numeric ID (base 1.)
+// -- Source files - Files are also uniqued based on their name and directory
+// ID. A file ID is sequential number (base 1.)
+// -- Source line correspondence - A vector of file ID, line#, column# triples.
+// A DEBUG_LOCATION instruction is generated by the DAG Legalizer
+// corresponding to each entry in the source line list. This allows a debug
+// emitter to generate labels referenced by debug information tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
+#define LLVM_CODEGEN_MACHINEMODULEINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MachineLocation.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Dwarf.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Forward declarations.
+class Constant;
+class GlobalVariable;
+class BlockAddress;
+class MDNode;
+class MMIAddrLabelMap;
+class MachineBasicBlock;
+class MachineFunction;
+class Module;
+class PointerType;
+class StructType;
+
+struct SEHHandler {
+ // Filter or finally function. Null indicates a catch-all.
+ const Function *FilterOrFinally;
+
+ // Address of block to recover at. Null for a finally handler.
+ const BlockAddress *RecoverBA;
+};
+
+//===----------------------------------------------------------------------===//
+/// LandingPadInfo - This structure is used to retain landing pad info for
+/// the current function.
+///
+struct LandingPadInfo {
+ MachineBasicBlock *LandingPadBlock; // Landing pad block.
+ SmallVector<MCSymbol *, 1> BeginLabels; // Labels prior to invoke.
+ SmallVector<MCSymbol *, 1> EndLabels; // Labels after invoke.
+ SmallVector<SEHHandler, 1> SEHHandlers; // SEH handlers active at this lpad.
+ MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
+ std::vector<int> TypeIds; // List of type ids (filters negative).
+
+ explicit LandingPadInfo(MachineBasicBlock *MBB)
+ : LandingPadBlock(MBB), LandingPadLabel(nullptr) {}
+};
+
+//===----------------------------------------------------------------------===//
+/// MachineModuleInfoImpl - This class can be derived from and used by targets
+/// to hold private target-specific information for each Module. Objects of
+/// this type are accessed/created with MMI::getInfo and destroyed when the
+/// MachineModuleInfo is destroyed.
+///
+class MachineModuleInfoImpl {
+public:
+ typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
+ virtual ~MachineModuleInfoImpl();
+ typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
+protected:
+
+ /// Return the entries from a DenseMap in a deterministic sorted order.
+ /// Clears the map.
+ static SymbolListTy getSortedStubs(DenseMap<MCSymbol*, StubValueTy>&);
+};
+
+//===----------------------------------------------------------------------===//
+/// MachineModuleInfo - This class contains meta information specific to a
+/// module. Queries can be made by different debugging and exception handling
+/// schemes and reformatted for specific use.
+///
+class MachineModuleInfo : public ImmutablePass {
+ /// Context - This is the MCContext used for the entire code generator.
+ MCContext Context;
+
+ /// TheModule - This is the LLVM Module being worked on.
+ const Module *TheModule;
+
+ /// ObjFileMMI - This is the object-file-format-specific implementation of
+ /// MachineModuleInfoImpl, which lets targets accumulate whatever info they
+ /// want.
+ MachineModuleInfoImpl *ObjFileMMI;
+
+ /// List of moves done by a function's prolog. Used to construct frame maps
+ /// by debug and exception handling consumers.
+ std::vector<MCCFIInstruction> FrameInstructions;
+
+ /// LandingPads - List of LandingPadInfo describing the landing pad
+ /// information in the current function.
+ std::vector<LandingPadInfo> LandingPads;
+
+ /// LPadToCallSiteMap - Map a landing pad's EH symbol to the call site
+ /// indexes.
+ DenseMap<MCSymbol*, SmallVector<unsigned, 4> > LPadToCallSiteMap;
+
+ /// CallSiteMap - Map of invoke call site index values to associated begin
+ /// EH_LABEL for the current function.
+ DenseMap<MCSymbol*, unsigned> CallSiteMap;
+
+ /// CurCallSite - The current call site index being processed, if any. 0 if
+ /// none.
+ unsigned CurCallSite;
+
+ /// TypeInfos - List of C++ TypeInfo used in the current function.
+ std::vector<const GlobalValue *> TypeInfos;
+
+ /// FilterIds - List of typeids encoding filters used in the current function.
+ std::vector<unsigned> FilterIds;
+
+ /// FilterEnds - List of the indices in FilterIds corresponding to filter
+ /// terminators.
+ std::vector<unsigned> FilterEnds;
+
+ /// Personalities - Vector of all personality functions ever seen. Used to
+ /// emit common EH frames.
+ std::vector<const Function *> Personalities;
+
+ /// AddrLabelSymbols - This map keeps track of which symbol is being used for
+ /// the specified basic block's address of label.
+ MMIAddrLabelMap *AddrLabelSymbols;
+
+ bool CallsEHReturn;
+ bool CallsUnwindInit;
+ bool HasEHFunclets;
+
+ // TODO: Ideally, what we'd like is to have a switch that allows emitting
+ // synchronous (precise at call-sites only) CFA into .eh_frame. However,
+ // even under this switch, we'd like .debug_frame to be precise when using
+ // -g. At this moment, there's no way to specify that some CFI directives
+ // go into .eh_frame only, while others go into .debug_frame only.
+
+ /// DbgInfoAvailable - True if debugging information is available
+ /// in this module.
+ bool DbgInfoAvailable;
+
+ /// UsesVAFloatArgument - True if this module calls a VarArg function with
+ /// floating-point arguments. This is used to emit an undefined reference
+ /// to _fltused on Windows targets.
+ bool UsesVAFloatArgument;
+
+ /// UsesMorestackAddr - True if the module calls the __morestack function
+ /// indirectly, as is required under the large code model on x86. This is used
+ /// to emit a definition of a symbol, __morestack_addr, containing the
+ /// address. See comments in lib/Target/X86/X86FrameLowering.cpp for more
+ /// details.
+ bool UsesMorestackAddr;
+
+ EHPersonality PersonalityTypeCache;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ struct VariableDbgInfo {
+ const DILocalVariable *Var;
+ const DIExpression *Expr;
+ unsigned Slot;
+ const DILocation *Loc;
+
+ VariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+ unsigned Slot, const DILocation *Loc)
+ : Var(Var), Expr(Expr), Slot(Slot), Loc(Loc) {}
+ };
+ typedef SmallVector<VariableDbgInfo, 4> VariableDbgInfoMapTy;
+ VariableDbgInfoMapTy VariableDbgInfos;
+
+ MachineModuleInfo(); // DUMMY CONSTRUCTOR, DO NOT CALL.
+ // Real constructor.
+ MachineModuleInfo(const MCAsmInfo &MAI, const MCRegisterInfo &MRI,
+ const MCObjectFileInfo *MOFI);
+ ~MachineModuleInfo() override;
+
+ // Initialization and Finalization
+ bool doInitialization(Module &) override;
+ bool doFinalization(Module &) override;
+
+ /// EndFunction - Discard function meta information.
+ ///
+ void EndFunction();
+
+ const MCContext &getContext() const { return Context; }
+ MCContext &getContext() { return Context; }
+
+ void setModule(const Module *M) { TheModule = M; }
+ const Module *getModule() const { return TheModule; }
+
+ /// getObjFileInfo - Keep track of various per-module pieces of information
+ /// for backends that would like to do so.
+ ///
+ template<typename Ty>
+ Ty &getObjFileInfo() {
+ if (ObjFileMMI == nullptr)
+ ObjFileMMI = new Ty(*this);
+ return *static_cast<Ty*>(ObjFileMMI);
+ }
+
+ template<typename Ty>
+ const Ty &getObjFileInfo() const {
+ return const_cast<MachineModuleInfo*>(this)->getObjFileInfo<Ty>();
+ }
+
+ /// hasDebugInfo - Returns true if valid debug info is present.
+ ///
+ bool hasDebugInfo() const { return DbgInfoAvailable; }
+ void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
+
+ bool callsEHReturn() const { return CallsEHReturn; }
+ void setCallsEHReturn(bool b) { CallsEHReturn = b; }
+
+ bool callsUnwindInit() const { return CallsUnwindInit; }
+ void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+
+ bool hasEHFunclets() const { return HasEHFunclets; }
+ void setHasEHFunclets(bool V) { HasEHFunclets = V; }
+
+ bool usesVAFloatArgument() const {
+ return UsesVAFloatArgument;
+ }
+
+ void setUsesVAFloatArgument(bool b) {
+ UsesVAFloatArgument = b;
+ }
+
+ bool usesMorestackAddr() const {
+ return UsesMorestackAddr;
+ }
+
+ void setUsesMorestackAddr(bool b) {
+ UsesMorestackAddr = b;
+ }
+
+ /// \brief Returns a reference to a list of CFI instructions in the current
+ /// function's prologue. Used to construct frame maps for debug and exception
+ /// handling consumers.
+ const std::vector<MCCFIInstruction> &getFrameInstructions() const {
+ return FrameInstructions;
+ }
+
+ unsigned LLVM_ATTRIBUTE_UNUSED_RESULT
+ addFrameInst(const MCCFIInstruction &Inst) {
+ FrameInstructions.push_back(Inst);
+ return FrameInstructions.size() - 1;
+ }
+
+ /// getAddrLabelSymbol - Return the symbol to be used for the specified basic
+ /// block when its address is taken. This cannot be its normal LBB label
+ /// because the block may be accessed outside its containing function.
+ MCSymbol *getAddrLabelSymbol(const BasicBlock *BB) {
+ return getAddrLabelSymbolToEmit(BB).front();
+ }
+
+ /// getAddrLabelSymbolToEmit - Return the symbol to be used for the specified
+ /// basic block when its address is taken. If other blocks were RAUW'd to
+ /// this one, we may have to emit them as well; return the whole set.
+ ArrayRef<MCSymbol *> getAddrLabelSymbolToEmit(const BasicBlock *BB);
+
+ /// takeDeletedSymbolsForFunction - If the specified function has had any
+ /// references to address-taken blocks generated, but the block got deleted,
+ /// return the symbol now so we can emit it. This prevents emitting a
+ /// reference to a symbol that has no definition.
+ void takeDeletedSymbolsForFunction(const Function *F,
+ std::vector<MCSymbol*> &Result);
+
+
+ //===- EH ---------------------------------------------------------------===//
+
+ /// getOrCreateLandingPadInfo - Find or create a LandingPadInfo for the
+ /// specified MachineBasicBlock.
+ LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
+
+ /// addInvoke - Provide the begin and end labels of an invoke style call and
+ /// associate it with a try landing pad block.
+ void addInvoke(MachineBasicBlock *LandingPad,
+ MCSymbol *BeginLabel, MCSymbol *EndLabel);
+
+ /// addLandingPad - Add a new landing pad. Returns the label ID for the
+ /// landing pad entry.
+ MCSymbol *addLandingPad(MachineBasicBlock *LandingPad);
+
+ /// addPersonality - Provide the personality function for the exception
+ /// information.
+ void addPersonality(const Function *Personality);
+
+ /// getPersonalities - Return array of personality functions ever seen.
+ const std::vector<const Function *>& getPersonalities() const {
+ return Personalities;
+ }
+
+ /// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
+ ///
+ void addCatchTypeInfo(MachineBasicBlock *LandingPad,
+ ArrayRef<const GlobalValue *> TyInfo);
+
+ /// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
+ ///
+ void addFilterTypeInfo(MachineBasicBlock *LandingPad,
+ ArrayRef<const GlobalValue *> TyInfo);
+
+ /// addCleanup - Add a cleanup action for a landing pad.
+ ///
+ void addCleanup(MachineBasicBlock *LandingPad);
+
+ void addSEHCatchHandler(MachineBasicBlock *LandingPad, const Function *Filter,
+ const BlockAddress *RecoverLabel);
+
+ void addSEHCleanupHandler(MachineBasicBlock *LandingPad,
+ const Function *Cleanup);
+
+ /// getTypeIDFor - Return the type id for the specified typeinfo. This is
+ /// function wide.
+ unsigned getTypeIDFor(const GlobalValue *TI);
+
+ /// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
+ /// function wide.
+ int getFilterIDFor(std::vector<unsigned> &TyIds);
+
+ /// TidyLandingPads - Remap landing pad labels and remove any deleted landing
+ /// pads.
+ void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
+
+ /// getLandingPads - Return a reference to the landing pad info for the
+ /// current function.
+ const std::vector<LandingPadInfo> &getLandingPads() const {
+ return LandingPads;
+ }
+
+ /// setCallSiteLandingPad - Map the landing pad's EH symbol to the call
+ /// site indexes.
+ void setCallSiteLandingPad(MCSymbol *Sym, ArrayRef<unsigned> Sites);
+
+ /// getCallSiteLandingPad - Get the call site indexes for a landing pad EH
+ /// symbol.
+ SmallVectorImpl<unsigned> &getCallSiteLandingPad(MCSymbol *Sym) {
+ assert(hasCallSiteLandingPad(Sym) &&
+ "missing call site number for landing pad!");
+ return LPadToCallSiteMap[Sym];
+ }
+
+ /// hasCallSiteLandingPad - Return true if the landing pad EH symbol has an
+ /// associated call site.
+ bool hasCallSiteLandingPad(MCSymbol *Sym) {
+ return !LPadToCallSiteMap[Sym].empty();
+ }
+
+ /// setCallSiteBeginLabel - Map the begin label for a call site.
+ void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site) {
+ CallSiteMap[BeginLabel] = Site;
+ }
+
+ /// getCallSiteBeginLabel - Get the call site number for a begin label.
+ unsigned getCallSiteBeginLabel(MCSymbol *BeginLabel) {
+ assert(hasCallSiteBeginLabel(BeginLabel) &&
+ "Missing call site number for EH_LABEL!");
+ return CallSiteMap[BeginLabel];
+ }
+
+ /// hasCallSiteBeginLabel - Return true if the begin label has a call site
+ /// number associated with it.
+ bool hasCallSiteBeginLabel(MCSymbol *BeginLabel) {
+ return CallSiteMap[BeginLabel] != 0;
+ }
+
+ /// setCurrentCallSite - Set the call site currently being processed.
+ void setCurrentCallSite(unsigned Site) { CurCallSite = Site; }
+
+ /// getCurrentCallSite - Get the call site currently being processed, if any.
+ /// Return zero if none.
+ unsigned getCurrentCallSite() { return CurCallSite; }
+
+ /// getTypeInfos - Return a reference to the C++ typeinfo for the current
+ /// function.
+ const std::vector<const GlobalValue *> &getTypeInfos() const {
+ return TypeInfos;
+ }
+
+ /// getFilterIds - Return a reference to the typeids encoding filters used in
+ /// the current function.
+ const std::vector<unsigned> &getFilterIds() const {
+ return FilterIds;
+ }
+
+ /// setVariableDbgInfo - Collect information used to emit debugging
+ /// information of a variable.
+ void setVariableDbgInfo(const DILocalVariable *Var, const DIExpression *Expr,
+ unsigned Slot, const DILocation *Loc) {
+ VariableDbgInfos.emplace_back(Var, Expr, Slot, Loc);
+ }
+
+ VariableDbgInfoMapTy &getVariableDbgInfo() { return VariableDbgInfos; }
+
+}; // End class MachineModuleInfo
+
+} // End llvm namespace
+
+#endif
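
(Editorial aside — the call-site bookkeeping above in miniature, roughly as EH lowering might wire an invoke's begin label to its call-site index; MMI and BeginLabel are assumed context, and only accessors declared in this header are used.)

    unsigned Site = MMI.getCurrentCallSite();      // 0 means "outside any site"
    if (Site)
      MMI.setCallSiteBeginLabel(BeginLabel, Site); // map this EH_LABEL to it
    MMI.setCurrentCallSite(0);                     // the site has been consumed
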
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
new file mode 100644
index 0000000..e747214
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineModuleInfoImpls.h
@@ -0,0 +1,89 @@
+//===-- llvm/CodeGen/MachineModuleInfoImpls.h -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines object-file format specific implementations of
+// MachineModuleInfoImpl.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+#define LLVM_CODEGEN_MACHINEMODULEINFOIMPLS_H
+
+#include "llvm/CodeGen/MachineModuleInfo.h"
+
+namespace llvm {
+class MCSymbol;
+
+/// MachineModuleInfoMachO - This is a MachineModuleInfoImpl implementation
+/// for MachO targets.
+class MachineModuleInfoMachO : public MachineModuleInfoImpl {
+ /// FnStubs - Darwin '$stub' stubs. The key is something like "Lfoo$stub",
+ /// the value is something like "_foo".
+ DenseMap<MCSymbol *, StubValueTy> FnStubs;
+
+ /// GVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
+ /// "Lfoo$non_lazy_ptr", the value is something like "_foo". The extra bit
+ /// is true if this GV is external.
+ DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+ /// HiddenGVStubs - Darwin '$non_lazy_ptr' stubs. The key is something like
+ /// "Lfoo$non_lazy_ptr", the value is something like "_foo". Unlike GVStubs
+ /// these are for things with hidden visibility. The extra bit is true if
+ /// this GV is external.
+ DenseMap<MCSymbol *, StubValueTy> HiddenGVStubs;
+
+ virtual void anchor(); // Out of line virtual method.
+public:
+ MachineModuleInfoMachO(const MachineModuleInfo &) {}
+
+ StubValueTy &getFnStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return FnStubs[Sym];
+ }
+
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return GVStubs[Sym];
+ }
+
+ StubValueTy &getHiddenGVStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return HiddenGVStubs[Sym];
+ }
+
+ /// Accessor methods to return the set of stubs in sorted order.
+ SymbolListTy GetFnStubList() { return getSortedStubs(FnStubs); }
+ SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+ SymbolListTy GetHiddenGVStubList() { return getSortedStubs(HiddenGVStubs); }
+};
+
+/// MachineModuleInfoELF - This is a MachineModuleInfoImpl implementation
+/// for ELF targets.
+class MachineModuleInfoELF : public MachineModuleInfoImpl {
+ /// GVStubs - These stubs are used to materialize global addresses in PIC
+ /// mode.
+ DenseMap<MCSymbol *, StubValueTy> GVStubs;
+
+ virtual void anchor(); // Out of line virtual method.
+public:
+ MachineModuleInfoELF(const MachineModuleInfo &) {}
+
+ StubValueTy &getGVStubEntry(MCSymbol *Sym) {
+ assert(Sym && "Key cannot be null");
+ return GVStubs[Sym];
+ }
+
+ /// Accessor methods to return the set of stubs in sorted order.
+
+ SymbolListTy GetGVStubList() { return getSortedStubs(GVStubs); }
+};
+
+} // end namespace llvm
+
+#endif
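
(Editorial aside — a sketch of how a MachO AsmPrinter might reach its private stub table through MMI's getObjFileInfo and register a $non_lazy_ptr stub; MMI, Sym, and TargetSym are assumed context.)

    MachineModuleInfoMachO &MMIMacho =
        MMI.getObjFileInfo<MachineModuleInfoMachO>();
    MachineModuleInfoImpl::StubValueTy &Entry = MMIMacho.getGVStubEntry(Sym);
    if (!Entry.getPointer())   // first reference to this global
      Entry = MachineModuleInfoImpl::StubValueTy(TargetSym,
                                                 /*isExternal=*/true);
    // At emission time, GetGVStubList() returns the entries in sorted order
    // and clears the map.
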
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineOperand.h b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
new file mode 100644
index 0000000..c43e47c
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineOperand.h
@@ -0,0 +1,754 @@
+//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineOperand class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
+#define LLVM_CODEGEN_MACHINEOPERAND_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+class BlockAddress;
+class ConstantFP;
+class ConstantInt;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineInstr;
+class MachineRegisterInfo;
+class MDNode;
+class ModuleSlotTracker;
+class TargetMachine;
+class TargetRegisterInfo;
+class hash_code;
+class raw_ostream;
+class MCSymbol;
+
+/// MachineOperand class - Representation of each machine instruction operand.
+///
+/// This class isn't a POD type because it has a private constructor, but its
+/// destructor must be trivial. Functions like MachineInstr::addOperand(),
+/// MachineRegisterInfo::moveOperands(), and MF::DeleteMachineInstr() depend on
+/// not having to call the MachineOperand destructor.
+///
+class MachineOperand {
+public:
+ enum MachineOperandType : unsigned char {
+ MO_Register, ///< Register operand.
+ MO_Immediate, ///< Immediate operand
+ MO_CImmediate, ///< Immediate >64bit operand
+ MO_FPImmediate, ///< Floating-point immediate operand
+ MO_MachineBasicBlock, ///< MachineBasicBlock reference
+ MO_FrameIndex, ///< Abstract Stack Frame Index
+ MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
+ MO_TargetIndex, ///< Target-dependent index+offset operand.
+ MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
+ MO_ExternalSymbol, ///< Name of external global symbol
+ MO_GlobalAddress, ///< Address of a global value
+ MO_BlockAddress, ///< Address of a basic block
+ MO_RegisterMask, ///< Mask of preserved registers.
+ MO_RegisterLiveOut, ///< Mask of live-out registers.
+ MO_Metadata, ///< Metadata reference (for debug info)
+ MO_MCSymbol, ///< MCSymbol reference (for debug/eh info)
+ MO_CFIIndex ///< MCCFIInstruction index.
+ };
+
+private:
+ /// OpKind - Specify what kind of operand this is. This discriminates the
+ /// union.
+ MachineOperandType OpKind : 8;
+
+ /// Subregister number for MO_Register. A value of 0 indicates the
+ /// MO_Register has no subReg.
+ ///
+ /// For all other kinds of operands, this field holds target-specific flags.
+ unsigned SubReg_TargetFlags : 12;
+
+ /// TiedTo - Non-zero when this register operand is tied to another register
+ /// operand. The encoding of this field is described in the block comment
+ /// before MachineInstr::tieOperands().
+ unsigned char TiedTo : 4;
+
+ /// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
+ /// operands.
+
+ /// IsDef - True if this is a def, false if this is a use of the register.
+ ///
+ bool IsDef : 1;
+
+ /// IsImp - True if this is an implicit def or use, false if it is explicit.
+ ///
+ bool IsImp : 1;
+
+ /// IsKill - True if this instruction is the last use of the register on this
+ /// path through the function. This is only valid on uses of registers.
+ bool IsKill : 1;
+
+ /// IsDead - True if this register is never used by a subsequent instruction.
+ /// This is only valid on definitions of registers.
+ bool IsDead : 1;
+
+ /// IsUndef - True if this register operand reads an "undef" value, i.e. the
+ /// read value doesn't matter. This flag can be set on both use and def
+ /// operands. On a sub-register def operand, it refers to the part of the
+ /// register that isn't written. On a full-register def operand, it is a
+ /// noop. See readsReg().
+ ///
+ /// This is only valid on registers.
+ ///
+ /// Note that an instruction may have multiple <undef> operands referring to
+ /// the same register. In that case, the instruction may depend on those
+ /// operands reading the same don't-care value. For example:
+ ///
+ /// %vreg1<def> = XOR %vreg2<undef>, %vreg2<undef>
+ ///
+ /// Any register can be used for %vreg2, and its value doesn't matter, but
+ /// the two operands must be the same register.
+ ///
+ bool IsUndef : 1;
+
+ /// IsInternalRead - True if this operand reads a value that was defined
+ /// inside the same instruction or bundle. This flag can be set on both use
+ /// and def operands. On a sub-register def operand, it refers to the part
+ /// of the register that isn't written. On a full-register def operand, it
+ /// is a noop.
+ ///
+ /// When this flag is set, the instruction bundle must contain at least one
+ /// other def of the register. If multiple instructions in the bundle define
+ /// the register, the meaning is target-defined.
+ bool IsInternalRead : 1;
+
+ /// IsEarlyClobber - True if this MO_Register 'def' operand is written to
+ /// by the MachineInstr before all input registers are read. This is used to
+ /// model the GCC inline asm '&' constraint modifier.
+ bool IsEarlyClobber : 1;
+
+ /// IsDebug - True if this MO_Register 'use' operand is in a debug pseudo,
+ /// not a real instruction. Such uses should be ignored during codegen.
+ bool IsDebug : 1;
+
+ /// SmallContents - This really should be part of the Contents union, but
+ /// lives out here so we can get a better packed struct.
+ /// MO_Register: Register number.
+ /// OffsetedInfo: Low bits of offset.
+ union {
+ unsigned RegNo; // For MO_Register.
+ unsigned OffsetLo; // Matches Contents.OffsetedInfo.OffsetHi.
+ } SmallContents;
+
+ /// ParentMI - This is the instruction that this operand is embedded into.
+ /// This is valid for all operand types, when the operand is in an instr.
+ MachineInstr *ParentMI;
+
+ /// Contents union - This contains the payload for the various operand types.
+ union {
+ MachineBasicBlock *MBB; // For MO_MachineBasicBlock.
+ const ConstantFP *CFP; // For MO_FPImmediate.
+ const ConstantInt *CI; // For MO_CImmediate. Integers > 64bit.
+ int64_t ImmVal; // For MO_Immediate.
+ const uint32_t *RegMask; // For MO_RegisterMask and MO_RegisterLiveOut.
+ const MDNode *MD; // For MO_Metadata.
+ MCSymbol *Sym; // For MO_MCSymbol.
+ unsigned CFIIndex; // For MO_CFI.
+
+ struct { // For MO_Register.
+ // Register number is in SmallContents.RegNo.
+ MachineOperand *Prev; // Access list for register. See MRI.
+ MachineOperand *Next;
+ } Reg;
+
+ /// OffsetedInfo - This struct contains the offset and an object identifier.
+ /// Together these represent the object with an optional offset from it.
+ struct {
+ union {
+ int Index; // For MO_*Index - The index itself.
+ const char *SymbolName; // For MO_ExternalSymbol.
+ const GlobalValue *GV; // For MO_GlobalAddress.
+ const BlockAddress *BA; // For MO_BlockAddress.
+ } Val;
+ // Low bits of offset are in SmallContents.OffsetLo.
+ int OffsetHi; // An offset from the object, high 32 bits.
+ } OffsetedInfo;
+ } Contents;
+
+ explicit MachineOperand(MachineOperandType K)
+ : OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {}
+public:
+ /// getType - Returns the MachineOperandType for this operand.
+ ///
+ MachineOperandType getType() const { return (MachineOperandType)OpKind; }
+
+ unsigned getTargetFlags() const {
+ return isReg() ? 0 : SubReg_TargetFlags;
+ }
+ void setTargetFlags(unsigned F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ SubReg_TargetFlags = F;
+ assert(SubReg_TargetFlags == F && "Target flags out of range");
+ }
+ void addTargetFlag(unsigned F) {
+ assert(!isReg() && "Register operands can't have target flags");
+ SubReg_TargetFlags |= F;
+ assert((SubReg_TargetFlags & F) && "Target flags out of range");
+ }
+
+
+ /// getParent - Return the instruction that this operand belongs to.
+ ///
+ MachineInstr *getParent() { return ParentMI; }
+ const MachineInstr *getParent() const { return ParentMI; }
+
+ /// clearParent - Reset the parent pointer.
+ ///
+ /// The MachineOperand copy constructor also copies ParentMI, expecting the
+ /// original to be deleted. If a MachineOperand is ever stored outside a
+ /// MachineInstr, the parent pointer must be cleared.
+ ///
+ /// Never call clearParent() on an operand in a MachineInstr.
+ ///
+ void clearParent() { ParentMI = nullptr; }
+
+ void print(raw_ostream &os, const TargetRegisterInfo *TRI = nullptr) const;
+ void print(raw_ostream &os, ModuleSlotTracker &MST,
+ const TargetRegisterInfo *TRI = nullptr) const;
+
+ //===--------------------------------------------------------------------===//
+ // Accessors that tell you what kind of MachineOperand you're looking at.
+ //===--------------------------------------------------------------------===//
+
+ /// isReg - Tests if this is a MO_Register operand.
+ bool isReg() const { return OpKind == MO_Register; }
+ /// isImm - Tests if this is a MO_Immediate operand.
+ bool isImm() const { return OpKind == MO_Immediate; }
+ /// isCImm - Test if this is a MO_CImmediate operand.
+ bool isCImm() const { return OpKind == MO_CImmediate; }
+ /// isFPImm - Tests if this is a MO_FPImmediate operand.
+ bool isFPImm() const { return OpKind == MO_FPImmediate; }
+ /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
+ bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
+ /// isFI - Tests if this is a MO_FrameIndex operand.
+ bool isFI() const { return OpKind == MO_FrameIndex; }
+ /// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
+ bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
+ /// isTargetIndex - Tests if this is a MO_TargetIndex operand.
+ bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
+ /// isJTI - Tests if this is a MO_JumpTableIndex operand.
+ bool isJTI() const { return OpKind == MO_JumpTableIndex; }
+ /// isGlobal - Tests if this is a MO_GlobalAddress operand.
+ bool isGlobal() const { return OpKind == MO_GlobalAddress; }
+ /// isSymbol - Tests if this is a MO_ExternalSymbol operand.
+ bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
+ /// isBlockAddress - Tests if this is a MO_BlockAddress operand.
+ bool isBlockAddress() const { return OpKind == MO_BlockAddress; }
+ /// isRegMask - Tests if this is a MO_RegisterMask operand.
+ bool isRegMask() const { return OpKind == MO_RegisterMask; }
+ /// isRegLiveOut - Tests if this is a MO_RegisterLiveOut operand.
+ bool isRegLiveOut() const { return OpKind == MO_RegisterLiveOut; }
+ /// isMetadata - Tests if this is a MO_Metadata operand.
+ bool isMetadata() const { return OpKind == MO_Metadata; }
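+ /// isMCSymbol - Tests if this is a MO_MCSymbol operand.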
+ bool isMCSymbol() const { return OpKind == MO_MCSymbol; }
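+ /// isCFIIndex - Tests if this is a MO_CFIIndex operand.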
+ bool isCFIIndex() const { return OpKind == MO_CFIIndex; }
+
+ //===--------------------------------------------------------------------===//
+ // Accessors for Register Operands
+ //===--------------------------------------------------------------------===//
+
+ /// getReg - Returns the register number.
+ unsigned getReg() const {
+ assert(isReg() && "This is not a register operand!");
+ return SmallContents.RegNo;
+ }
+
+ unsigned getSubReg() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return SubReg_TargetFlags;
+ }
+
+ bool isUse() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return !IsDef;
+ }
+
+ bool isDef() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsDef;
+ }
+
+ bool isImplicit() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsImp;
+ }
+
+ bool isDead() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsDead;
+ }
+
+ bool isKill() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsKill;
+ }
+
+ bool isUndef() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsUndef;
+ }
+
+ bool isInternalRead() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsInternalRead;
+ }
+
+ bool isEarlyClobber() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsEarlyClobber;
+ }
+
+ bool isTied() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return TiedTo;
+ }
+
+ bool isDebug() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsDebug;
+ }
+
+ /// readsReg - Returns true if this operand reads the previous value of its
+ /// register. A use operand with the <undef> flag set doesn't read its
+ /// register. A sub-register def implicitly reads the other parts of the
+ /// register being redefined unless the <undef> flag is set.
+ ///
+ /// This refers to reading the register value from before the current
+ /// instruction or bundle. Internal bundle reads are not included.
+ bool readsReg() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
+ }
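+
+ // Sketch: collect the registers an instruction actually reads, assuming a
+ // hypothetical MachineInstr *MI and a SmallVectorImpl<unsigned> &Reads:
+ //
+ //   for (const MachineOperand &MO : MI->operands())
+ //     if (MO.isReg() && MO.readsReg())
+ //       Reads.push_back(MO.getReg());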
+
+ //===--------------------------------------------------------------------===//
+ // Mutators for Register Operands
+ //===--------------------------------------------------------------------===//
+
+ /// Change the register this operand corresponds to.
+ ///
+ void setReg(unsigned Reg);
+
+ void setSubReg(unsigned subReg) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ SubReg_TargetFlags = subReg;
+ assert(SubReg_TargetFlags == subReg && "SubReg out of range");
+ }
+
+ /// substVirtReg - Substitute the current register with the virtual
+ /// subregister Reg:SubReg. Take any existing SubReg index into account,
+ /// using TargetRegisterInfo to compose the subreg indices if necessary.
+ /// Reg must be a virtual register, SubIdx can be 0.
+ ///
+ void substVirtReg(unsigned Reg, unsigned SubIdx, const TargetRegisterInfo&);
+
+ /// substPhysReg - Substitute the current register with the physical register
+ /// Reg, taking any existing SubReg into account. For instance,
+ /// substPhysReg(%EAX) will change %reg1024:sub_8bit to %AL.
+ ///
+ void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
+
+ void setIsUse(bool Val = true) { setIsDef(!Val); }
+
+ void setIsDef(bool Val = true);
+
+ void setImplicit(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsImp = Val;
+ }
+
+ void setIsKill(bool Val = true) {
+ assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
+ assert((!Val || !isDebug()) && "Marking a debug operation as kill");
+ IsKill = Val;
+ }
+
+ void setIsDead(bool Val = true) {
+ assert(isReg() && IsDef && "Wrong MachineOperand accessor");
+ IsDead = Val;
+ }
+
+ void setIsUndef(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsUndef = Val;
+ }
+
+ void setIsInternalRead(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsInternalRead = Val;
+ }
+
+ void setIsEarlyClobber(bool Val = true) {
+ assert(isReg() && IsDef && "Wrong MachineOperand accessor");
+ IsEarlyClobber = Val;
+ }
+
+ void setIsDebug(bool Val = true) {
+ assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
+ IsDebug = Val;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Accessors for various operand types.
+ //===--------------------------------------------------------------------===//
+
+ int64_t getImm() const {
+ assert(isImm() && "Wrong MachineOperand accessor");
+ return Contents.ImmVal;
+ }
+
+ const ConstantInt *getCImm() const {
+ assert(isCImm() && "Wrong MachineOperand accessor");
+ return Contents.CI;
+ }
+
+ const ConstantFP *getFPImm() const {
+ assert(isFPImm() && "Wrong MachineOperand accessor");
+ return Contents.CFP;
+ }
+
+ MachineBasicBlock *getMBB() const {
+ assert(isMBB() && "Wrong MachineOperand accessor");
+ return Contents.MBB;
+ }
+
+ int getIndex() const {
+ assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+ "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.Index;
+ }
+
+ const GlobalValue *getGlobal() const {
+ assert(isGlobal() && "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.GV;
+ }
+
+ const BlockAddress *getBlockAddress() const {
+ assert(isBlockAddress() && "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.BA;
+ }
+
+ MCSymbol *getMCSymbol() const {
+ assert(isMCSymbol() && "Wrong MachineOperand accessor");
+ return Contents.Sym;
+ }
+
+ unsigned getCFIIndex() const {
+ assert(isCFIIndex() && "Wrong MachineOperand accessor");
+ return Contents.CFIIndex;
+ }
+
+ /// Return the offset from the symbol in this operand. This always returns 0
+ /// for ExternalSymbol operands.
+ int64_t getOffset() const {
+ assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+ isTargetIndex() || isBlockAddress()) &&
+ "Wrong MachineOperand accessor");
+ return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
+ SmallContents.OffsetLo;
+ }
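+
+ // getOffset() and setOffset() below split the 64-bit offset across two
+ // fields: e.g. Offset = 0x100000002 is stored as SmallContents.OffsetLo = 0x2
+ // and OffsetedInfo.OffsetHi = 0x1; the shift-and-OR above reassembles it.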
+
+ const char *getSymbolName() const {
+ assert(isSymbol() && "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.SymbolName;
+ }
+
+ /// clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
+ /// It is sometimes necessary to detach the register mask pointer from its
+ /// machine operand. This static method can be used for such detached bit
+ /// mask pointers.
+ static bool clobbersPhysReg(const uint32_t *RegMask, unsigned PhysReg) {
+ // See TargetRegisterInfo.h.
+ assert(PhysReg < (1u << 30) && "Not a physical register");
+ return !(RegMask[PhysReg / 32] & (1u << PhysReg % 32));
+ }
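+
+ // Sketch of the detached-mask use case, assuming a hypothetical call
+ // instruction CallMI whose operand I is a RegMask, and a physical register
+ // number PhysReg:
+ //
+ //   const uint32_t *Mask = CallMI->getOperand(I).getRegMask();
+ //   if (MachineOperand::clobbersPhysReg(Mask, PhysReg))
+ //     ...; // PhysReg is not preserved across the call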
+
+ /// clobbersPhysReg - Returns true if this RegMask operand clobbers PhysReg.
+ bool clobbersPhysReg(unsigned PhysReg) const {
+ return clobbersPhysReg(getRegMask(), PhysReg);
+ }
+
+ /// getRegMask - Returns a bit mask of registers preserved by this RegMask
+ /// operand.
+ const uint32_t *getRegMask() const {
+ assert(isRegMask() && "Wrong MachineOperand accessor");
+ return Contents.RegMask;
+ }
+
+ /// getRegLiveOut - Returns a bit mask of live-out registers.
+ const uint32_t *getRegLiveOut() const {
+ assert(isRegLiveOut() && "Wrong MachineOperand accessor");
+ return Contents.RegMask;
+ }
+
+ const MDNode *getMetadata() const {
+ assert(isMetadata() && "Wrong MachineOperand accessor");
+ return Contents.MD;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Mutators for various operand types.
+ //===--------------------------------------------------------------------===//
+
+ void setImm(int64_t immVal) {
+ assert(isImm() && "Wrong MachineOperand mutator");
+ Contents.ImmVal = immVal;
+ }
+
+ void setFPImm(const ConstantFP *CFP) {
+ assert(isFPImm() && "Wrong MachineOperand mutator");
+ Contents.CFP = CFP;
+ }
+
+ void setOffset(int64_t Offset) {
+ assert((isGlobal() || isSymbol() || isMCSymbol() || isCPI() ||
+ isTargetIndex() || isBlockAddress()) &&
+ "Wrong MachineOperand accessor");
+ SmallContents.OffsetLo = unsigned(Offset);
+ Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
+ }
+
+ void setIndex(int Idx) {
+ assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
+ "Wrong MachineOperand accessor");
+ Contents.OffsetedInfo.Val.Index = Idx;
+ }
+
+ void setMBB(MachineBasicBlock *MBB) {
+ assert(isMBB() && "Wrong MachineOperand accessor");
+ Contents.MBB = MBB;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other methods.
+ //===--------------------------------------------------------------------===//
+
+ /// isIdenticalTo - Return true if this operand is identical to the specified
+ /// operand. Note: This method ignores isKill and isDead properties.
+ bool isIdenticalTo(const MachineOperand &Other) const;
+
+ /// \brief MachineOperand hash_value overload.
+ ///
+ /// Note that this includes the same information in the hash that
+ /// isIdenticalTo uses for comparison. It is thus suited for use in hash
+ /// tables which use that function for equality comparisons only.
+ friend hash_code hash_value(const MachineOperand &MO);
+
+ /// ChangeToImmediate - Replace this operand with a new immediate operand of
+ /// the specified value. If an operand is known to be an immediate already,
+ /// the setImm method should be used.
+ void ChangeToImmediate(int64_t ImmVal);
+
+ /// ChangeToFPImmediate - Replace this operand with a new FP immediate operand
+ /// of the specified value. If an operand is known to be an FP immediate
+ /// already, the setFPImm method should be used.
+ void ChangeToFPImmediate(const ConstantFP *FPImm);
+
+ /// ChangeToES - Replace this operand with a new external symbol operand.
+ void ChangeToES(const char *SymName, unsigned char TargetFlags = 0);
+
+ /// ChangeToMCSymbol - Replace this operand with a new MC symbol operand.
+ void ChangeToMCSymbol(MCSymbol *Sym);
+
+ /// ChangeToRegister - Replace this operand with a new register operand of
+ /// the specified value. If an operand is known to be a register already,
+ /// the setReg method should be used.
+ void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
+ bool isKill = false, bool isDead = false,
+ bool isUndef = false, bool isDebug = false);
+
+ //===--------------------------------------------------------------------===//
+ // Construction methods.
+ //===--------------------------------------------------------------------===//
+
+ static MachineOperand CreateImm(int64_t Val) {
+ MachineOperand Op(MachineOperand::MO_Immediate);
+ Op.setImm(Val);
+ return Op;
+ }
+
+ static MachineOperand CreateCImm(const ConstantInt *CI) {
+ MachineOperand Op(MachineOperand::MO_CImmediate);
+ Op.Contents.CI = CI;
+ return Op;
+ }
+
+ static MachineOperand CreateFPImm(const ConstantFP *CFP) {
+ MachineOperand Op(MachineOperand::MO_FPImmediate);
+ Op.Contents.CFP = CFP;
+ return Op;
+ }
+
+ static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
+ bool isKill = false, bool isDead = false,
+ bool isUndef = false,
+ bool isEarlyClobber = false,
+ unsigned SubReg = 0,
+ bool isDebug = false,
+ bool isInternalRead = false) {
+ assert(!(isDead && !isDef) && "Dead flag on non-def");
+ assert(!(isKill && isDef) && "Kill flag on def");
+ MachineOperand Op(MachineOperand::MO_Register);
+ Op.IsDef = isDef;
+ Op.IsImp = isImp;
+ Op.IsKill = isKill;
+ Op.IsDead = isDead;
+ Op.IsUndef = isUndef;
+ Op.IsInternalRead = isInternalRead;
+ Op.IsEarlyClobber = isEarlyClobber;
+ Op.TiedTo = 0;
+ Op.IsDebug = isDebug;
+ Op.SmallContents.RegNo = Reg;
+ Op.Contents.Reg.Prev = nullptr;
+ Op.Contents.Reg.Next = nullptr;
+ Op.setSubReg(SubReg);
+ return Op;
+ }
+ static MachineOperand CreateMBB(MachineBasicBlock *MBB,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
+ Op.setMBB(MBB);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateFI(int Idx) {
+ MachineOperand Op(MachineOperand::MO_FrameIndex);
+ Op.setIndex(Idx);
+ return Op;
+ }
+ static MachineOperand CreateCPI(unsigned Idx, int Offset,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
+ Op.setIndex(Idx);
+ Op.setOffset(Offset);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_TargetIndex);
+ Op.setIndex(Idx);
+ Op.setOffset(Offset);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateJTI(unsigned Idx,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_JumpTableIndex);
+ Op.setIndex(Idx);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateGA(const GlobalValue *GV, int64_t Offset,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_GlobalAddress);
+ Op.Contents.OffsetedInfo.Val.GV = GV;
+ Op.setOffset(Offset);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateES(const char *SymName,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_ExternalSymbol);
+ Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
+ Op.setOffset(0); // Offset is always 0.
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_BlockAddress);
+ Op.Contents.OffsetedInfo.Val.BA = BA;
+ Op.setOffset(Offset);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+ /// CreateRegMask - Creates a register mask operand referencing Mask. The
+ /// operand does not take ownership of the memory referenced by Mask, it must
+ /// remain valid for the lifetime of the operand.
+ ///
+ /// A RegMask operand represents a set of non-clobbered physical registers on
+ /// an instruction that clobbers many registers, typically a call. The bit
+ /// mask has a bit set for each physreg that is preserved by this
+ /// instruction, as described in the documentation for
+ /// TargetRegisterInfo::getCallPreservedMask().
+ ///
+ /// Any physreg with a 0 bit in the mask is clobbered by the instruction.
+ ///
+ static MachineOperand CreateRegMask(const uint32_t *Mask) {
+ assert(Mask && "Missing register mask");
+ MachineOperand Op(MachineOperand::MO_RegisterMask);
+ Op.Contents.RegMask = Mask;
+ return Op;
+ }
+ static MachineOperand CreateRegLiveOut(const uint32_t *Mask) {
+ assert(Mask && "Missing live-out register mask");
+ MachineOperand Op(MachineOperand::MO_RegisterLiveOut);
+ Op.Contents.RegMask = Mask;
+ return Op;
+ }
+ static MachineOperand CreateMetadata(const MDNode *Meta) {
+ MachineOperand Op(MachineOperand::MO_Metadata);
+ Op.Contents.MD = Meta;
+ return Op;
+ }
+
+ static MachineOperand CreateMCSymbol(MCSymbol *Sym,
+ unsigned char TargetFlags = 0) {
+ MachineOperand Op(MachineOperand::MO_MCSymbol);
+ Op.Contents.Sym = Sym;
+ Op.setOffset(0);
+ Op.setTargetFlags(TargetFlags);
+ return Op;
+ }
+
+ static MachineOperand CreateCFIIndex(unsigned CFIIndex) {
+ MachineOperand Op(MachineOperand::MO_CFIIndex);
+ Op.Contents.CFIIndex = CFIIndex;
+ return Op;
+ }
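+
+ // A minimal construction sketch, assuming hypothetical Reg and GV values; in
+ // practice operands are usually attached through MachineInstrBuilder
+ // (BuildMI(...).addReg(...) and friends) rather than created directly:
+ //
+ //   MachineOperand Def  = MachineOperand::CreateReg(Reg, /*isDef=*/true);
+ //   MachineOperand Imm  = MachineOperand::CreateImm(42);
+ //   MachineOperand Addr = MachineOperand::CreateGA(GV, /*Offset=*/8);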
+
+ friend class MachineInstr;
+ friend class MachineRegisterInfo;
+private:
+ void removeRegFromUses();
+
+ //===--------------------------------------------------------------------===//
+ // Methods for handling register use/def lists.
+ //===--------------------------------------------------------------------===//
+
+ /// isOnRegUseList - Return true if this operand is on a register use/def list
+ /// or false if not. This can only be called for register operands that are
+ /// part of a machine instruction.
+ bool isOnRegUseList() const {
+ assert(isReg() && "Can only add reg operand to use lists");
+ return Contents.Reg.Prev != nullptr;
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
+ MO.print(OS, nullptr);
+ return OS;
+}
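+
+// Sketch: print an operand for debugging, assuming llvm/Support/Debug.h is
+// included for dbgs(); passing a TargetRegisterInfo to print() yields
+// symbolic register names instead of raw numbers:
+//
+//   dbgs() << MO << '\n';
+//   MO.print(dbgs(), TRI);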
+
+ // See friend declaration above. This additional declaration is required in
+ // order to compile LLVM with the IBM xlC compiler.
+ hash_code hash_value(const MachineOperand &MO);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
new file mode 100644
index 0000000..6731983
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePassRegistry.h
@@ -0,0 +1,157 @@
+//===-- llvm/CodeGen/MachinePassRegistry.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the mechanics for machine function pass registries. A
+// function pass registry (MachinePassRegistry) is auto-filled by the static
+// constructors of MachinePassRegistryNode. Further, there is a command-line
+// parser (RegisterPassParser) which listens to each registry for additions
+// and deletions, so that the appropriate command-line option is updated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+typedef void *(*MachinePassCtor)();
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryListener - Listener for additions to and removals from
+/// the registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryListener {
+ virtual void anchor();
+public:
+ MachinePassRegistryListener() {}
+ virtual ~MachinePassRegistryListener() {}
+ virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0;
+ virtual void NotifyRemove(const char *N) = 0;
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryNode - Machine pass node stored in registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryNode {
+
+private:
+
+ MachinePassRegistryNode *Next; // Next function pass in list.
+ const char *Name; // Name of function pass.
+ const char *Description; // Description string.
+ MachinePassCtor Ctor; // Function pass creator.
+
+public:
+
+ MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
+ : Next(nullptr)
+ , Name(N)
+ , Description(D)
+ , Ctor(C)
+ {}
+
+ // Accessors
+ MachinePassRegistryNode *getNext() const { return Next; }
+ MachinePassRegistryNode **getNextAddress() { return &Next; }
+ const char *getName() const { return Name; }
+ const char *getDescription() const { return Description; }
+ MachinePassCtor getCtor() const { return Ctor; }
+ void setNext(MachinePassRegistryNode *N) { Next = N; }
+
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistry - Track the registration of machine passes.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistry {
+
+private:
+
+ MachinePassRegistryNode *List; // List of registry nodes.
+ MachinePassCtor Default; // Default function pass creator.
+ MachinePassRegistryListener* Listener;// Listener for list adds and removes.
+
+public:
+
+ // NO CONSTRUCTOR - we don't want static constructor ordering to mess
+ // with the registry.
+
+ // Accessors.
+ //
+ MachinePassRegistryNode *getList() { return List; }
+ MachinePassCtor getDefault() { return Default; }
+ void setDefault(MachinePassCtor C) { Default = C; }
+ void setDefault(StringRef Name);
+ void setListener(MachinePassRegistryListener *L) { Listener = L; }
+
+ /// Add - Adds a function pass to the registration list.
+ ///
+ void Add(MachinePassRegistryNode *Node);
+
+ /// Remove - Removes a function pass from the registration list.
+ ///
+ void Remove(MachinePassRegistryNode *Node);
+
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterPassParser class - Handle the addition of new machine passes.
+///
+//===----------------------------------------------------------------------===//
+template<class RegistryClass>
+class RegisterPassParser : public MachinePassRegistryListener,
+ public cl::parser<typename RegistryClass::FunctionPassCtor> {
+public:
+ RegisterPassParser(cl::Option &O)
+ : cl::parser<typename RegistryClass::FunctionPassCtor>(O) {}
+ ~RegisterPassParser() override { RegistryClass::setListener(nullptr); }
+
+ void initialize() {
+ cl::parser<typename RegistryClass::FunctionPassCtor>::initialize();
+
+ // Add existing passes to option.
+ for (RegistryClass *Node = RegistryClass::getList();
+ Node; Node = Node->getNext()) {
+ this->addLiteralOption(Node->getName(),
+ (typename RegistryClass::FunctionPassCtor)Node->getCtor(),
+ Node->getDescription());
+ }
+
+ // Make sure we listen for list changes.
+ RegistryClass::setListener(this);
+ }
+
+ // Implement the MachinePassRegistryListener callbacks.
+ //
+ void NotifyAdd(const char *N, MachinePassCtor C, const char *D) override {
+ this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
+ }
+ void NotifyRemove(const char *N) override {
+ this->removeLiteralOption(N);
+ }
+};
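+
+// A usage sketch modeled on LLVM's "-regalloc" option; it assumes a registry
+// node class RegisterRegAlloc (exposing a FunctionPassCtor typedef) and a
+// default creator createDefaultRegisterAllocator, as in RegAllocRegistry:
+//
+//   static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
+//                  RegisterPassParser<RegisterRegAlloc>>
+//   RegAlloc("regalloc", cl::init(&createDefaultRegisterAllocator),
+//            cl::desc("Register allocator to use"));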
+
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h b/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h
new file mode 100644
index 0000000..70bdb19
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachinePostDominators.h
@@ -0,0 +1,86 @@
+//=- llvm/CodeGen/MachinePostDominators.h ------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes interfaces to post-dominance information for
+// target-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H
+
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+namespace llvm {
+
+///
+/// MachinePostDominatorTree - Analysis pass used to compute the
+/// post-dominator tree for machine basic blocks.
+///
+struct MachinePostDominatorTree : public MachineFunctionPass {
+private:
+ DominatorTreeBase<MachineBasicBlock> *DT;
+
+public:
+ static char ID;
+
+ MachinePostDominatorTree();
+
+ ~MachinePostDominatorTree() override;
+
+ FunctionPass *createMachinePostDominatorTreePass();
+
+ const std::vector<MachineBasicBlock *> &getRoots() const {
+ return DT->getRoots();
+ }
+
+ MachineDomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ bool dominates(const MachineDomTreeNode *A,
+ const MachineDomTreeNode *B) const {
+ return DT->dominates(A, B);
+ }
+
+ bool dominates(const MachineBasicBlock *A, const MachineBasicBlock *B) const {
+ return DT->dominates(A, B);
+ }
+
+ bool properlyDominates(const MachineDomTreeNode *A,
+ const MachineDomTreeNode *B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ bool properlyDominates(const MachineBasicBlock *A,
+ const MachineBasicBlock *B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+ MachineBasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
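+
+ // A usage sketch from inside some other MachineFunctionPass, assuming
+ // hypothetical blocks MBBA and MBBB; require the analysis first:
+ //
+ //   AU.addRequired<MachinePostDominatorTree>(); // in getAnalysisUsage()
+ //   auto &PDT = getAnalysis<MachinePostDominatorTree>();
+ //   if (PDT.dominates(MBBA, MBBB))
+ //     ...; // MBBA post-dominates MBBB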
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
+};
+} //end of namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegionInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegionInfo.h
new file mode 100644
index 0000000..df9823f
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegionInfo.h
@@ -0,0 +1,180 @@
+//===- llvm/CodeGen/MachineRegionInfo.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGIONINFO_H
+#define LLVM_CODEGEN_MACHINEREGIONINFO_H
+
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/RegionIterator.h"
+#include "llvm/CodeGen/MachineDominanceFrontier.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+
+
+namespace llvm {
+
+class MachineDominatorTree;
+struct MachinePostDominatorTree;
+class MachineRegion;
+class MachineRegionNode;
+class MachineRegionInfo;
+
+template<>
+struct RegionTraits<MachineFunction> {
+ typedef MachineFunction FuncT;
+ typedef MachineBasicBlock BlockT;
+ typedef MachineRegion RegionT;
+ typedef MachineRegionNode RegionNodeT;
+ typedef MachineRegionInfo RegionInfoT;
+ typedef MachineDominatorTree DomTreeT;
+ typedef MachineDomTreeNode DomTreeNodeT;
+ typedef MachinePostDominatorTree PostDomTreeT;
+ typedef MachineDominanceFrontier DomFrontierT;
+ typedef MachineInstr InstT;
+ typedef MachineLoop LoopT;
+ typedef MachineLoopInfo LoopInfoT;
+
+ static unsigned getNumSuccessors(MachineBasicBlock *BB) {
+ return BB->succ_size();
+ }
+};
+
+
+class MachineRegionNode : public RegionNodeBase<RegionTraits<MachineFunction>> {
+public:
+ inline MachineRegionNode(MachineRegion *Parent,
+ MachineBasicBlock *Entry,
+ bool isSubRegion = false)
+ : RegionNodeBase<RegionTraits<MachineFunction>>(Parent, Entry, isSubRegion) {}
+
+ bool operator==(const MachineRegion &RN) const {
+ return this == reinterpret_cast<const MachineRegionNode*>(&RN);
+ }
+};
+
+class MachineRegion : public RegionBase<RegionTraits<MachineFunction>> {
+public:
+ MachineRegion(MachineBasicBlock *Entry, MachineBasicBlock *Exit,
+ MachineRegionInfo* RI,
+ MachineDominatorTree *DT, MachineRegion *Parent = nullptr);
+ ~MachineRegion();
+
+ bool operator==(const MachineRegionNode &RN) const {
+ return &RN == reinterpret_cast<const MachineRegionNode*>(this);
+ }
+};
+
+class MachineRegionInfo : public RegionInfoBase<RegionTraits<MachineFunction>> {
+public:
+ explicit MachineRegionInfo();
+
+ ~MachineRegionInfo() override;
+
+ // updateStatistics - Update statistics about created regions.
+ void updateStatistics(MachineRegion *R) final;
+
+ void recalculate(MachineFunction &F,
+ MachineDominatorTree *DT,
+ MachinePostDominatorTree *PDT,
+ MachineDominanceFrontier *DF);
+};
+
+class MachineRegionInfoPass : public MachineFunctionPass {
+ MachineRegionInfo RI;
+
+public:
+ static char ID;
+ explicit MachineRegionInfoPass();
+
+ ~MachineRegionInfoPass() override;
+
+ MachineRegionInfo &getRegionInfo() {
+ return RI;
+ }
+
+ const MachineRegionInfo &getRegionInfo() const {
+ return RI;
+ }
+
+ /// @name MachineFunctionPass interface
+ //@{
+ bool runOnMachineFunction(MachineFunction &F) override;
+ void releaseMemory() override;
+ void verifyAnalysis() const override;
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ void print(raw_ostream &OS, const Module *) const override;
+ void dump() const;
+ //@}
+};
+
+
+template <>
+template <>
+inline MachineBasicBlock* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineBasicBlock>() const {
+ assert(!isSubRegion() && "This is not a MachineBasicBlock RegionNode!");
+ return getEntry();
+}
+
+template<>
+template<>
+inline MachineRegion* RegionNodeBase<RegionTraits<MachineFunction>>::getNodeAs<MachineRegion>() const {
+ assert(isSubRegion() && "This is not a subregion RegionNode!");
+ auto Unconst = const_cast<RegionNodeBase<RegionTraits<MachineFunction>>*>(this);
+ return reinterpret_cast<MachineRegion*>(Unconst);
+}
+
+
+RegionNodeGraphTraits(MachineRegionNode, MachineBasicBlock, MachineRegion);
+RegionNodeGraphTraits(const MachineRegionNode, MachineBasicBlock, MachineRegion);
+
+RegionGraphTraits(MachineRegion, MachineRegionNode);
+RegionGraphTraits(const MachineRegion, const MachineRegionNode);
+
+template <> struct GraphTraits<MachineRegionInfo*>
+ : public GraphTraits<FlatIt<MachineRegionNode*> > {
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+
+ static NodeType *getEntryNode(MachineRegionInfo *RI) {
+ return GraphTraits<FlatIt<MachineRegion*> >::getEntryNode(RI->getTopLevelRegion());
+ }
+ static nodes_iterator nodes_begin(MachineRegionInfo* RI) {
+ return nodes_iterator::begin(getEntryNode(RI));
+ }
+ static nodes_iterator nodes_end(MachineRegionInfo *RI) {
+ return nodes_iterator::end(getEntryNode(RI));
+ }
+};
+
+template <> struct GraphTraits<MachineRegionInfoPass*>
+ : public GraphTraits<MachineRegionInfo *> {
+ typedef df_iterator<NodeType*, SmallPtrSet<NodeType*, 8>, false,
+ GraphTraits<FlatIt<NodeType*> > > nodes_iterator;
+
+ static NodeType *getEntryNode(MachineRegionInfoPass *RI) {
+ return GraphTraits<MachineRegionInfo*>::getEntryNode(&RI->getRegionInfo());
+ }
+ static nodes_iterator nodes_begin(MachineRegionInfoPass* RI) {
+ return GraphTraits<MachineRegionInfo*>::nodes_begin(&RI->getRegionInfo());
+ }
+ static nodes_iterator nodes_end(MachineRegionInfoPass *RI) {
+ return GraphTraits<MachineRegionInfo*>::nodes_end(&RI->getRegionInfo());
+ }
+};
+
+extern template class RegionBase<RegionTraits<MachineFunction>>;
+extern template class RegionNodeBase<RegionTraits<MachineFunction>>;
+extern template class RegionInfoBase<RegionTraits<MachineFunction>>;
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
new file mode 100644
index 0000000..04191bc
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -0,0 +1,995 @@
+//===-- llvm/CodeGen/MachineRegisterInfo.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
+#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <vector>
+
+namespace llvm {
+class PSetIterator;
+
+/// MachineRegisterInfo - Keep track of information for virtual and physical
+/// registers, including vreg register classes, use/def chains for registers,
+/// etc.
+class MachineRegisterInfo {
+public:
+ class Delegate {
+ virtual void anchor();
+ public:
+ virtual void MRI_NoteNewVirtualRegister(unsigned Reg) = 0;
+
+ virtual ~Delegate() {}
+ };
+
+private:
+ const MachineFunction *MF;
+ Delegate *TheDelegate;
+
+ /// IsSSA - True when the machine function is in SSA form and virtual
+ /// registers have a single def.
+ bool IsSSA;
+
+ /// TracksLiveness - True while register liveness is being tracked accurately.
+ /// Basic block live-in lists, kill flags, and implicit defs may not be
+ /// accurate after this flag is cleared.
+ bool TracksLiveness;
+
+ /// True if subregister liveness is tracked.
+ bool TracksSubRegLiveness;
+
+ /// VRegInfo - Information we keep for each virtual register.
+ ///
+ /// Each element in this list contains the register class of the vreg and the
+ /// start of the use/def list for the register.
+ IndexedMap<std::pair<const TargetRegisterClass*, MachineOperand*>,
+ VirtReg2IndexFunctor> VRegInfo;
+
+ /// RegAllocHints - This vector records register allocation hints for virtual
+ /// registers. For each virtual register, it keeps a register and hint type
+ /// pair making up the allocation hint. Hint type is target specific except
+ /// for the value 0, which means the second value of the pair is the preferred
+ /// register for allocation. For example, if the hint is <0, 1024>, it means
+ /// the allocator should prefer the physical register allocated to the virtual
+ /// register of the hint.
+ IndexedMap<std::pair<unsigned, unsigned>, VirtReg2IndexFunctor> RegAllocHints;
+
+ /// PhysRegUseDefLists - This is an array of the head of the use/def list for
+ /// physical registers.
+ std::unique_ptr<MachineOperand *[]> PhysRegUseDefLists;
+
+ /// getRegUseDefListHead - Return the head pointer for the register use/def
+ /// list for the specified virtual or physical register.
+ MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
+ }
+
+ MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
+ if (TargetRegisterInfo::isVirtualRegister(RegNo))
+ return VRegInfo[RegNo].second;
+ return PhysRegUseDefLists[RegNo];
+ }
+
+ /// Get the next element in the use-def chain.
+ static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
+ assert(MO && MO->isReg() && "This is not a register operand!");
+ return MO->Contents.Reg.Next;
+ }
+
+ /// UsedPhysRegMask - Additional used physregs including aliases.
+ /// This bit vector represents all the registers clobbered by function calls.
+ BitVector UsedPhysRegMask;
+
+ /// ReservedRegs - This is a bit vector of reserved registers. The target
+ /// may change its mind about which registers should be reserved. This
+ /// vector is the frozen set of reserved registers when register allocation
+ /// started.
+ BitVector ReservedRegs;
+
+ /// Keep track of the physical registers that are live in to the function.
+ /// Live-in values are typically arguments in registers. Live-in values are
+ /// allowed to have virtual registers associated with them, stored in the
+ /// second element.
+ std::vector<std::pair<unsigned, unsigned> > LiveIns;
+
+ MachineRegisterInfo(const MachineRegisterInfo&) = delete;
+ void operator=(const MachineRegisterInfo&) = delete;
+public:
+ explicit MachineRegisterInfo(const MachineFunction *MF);
+
+ const TargetRegisterInfo *getTargetRegisterInfo() const {
+ return MF->getSubtarget().getRegisterInfo();
+ }
+
+ void resetDelegate(Delegate *delegate) {
+ // Ensure another delegate does not take over unless the current
+ // delegate first detaches itself. If we ever need to multicast
+ // notifications, we will need to change to using a list.
+ assert(TheDelegate == delegate &&
+ "Only the current delegate can perform reset!");
+ TheDelegate = nullptr;
+ }
+
+ void setDelegate(Delegate *delegate) {
+ assert(delegate && !TheDelegate &&
+ "Attempted to set delegate to null, or to change it without "
+ "first resetting it!");
+
+ TheDelegate = delegate;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Function State
+ //===--------------------------------------------------------------------===//
+
+ // isSSA - Returns true when the machine function is in SSA form. Early
+ // passes require the machine function to be in SSA form where every virtual
+ // register has a single defining instruction.
+ //
+ // The TwoAddressInstructionPass and PHIElimination passes take the machine
+ // function out of SSA form when they introduce multiple defs per virtual
+ // register.
+ bool isSSA() const { return IsSSA; }
+
+ // leaveSSA - Indicates that the machine function is no longer in SSA form.
+ void leaveSSA() { IsSSA = false; }
+
+ /// tracksLiveness - Returns true when tracking register liveness accurately.
+ ///
+ /// While this flag is true, register liveness information in basic block
+ /// live-in lists and machine instruction operands is accurate. This means it
+ /// can be used to change the code in ways that affect the values in
+ /// registers, for example by the register scavenger.
+ ///
+ /// When this flag is false, liveness is no longer reliable.
+ bool tracksLiveness() const { return TracksLiveness; }
+
+ /// invalidateLiveness - Indicates that register liveness is no longer being
+ /// tracked accurately.
+ ///
+ /// This should be called by late passes that invalidate the liveness
+ /// information.
+ void invalidateLiveness() { TracksLiveness = false; }
+
+ /// Returns true if liveness for register class @p RC should be tracked at
+ /// the subregister level.
+ bool shouldTrackSubRegLiveness(const TargetRegisterClass &RC) const {
+ return subRegLivenessEnabled() && RC.HasDisjunctSubRegs;
+ }
+ bool shouldTrackSubRegLiveness(unsigned VReg) const {
+ assert(TargetRegisterInfo::isVirtualRegister(VReg) && "Must pass a VReg");
+ return shouldTrackSubRegLiveness(*getRegClass(VReg));
+ }
+ bool subRegLivenessEnabled() const {
+ return TracksSubRegLiveness;
+ }
+
+ void enableSubRegLiveness(bool Enable = true) {
+ TracksSubRegLiveness = Enable;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Register Info
+ //===--------------------------------------------------------------------===//
+
+ // Strictly for use by MachineInstr.cpp.
+ void addRegOperandToUseList(MachineOperand *MO);
+
+ // Strictly for use by MachineInstr.cpp.
+ void removeRegOperandFromUseList(MachineOperand *MO);
+
+ // Strictly for use by MachineInstr.cpp.
+ void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
+
+ /// Verify the sanity of the use list for Reg.
+ void verifyUseList(unsigned Reg) const;
+
+ /// Verify the use list of all registers.
+ void verifyUseLists() const;
+
+ /// reg_begin/reg_end - Provide iteration support to walk over all definitions
+ /// and uses of a register within the MachineFunction that corresponds to this
+ /// MachineRegisterInfo object.
+ template<bool Uses, bool Defs, bool SkipDebug,
+ bool ByOperand, bool ByInstr, bool ByBundle>
+ class defusechain_iterator;
+ template<bool Uses, bool Defs, bool SkipDebug,
+ bool ByOperand, bool ByInstr, bool ByBundle>
+ class defusechain_instr_iterator;
+
+ // Make it a friend so it can access getNextOperandForReg().
+ template<bool, bool, bool, bool, bool, bool>
+ friend class defusechain_iterator;
+ template<bool, bool, bool, bool, bool, bool>
+ friend class defusechain_instr_iterator;
+
+
+
+ /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
+ /// register.
+ typedef defusechain_iterator<true,true,false,true,false,false>
+ reg_iterator;
+ reg_iterator reg_begin(unsigned RegNo) const {
+ return reg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_iterator reg_end() { return reg_iterator(nullptr); }
+
+ inline iterator_range<reg_iterator> reg_operands(unsigned Reg) const {
+ return make_range(reg_begin(Reg), reg_end());
+ }
+
+ /// reg_instr_iterator/reg_instr_begin/reg_instr_end - Walk all defs and uses
+ /// of the specified register, stepping by MachineInstr.
+ typedef defusechain_instr_iterator<true,true,false,false,true,false>
+ reg_instr_iterator;
+ reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
+ return reg_instr_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_instr_iterator reg_instr_end() {
+ return reg_instr_iterator(nullptr);
+ }
+
+ inline iterator_range<reg_instr_iterator>
+ reg_instructions(unsigned Reg) const {
+ return make_range(reg_instr_begin(Reg), reg_instr_end());
+ }
+
+ /// reg_bundle_iterator/reg_bundle_begin/reg_bundle_end - Walk all defs and uses
+ /// of the specified register, stepping by bundle.
+ typedef defusechain_instr_iterator<true,true,false,false,false,true>
+ reg_bundle_iterator;
+ reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
+ return reg_bundle_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_bundle_iterator reg_bundle_end() {
+ return reg_bundle_iterator(nullptr);
+ }
+
+ inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
+ return make_range(reg_bundle_begin(Reg), reg_bundle_end());
+ }
+
+ /// reg_empty - Return true if there are no instructions using or defining the
+ /// specified register (it may be live-in).
+ bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
+
+ /// reg_nodbg_iterator/reg_nodbg_begin/reg_nodbg_end - Walk all defs and uses
+ /// of the specified register, skipping those marked as Debug.
+ typedef defusechain_iterator<true,true,true,true,false,false>
+ reg_nodbg_iterator;
+ reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
+ return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_nodbg_iterator reg_nodbg_end() {
+ return reg_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<reg_nodbg_iterator>
+ reg_nodbg_operands(unsigned Reg) const {
+ return make_range(reg_nodbg_begin(Reg), reg_nodbg_end());
+ }
+
+ /// reg_instr_nodbg_iterator/reg_instr_nodbg_begin/reg_instr_nodbg_end - Walk
+ /// all defs and uses of the specified register, stepping by MachineInstr,
+ /// skipping those marked as Debug.
+ typedef defusechain_instr_iterator<true,true,true,false,true,false>
+ reg_instr_nodbg_iterator;
+ reg_instr_nodbg_iterator reg_instr_nodbg_begin(unsigned RegNo) const {
+ return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
+ return reg_instr_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<reg_instr_nodbg_iterator>
+ reg_nodbg_instructions(unsigned Reg) const {
+ return make_range(reg_instr_nodbg_begin(Reg), reg_instr_nodbg_end());
+ }
+
+ /// reg_bundle_nodbg_iterator/reg_bundle_nodbg_begin/reg_bundle_nodbg_end - Walk
+ /// all defs and uses of the specified register, stepping by bundle,
+ /// skipping those marked as Debug.
+ typedef defusechain_instr_iterator<true,true,true,false,false,true>
+ reg_bundle_nodbg_iterator;
+ reg_bundle_nodbg_iterator reg_bundle_nodbg_begin(unsigned RegNo) const {
+ return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
+ return reg_bundle_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<reg_bundle_nodbg_iterator>
+ reg_nodbg_bundles(unsigned Reg) const {
+ return make_range(reg_bundle_nodbg_begin(Reg), reg_bundle_nodbg_end());
+ }
+
+ /// reg_nodbg_empty - Return true if the only instructions using or defining
+ /// Reg are Debug instructions.
+ bool reg_nodbg_empty(unsigned RegNo) const {
+ return reg_nodbg_begin(RegNo) == reg_nodbg_end();
+ }
+
+ /// def_iterator/def_begin/def_end - Walk all defs of the specified register.
+ typedef defusechain_iterator<false,true,false,true,false,false>
+ def_iterator;
+ def_iterator def_begin(unsigned RegNo) const {
+ return def_iterator(getRegUseDefListHead(RegNo));
+ }
+ static def_iterator def_end() { return def_iterator(nullptr); }
+
+ inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
+ return make_range(def_begin(Reg), def_end());
+ }
+
+ /// def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the
+ /// specified register, stepping by MachineInstr.
+ typedef defusechain_instr_iterator<false,true,false,false,true,false>
+ def_instr_iterator;
+ def_instr_iterator def_instr_begin(unsigned RegNo) const {
+ return def_instr_iterator(getRegUseDefListHead(RegNo));
+ }
+ static def_instr_iterator def_instr_end() {
+ return def_instr_iterator(nullptr);
+ }
+
+ inline iterator_range<def_instr_iterator>
+ def_instructions(unsigned Reg) const {
+ return make_range(def_instr_begin(Reg), def_instr_end());
+ }
+
+ /// def_bundle_iterator/def_bundle_begin/def_bundle_end - Walk all defs of the
+ /// specified register, stepping by bundle.
+ typedef defusechain_instr_iterator<false,true,false,false,false,true>
+ def_bundle_iterator;
+ def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
+ return def_bundle_iterator(getRegUseDefListHead(RegNo));
+ }
+ static def_bundle_iterator def_bundle_end() {
+ return def_bundle_iterator(nullptr);
+ }
+
+ inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
+ return make_range(def_bundle_begin(Reg), def_bundle_end());
+ }
+
+ /// def_empty - Return true if there are no instructions defining the
+ /// specified register (it may be live-in).
+ bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
+
+ /// hasOneDef - Return true if there is exactly one instruction defining the
+ /// specified register.
+ bool hasOneDef(unsigned RegNo) const {
+ def_iterator DI = def_begin(RegNo);
+ if (DI == def_end())
+ return false;
+ return ++DI == def_end();
+ }
+
+ /// use_iterator/use_begin/use_end - Walk all uses of the specified register.
+ typedef defusechain_iterator<true,false,false,true,false,false>
+ use_iterator;
+ use_iterator use_begin(unsigned RegNo) const {
+ return use_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_iterator use_end() { return use_iterator(nullptr); }
+
+ inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
+ return make_range(use_begin(Reg), use_end());
+ }
+
+ /// use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the
+ /// specified register, stepping by MachineInstr.
+ typedef defusechain_instr_iterator<true,false,false,false,true,false>
+ use_instr_iterator;
+ use_instr_iterator use_instr_begin(unsigned RegNo) const {
+ return use_instr_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_instr_iterator use_instr_end() {
+ return use_instr_iterator(nullptr);
+ }
+
+ inline iterator_range<use_instr_iterator>
+ use_instructions(unsigned Reg) const {
+ return make_range(use_instr_begin(Reg), use_instr_end());
+ }
+
+ /// use_bundle_iterator/use_bundle_begin/use_bundle_end - Walk all uses of the
+ /// specified register, stepping by bundle.
+ typedef defusechain_instr_iterator<true,false,false,false,false,true>
+ use_bundle_iterator;
+ use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
+ return use_bundle_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_bundle_iterator use_bundle_end() {
+ return use_bundle_iterator(nullptr);
+ }
+
+ inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
+ return make_range(use_bundle_begin(Reg), use_bundle_end());
+ }
+
+ /// use_empty - Return true if there are no instructions using the specified
+ /// register.
+ bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
+
+ /// hasOneUse - Return true if there is exactly one instruction using the
+ /// specified register.
+ bool hasOneUse(unsigned RegNo) const {
+ use_iterator UI = use_begin(RegNo);
+ if (UI == use_end())
+ return false;
+ return ++UI == use_end();
+ }
+
+ /// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
+ /// specified register, skipping those marked as Debug.
+ typedef defusechain_iterator<true,false,true,true,false,false>
+ use_nodbg_iterator;
+ use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
+ return use_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_nodbg_iterator use_nodbg_end() {
+ return use_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<use_nodbg_iterator>
+ use_nodbg_operands(unsigned Reg) const {
+ return make_range(use_nodbg_begin(Reg), use_nodbg_end());
+ }
+
+ /// use_instr_nodbg_iterator/use_instr_nodbg_begin/use_instr_nodbg_end - Walk
+ /// all uses of the specified register, stepping by MachineInstr, skipping
+ /// those marked as Debug.
+ typedef defusechain_instr_iterator<true,false,true,false,true,false>
+ use_instr_nodbg_iterator;
+ use_instr_nodbg_iterator use_instr_nodbg_begin(unsigned RegNo) const {
+ return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_instr_nodbg_iterator use_instr_nodbg_end() {
+ return use_instr_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<use_instr_nodbg_iterator>
+ use_nodbg_instructions(unsigned Reg) const {
+ return make_range(use_instr_nodbg_begin(Reg), use_instr_nodbg_end());
+ }
+
+ /// use_bundle_nodbg_iterator/use_bundle_nodbg_begin/use_bundle_nodbg_end - Walk
+ /// all uses of the specified register, stepping by bundle, skipping
+ /// those marked as Debug.
+ typedef defusechain_instr_iterator<true,false,true,false,false,true>
+ use_bundle_nodbg_iterator;
+ use_bundle_nodbg_iterator use_bundle_nodbg_begin(unsigned RegNo) const {
+ return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
+ return use_bundle_nodbg_iterator(nullptr);
+ }
+
+ inline iterator_range<use_bundle_nodbg_iterator>
+ use_nodbg_bundles(unsigned Reg) const {
+ return make_range(use_bundle_nodbg_begin(Reg), use_bundle_nodbg_end());
+ }
+
+ /// use_nodbg_empty - Return true if there are no non-Debug instructions
+ /// using the specified register.
+ bool use_nodbg_empty(unsigned RegNo) const {
+ return use_nodbg_begin(RegNo) == use_nodbg_end();
+ }
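+
+ // Sketch: count the non-debug uses of a register, assuming a hypothetical
+ // MachineRegisterInfo reference MRI and register number Reg:
+ //
+ //   unsigned NumUses = 0;
+ //   for (const MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
+ //     (void)MO; // only counting
+ //     ++NumUses;
+ //   }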
+
+ /// hasOneNonDBGUse - Return true if there is exactly one non-Debug
+ /// instruction using the specified register.
+ bool hasOneNonDBGUse(unsigned RegNo) const;
+
+ /// replaceRegWith - Replace all instances of FromReg with ToReg in the
+ /// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
+ /// except that it also changes any definitions of the register.
+ ///
+ /// Note that it is usually necessary to first constrain ToReg's register
+ /// class to match the FromReg constraints using:
+ ///
+ /// constrainRegClass(ToReg, getRegClass(FromReg))
+ ///
+ /// That function will return NULL if the virtual registers have incompatible
+ /// constraints.
+ ///
+ /// Note that if ToReg is a physical register, the function will apply any
+ /// existing sub-register indices to it in order to obtain the final/proper
+ /// physical register.
+ void replaceRegWith(unsigned FromReg, unsigned ToReg);
+
+ /// getVRegDef - Return the machine instr that defines the specified virtual
+ /// register or null if none is found. This assumes that the code is in SSA
+ /// form, so there should only be one definition.
+ MachineInstr *getVRegDef(unsigned Reg) const;
+
+ /// getUniqueVRegDef - Return the unique machine instr that defines the
+ /// specified virtual register or null if none is found. If there are
+ /// multiple definitions or no definition, return null.
+ MachineInstr *getUniqueVRegDef(unsigned Reg) const;
+
+ /// clearKillFlags - Iterate over all the uses of the given register and
+ /// clear the kill flag from the MachineOperand. This function is used by
+ /// optimization passes which extend register lifetimes and need only
+ /// preserve conservative kill flag information.
+ void clearKillFlags(unsigned Reg) const;
+
+#ifndef NDEBUG
+ void dumpUses(unsigned RegNo) const;
+#endif
+
+ /// isConstantPhysReg - Returns true if PhysReg is unallocatable and constant
+ /// throughout the function. It is safe to move instructions that read such
+ /// a physreg.
+ bool isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const;
+
+ /// Get an iterator over the pressure sets affected by the given physical or
+ /// virtual register. If RegUnit is physical, it must be a register unit (from
+ /// MCRegUnitIterator).
+ PSetIterator getPressureSets(unsigned RegUnit) const;
+
+ //===--------------------------------------------------------------------===//
+ // Virtual Register Info
+ //===--------------------------------------------------------------------===//
+
+ /// getRegClass - Return the register class of the specified virtual register.
+ ///
+ const TargetRegisterClass *getRegClass(unsigned Reg) const {
+ return VRegInfo[Reg].first;
+ }
+
+ /// setRegClass - Set the register class of the specified virtual register.
+ ///
+ void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
+
+ /// constrainRegClass - Constrain the register class of the specified virtual
+ /// register to be a common subclass of RC and the current register class,
+ /// but only if the new class has at least MinNumRegs registers. Return the
+ /// new register class, or NULL if no such class exists.
+ /// This should only be used when the constraint is known to be trivial, like
+ /// GR32 -> GR32_NOSP. Beware of increasing register pressure.
+ ///
+ const TargetRegisterClass *constrainRegClass(unsigned Reg,
+ const TargetRegisterClass *RC,
+ unsigned MinNumRegs = 0);
+
+ /// recomputeRegClass - Try to find a legal super-class of Reg's register
+ /// class that still satisfies the constraints from the instructions using
+ /// Reg. Returns true if Reg was upgraded.
+ ///
+ /// This method can be used after constraints have been removed from a
+ /// virtual register, for example after removing instructions or splitting
+ /// the live range.
+ ///
+ bool recomputeRegClass(unsigned Reg);
+
+ /// createVirtualRegister - Create and return a new virtual register in the
+ /// function with the specified register class.
+ ///
+ unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
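+
+ // Sketch: create a fresh vreg and define it, assuming hypothetical MRI, TII,
+ // MBB, insertion point MI and DebugLoc DL; the register class is
+ // target-specific (X86's GR32 used purely for illustration):
+ //
+ //   unsigned NewVReg = MRI.createVirtualRegister(&X86::GR32RegClass);
+ //   BuildMI(MBB, MI, DL, TII->get(X86::MOV32ri), NewVReg).addImm(0);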
+
+ /// getNumVirtRegs - Return the number of virtual registers created.
+ ///
+ unsigned getNumVirtRegs() const { return VRegInfo.size(); }
+
+ /// clearVirtRegs - Remove all virtual registers (after physreg assignment).
+ void clearVirtRegs();
+
+ /// setRegAllocationHint - Specify a register allocation hint for the
+ /// specified virtual register.
+ void setRegAllocationHint(unsigned VReg, unsigned Type, unsigned PrefReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(VReg));
+ RegAllocHints[VReg].first = Type;
+ RegAllocHints[VReg].second = PrefReg;
+ }
+
+ /// Specify the preferred register allocation hint for the specified virtual
+ /// register.
+ void setSimpleHint(unsigned VReg, unsigned PrefReg) {
+ setRegAllocationHint(VReg, /*Type=*/0, PrefReg);
+ }
+
+ /// getRegAllocationHint - Return the register allocation hint for the
+ /// specified virtual register.
+ std::pair<unsigned, unsigned>
+ getRegAllocationHint(unsigned VReg) const {
+ assert(TargetRegisterInfo::isVirtualRegister(VReg));
+ return RegAllocHints[VReg];
+ }
+
+ /// getSimpleHint - Return the preferred register allocation hint, or 0 if a
+ /// standard simple hint (Type == 0) is not set.
+ unsigned getSimpleHint(unsigned VReg) const {
+ assert(TargetRegisterInfo::isVirtualRegister(VReg));
+ std::pair<unsigned, unsigned> Hint = getRegAllocationHint(VReg);
+ return Hint.first ? 0 : Hint.second;
+ }
+
+ /// markUsesInDebugValueAsUndef - Mark every DBG_VALUE referencing the
+ /// specified register as undefined, which causes the DBG_VALUE to be
+ /// deleted during LiveDebugVariables analysis.
+ void markUsesInDebugValueAsUndef(unsigned Reg) const;
+
+ /// Return true if the specified register is modified in this function.
+ /// This checks that no defining machine operands exist for the register or
+ /// any of its aliases. Definitions found on functions marked noreturn are
+ /// ignored. The register is also considered modified when it is set in the
+ /// UsedPhysRegMask.
+ bool isPhysRegModified(unsigned PhysReg) const;
+
+ /// Return true if the specified register is modified or read in this
+ /// function. This checks that no machine operands exist for the register or
+ /// any of its aliases. The register is also considered used when it is set
+ /// in the UsedPhysRegMask.
+ bool isPhysRegUsed(unsigned PhysReg) const;
+
+ /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used.
+ /// This corresponds to the bit mask attached to register mask operands.
+ void addPhysRegsUsedFromRegMask(const uint32_t *RegMask) {
+ UsedPhysRegMask.setBitsNotInMask(RegMask);
+ }
+
+ const BitVector &getUsedPhysRegsMask() const { return UsedPhysRegMask; }
+
+ void setUsedPhysRegMask(BitVector &Mask) { UsedPhysRegMask = Mask; }
+
+ //===--------------------------------------------------------------------===//
+ // Reserved Register Info
+ //===--------------------------------------------------------------------===//
+ //
+ // The set of reserved registers must be invariant during register
+ // allocation. For example, the target cannot suddenly decide it needs a
+ // frame pointer when the register allocator has already used the frame
+ // pointer register for something else.
+ //
+ // These methods can be used by target hooks like hasFP() to avoid changing
+ // the reserved register set during register allocation.
+
+ /// freezeReservedRegs - Called by the register allocator to freeze the set
+ /// of reserved registers before allocation begins.
+ void freezeReservedRegs(const MachineFunction&);
+
+ /// reservedRegsFrozen - Returns true after freezeReservedRegs() was called
+ /// to ensure the set of reserved registers stays constant.
+ bool reservedRegsFrozen() const {
+ return !ReservedRegs.empty();
+ }
+
+ /// canReserveReg - Returns true if PhysReg can be used as a reserved
+ /// register. Any register can be reserved before freezeReservedRegs() is
+ /// called.
+ bool canReserveReg(unsigned PhysReg) const {
+ return !reservedRegsFrozen() || ReservedRegs.test(PhysReg);
+ }
+
+ /// getReservedRegs - Returns a reference to the frozen set of reserved
+ /// registers. This method should always be preferred to calling
+ /// TRI::getReservedRegs() when possible.
+ const BitVector &getReservedRegs() const {
+ assert(reservedRegsFrozen() &&
+ "Reserved registers haven't been frozen yet. "
+ "Use TRI::getReservedRegs().");
+ return ReservedRegs;
+ }
+
+ /// isReserved - Returns true when PhysReg is a reserved register.
+ ///
+ /// Reserved registers may belong to an allocatable register class, but the
+ /// target has explicitly requested that they are not used.
+ ///
+ bool isReserved(unsigned PhysReg) const {
+ return getReservedRegs().test(PhysReg);
+ }
+
+ /// isAllocatable - Returns true when PhysReg belongs to an allocatable
+ /// register class and it hasn't been reserved.
+ ///
+ /// Allocatable registers may show up in the allocation order of some virtual
+ /// register, so a register allocator needs to track its liveness and
+ /// availability.
+ bool isAllocatable(unsigned PhysReg) const {
+ return getTargetRegisterInfo()->isInAllocatableClass(PhysReg) &&
+ !isReserved(PhysReg);
+ }
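+
+  // Sketch: once freezeReservedRegs() has run, a pass can safely partition
+  // the physical registers (illustrative loop; Candidates is a hypothetical
+  // worklist):
+  //
+  //   for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) {
+  //     if (MRI.isReserved(Reg))
+  //       continue;                 // never allocate or track liveness
+  //     if (MRI.isAllocatable(Reg))
+  //       Candidates.push_back(Reg);
+  //   }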
+
+ //===--------------------------------------------------------------------===//
+ // LiveIn Management
+ //===--------------------------------------------------------------------===//
+
+ /// addLiveIn - Add the specified register as a live-in. Note that it
+ /// is an error to add the same register to the same set more than once.
+ void addLiveIn(unsigned Reg, unsigned vreg = 0) {
+ LiveIns.push_back(std::make_pair(Reg, vreg));
+ }
+
+ // Iteration support for the live-ins set. It's kept in sorted order
+ // by register number.
+ typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
+ livein_iterator;
+ livein_iterator livein_begin() const { return LiveIns.begin(); }
+ livein_iterator livein_end() const { return LiveIns.end(); }
+ bool livein_empty() const { return LiveIns.empty(); }
+
+ bool isLiveIn(unsigned Reg) const;
+
+ /// getLiveInPhysReg - If VReg is a live-in virtual register, return the
+ /// corresponding live-in physical register.
+ unsigned getLiveInPhysReg(unsigned VReg) const;
+
+ /// getLiveInVirtReg - If PReg is a live-in physical register, return the
+  /// corresponding live-in virtual register.
+ unsigned getLiveInVirtReg(unsigned PReg) const;
+
+ /// EmitLiveInCopies - Emit copies to initialize livein virtual registers
+ /// into the given entry block.
+ void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
+ const TargetRegisterInfo &TRI,
+ const TargetInstrInfo &TII);
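+
+  // Sketch of typical live-in setup during argument lowering (illustrative;
+  // X86::EDI stands in for whichever physreg the ABI assigns):
+  //
+  //   unsigned VReg = MRI.createVirtualRegister(RC);
+  //   MRI.addLiveIn(X86::EDI, VReg);
+  //   // After instruction selection, materialize the copies:
+  //   MRI.EmitLiveInCopies(&MF.front(), TRI, TII);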
+
+ /// Returns a mask covering all bits that can appear in lane masks of
+ /// subregisters of the virtual register @p Reg.
+ LaneBitmask getMaxLaneMaskForVReg(unsigned Reg) const;
+
+  /// defusechain_iterator - This class provides iterator support for machine
+  /// operands in the function that use or define a specific register. If
+  /// ReturnUses is true it returns uses of registers; if ReturnDefs is true it
+  /// returns defs. If neither is true then you are silly and it always
+  /// returns end(). If SkipDebug is true it skips uses marked Debug
+  /// when incrementing.
+ template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+ bool ByOperand, bool ByInstr, bool ByBundle>
+ class defusechain_iterator
+ : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+ MachineOperand *Op;
+ explicit defusechain_iterator(MachineOperand *op) : Op(op) {
+ // If the first node isn't one we're interested in, advance to one that
+ // we are interested in.
+ if (op) {
+ if ((!ReturnUses && op->isUse()) ||
+ (!ReturnDefs && op->isDef()) ||
+ (SkipDebug && op->isDebug()))
+ advance();
+ }
+ }
+ friend class MachineRegisterInfo;
+
+ void advance() {
+ assert(Op && "Cannot increment end iterator!");
+ Op = getNextOperandForReg(Op);
+
+ // All defs come before the uses, so stop def_iterator early.
+ if (!ReturnUses) {
+ if (Op) {
+ if (Op->isUse())
+ Op = nullptr;
+ else
+ assert(!Op->isDebug() && "Can't have debug defs");
+ }
+ } else {
+ // If this is an operand we don't care about, skip it.
+ while (Op && ((!ReturnDefs && Op->isDef()) ||
+ (SkipDebug && Op->isDebug())))
+ Op = getNextOperandForReg(Op);
+ }
+ }
+ public:
+ typedef std::iterator<std::forward_iterator_tag,
+ MachineInstr, ptrdiff_t>::reference reference;
+ typedef std::iterator<std::forward_iterator_tag,
+ MachineInstr, ptrdiff_t>::pointer pointer;
+
+ defusechain_iterator() : Op(nullptr) {}
+
+ bool operator==(const defusechain_iterator &x) const {
+ return Op == x.Op;
+ }
+ bool operator!=(const defusechain_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// atEnd - return true if this iterator is equal to reg_end() on the value.
+ bool atEnd() const { return Op == nullptr; }
+
+ // Iterator traversal: forward iteration only
+ defusechain_iterator &operator++() { // Preincrement
+ assert(Op && "Cannot increment end iterator!");
+ if (ByOperand)
+ advance();
+ else if (ByInstr) {
+ MachineInstr *P = Op->getParent();
+ do {
+ advance();
+ } while (Op && Op->getParent() == P);
+ } else if (ByBundle) {
+ MachineInstr *P = getBundleStart(Op->getParent());
+ do {
+ advance();
+ } while (Op && getBundleStart(Op->getParent()) == P);
+ }
+
+ return *this;
+ }
+ defusechain_iterator operator++(int) { // Postincrement
+ defusechain_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ /// getOperandNo - Return the operand # of this MachineOperand in its
+ /// MachineInstr.
+ unsigned getOperandNo() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op - &Op->getParent()->getOperand(0);
+ }
+
+ // Retrieve a reference to the current operand.
+ MachineOperand &operator*() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return *Op;
+ }
+
+ MachineOperand *operator->() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op;
+ }
+ };
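+
+  // Sketch: walking every use of a register with the iterator above, via the
+  // use_iterator typedef this template instantiates (declared earlier in
+  // MachineRegisterInfo):
+  //
+  //   for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),
+  //        UE = MRI.use_end(); UI != UE; ++UI)
+  //     UI->setIsKill(false);       // e.g. drop stale kill flags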
+
+  /// defusechain_instr_iterator - This class provides iterator support for
+  /// machine instructions in the function that use or define a specific
+  /// register. If ReturnUses is true it returns uses of registers; if
+  /// ReturnDefs is true it returns defs. If neither is true then you are
+  /// silly and it always returns end(). If SkipDebug is true it skips uses
+  /// marked Debug when incrementing.
+ template<bool ReturnUses, bool ReturnDefs, bool SkipDebug,
+ bool ByOperand, bool ByInstr, bool ByBundle>
+ class defusechain_instr_iterator
+ : public std::iterator<std::forward_iterator_tag, MachineInstr, ptrdiff_t> {
+ MachineOperand *Op;
+ explicit defusechain_instr_iterator(MachineOperand *op) : Op(op) {
+ // If the first node isn't one we're interested in, advance to one that
+ // we are interested in.
+ if (op) {
+ if ((!ReturnUses && op->isUse()) ||
+ (!ReturnDefs && op->isDef()) ||
+ (SkipDebug && op->isDebug()))
+ advance();
+ }
+ }
+ friend class MachineRegisterInfo;
+
+ void advance() {
+ assert(Op && "Cannot increment end iterator!");
+ Op = getNextOperandForReg(Op);
+
+ // All defs come before the uses, so stop def_iterator early.
+ if (!ReturnUses) {
+ if (Op) {
+ if (Op->isUse())
+ Op = nullptr;
+ else
+ assert(!Op->isDebug() && "Can't have debug defs");
+ }
+ } else {
+ // If this is an operand we don't care about, skip it.
+ while (Op && ((!ReturnDefs && Op->isDef()) ||
+ (SkipDebug && Op->isDebug())))
+ Op = getNextOperandForReg(Op);
+ }
+ }
+ public:
+ typedef std::iterator<std::forward_iterator_tag,
+ MachineInstr, ptrdiff_t>::reference reference;
+ typedef std::iterator<std::forward_iterator_tag,
+ MachineInstr, ptrdiff_t>::pointer pointer;
+
+ defusechain_instr_iterator() : Op(nullptr) {}
+
+ bool operator==(const defusechain_instr_iterator &x) const {
+ return Op == x.Op;
+ }
+ bool operator!=(const defusechain_instr_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// atEnd - return true if this iterator is equal to reg_end() on the value.
+ bool atEnd() const { return Op == nullptr; }
+
+ // Iterator traversal: forward iteration only
+ defusechain_instr_iterator &operator++() { // Preincrement
+ assert(Op && "Cannot increment end iterator!");
+ if (ByOperand)
+ advance();
+ else if (ByInstr) {
+ MachineInstr *P = Op->getParent();
+ do {
+ advance();
+ } while (Op && Op->getParent() == P);
+ } else if (ByBundle) {
+ MachineInstr *P = getBundleStart(Op->getParent());
+ do {
+ advance();
+ } while (Op && getBundleStart(Op->getParent()) == P);
+ }
+
+ return *this;
+ }
+ defusechain_instr_iterator operator++(int) { // Postincrement
+ defusechain_instr_iterator tmp = *this; ++*this; return tmp;
+ }
+
+    // Retrieve a reference to the current instruction.
+ MachineInstr &operator*() const {
+ assert(Op && "Cannot dereference end iterator!");
+ if (ByBundle) return *(getBundleStart(Op->getParent()));
+ return *Op->getParent();
+ }
+
+ MachineInstr *operator->() const {
+ assert(Op && "Cannot dereference end iterator!");
+ if (ByBundle) return getBundleStart(Op->getParent());
+ return Op->getParent();
+ }
+ };
+};
+
+/// Iterate over the pressure sets affected by the given physical or virtual
+/// register. If Reg is physical, it must be a register unit (from
+/// MCRegUnitIterator).
+class PSetIterator {
+ const int *PSet;
+ unsigned Weight;
+public:
+ PSetIterator(): PSet(nullptr), Weight(0) {}
+ PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
+ const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
+ if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
+ const TargetRegisterClass *RC = MRI->getRegClass(RegUnit);
+ PSet = TRI->getRegClassPressureSets(RC);
+ Weight = TRI->getRegClassWeight(RC).RegWeight;
+ }
+ else {
+ PSet = TRI->getRegUnitPressureSets(RegUnit);
+ Weight = TRI->getRegUnitWeight(RegUnit);
+ }
+ if (*PSet == -1)
+ PSet = nullptr;
+ }
+ bool isValid() const { return PSet; }
+
+ unsigned getWeight() const { return Weight; }
+
+ unsigned operator*() const { return *PSet; }
+
+ void operator++() {
+ assert(isValid() && "Invalid PSetIterator.");
+ ++PSet;
+ if (*PSet == -1)
+ PSet = nullptr;
+ }
+};
+
+inline PSetIterator MachineRegisterInfo::
+getPressureSets(unsigned RegUnit) const {
+ return PSetIterator(RegUnit, this);
+}
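+
+// Sketch: accumulating pressure-set weights for a virtual register, in the
+// style of the register pressure trackers (illustrative; CurrSetPressure is
+// a hypothetical per-set counter array):
+//
+//   for (PSetIterator PSetI = MRI.getPressureSets(VReg); PSetI.isValid();
+//        ++PSetI)
+//     CurrSetPressure[*PSetI] += PSetI.getWeight();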
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h b/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
new file mode 100644
index 0000000..5f988ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineSSAUpdater.h
@@ -0,0 +1,116 @@
+//===-- MachineSSAUpdater.h - Unstructured SSA Update Tool ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachineSSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H
+#define LLVM_CODEGEN_MACHINESSAUPDATER_H
+
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class MachineOperand;
+ class MachineRegisterInfo;
+ class TargetInstrInfo;
+ class TargetRegisterClass;
+ template<typename T> class SmallVectorImpl;
+ template<typename T> class SSAUpdaterTraits;
+
+/// MachineSSAUpdater - This class updates SSA form for a set of virtual
+/// registers defined in multiple blocks. This is used when code duplication
+/// or another unstructured transformation wants to rewrite a set of uses of one
+/// vreg with uses of a set of vregs.
+class MachineSSAUpdater {
+ friend class SSAUpdaterTraits<MachineSSAUpdater>;
+
+private:
+ /// AvailableVals - This keeps track of which value to use on a per-block
+ /// basis. When we insert PHI nodes, we keep track of them here.
+ //typedef DenseMap<MachineBasicBlock*, unsigned > AvailableValsTy;
+ void *AV;
+
+ /// VR - Current virtual register whose uses are being updated.
+ unsigned VR;
+
+ /// VRC - Register class of the current virtual register.
+ const TargetRegisterClass *VRC;
+
+ /// InsertedPHIs - If this is non-null, the MachineSSAUpdater adds all PHI
+ /// nodes that it creates to the vector.
+ SmallVectorImpl<MachineInstr*> *InsertedPHIs;
+
+ const TargetInstrInfo *TII;
+ MachineRegisterInfo *MRI;
+public:
+ /// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
+ /// filled in with all PHI Nodes created by rewriting.
+ explicit MachineSSAUpdater(MachineFunction &MF,
+ SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
+ ~MachineSSAUpdater();
+
+ /// Initialize - Reset this object to get ready for a new set of SSA
+ /// updates.
+ void Initialize(unsigned V);
+
+ /// AddAvailableValue - Indicate that a rewritten value is available at the
+ /// end of the specified block with the specified value.
+ void AddAvailableValue(MachineBasicBlock *BB, unsigned V);
+
+ /// HasValueForBlock - Return true if the MachineSSAUpdater already has a
+ /// value for the specified block.
+ bool HasValueForBlock(MachineBasicBlock *BB) const;
+
+ /// GetValueAtEndOfBlock - Construct SSA form, materializing a value that is
+ /// live at the end of the specified block.
+ unsigned GetValueAtEndOfBlock(MachineBasicBlock *BB);
+
+ /// GetValueInMiddleOfBlock - Construct SSA form, materializing a value that
+ /// is live in the middle of the specified block.
+ ///
+ /// GetValueInMiddleOfBlock is the same as GetValueAtEndOfBlock except in one
+ /// important case: if there is a definition of the rewritten value after the
+ /// 'use' in BB. Consider code like this:
+ ///
+ /// X1 = ...
+ /// SomeBB:
+ /// use(X)
+ /// X2 = ...
+ /// br Cond, SomeBB, OutBB
+ ///
+ /// In this case, there are two values (X1 and X2) added to the AvailableVals
+ /// set by the client of the rewriter, and those values are both live out of
+ /// their respective blocks. However, the use of X happens in the *middle* of
+ /// a block. Because of this, we need to insert a new PHI node in SomeBB to
+ /// merge the appropriate values, and this value isn't live out of the block.
+ ///
+ unsigned GetValueInMiddleOfBlock(MachineBasicBlock *BB);
+
+ /// RewriteUse - Rewrite a use of the symbolic value. This handles PHI nodes,
+ /// which use their value in the corresponding predecessor. Note that this
+ /// will not work if the use is supposed to be rewritten to a value defined in
+ /// the same block as the use, but above it. Any 'AddAvailableValue's added
+ /// for the use's block will be considered to be below it.
+ void RewriteUse(MachineOperand &U);
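+
+  // Typical rewrite sequence (sketch; the client gathers Uses and creates
+  // NewReg1/NewReg2 beforehand):
+  //
+  //   MachineSSAUpdater SSAUpdate(MF);
+  //   SSAUpdate.Initialize(OrigReg);
+  //   SSAUpdate.AddAvailableValue(DefBB1, NewReg1);
+  //   SSAUpdate.AddAvailableValue(DefBB2, NewReg2);
+  //   for (MachineOperand *U : Uses)
+  //     SSAUpdate.RewriteUse(*U);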
+
+private:
+ unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB);
+
+ void operator=(const MachineSSAUpdater&) = delete;
+ MachineSSAUpdater(const MachineSSAUpdater&) = delete;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
new file mode 100644
index 0000000..358fd5a
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineScheduler.h
@@ -0,0 +1,964 @@
+//==- MachineScheduler.h - MachineInstr Scheduling Pass ----------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an interface for customizing the standard MachineScheduler
+// pass. Note that the entire pass may be replaced as follows:
+//
+// <Target>TargetMachine::createPassConfig(PassManagerBase &PM) {
+// PM.substitutePass(&MachineSchedulerID, &CustomSchedulerPassID);
+// ...}
+//
+// The MachineScheduler pass is only responsible for choosing the regions to be
+// scheduled. Targets can override the DAG builder and scheduler without
+// replacing the pass as follows:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+// return new CustomMachineScheduler(C);
+// }
+//
+// The default scheduler, ScheduleDAGMILive, builds the DAG and drives list
+// scheduling while updating the instruction stream, register pressure, and live
+// intervals. Most targets don't need to override the DAG builder and list
+// scheduler, but subtargets that require custom scheduling heuristics may
+// plug in an alternate MachineSchedStrategy. The strategy is responsible for
+// selecting the highest priority node from the list:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+// return new ScheduleDAGMI(C, CustomStrategy(C));
+// }
+//
+// The DAG builder can also be customized in a sense by adding DAG mutations
+// that will run after DAG building and before list scheduling. DAG mutations
+// can adjust dependencies based on target-specific knowledge or add weak edges
+// to aid heuristics:
+//
+// ScheduleDAGInstrs *<Target>PassConfig::
+// createMachineScheduler(MachineSchedContext *C) {
+// ScheduleDAGMI *DAG = new ScheduleDAGMI(C, CustomStrategy(C));
+// DAG->addMutation(new CustomDependencies(DAG->TII, DAG->TRI));
+// return DAG;
+// }
+//
+// A target that supports alternative schedulers can use the
+// MachineSchedRegistry to allow command line selection. This can be done by
+// implementing the following boilerplate:
+//
+// static ScheduleDAGInstrs *createCustomMachineSched(MachineSchedContext *C) {
+// return new CustomMachineScheduler(C);
+// }
+// static MachineSchedRegistry
+// SchedCustomRegistry("custom", "Run my target's custom scheduler",
+// createCustomMachineSched);
+//
+// Finally, subtargets that don't need to implement custom heuristics but would
+// like to configure the GenericScheduler's policy for a given scheduler region,
+// including scheduling direction and register pressure tracking policy, can do
+// this:
+//
+// void <SubTarget>Subtarget::
+// overrideSchedPolicy(MachineSchedPolicy &Policy,
+// MachineInstr *begin,
+// MachineInstr *end,
+// unsigned NumRegionInstrs) const {
+// Policy.<Flag> = true;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINESCHEDULER_H
+#define LLVM_CODEGEN_MACHINESCHEDULER_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include <memory>
+
+namespace llvm {
+
+extern cl::opt<bool> ForceTopDown;
+extern cl::opt<bool> ForceBottomUp;
+
+class LiveIntervals;
+class MachineDominatorTree;
+class MachineLoopInfo;
+class RegisterClassInfo;
+class ScheduleDAGInstrs;
+class SchedDFSResult;
+class ScheduleHazardRecognizer;
+
+/// MachineSchedContext provides enough context from the MachineScheduler pass
+/// for the target to instantiate a scheduler.
+struct MachineSchedContext {
+ MachineFunction *MF;
+ const MachineLoopInfo *MLI;
+ const MachineDominatorTree *MDT;
+ const TargetPassConfig *PassConfig;
+ AliasAnalysis *AA;
+ LiveIntervals *LIS;
+
+ RegisterClassInfo *RegClassInfo;
+
+ MachineSchedContext();
+ virtual ~MachineSchedContext();
+};
+
+/// MachineSchedRegistry provides a selection of available machine instruction
+/// schedulers.
+class MachineSchedRegistry : public MachinePassRegistryNode {
+public:
+ typedef ScheduleDAGInstrs *(*ScheduleDAGCtor)(MachineSchedContext *);
+
+ // RegisterPassParser requires a (misnamed) FunctionPassCtor type.
+ typedef ScheduleDAGCtor FunctionPassCtor;
+
+ static MachinePassRegistry Registry;
+
+ MachineSchedRegistry(const char *N, const char *D, ScheduleDAGCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+ Registry.Add(this);
+ }
+ ~MachineSchedRegistry() { Registry.Remove(this); }
+
+ // Accessors.
+ //
+ MachineSchedRegistry *getNext() const {
+ return (MachineSchedRegistry *)MachinePassRegistryNode::getNext();
+ }
+ static MachineSchedRegistry *getList() {
+ return (MachineSchedRegistry *)Registry.getList();
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+};
+
+class ScheduleDAGMI;
+
+/// Define a generic scheduling policy for targets that don't provide their own
+/// MachineSchedStrategy. This can be overridden for each scheduling region
+/// before building the DAG.
+struct MachineSchedPolicy {
+ // Allow the scheduler to disable register pressure tracking.
+ bool ShouldTrackPressure;
+
+ // Allow the scheduler to force top-down or bottom-up scheduling. If neither
+ // is true, the scheduler runs in both directions and converges.
+ bool OnlyTopDown;
+ bool OnlyBottomUp;
+
+ // Disable heuristic that tries to fetch nodes from long dependency chains
+ // first.
+ bool DisableLatencyHeuristic;
+
+ MachineSchedPolicy(): ShouldTrackPressure(false), OnlyTopDown(false),
+ OnlyBottomUp(false), DisableLatencyHeuristic(false) {}
+};
+
+/// MachineSchedStrategy - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Initialization sequence:
+/// initPolicy -> shouldTrackPressure -> initialize(DAG) -> registerRoots
+class MachineSchedStrategy {
+ virtual void anchor();
+public:
+ virtual ~MachineSchedStrategy() {}
+
+ /// Optionally override the per-region scheduling policy.
+ virtual void initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) {}
+
+ virtual void dumpPolicy() {}
+
+ /// Check if pressure tracking is needed before building the DAG and
+ /// initializing this strategy. Called after initPolicy.
+ virtual bool shouldTrackPressure() const { return true; }
+
+ /// Initialize the strategy after building the DAG for a new region.
+ virtual void initialize(ScheduleDAGMI *DAG) = 0;
+
+ /// Notify this strategy that all roots have been released (including those
+ /// that depend on EntrySU or ExitSU).
+ virtual void registerRoots() {}
+
+ /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to
+ /// schedule the node at the top of the unscheduled region. Otherwise it will
+ /// be scheduled at the bottom.
+ virtual SUnit *pickNode(bool &IsTopNode) = 0;
+
+ /// \brief Scheduler callback to notify that a new subtree is scheduled.
+ virtual void scheduleTree(unsigned SubtreeID) {}
+
+ /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an
+ /// instruction and updated scheduled/remaining flags in the DAG nodes.
+ virtual void schedNode(SUnit *SU, bool IsTopNode) = 0;
+
+ /// When all predecessor dependencies have been resolved, free this node for
+ /// top-down scheduling.
+ virtual void releaseTopNode(SUnit *SU) = 0;
+ /// When all successor dependencies have been resolved, free this node for
+ /// bottom-up scheduling.
+ virtual void releaseBottomNode(SUnit *SU) = 0;
+};
+
+/// Mutate the DAG as a postpass after normal DAG building.
+class ScheduleDAGMutation {
+ virtual void anchor();
+public:
+ virtual ~ScheduleDAGMutation() {}
+
+ virtual void apply(ScheduleDAGMI *DAG) = 0;
+};
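+
+// Sketch of a DAG mutation (illustrative): a weak Cluster edge biases the
+// strategy toward keeping two nodes together without forcing a hard order.
+// pickClusterSucc() is a hypothetical target helper.
+//
+//   struct ClusterNeighbors : ScheduleDAGMutation {
+//     void apply(ScheduleDAGMI *DAG) override {
+//       for (SUnit &SU : DAG->SUnits)
+//         if (SUnit *Succ = pickClusterSucc(SU))
+//           DAG->addEdge(Succ, SDep(&SU, SDep::Cluster));
+//     }
+//   };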
+
+/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply
+/// schedules machine instructions according to the given MachineSchedStrategy
+/// without much extra book-keeping. This is the common functionality between
+/// PreRA and PostRA MachineScheduler.
+class ScheduleDAGMI : public ScheduleDAGInstrs {
+protected:
+ AliasAnalysis *AA;
+ LiveIntervals *LIS;
+ std::unique_ptr<MachineSchedStrategy> SchedImpl;
+
+ /// Topo - A topological ordering for SUnits which permits fast IsReachable
+ /// and similar queries.
+ ScheduleDAGTopologicalSort Topo;
+
+ /// Ordered list of DAG postprocessing steps.
+ std::vector<std::unique_ptr<ScheduleDAGMutation>> Mutations;
+
+ /// The top of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentTop;
+
+ /// The bottom of the unscheduled zone.
+ MachineBasicBlock::iterator CurrentBottom;
+
+ /// Record the next node in a scheduled cluster.
+ const SUnit *NextClusterPred;
+ const SUnit *NextClusterSucc;
+
+#ifndef NDEBUG
+ /// The number of instructions scheduled so far. Used to cut off the
+ /// scheduler at the point determined by misched-cutoff.
+ unsigned NumInstrsScheduled;
+#endif
+public:
+ ScheduleDAGMI(MachineSchedContext *C, std::unique_ptr<MachineSchedStrategy> S,
+ bool RemoveKillFlags)
+ : ScheduleDAGInstrs(*C->MF, C->MLI, RemoveKillFlags), AA(C->AA),
+ LIS(C->LIS), SchedImpl(std::move(S)), Topo(SUnits, &ExitSU),
+ CurrentTop(), CurrentBottom(), NextClusterPred(nullptr),
+ NextClusterSucc(nullptr) {
+#ifndef NDEBUG
+ NumInstrsScheduled = 0;
+#endif
+ }
+
+ // Provide a vtable anchor
+ ~ScheduleDAGMI() override;
+
+ // Returns LiveIntervals instance for use in DAG mutators and such.
+ LiveIntervals *getLIS() const { return LIS; }
+
+ /// Return true if this DAG supports VReg liveness and RegPressure.
+ virtual bool hasVRegLiveness() const { return false; }
+
+ /// Add a postprocessing step to the DAG builder.
+ /// Mutations are applied in the order that they are added after normal DAG
+ /// building and before MachineSchedStrategy initialization.
+ ///
+ /// ScheduleDAGMI takes ownership of the Mutation object.
+ void addMutation(std::unique_ptr<ScheduleDAGMutation> Mutation) {
+ Mutations.push_back(std::move(Mutation));
+ }
+
+ /// \brief True if an edge can be added from PredSU to SuccSU without creating
+ /// a cycle.
+ bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
+
+ /// \brief Add a DAG edge to the given SU with the given predecessor
+ /// dependence data.
+ ///
+ /// \returns true if the edge may be added without creating a cycle OR if an
+ /// equivalent edge already existed (false indicates failure).
+ bool addEdge(SUnit *SuccSU, const SDep &PredDep);
+
+ MachineBasicBlock::iterator top() const { return CurrentTop; }
+ MachineBasicBlock::iterator bottom() const { return CurrentBottom; }
+
+ /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+ /// region. This covers all instructions in a block, while schedule() may only
+ /// cover a subset.
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) override;
+
+ /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+ /// reorderable instructions.
+ void schedule() override;
+
+ /// Change the position of an instruction within the basic block and update
+ /// live ranges and region boundary iterators.
+ void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+
+ const SUnit *getNextClusterPred() const { return NextClusterPred; }
+
+ const SUnit *getNextClusterSucc() const { return NextClusterSucc; }
+
+ void viewGraph(const Twine &Name, const Twine &Title) override;
+ void viewGraph() override;
+
+protected:
+ // Top-Level entry points for the schedule() driver...
+
+ /// Apply each ScheduleDAGMutation step in order. This allows different
+ /// instances of ScheduleDAGMI to perform custom DAG postprocessing.
+ void postprocessDAG();
+
+ /// Release ExitSU predecessors and setup scheduler queues.
+ void initQueues(ArrayRef<SUnit*> TopRoots, ArrayRef<SUnit*> BotRoots);
+
+ /// Update scheduler DAG and queues after scheduling an instruction.
+ void updateQueues(SUnit *SU, bool IsTopNode);
+
+ /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues.
+ void placeDebugValues();
+
+ /// \brief dump the scheduled Sequence.
+ void dumpSchedule() const;
+
+ // Lesser helpers...
+ bool checkSchedLimit();
+
+ void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
+ SmallVectorImpl<SUnit*> &BotRoots);
+
+ void releaseSucc(SUnit *SU, SDep *SuccEdge);
+ void releaseSuccessors(SUnit *SU);
+ void releasePred(SUnit *SU, SDep *PredEdge);
+ void releasePredecessors(SUnit *SU);
+};
+
+/// ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules
+/// machine instructions while updating LiveIntervals and tracking regpressure.
+class ScheduleDAGMILive : public ScheduleDAGMI {
+protected:
+ RegisterClassInfo *RegClassInfo;
+
+ /// Information about DAG subtrees. If DFSResult is NULL, then SchedulerTrees
+ /// will be empty.
+ SchedDFSResult *DFSResult;
+ BitVector ScheduledTrees;
+
+ MachineBasicBlock::iterator LiveRegionEnd;
+
+ // Map each SU to its summary of pressure changes. This array is updated for
+ // liveness during bottom-up scheduling. Top-down scheduling may proceed but
+  // has no effect on the pressure diffs.
+ PressureDiffs SUPressureDiffs;
+
+ /// Register pressure in this region computed by initRegPressure.
+ bool ShouldTrackPressure;
+ IntervalPressure RegPressure;
+ RegPressureTracker RPTracker;
+
+ /// List of pressure sets that exceed the target's pressure limit before
+ /// scheduling, listed in increasing set ID order. Each pressure set is paired
+ /// with its max pressure in the currently scheduled regions.
+ std::vector<PressureChange> RegionCriticalPSets;
+
+ /// The top of the unscheduled zone.
+ IntervalPressure TopPressure;
+ RegPressureTracker TopRPTracker;
+
+ /// The bottom of the unscheduled zone.
+ IntervalPressure BotPressure;
+ RegPressureTracker BotRPTracker;
+
+public:
+ ScheduleDAGMILive(MachineSchedContext *C,
+ std::unique_ptr<MachineSchedStrategy> S)
+ : ScheduleDAGMI(C, std::move(S), /*RemoveKillFlags=*/false),
+ RegClassInfo(C->RegClassInfo), DFSResult(nullptr),
+ ShouldTrackPressure(false), RPTracker(RegPressure),
+ TopRPTracker(TopPressure), BotRPTracker(BotPressure) {}
+
+ ~ScheduleDAGMILive() override;
+
+ /// Return true if this DAG supports VReg liveness and RegPressure.
+ bool hasVRegLiveness() const override { return true; }
+
+ /// \brief Return true if register pressure tracking is enabled.
+ bool isTrackingPressure() const { return ShouldTrackPressure; }
+
+ /// Get current register pressure for the top scheduled instructions.
+ const IntervalPressure &getTopPressure() const { return TopPressure; }
+ const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
+
+ /// Get current register pressure for the bottom scheduled instructions.
+ const IntervalPressure &getBotPressure() const { return BotPressure; }
+ const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; }
+
+ /// Get register pressure for the entire scheduling region before scheduling.
+ const IntervalPressure &getRegPressure() const { return RegPressure; }
+
+ const std::vector<PressureChange> &getRegionCriticalPSets() const {
+ return RegionCriticalPSets;
+ }
+
+ PressureDiff &getPressureDiff(const SUnit *SU) {
+ return SUPressureDiffs[SU->NodeNum];
+ }
+
+ /// Compute a DFSResult after DAG building is complete, and before any
+ /// queue comparisons.
+ void computeDFSResult();
+
+ /// Return a non-null DFS result if the scheduling strategy initialized it.
+ const SchedDFSResult *getDFSResult() const { return DFSResult; }
+
+ BitVector &getScheduledTrees() { return ScheduledTrees; }
+
+ /// Implement the ScheduleDAGInstrs interface for handling the next scheduling
+ /// region. This covers all instructions in a block, while schedule() may only
+ /// cover a subset.
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) override;
+
+ /// Implement ScheduleDAGInstrs interface for scheduling a sequence of
+ /// reorderable instructions.
+ void schedule() override;
+
+ /// Compute the cyclic critical path through the DAG.
+ unsigned computeCyclicCriticalPath();
+
+protected:
+ // Top-Level entry points for the schedule() driver...
+
+ /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking
+ /// enabled. This sets up three trackers. RPTracker will cover the entire DAG
+ /// region, TopTracker and BottomTracker will be initialized to the top and
+  /// bottom of the DAG region without covering any unscheduled instruction.
+ void buildDAGWithRegPressure();
+
+ /// Move an instruction and update register pressure.
+ void scheduleMI(SUnit *SU, bool IsTopNode);
+
+ // Lesser helpers...
+
+ void initRegPressure();
+
+ void updatePressureDiffs(ArrayRef<unsigned> LiveUses);
+
+ void updateScheduledPressure(const SUnit *SU,
+ const std::vector<unsigned> &NewMaxPressure);
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// Helpers for implementing custom MachineSchedStrategy classes. These take
+/// care of the book-keeping associated with list scheduling heuristics.
+///
+//===----------------------------------------------------------------------===//
+
+/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience
+/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified
+/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in.
+///
+/// This is a convenience class that may be used by implementations of
+/// MachineSchedStrategy.
+class ReadyQueue {
+ unsigned ID;
+ std::string Name;
+ std::vector<SUnit*> Queue;
+
+public:
+ ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {}
+
+ unsigned getID() const { return ID; }
+
+ StringRef getName() const { return Name; }
+
+  // SU is in this queue if its NodeQueueId is a superset of this ID.
+ bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); }
+
+ bool empty() const { return Queue.empty(); }
+
+ void clear() { Queue.clear(); }
+
+ unsigned size() const { return Queue.size(); }
+
+ typedef std::vector<SUnit*>::iterator iterator;
+
+ iterator begin() { return Queue.begin(); }
+
+ iterator end() { return Queue.end(); }
+
+ ArrayRef<SUnit*> elements() { return Queue; }
+
+ iterator find(SUnit *SU) {
+ return std::find(Queue.begin(), Queue.end(), SU);
+ }
+
+ void push(SUnit *SU) {
+ Queue.push_back(SU);
+ SU->NodeQueueId |= ID;
+ }
+
+ iterator remove(iterator I) {
+ (*I)->NodeQueueId &= ~ID;
+ *I = Queue.back();
+ unsigned idx = I - Queue.begin();
+ Queue.pop_back();
+ return Queue.begin() + idx;
+ }
+
+ void dump();
+};
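+
+// Sketch: remove() swaps the victim with the back of the queue, so erase
+// loops must only advance on the keep path (illustrative; shouldDrop is a
+// hypothetical predicate):
+//
+//   for (ReadyQueue::iterator I = Q.begin(); I != Q.end();) {
+//     if (shouldDrop(*I))
+//       I = Q.remove(I);
+//     else
+//       ++I;
+//   }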
+
+/// Summarize the unscheduled region.
+struct SchedRemainder {
+ // Critical path through the DAG in expected latency.
+ unsigned CriticalPath;
+ unsigned CyclicCritPath;
+
+ // Scaled count of micro-ops left to schedule.
+ unsigned RemIssueCount;
+
+ bool IsAcyclicLatencyLimited;
+
+ // Unscheduled resources
+ SmallVector<unsigned, 16> RemainingCounts;
+
+ void reset() {
+ CriticalPath = 0;
+ CyclicCritPath = 0;
+ RemIssueCount = 0;
+ IsAcyclicLatencyLimited = false;
+ RemainingCounts.clear();
+ }
+
+ SchedRemainder() { reset(); }
+
+ void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel);
+};
+
+/// Each Scheduling boundary is associated with ready queues. It tracks the
+/// current cycle in the direction of movement, and maintains the state
+/// of "hazards" and other interlocks at the current cycle.
+class SchedBoundary {
+public:
+ /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both)
+ enum {
+ TopQID = 1,
+ BotQID = 2,
+ LogMaxQID = 2
+ };
+
+ ScheduleDAGMI *DAG;
+ const TargetSchedModel *SchedModel;
+ SchedRemainder *Rem;
+
+ ReadyQueue Available;
+ ReadyQueue Pending;
+
+ ScheduleHazardRecognizer *HazardRec;
+
+private:
+ /// True if the pending Q should be checked/updated before scheduling another
+ /// instruction.
+ bool CheckPending;
+
+ // For heuristics, keep a list of the nodes that immediately depend on the
+ // most recently scheduled node.
+ SmallPtrSet<const SUnit*, 8> NextSUs;
+
+ /// Number of cycles it takes to issue the instructions scheduled in this
+ /// zone. It is defined as: scheduled-micro-ops / issue-width + stalls.
+ /// See getStalls().
+ unsigned CurrCycle;
+
+ /// Micro-ops issued in the current cycle
+ unsigned CurrMOps;
+
+ /// MinReadyCycle - Cycle of the soonest available instruction.
+ unsigned MinReadyCycle;
+
+ // The expected latency of the critical path in this scheduled zone.
+ unsigned ExpectedLatency;
+
+ // The latency of dependence chains leading into this zone.
+ // For each node scheduled bottom-up: DLat = max DLat, N.Depth.
+ // For each cycle scheduled: DLat -= 1.
+ unsigned DependentLatency;
+
+ /// Count the scheduled (issued) micro-ops that can be retired by
+ /// time=CurrCycle assuming the first scheduled instr is retired at time=0.
+ unsigned RetiredMOps;
+
+ // Count scheduled resources that have been executed. Resources are
+ // considered executed if they become ready in the time that it takes to
+ // saturate any resource including the one in question. Counts are scaled
+ // for direct comparison with other resources. Counts can be compared with
+ // MOps * getMicroOpFactor and Latency * getLatencyFactor.
+ SmallVector<unsigned, 16> ExecutedResCounts;
+
+ /// Cache the max count for a single resource.
+ unsigned MaxExecutedResCount;
+
+ // Cache the critical resources ID in this scheduled zone.
+ unsigned ZoneCritResIdx;
+
+ // Is the scheduled region resource limited vs. latency limited.
+ bool IsResourceLimited;
+
+ // Record the highest cycle at which each resource has been reserved by a
+ // scheduled instruction.
+ SmallVector<unsigned, 16> ReservedCycles;
+
+#ifndef NDEBUG
+ // Remember the greatest possible stall as an upper bound on the number of
+ // times we should retry the pending queue because of a hazard.
+ unsigned MaxObservedStall;
+#endif
+
+public:
+ /// Pending queues extend the ready queues with the same ID and the
+ /// PendingFlag set.
+ SchedBoundary(unsigned ID, const Twine &Name):
+ DAG(nullptr), SchedModel(nullptr), Rem(nullptr), Available(ID, Name+".A"),
+ Pending(ID << LogMaxQID, Name+".P"),
+ HazardRec(nullptr) {
+ reset();
+ }
+
+ ~SchedBoundary();
+
+ void reset();
+
+ void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel,
+ SchedRemainder *rem);
+
+ bool isTop() const {
+ return Available.getID() == TopQID;
+ }
+
+ /// Number of cycles to issue the instructions scheduled in this zone.
+ unsigned getCurrCycle() const { return CurrCycle; }
+
+ /// Micro-ops issued in the current cycle
+ unsigned getCurrMOps() const { return CurrMOps; }
+
+ /// Return true if the given SU is used by the most recently scheduled
+ /// instruction.
+ bool isNextSU(const SUnit *SU) const { return NextSUs.count(SU); }
+
+ // The latency of dependence chains leading into this zone.
+ unsigned getDependentLatency() const { return DependentLatency; }
+
+ /// Get the number of latency cycles "covered" by the scheduled
+ /// instructions. This is the larger of the critical path within the zone
+ /// and the number of cycles required to issue the instructions.
+ unsigned getScheduledLatency() const {
+ return std::max(ExpectedLatency, CurrCycle);
+ }
+
+ unsigned getUnscheduledLatency(SUnit *SU) const {
+ return isTop() ? SU->getHeight() : SU->getDepth();
+ }
+
+ unsigned getResourceCount(unsigned ResIdx) const {
+ return ExecutedResCounts[ResIdx];
+ }
+
+ /// Get the scaled count of scheduled micro-ops and resources, including
+ /// executed resources.
+ unsigned getCriticalCount() const {
+ if (!ZoneCritResIdx)
+ return RetiredMOps * SchedModel->getMicroOpFactor();
+ return getResourceCount(ZoneCritResIdx);
+ }
+
+ /// Get a scaled count for the minimum execution time of the scheduled
+ /// micro-ops that are ready to execute by getExecutedCount. Notice the
+ /// feedback loop.
+ unsigned getExecutedCount() const {
+ return std::max(CurrCycle * SchedModel->getLatencyFactor(),
+ MaxExecutedResCount);
+ }
+
+ unsigned getZoneCritResIdx() const { return ZoneCritResIdx; }
+
+ // Is the scheduled region resource limited vs. latency limited.
+ bool isResourceLimited() const { return IsResourceLimited; }
+
+ /// Get the difference between the given SUnit's ready time and the current
+ /// cycle.
+ unsigned getLatencyStallCycles(SUnit *SU);
+
+ unsigned getNextResourceCycle(unsigned PIdx, unsigned Cycles);
+
+ bool checkHazard(SUnit *SU);
+
+ unsigned findMaxLatency(ArrayRef<SUnit*> ReadySUs);
+
+ unsigned getOtherResourceCount(unsigned &OtherCritIdx);
+
+ void releaseNode(SUnit *SU, unsigned ReadyCycle);
+
+ void releaseTopNode(SUnit *SU);
+
+ void releaseBottomNode(SUnit *SU);
+
+ void bumpCycle(unsigned NextCycle);
+
+ void incExecutedResources(unsigned PIdx, unsigned Count);
+
+ unsigned countResource(unsigned PIdx, unsigned Cycles, unsigned ReadyCycle);
+
+ void bumpNode(SUnit *SU);
+
+ void releasePending();
+
+ void removeReady(SUnit *SU);
+
+ /// Call this before applying any other heuristics to the Available queue.
+ /// Updates the Available/Pending Q's if necessary and returns the single
+ /// available instruction, or NULL if there are multiple candidates.
+ SUnit *pickOnlyChoice();
+
+#ifndef NDEBUG
+ void dumpScheduledState();
+#endif
+};
+
+/// Base class for GenericScheduler. This class maintains information about
+/// scheduling candidates based on TargetSchedModel making it easy to implement
+/// heuristics for either preRA or postRA scheduling.
+class GenericSchedulerBase : public MachineSchedStrategy {
+public:
+ /// Represent the type of SchedCandidate found within a single queue.
+ /// pickNodeBidirectional depends on these listed by decreasing priority.
+ enum CandReason {
+ NoCand, PhysRegCopy, RegExcess, RegCritical, Stall, Cluster, Weak, RegMax,
+ ResourceReduce, ResourceDemand, BotHeightReduce, BotPathReduce,
+ TopDepthReduce, TopPathReduce, NextDefUse, NodeOrder};
+
+#ifndef NDEBUG
+ static const char *getReasonStr(GenericSchedulerBase::CandReason Reason);
+#endif
+
+ /// Policy for scheduling the next instruction in the candidate's zone.
+ struct CandPolicy {
+ bool ReduceLatency;
+ unsigned ReduceResIdx;
+ unsigned DemandResIdx;
+
+ CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {}
+ };
+
+ /// Status of an instruction's critical resource consumption.
+ struct SchedResourceDelta {
+ // Count critical resources in the scheduled region required by SU.
+ unsigned CritResources;
+
+ // Count critical resources from another region consumed by SU.
+ unsigned DemandedResources;
+
+ SchedResourceDelta(): CritResources(0), DemandedResources(0) {}
+
+ bool operator==(const SchedResourceDelta &RHS) const {
+ return CritResources == RHS.CritResources
+ && DemandedResources == RHS.DemandedResources;
+ }
+ bool operator!=(const SchedResourceDelta &RHS) const {
+ return !operator==(RHS);
+ }
+ };
+
+ /// Store the state used by GenericScheduler heuristics, required for the
+ /// lifetime of one invocation of pickNode().
+ struct SchedCandidate {
+ CandPolicy Policy;
+
+ // The best SUnit candidate.
+ SUnit *SU;
+
+ // The reason for this candidate.
+ CandReason Reason;
+
+ // Set of reasons that apply to multiple candidates.
+ uint32_t RepeatReasonSet;
+
+ // Register pressure values for the best candidate.
+ RegPressureDelta RPDelta;
+
+ // Critical resource consumption of the best candidate.
+ SchedResourceDelta ResDelta;
+
+ SchedCandidate(const CandPolicy &policy)
+ : Policy(policy), SU(nullptr), Reason(NoCand), RepeatReasonSet(0) {}
+
+ bool isValid() const { return SU; }
+
+ // Copy the status of another candidate without changing policy.
+ void setBest(SchedCandidate &Best) {
+ assert(Best.Reason != NoCand && "uninitialized Sched candidate");
+ SU = Best.SU;
+ Reason = Best.Reason;
+ RPDelta = Best.RPDelta;
+ ResDelta = Best.ResDelta;
+ }
+
+ bool isRepeat(CandReason R) { return RepeatReasonSet & (1 << R); }
+ void setRepeat(CandReason R) { RepeatReasonSet |= (1 << R); }
+
+ void initResourceDelta(const ScheduleDAGMI *DAG,
+ const TargetSchedModel *SchedModel);
+ };
+
+protected:
+ const MachineSchedContext *Context;
+ const TargetSchedModel *SchedModel;
+ const TargetRegisterInfo *TRI;
+
+ SchedRemainder Rem;
+protected:
+ GenericSchedulerBase(const MachineSchedContext *C):
+ Context(C), SchedModel(nullptr), TRI(nullptr) {}
+
+ void setPolicy(CandPolicy &Policy, bool IsPostRA, SchedBoundary &CurrZone,
+ SchedBoundary *OtherZone);
+
+#ifndef NDEBUG
+ void traceCandidate(const SchedCandidate &Cand);
+#endif
+};
+
+/// GenericScheduler shrinks the unscheduled zone using heuristics to balance
+/// the schedule.
+class GenericScheduler : public GenericSchedulerBase {
+ ScheduleDAGMILive *DAG;
+
+ // State of the top and bottom scheduled instruction boundaries.
+ SchedBoundary Top;
+ SchedBoundary Bot;
+
+ MachineSchedPolicy RegionPolicy;
+public:
+ GenericScheduler(const MachineSchedContext *C):
+ GenericSchedulerBase(C), DAG(nullptr), Top(SchedBoundary::TopQID, "TopQ"),
+ Bot(SchedBoundary::BotQID, "BotQ") {}
+
+ void initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) override;
+
+ void dumpPolicy() override;
+
+ bool shouldTrackPressure() const override {
+ return RegionPolicy.ShouldTrackPressure;
+ }
+
+ void initialize(ScheduleDAGMI *dag) override;
+
+ SUnit *pickNode(bool &IsTopNode) override;
+
+ void schedNode(SUnit *SU, bool IsTopNode) override;
+
+ void releaseTopNode(SUnit *SU) override {
+ Top.releaseTopNode(SU);
+ }
+
+ void releaseBottomNode(SUnit *SU) override {
+ Bot.releaseBottomNode(SU);
+ }
+
+ void registerRoots() override;
+
+protected:
+ void checkAcyclicLatency();
+
+ void tryCandidate(SchedCandidate &Cand,
+ SchedCandidate &TryCand,
+ SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ RegPressureTracker &TempTracker);
+
+ SUnit *pickNodeBidirectional(bool &IsTopNode);
+
+ void pickNodeFromQueue(SchedBoundary &Zone,
+ const RegPressureTracker &RPTracker,
+ SchedCandidate &Candidate);
+
+ void reschedulePhysRegCopies(SUnit *SU, bool isTop);
+};
+
+/// PostGenericScheduler - Interface to the scheduling algorithm used by
+/// ScheduleDAGMI.
+///
+/// Callbacks from ScheduleDAGMI:
+/// initPolicy -> initialize(DAG) -> registerRoots -> pickNode ...
+class PostGenericScheduler : public GenericSchedulerBase {
+ ScheduleDAGMI *DAG;
+ SchedBoundary Top;
+ SmallVector<SUnit*, 8> BotRoots;
+public:
+ PostGenericScheduler(const MachineSchedContext *C):
+ GenericSchedulerBase(C), Top(SchedBoundary::TopQID, "TopQ") {}
+
+ ~PostGenericScheduler() override {}
+
+ void initPolicy(MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End,
+ unsigned NumRegionInstrs) override {
+ /* no configurable policy */
+ }
+
+ /// PostRA scheduling does not track pressure.
+ bool shouldTrackPressure() const override { return false; }
+
+ void initialize(ScheduleDAGMI *Dag) override;
+
+ void registerRoots() override;
+
+ SUnit *pickNode(bool &IsTopNode) override;
+
+ void scheduleTree(unsigned SubtreeID) override {
+ llvm_unreachable("PostRA scheduler does not support subtree analysis.");
+ }
+
+ void schedNode(SUnit *SU, bool IsTopNode) override;
+
+ void releaseTopNode(SUnit *SU) override {
+ Top.releaseTopNode(SU);
+ }
+
+ // Only called for roots.
+ void releaseBottomNode(SUnit *SU) override {
+ BotRoots.push_back(SU);
+ }
+
+protected:
+ void tryCandidate(SchedCandidate &Cand, SchedCandidate &TryCand);
+
+ void pickNodeFromQueue(SchedCandidate &Cand);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineTraceMetrics.h b/contrib/llvm/include/llvm/CodeGen/MachineTraceMetrics.h
new file mode 100644
index 0000000..bfe6e94
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -0,0 +1,398 @@
+//===- lib/CodeGen/MachineTraceMetrics.h - Super-scalar metrics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface for the MachineTraceMetrics analysis pass
+// that estimates CPU resource usage and critical data dependency paths through
+// preferred traces. This is useful for super-scalar CPUs where execution speed
+// can be limited both by data dependencies and by limited execution resources.
+//
+// Out-of-order CPUs will often be executing instructions from multiple basic
+// blocks at the same time. This makes it difficult to estimate the resource
+// usage accurately in a single basic block. Resources can be estimated better
+// by looking at a trace through the current basic block.
+//
+// For every block, the MachineTraceMetrics pass will pick a preferred trace
+// that passes through the block. The trace is chosen based on loop structure,
+// branch probabilities, and resource usage. The intention is to pick likely
+// traces that would be the most affected by code transformations.
+//
+// It is expensive to compute a full arbitrary trace for every block, so to
+// save some computations, traces are chosen to be convergent. This means that
+// if the traces through basic blocks A and B ever cross when moving away from
+// A and B, they never diverge again. This applies in both directions - If the
+// traces meet above A and B, they won't diverge when going further back.
+//
+// Traces tend to align with loops. The trace through a block in an inner loop
+// will begin at the loop entry block and end at a back edge. If there are
+// nested loops, the trace may begin and end at those instead.
+//
+// For each trace, we compute the critical path length, which is the number of
+// cycles required to execute the trace when execution is limited by data
+// dependencies only. We also compute the resource height, which is the number
+// of cycles required to execute all instructions in the trace when ignoring
+// data dependencies.
+//
+// Every instruction in the current block has a slack - the number of cycles
+// execution of the instruction can be delayed without extending the critical
+// path.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINETRACEMETRICS_H
+#define LLVM_CODEGEN_MACHINETRACEMETRICS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+
+namespace llvm {
+
+class InstrItineraryData;
+class MachineBasicBlock;
+class MachineInstr;
+class MachineLoop;
+class MachineLoopInfo;
+class MachineRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+class raw_ostream;
+
+class MachineTraceMetrics : public MachineFunctionPass {
+ const MachineFunction *MF;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ const MachineRegisterInfo *MRI;
+ const MachineLoopInfo *Loops;
+ TargetSchedModel SchedModel;
+
+public:
+ class Ensemble;
+ class Trace;
+ static char ID;
+ MachineTraceMetrics();
+ void getAnalysisUsage(AnalysisUsage&) const override;
+ bool runOnMachineFunction(MachineFunction&) override;
+ void releaseMemory() override;
+ void verifyAnalysis() const override;
+
+ friend class Ensemble;
+ friend class Trace;
+
+ /// Per-basic block information that doesn't depend on the trace through the
+ /// block.
+ struct FixedBlockInfo {
+ /// The number of non-trivial instructions in the block.
+ /// Doesn't count PHI and COPY instructions that are likely to be removed.
+ unsigned InstrCount;
+
+ /// True when the block contains calls.
+ bool HasCalls;
+
+ FixedBlockInfo() : InstrCount(~0u), HasCalls(false) {}
+
+ /// Returns true when resource information for this block has been computed.
+ bool hasResources() const { return InstrCount != ~0u; }
+
+ /// Invalidate resource information.
+ void invalidate() { InstrCount = ~0u; }
+ };
+
+ /// Get the fixed resource information about MBB. Compute it on demand.
+ const FixedBlockInfo *getResources(const MachineBasicBlock*);
+
+ /// Get the scaled number of cycles used per processor resource in MBB.
+ /// This is an array with SchedModel.getNumProcResourceKinds() entries.
+ /// The getResources() function above must have been called first.
+ ///
+ /// These numbers have already been scaled by SchedModel.getResourceFactor().
+ ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
+
+ /// A virtual register or regunit required by a basic block or its trace
+ /// successors.
+ struct LiveInReg {
+ /// The virtual register required, or a register unit.
+ unsigned Reg;
+
+ /// For virtual registers: Minimum height of the defining instruction.
+ /// For regunits: Height of the highest user in the trace.
+ unsigned Height;
+
+ LiveInReg(unsigned Reg, unsigned Height = 0) : Reg(Reg), Height(Height) {}
+ };
+
+ /// Per-basic block information that relates to a specific trace through the
+ /// block. Convergent traces means that only one of these is required per
+ /// block in a trace ensemble.
+ struct TraceBlockInfo {
+ /// Trace predecessor, or NULL for the first block in the trace.
+ /// Valid when hasValidDepth().
+ const MachineBasicBlock *Pred;
+
+ /// Trace successor, or NULL for the last block in the trace.
+ /// Valid when hasValidHeight().
+ const MachineBasicBlock *Succ;
+
+ /// The block number of the head of the trace. (When hasValidDepth()).
+ unsigned Head;
+
+ /// The block number of the tail of the trace. (When hasValidHeight()).
+ unsigned Tail;
+
+ /// Accumulated number of instructions in the trace above this block.
+ /// Does not include instructions in this block.
+ unsigned InstrDepth;
+
+ /// Accumulated number of instructions in the trace below this block.
+ /// Includes instructions in this block.
+ unsigned InstrHeight;
+
+ TraceBlockInfo() :
+ Pred(nullptr), Succ(nullptr),
+ InstrDepth(~0u), InstrHeight(~0u),
+ HasValidInstrDepths(false), HasValidInstrHeights(false) {}
+
+ /// Returns true if the depth resources have been computed from the trace
+ /// above this block.
+ bool hasValidDepth() const { return InstrDepth != ~0u; }
+
+ /// Returns true if the height resources have been computed from the trace
+ /// below this block.
+ bool hasValidHeight() const { return InstrHeight != ~0u; }
+
+ /// Invalidate depth resources when some block above this one has changed.
+ void invalidateDepth() { InstrDepth = ~0u; HasValidInstrDepths = false; }
+
+ /// Invalidate height resources when a block below this one has changed.
+ void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; }
+
+ /// Assuming that this is a dominator of TBI, determine if it contains
+ /// useful instruction depths. A dominating block can be above the current
+ /// trace head, and any dependencies from such a far away dominator are not
+ /// expected to affect the critical path.
+ ///
+ /// Also returns true when TBI == this.
+ bool isUsefulDominator(const TraceBlockInfo &TBI) const {
+ // The trace for TBI may not even be calculated yet.
+ if (!hasValidDepth() || !TBI.hasValidDepth())
+ return false;
+ // Instruction depths are only comparable if the traces share a head.
+ if (Head != TBI.Head)
+ return false;
+ // It is almost always the case that TBI belongs to the same trace as
+      // this block, but in rare convoluted cases involving irreducible control
+ // flow, a dominator may share a trace head without actually being on the
+ // same trace as TBI. This is not a big problem as long as it doesn't
+ // increase the instruction depth.
+ return HasValidInstrDepths && InstrDepth <= TBI.InstrDepth;
+ }
+
+ // Data-dependency-related information. Per-instruction depth and height
+ // are computed from data dependencies in the current trace, using
+ // itinerary data.
+
+ /// Instruction depths have been computed. This implies hasValidDepth().
+ bool HasValidInstrDepths;
+
+ /// Instruction heights have been computed. This implies hasValidHeight().
+ bool HasValidInstrHeights;
+
+ /// Critical path length. This is the number of cycles in the longest data
+ /// dependency chain through the trace. This is only valid when both
+ /// HasValidInstrDepths and HasValidInstrHeights are set.
+ unsigned CriticalPath;
+
+ /// Live-in registers. These registers are defined above the current block
+ /// and used by this block or a block below it.
+ /// This does not include PHI uses in the current block, but it does
+ /// include PHI uses in deeper blocks.
+ SmallVector<LiveInReg, 4> LiveIns;
+
+ void print(raw_ostream&) const;
+ };
+
+ /// InstrCycles represents the cycle height and depth of an instruction in a
+ /// trace.
+ struct InstrCycles {
+ /// Earliest issue cycle as determined by data dependencies and instruction
+ /// latencies from the beginning of the trace. Data dependencies from
+ /// before the trace are not included.
+ unsigned Depth;
+
+    /// Minimum number of cycles from when this instruction is issued to the
+    /// end of the trace, as determined by data dependencies and instruction
+    /// latencies.
+ unsigned Height;
+ };
+
+ /// A trace represents a plausible sequence of executed basic blocks that
+  /// passes through the current basic block once. The Trace class serves as a
+ /// handle to internal cached data structures.
+ class Trace {
+ Ensemble &TE;
+ TraceBlockInfo &TBI;
+
+ unsigned getBlockNum() const { return &TBI - &TE.BlockInfo[0]; }
+
+ public:
+ explicit Trace(Ensemble &te, TraceBlockInfo &tbi) : TE(te), TBI(tbi) {}
+ void print(raw_ostream&) const;
+
+ /// Compute the total number of instructions in the trace.
+ unsigned getInstrCount() const {
+ return TBI.InstrDepth + TBI.InstrHeight;
+ }
+
+ /// Return the resource depth of the top/bottom of the trace center block.
+ /// This is the number of cycles required to execute all instructions from
+ /// the trace head to the trace center block. The resource depth only
+ /// considers execution resources, it ignores data dependencies.
+ /// When Bottom is set, instructions in the trace center block are included.
+ unsigned getResourceDepth(bool Bottom) const;
+
+ /// Return the resource length of the trace. This is the number of cycles
+ /// required to execute the instructions in the trace if they were all
+ /// independent, exposing the maximum instruction-level parallelism.
+ ///
+ /// Any blocks in Extrablocks are included as if they were part of the
+ /// trace. Likewise, extra resources required by the specified scheduling
+ /// classes are included. For the caller to account for extra machine
+ /// instructions, it must first resolve each instruction's scheduling class.
+ unsigned getResourceLength(
+ ArrayRef<const MachineBasicBlock *> Extrablocks = None,
+ ArrayRef<const MCSchedClassDesc *> ExtraInstrs = None,
+ ArrayRef<const MCSchedClassDesc *> RemoveInstrs = None) const;
+
+ /// Return the length of the (data dependency) critical path through the
+ /// trace.
+ unsigned getCriticalPath() const { return TBI.CriticalPath; }
+
+ /// Return the depth and height of MI. The depth is only valid for
+ /// instructions in or above the trace center block. The height is only
+ /// valid for instructions in or below the trace center block.
+ InstrCycles getInstrCycles(const MachineInstr *MI) const {
+ return TE.Cycles.lookup(MI);
+ }
+
+ /// Return the slack of MI. This is the number of cycles MI can be delayed
+ /// before the critical path becomes longer.
+ /// MI must be an instruction in the trace center block.
+ unsigned getInstrSlack(const MachineInstr *MI) const;
+
+ /// Return the Depth of a PHI instruction in a trace center block successor.
+ /// The PHI does not have to be part of the trace.
+ unsigned getPHIDepth(const MachineInstr *PHI) const;
+
+ /// A dependence is useful if the basic block of the defining instruction
+ /// is part of the trace of the user instruction. It is assumed that DefMI
+ /// dominates UseMI (see also isUsefulDominator).
+ bool isDepInTrace(const MachineInstr *DefMI,
+ const MachineInstr *UseMI) const;
+ };
+
+ /// A trace ensemble is a collection of traces selected using the same
+ /// strategy, for example 'minimum resource height'. There is one trace for
+ /// every block in the function.
+ class Ensemble {
+ SmallVector<TraceBlockInfo, 4> BlockInfo;
+ DenseMap<const MachineInstr*, InstrCycles> Cycles;
+ SmallVector<unsigned, 0> ProcResourceDepths;
+ SmallVector<unsigned, 0> ProcResourceHeights;
+ friend class Trace;
+
+ void computeTrace(const MachineBasicBlock*);
+ void computeDepthResources(const MachineBasicBlock*);
+ void computeHeightResources(const MachineBasicBlock*);
+ unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&);
+ void computeInstrDepths(const MachineBasicBlock*);
+ void computeInstrHeights(const MachineBasicBlock*);
+ void addLiveIns(const MachineInstr *DefMI, unsigned DefOp,
+ ArrayRef<const MachineBasicBlock*> Trace);
+
+ protected:
+ MachineTraceMetrics &MTM;
+ virtual const MachineBasicBlock *pickTracePred(const MachineBasicBlock*) =0;
+ virtual const MachineBasicBlock *pickTraceSucc(const MachineBasicBlock*) =0;
+ explicit Ensemble(MachineTraceMetrics*);
+ const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
+ const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
+ const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
+ ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
+ ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
+
+ public:
+ virtual ~Ensemble();
+ virtual const char *getName() const =0;
+ void print(raw_ostream&) const;
+ void invalidate(const MachineBasicBlock *MBB);
+ void verify() const;
+
+ /// Get the trace that passes through MBB.
+ /// The trace is computed on demand.
+ Trace getTrace(const MachineBasicBlock *MBB);
+ };
+
+ /// Strategies for selecting traces.
+ enum Strategy {
+ /// Select the trace through a block that has the fewest instructions.
+ TS_MinInstrCount,
+
+ TS_NumStrategies
+ };
+
+ /// Get the trace ensemble representing the given trace selection strategy.
+ /// The returned Ensemble object is owned by the MachineTraceMetrics analysis,
+ /// and valid for the lifetime of the analysis pass.
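+  ///
+  /// A minimal usage sketch (illustrative only; assumes the analysis result
+  /// is available as MTM and that MBB is the MachineBasicBlock of interest):
+  /// \code
+  ///   auto *MinInstr = MTM.getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
+  ///   MachineTraceMetrics::Trace T = MinInstr->getTrace(MBB);
+  ///   unsigned Len = std::max(T.getCriticalPath(), T.getResourceLength());
+  /// \endcode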
+ Ensemble *getEnsemble(Strategy);
+
+ /// Invalidate cached information about MBB. This must be called *before* MBB
+ /// is erased, or the CFG is otherwise changed.
+ ///
+ /// This invalidates per-block information about resource usage for MBB only,
+ /// and it invalidates per-trace information for any trace that passes
+ /// through MBB.
+ ///
+ /// Call Ensemble::getTrace() again to update any trace handles.
+ void invalidate(const MachineBasicBlock *MBB);
+
+private:
+ // One entry per basic block, indexed by block number.
+ SmallVector<FixedBlockInfo, 4> BlockInfo;
+
+ // Cycles consumed on each processor resource per block.
+ // The number of processor resource kinds is constant for a given subtarget,
+ // but it is not known at compile time. The number of cycles consumed by
+ // block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
+ // where Kinds = SchedModel.getNumProcResourceKinds().
+ SmallVector<unsigned, 0> ProcResourceCycles;
+
+ // One ensemble per strategy.
+ Ensemble* Ensembles[TS_NumStrategies];
+
+ // Convert scaled resource usage to a cycle count that can be compared with
+ // latencies.
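+  // E.g. (sketch) with a latency factor of 2, a scaled count of 5 rounds up
+  // to (5 + 2 - 1) / 2 = 3 cycles.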
+ unsigned getCycles(unsigned Scaled) {
+ unsigned Factor = SchedModel.getLatencyFactor();
+ return (Scaled + Factor - 1) / Factor;
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineTraceMetrics::Trace &Tr) {
+ Tr.print(OS);
+ return OS;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineTraceMetrics::Ensemble &En) {
+ En.print(OS);
+ return OS;
+}
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/MachineValueType.h b/contrib/llvm/include/llvm/CodeGen/MachineValueType.h
new file mode 100644
index 0000000..04d6ee3
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/MachineValueType.h
@@ -0,0 +1,713 @@
+//===- CodeGen/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of machine-level target-independent types which
+// legal values in the code generator use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEVALUETYPE_H
+#define LLVM_CODEGEN_MACHINEVALUETYPE_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace llvm {
+
+ class Type;
+
+ /// MVT - Machine Value Type. Every type that is supported natively by some
+ /// processor targeted by LLVM occurs here. This means that any legal value
+ /// type can be represented by an MVT.
+  class MVT {
+ public:
+ enum SimpleValueType {
+ // INVALID_SIMPLE_VALUE_TYPE - Simple value types less than zero are
+ // considered extended value types.
+ INVALID_SIMPLE_VALUE_TYPE = -1,
+
+ // If you change this numbering, you must change the values in
+ // ValueTypes.td as well!
+ Other = 0, // This is a non-standard value
+ i1 = 1, // This is a 1 bit integer value
+ i8 = 2, // This is an 8 bit integer value
+ i16 = 3, // This is a 16 bit integer value
+ i32 = 4, // This is a 32 bit integer value
+ i64 = 5, // This is a 64 bit integer value
+ i128 = 6, // This is a 128 bit integer value
+
+ FIRST_INTEGER_VALUETYPE = i1,
+ LAST_INTEGER_VALUETYPE = i128,
+
+ f16 = 7, // This is a 16 bit floating point value
+ f32 = 8, // This is a 32 bit floating point value
+ f64 = 9, // This is a 64 bit floating point value
+    f80 = 10, // This is an 80 bit floating point value
+ f128 = 11, // This is a 128 bit floating point value
+ ppcf128 = 12, // This is a PPC 128-bit floating point value
+
+ FIRST_FP_VALUETYPE = f16,
+ LAST_FP_VALUETYPE = ppcf128,
+
+ v2i1 = 13, // 2 x i1
+ v4i1 = 14, // 4 x i1
+ v8i1 = 15, // 8 x i1
+ v16i1 = 16, // 16 x i1
+ v32i1 = 17, // 32 x i1
+ v64i1 = 18, // 64 x i1
+ v512i1 = 19, // 512 x i1
+ v1024i1 = 20, // 1024 x i1
+
+ v1i8 = 21, // 1 x i8
+ v2i8 = 22, // 2 x i8
+ v4i8 = 23, // 4 x i8
+ v8i8 = 24, // 8 x i8
+ v16i8 = 25, // 16 x i8
+ v32i8 = 26, // 32 x i8
+ v64i8 = 27, // 64 x i8
+    v128i8 = 28, // 128 x i8
+    v256i8 = 29, // 256 x i8
+
+ v1i16 = 30, // 1 x i16
+ v2i16 = 31, // 2 x i16
+ v4i16 = 32, // 4 x i16
+ v8i16 = 33, // 8 x i16
+ v16i16 = 34, // 16 x i16
+ v32i16 = 35, // 32 x i16
+ v64i16 = 36, // 64 x i16
+    v128i16 = 37, // 128 x i16
+
+ v1i32 = 38, // 1 x i32
+ v2i32 = 39, // 2 x i32
+ v4i32 = 40, // 4 x i32
+ v8i32 = 41, // 8 x i32
+ v16i32 = 42, // 16 x i32
+ v32i32 = 43, // 32 x i32
+ v64i32 = 44, // 64 x i32
+
+ v1i64 = 45, // 1 x i64
+ v2i64 = 46, // 2 x i64
+ v4i64 = 47, // 4 x i64
+ v8i64 = 48, // 8 x i64
+ v16i64 = 49, // 16 x i64
+ v32i64 = 50, // 32 x i64
+
+ v1i128 = 51, // 1 x i128
+
+ FIRST_INTEGER_VECTOR_VALUETYPE = v2i1,
+ LAST_INTEGER_VECTOR_VALUETYPE = v1i128,
+
+ v2f16 = 52, // 2 x f16
+ v4f16 = 53, // 4 x f16
+ v8f16 = 54, // 8 x f16
+ v1f32 = 55, // 1 x f32
+ v2f32 = 56, // 2 x f32
+ v4f32 = 57, // 4 x f32
+ v8f32 = 58, // 8 x f32
+ v16f32 = 59, // 16 x f32
+ v1f64 = 60, // 1 x f64
+ v2f64 = 61, // 2 x f64
+ v4f64 = 62, // 4 x f64
+ v8f64 = 63, // 8 x f64
+
+ FIRST_FP_VECTOR_VALUETYPE = v2f16,
+ LAST_FP_VECTOR_VALUETYPE = v8f64,
+
+ FIRST_VECTOR_VALUETYPE = v2i1,
+ LAST_VECTOR_VALUETYPE = v8f64,
+
+ x86mmx = 64, // This is an X86 MMX value
+
+ Glue = 65, // This glues nodes together during pre-RA sched
+
+ isVoid = 66, // This has no value
+
+ Untyped = 67, // This value takes a register, but has
+ // unspecified type. The register class
+ // will be determined by the opcode.
+
+ FIRST_VALUETYPE = 0, // This is always the beginning of the list.
+ LAST_VALUETYPE = 68, // This always remains at the end of the list.
+
+ // This is the current maximum for LAST_VALUETYPE.
+    // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors.
+ // This value must be a multiple of 32.
+ MAX_ALLOWED_VALUETYPE = 96,
+
+ // Token - A value of type llvm::TokenTy
+ token = 249,
+
+ // Metadata - This is MDNode or MDString.
+ Metadata = 250,
+
+ // iPTRAny - An int value the size of the pointer of the current
+ // target to any address space. This must only be used internal to
+ // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
+ iPTRAny = 251,
+
+ // vAny - A vector with any length and element size. This is used
+ // for intrinsics that have overloadings based on vector types.
+ // This is only for tblgen's consumption!
+ vAny = 252,
+
+ // fAny - Any floating-point or vector floating-point value. This is used
+ // for intrinsics that have overloadings based on floating-point types.
+ // This is only for tblgen's consumption!
+ fAny = 253,
+
+ // iAny - An integer or vector integer value of any bit width. This is
+ // used for intrinsics that have overloadings based on integer bit widths.
+ // This is only for tblgen's consumption!
+ iAny = 254,
+
+ // iPTR - An int value the size of the pointer of the current
+ // target. This should only be used internal to tblgen!
+ iPTR = 255,
+
+ // Any - Any type. This is used for intrinsics that have overloadings.
+ // This is only for tblgen's consumption!
+ Any = 256
+ };
+
+ SimpleValueType SimpleTy;
+
+ LLVM_CONSTEXPR MVT() : SimpleTy(INVALID_SIMPLE_VALUE_TYPE) {}
+ LLVM_CONSTEXPR MVT(SimpleValueType SVT) : SimpleTy(SVT) { }
+
+ bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
+ bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
+ bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
+ bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
+ bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
+ bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
+
+ /// isValid - Return true if this is a valid simple valuetype.
+ bool isValid() const {
+ return (SimpleTy >= MVT::FIRST_VALUETYPE &&
+ SimpleTy < MVT::LAST_VALUETYPE);
+ }
+
+ /// isFloatingPoint - Return true if this is a FP, or a vector FP type.
+ bool isFloatingPoint() const {
+ return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
+ (SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
+ }
+
+ /// isInteger - Return true if this is an integer, or a vector integer type.
+ bool isInteger() const {
+ return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
+ SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
+ (SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
+ }
+
+ /// isVector - Return true if this is a vector value type.
+ bool isVector() const {
+ return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
+ SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
+ }
+
+ /// is16BitVector - Return true if this is a 16-bit vector type.
+ bool is16BitVector() const {
+ return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
+ SimpleTy == MVT::v16i1);
+ }
+
+ /// is32BitVector - Return true if this is a 32-bit vector type.
+ bool is32BitVector() const {
+ return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 ||
+ SimpleTy == MVT::v1i32 || SimpleTy == MVT::v2f16 ||
+ SimpleTy == MVT::v1f32);
+ }
+
+ /// is64BitVector - Return true if this is a 64-bit vector type.
+ bool is64BitVector() const {
+ return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
+ SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
+ SimpleTy == MVT::v4f16 || SimpleTy == MVT::v2f32 ||
+ SimpleTy == MVT::v1f64);
+ }
+
+ /// is128BitVector - Return true if this is a 128-bit vector type.
+ bool is128BitVector() const {
+ return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
+ SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
+ SimpleTy == MVT::v1i128 || SimpleTy == MVT::v8f16 ||
+ SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
+ }
+
+ /// is256BitVector - Return true if this is a 256-bit vector type.
+ bool is256BitVector() const {
+ return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
+ SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
+ SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
+ }
+
+ /// is512BitVector - Return true if this is a 512-bit vector type.
+ bool is512BitVector() const {
+ return (SimpleTy == MVT::v16f32 || SimpleTy == MVT::v8f64 ||
+ SimpleTy == MVT::v512i1 || SimpleTy == MVT::v64i8 ||
+ SimpleTy == MVT::v32i16 || SimpleTy == MVT::v16i32 ||
+ SimpleTy == MVT::v8i64);
+ }
+
+ /// is1024BitVector - Return true if this is a 1024-bit vector type.
+ bool is1024BitVector() const {
+ return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 ||
+ SimpleTy == MVT::v64i16 || SimpleTy == MVT::v32i32 ||
+ SimpleTy == MVT::v16i64);
+ }
+
+  /// is2048BitVector - Return true if this is a 2048-bit vector type.
+ bool is2048BitVector() const {
+ return (SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 ||
+ SimpleTy == MVT::v64i32 || SimpleTy == MVT::v32i64);
+ }
+
+ /// isOverloaded - Return true if this is an overloaded type for TableGen.
+ bool isOverloaded() const {
+ return (SimpleTy==MVT::Any ||
+ SimpleTy==MVT::iAny || SimpleTy==MVT::fAny ||
+ SimpleTy==MVT::vAny || SimpleTy==MVT::iPTRAny);
+ }
+
+  /// isPow2VectorType - Returns true if the number of elements in this
+  /// vector type is a power of 2.
+ bool isPow2VectorType() const {
+ unsigned NElts = getVectorNumElements();
+ return !(NElts & (NElts - 1));
+ }
+
+ /// getPow2VectorType - Widens the length of the given vector MVT up to
+ /// the nearest power of 2 and returns that type.
+ MVT getPow2VectorType() const {
+ if (isPow2VectorType())
+ return *this;
+
+ unsigned NElts = getVectorNumElements();
+ unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+ return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
+ }
+
+ /// getScalarType - If this is a vector type, return the element type,
+ /// otherwise return this.
+ MVT getScalarType() const {
+ return isVector() ? getVectorElementType() : *this;
+ }
+
+ MVT getVectorElementType() const {
+ switch (SimpleTy) {
+ default:
+ llvm_unreachable("Not a vector MVT!");
+ case v2i1:
+ case v4i1:
+ case v8i1:
+ case v16i1:
+ case v32i1:
+ case v64i1:
+ case v512i1:
+ case v1024i1: return i1;
+ case v1i8:
+ case v2i8:
+ case v4i8:
+ case v8i8:
+ case v16i8:
+ case v32i8:
+ case v64i8:
+ case v128i8:
+ case v256i8: return i8;
+ case v1i16:
+ case v2i16:
+ case v4i16:
+ case v8i16:
+ case v16i16:
+ case v32i16:
+ case v64i16:
+ case v128i16: return i16;
+ case v1i32:
+ case v2i32:
+ case v4i32:
+ case v8i32:
+ case v16i32:
+ case v32i32:
+ case v64i32: return i32;
+ case v1i64:
+ case v2i64:
+ case v4i64:
+ case v8i64:
+ case v16i64:
+ case v32i64: return i64;
+ case v1i128: return i128;
+ case v2f16:
+ case v4f16:
+ case v8f16: return f16;
+ case v1f32:
+ case v2f32:
+ case v4f32:
+ case v8f32:
+ case v16f32: return f32;
+ case v1f64:
+ case v2f64:
+ case v4f64:
+ case v8f64: return f64;
+ }
+ }
+
+ unsigned getVectorNumElements() const {
+ switch (SimpleTy) {
+ default:
+ llvm_unreachable("Not a vector MVT!");
+ case v1024i1: return 1024;
+ case v512i1: return 512;
+ case v256i8: return 256;
+ case v128i8:
+ case v128i16: return 128;
+ case v64i1:
+ case v64i8:
+ case v64i16:
+ case v64i32: return 64;
+ case v32i1:
+ case v32i8:
+ case v32i16:
+ case v32i32:
+ case v32i64: return 32;
+ case v16i1:
+ case v16i8:
+ case v16i16:
+ case v16i32:
+ case v16i64:
+ case v16f32: return 16;
+ case v8i1:
+ case v8i8:
+ case v8i16:
+ case v8i32:
+ case v8i64:
+ case v8f16:
+ case v8f32:
+ case v8f64: return 8;
+ case v4i1:
+ case v4i8:
+ case v4i16:
+ case v4i32:
+ case v4i64:
+ case v4f16:
+ case v4f32:
+ case v4f64: return 4;
+ case v2i1:
+ case v2i8:
+ case v2i16:
+ case v2i32:
+ case v2i64:
+ case v2f16:
+ case v2f32:
+ case v2f64: return 2;
+ case v1i8:
+ case v1i16:
+ case v1i32:
+ case v1i64:
+ case v1i128:
+ case v1f32:
+ case v1f64: return 1;
+ }
+ }
+
+ unsigned getSizeInBits() const {
+ switch (SimpleTy) {
+ default:
+ llvm_unreachable("getSizeInBits called on extended MVT.");
+ case Other:
+ llvm_unreachable("Value type is non-standard value, Other.");
+ case iPTR:
+ llvm_unreachable("Value type size is target-dependent. Ask TLI.");
+ case iPTRAny:
+ case iAny:
+ case fAny:
+ case vAny:
+ case Any:
+ llvm_unreachable("Value type is overloaded.");
+ case token:
+ llvm_unreachable("Token type is a sentinel that cannot be used "
+ "in codegen and has no size");
+ case Metadata:
+ llvm_unreachable("Value type is metadata.");
+ case i1 : return 1;
+ case v2i1: return 2;
+ case v4i1: return 4;
+ case i8 :
+ case v1i8:
+ case v8i1: return 8;
+ case i16 :
+ case f16:
+ case v16i1:
+ case v2i8:
+ case v1i16: return 16;
+ case f32 :
+ case i32 :
+ case v32i1:
+ case v4i8:
+ case v2i16:
+ case v2f16:
+ case v1f32:
+ case v1i32: return 32;
+ case x86mmx:
+ case f64 :
+ case i64 :
+ case v64i1:
+ case v8i8:
+ case v4i16:
+ case v2i32:
+ case v1i64:
+ case v4f16:
+ case v2f32:
+ case v1f64: return 64;
+ case f80 : return 80;
+ case f128:
+ case ppcf128:
+ case i128:
+ case v16i8:
+ case v8i16:
+ case v4i32:
+ case v2i64:
+ case v1i128:
+ case v8f16:
+ case v4f32:
+ case v2f64: return 128;
+ case v32i8:
+ case v16i16:
+ case v8i32:
+ case v4i64:
+ case v8f32:
+ case v4f64: return 256;
+ case v512i1:
+ case v64i8:
+ case v32i16:
+ case v16i32:
+ case v8i64:
+ case v16f32:
+ case v8f64: return 512;
+ case v1024i1:
+ case v128i8:
+ case v64i16:
+ case v32i32:
+ case v16i64: return 1024;
+ case v256i8:
+ case v128i16:
+ case v64i32:
+ case v32i64: return 2048;
+ }
+ }
+
+ unsigned getScalarSizeInBits() const {
+ return getScalarType().getSizeInBits();
+ }
+
+ /// getStoreSize - Return the number of bytes overwritten by a store
+ /// of the specified value type.
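+  /// For example (a sketch; the values follow from getSizeInBits above),
+  /// this is 1 for MVT::i1 and 10 for MVT::f80: sizes round up to whole
+  /// bytes.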
+ unsigned getStoreSize() const {
+ return (getSizeInBits() + 7) / 8;
+ }
+
+ /// getStoreSizeInBits - Return the number of bits overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSizeInBits() const {
+ return getStoreSize() * 8;
+ }
+
+ /// Return true if this has more bits than VT.
+ bool bitsGT(MVT VT) const {
+ return getSizeInBits() > VT.getSizeInBits();
+ }
+
+ /// Return true if this has no less bits than VT.
+ bool bitsGE(MVT VT) const {
+ return getSizeInBits() >= VT.getSizeInBits();
+ }
+
+ /// Return true if this has less bits than VT.
+ bool bitsLT(MVT VT) const {
+ return getSizeInBits() < VT.getSizeInBits();
+ }
+
+ /// Return true if this has no more bits than VT.
+ bool bitsLE(MVT VT) const {
+ return getSizeInBits() <= VT.getSizeInBits();
+ }
+
+ static MVT getFloatingPointVT(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ llvm_unreachable("Bad bit width!");
+ case 16:
+ return MVT::f16;
+ case 32:
+ return MVT::f32;
+ case 64:
+ return MVT::f64;
+ case 80:
+ return MVT::f80;
+ case 128:
+ return MVT::f128;
+ }
+ }
+
+ static MVT getIntegerVT(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+ case 1:
+ return MVT::i1;
+ case 8:
+ return MVT::i8;
+ case 16:
+ return MVT::i16;
+ case 32:
+ return MVT::i32;
+ case 64:
+ return MVT::i64;
+ case 128:
+ return MVT::i128;
+ }
+ }
+
+ static MVT getVectorVT(MVT VT, unsigned NumElements) {
+ switch (VT.SimpleTy) {
+ default:
+ break;
+ case MVT::i1:
+ if (NumElements == 2) return MVT::v2i1;
+ if (NumElements == 4) return MVT::v4i1;
+ if (NumElements == 8) return MVT::v8i1;
+ if (NumElements == 16) return MVT::v16i1;
+ if (NumElements == 32) return MVT::v32i1;
+ if (NumElements == 64) return MVT::v64i1;
+ if (NumElements == 512) return MVT::v512i1;
+ if (NumElements == 1024) return MVT::v1024i1;
+ break;
+ case MVT::i8:
+ if (NumElements == 1) return MVT::v1i8;
+ if (NumElements == 2) return MVT::v2i8;
+ if (NumElements == 4) return MVT::v4i8;
+ if (NumElements == 8) return MVT::v8i8;
+ if (NumElements == 16) return MVT::v16i8;
+ if (NumElements == 32) return MVT::v32i8;
+ if (NumElements == 64) return MVT::v64i8;
+ if (NumElements == 128) return MVT::v128i8;
+ if (NumElements == 256) return MVT::v256i8;
+ break;
+ case MVT::i16:
+ if (NumElements == 1) return MVT::v1i16;
+ if (NumElements == 2) return MVT::v2i16;
+ if (NumElements == 4) return MVT::v4i16;
+ if (NumElements == 8) return MVT::v8i16;
+ if (NumElements == 16) return MVT::v16i16;
+ if (NumElements == 32) return MVT::v32i16;
+ if (NumElements == 64) return MVT::v64i16;
+ if (NumElements == 128) return MVT::v128i16;
+ break;
+ case MVT::i32:
+ if (NumElements == 1) return MVT::v1i32;
+ if (NumElements == 2) return MVT::v2i32;
+ if (NumElements == 4) return MVT::v4i32;
+ if (NumElements == 8) return MVT::v8i32;
+ if (NumElements == 16) return MVT::v16i32;
+ if (NumElements == 32) return MVT::v32i32;
+ if (NumElements == 64) return MVT::v64i32;
+ break;
+ case MVT::i64:
+ if (NumElements == 1) return MVT::v1i64;
+ if (NumElements == 2) return MVT::v2i64;
+ if (NumElements == 4) return MVT::v4i64;
+ if (NumElements == 8) return MVT::v8i64;
+ if (NumElements == 16) return MVT::v16i64;
+ if (NumElements == 32) return MVT::v32i64;
+ break;
+ case MVT::i128:
+ if (NumElements == 1) return MVT::v1i128;
+ break;
+ case MVT::f16:
+ if (NumElements == 2) return MVT::v2f16;
+ if (NumElements == 4) return MVT::v4f16;
+ if (NumElements == 8) return MVT::v8f16;
+ break;
+ case MVT::f32:
+ if (NumElements == 1) return MVT::v1f32;
+ if (NumElements == 2) return MVT::v2f32;
+ if (NumElements == 4) return MVT::v4f32;
+ if (NumElements == 8) return MVT::v8f32;
+ if (NumElements == 16) return MVT::v16f32;
+ break;
+ case MVT::f64:
+ if (NumElements == 1) return MVT::v1f64;
+ if (NumElements == 2) return MVT::v2f64;
+ if (NumElements == 4) return MVT::v4f64;
+ if (NumElements == 8) return MVT::v8f64;
+ break;
+ }
+ return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
+ }
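+
+  // For illustration, a few round-trips through the queries above (a sketch;
+  // the results follow directly from the enum values):
+  //
+  //   MVT VT = MVT::getVectorVT(MVT::f32, 4); // MVT::v4f32
+  //   VT.is128BitVector();                    // true
+  //   VT.getVectorElementType();              // MVT::f32
+  //   MVT::getIntegerVT(VT.getSizeInBits());  // MVT::i128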
+
+ /// Return the value type corresponding to the specified type. This returns
+ /// all pointers as iPTR. If HandleUnknown is true, unknown types are
+ /// returned as Other, otherwise they are invalid.
+ static MVT getVT(Type *Ty, bool HandleUnknown = false);
+
+ private:
+ /// A simple iterator over the MVT::SimpleValueType enum.
+ struct mvt_iterator {
+ SimpleValueType VT;
+ mvt_iterator(SimpleValueType VT) : VT(VT) {}
+ MVT operator*() const { return VT; }
+ bool operator!=(const mvt_iterator &LHS) const { return VT != LHS.VT; }
+ mvt_iterator& operator++() {
+ VT = (MVT::SimpleValueType)((int)VT + 1);
+ assert((int)VT <= MVT::MAX_ALLOWED_VALUETYPE &&
+ "MVT iterator overflowed.");
+ return *this;
+ }
+ };
+ /// A range of the MVT::SimpleValueType enum.
+ typedef iterator_range<mvt_iterator> mvt_range;
+
+ public:
+ /// SimpleValueType Iteration
+ /// @{
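+  /// Each range excludes its upper sentinel, so it can be used directly in a
+  /// range-based for loop, e.g. (sketch):
+  /// \code
+  ///   for (MVT VT : MVT::integer_valuetypes())
+  ///     (void)VT; // visits i1, i8, i16, i32, i64, i128 in order
+  /// \endcode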
+ static mvt_range all_valuetypes() {
+ return mvt_range(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE);
+ }
+ static mvt_range integer_valuetypes() {
+ return mvt_range(MVT::FIRST_INTEGER_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_INTEGER_VALUETYPE + 1));
+ }
+ static mvt_range fp_valuetypes() {
+ return mvt_range(MVT::FIRST_FP_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_FP_VALUETYPE + 1));
+ }
+ static mvt_range vector_valuetypes() {
+ return mvt_range(MVT::FIRST_VECTOR_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_VECTOR_VALUETYPE + 1));
+ }
+ static mvt_range integer_vector_valuetypes() {
+ return mvt_range(
+ MVT::FIRST_INTEGER_VECTOR_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_INTEGER_VECTOR_VALUETYPE + 1));
+ }
+ static mvt_range fp_vector_valuetypes() {
+ return mvt_range(
+ MVT::FIRST_FP_VECTOR_VALUETYPE,
+ (MVT::SimpleValueType)(MVT::LAST_FP_VECTOR_VALUETYPE + 1));
+ }
+ /// @}
+ };
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h b/contrib/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h
new file mode 100644
index 0000000..02d39fe
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/CostAllocator.h
@@ -0,0 +1,132 @@
+//===---------- CostAllocator.h - PBQP Cost Allocator -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines classes conforming to the PBQP cost value manager concept.
+//
+// Cost value managers are memory managers for PBQP cost values (vectors and
+// matrices). Since PBQP graphs can grow very large (e.g. hundreds of
+// thousands of edges on the largest function in SPEC2006), cost values are
+// pooled so that identical vectors and matrices share a single allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+#define LLVM_CODEGEN_PBQP_COSTALLOCATOR_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <memory>
+#include <type_traits>
+
+namespace llvm {
+namespace PBQP {
+
+template <typename ValueT>
+class ValuePool {
+public:
+ typedef std::shared_ptr<const ValueT> PoolRef;
+
+private:
+
+ class PoolEntry : public std::enable_shared_from_this<PoolEntry> {
+ public:
+ template <typename ValueKeyT>
+ PoolEntry(ValuePool &Pool, ValueKeyT Value)
+ : Pool(Pool), Value(std::move(Value)) {}
+ ~PoolEntry() { Pool.removeEntry(this); }
+ const ValueT& getValue() const { return Value; }
+ private:
+ ValuePool &Pool;
+ ValueT Value;
+ };
+
+ class PoolEntryDSInfo {
+ public:
+ static inline PoolEntry* getEmptyKey() { return nullptr; }
+
+ static inline PoolEntry* getTombstoneKey() {
+ return reinterpret_cast<PoolEntry*>(static_cast<uintptr_t>(1));
+ }
+
+ template <typename ValueKeyT>
+ static unsigned getHashValue(const ValueKeyT &C) {
+ return hash_value(C);
+ }
+
+ static unsigned getHashValue(PoolEntry *P) {
+ return getHashValue(P->getValue());
+ }
+
+ static unsigned getHashValue(const PoolEntry *P) {
+ return getHashValue(P->getValue());
+ }
+
+ template <typename ValueKeyT1, typename ValueKeyT2>
+ static
+ bool isEqual(const ValueKeyT1 &C1, const ValueKeyT2 &C2) {
+ return C1 == C2;
+ }
+
+ template <typename ValueKeyT>
+ static bool isEqual(const ValueKeyT &C, PoolEntry *P) {
+ if (P == getEmptyKey() || P == getTombstoneKey())
+ return false;
+ return isEqual(C, P->getValue());
+ }
+
+ static bool isEqual(PoolEntry *P1, PoolEntry *P2) {
+ if (P1 == getEmptyKey() || P1 == getTombstoneKey())
+ return P1 == P2;
+ return isEqual(P1->getValue(), P2);
+ }
+
+ };
+
+ typedef DenseSet<PoolEntry*, PoolEntryDSInfo> EntrySetT;
+
+ EntrySetT EntrySet;
+
+ void removeEntry(PoolEntry *P) { EntrySet.erase(P); }
+
+public:
+ template <typename ValueKeyT> PoolRef getValue(ValueKeyT ValueKey) {
+ typename EntrySetT::iterator I = EntrySet.find_as(ValueKey);
+
+ if (I != EntrySet.end())
+ return PoolRef((*I)->shared_from_this(), &(*I)->getValue());
+
+    auto P = std::make_shared<PoolEntry>(*this, std::move(ValueKey));
+    EntrySet.insert(P.get());
+    // Grab the value pointer before P is moved into the aliasing constructor:
+    // function argument evaluation order is unspecified, so &P->getValue()
+    // must not race with the move out of P.
+    const ValueT *ValuePtr = &P->getValue();
+    return PoolRef(std::move(P), ValuePtr);
+ }
+};
+
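+// A usage sketch (illustrative only; PBQP::Vector from Math.h stands in for
+// the pooled value type):
+//
+//   ValuePool<Vector> Pool;
+//   ValuePool<Vector>::PoolRef A = Pool.getValue(Vector(2, 1.0));
+//   ValuePool<Vector>::PoolRef B = Pool.getValue(Vector(2, 1.0));
+//   // Identical costs are uniqued, so A and B alias the same pool entry.
+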
+template <typename VectorT, typename MatrixT>
+class PoolCostAllocator {
+private:
+ typedef ValuePool<VectorT> VectorCostPool;
+ typedef ValuePool<MatrixT> MatrixCostPool;
+public:
+ typedef VectorT Vector;
+ typedef MatrixT Matrix;
+ typedef typename VectorCostPool::PoolRef VectorPtr;
+ typedef typename MatrixCostPool::PoolRef MatrixPtr;
+
+ template <typename VectorKeyT>
+ VectorPtr getVector(VectorKeyT v) { return VectorPool.getValue(std::move(v)); }
+
+ template <typename MatrixKeyT>
+ MatrixPtr getMatrix(MatrixKeyT m) { return MatrixPool.getValue(std::move(m)); }
+private:
+ VectorCostPool VectorPool;
+ MatrixCostPool MatrixPool;
+};
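+
+// Typical instantiation (a sketch; a concrete solver would expose this as
+// part of its traits):
+//
+//   typedef PoolCostAllocator<Vector, Matrix> CostAllocator;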
+
+} // namespace PBQP
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
new file mode 100644
index 0000000..f73383e
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Graph.h
@@ -0,0 +1,681 @@
+//===-------------------- Graph.h - PBQP Graph ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Graph class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_PBQP_GRAPH_H
+#define LLVM_CODEGEN_PBQP_GRAPH_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/Support/Debug.h"
+#include <list>
+#include <map>
+#include <set>
+#include <vector>
+
+namespace llvm {
+namespace PBQP {
+
+ class GraphBase {
+ public:
+ typedef unsigned NodeId;
+ typedef unsigned EdgeId;
+
+ /// @brief Returns a value representing an invalid (non-existent) node.
+ static NodeId invalidNodeId() {
+ return std::numeric_limits<NodeId>::max();
+ }
+
+ /// @brief Returns a value representing an invalid (non-existent) edge.
+ static EdgeId invalidEdgeId() {
+ return std::numeric_limits<EdgeId>::max();
+ }
+ };
+
+ /// PBQP Graph class.
+ /// Instances of this class describe PBQP problems.
+ ///
+ template <typename SolverT>
+ class Graph : public GraphBase {
+ private:
+ typedef typename SolverT::CostAllocator CostAllocator;
+ public:
+ typedef typename SolverT::RawVector RawVector;
+ typedef typename SolverT::RawMatrix RawMatrix;
+ typedef typename SolverT::Vector Vector;
+ typedef typename SolverT::Matrix Matrix;
+ typedef typename CostAllocator::VectorPtr VectorPtr;
+ typedef typename CostAllocator::MatrixPtr MatrixPtr;
+ typedef typename SolverT::NodeMetadata NodeMetadata;
+ typedef typename SolverT::EdgeMetadata EdgeMetadata;
+ typedef typename SolverT::GraphMetadata GraphMetadata;
+
+ private:
+
+ class NodeEntry {
+ public:
+ typedef std::vector<EdgeId> AdjEdgeList;
+ typedef AdjEdgeList::size_type AdjEdgeIdx;
+ typedef AdjEdgeList::const_iterator AdjEdgeItr;
+
+ static AdjEdgeIdx getInvalidAdjEdgeIdx() {
+ return std::numeric_limits<AdjEdgeIdx>::max();
+ }
+
+ NodeEntry(VectorPtr Costs) : Costs(Costs) {}
+
+ AdjEdgeIdx addAdjEdgeId(EdgeId EId) {
+ AdjEdgeIdx Idx = AdjEdgeIds.size();
+ AdjEdgeIds.push_back(EId);
+ return Idx;
+ }
+
+ void removeAdjEdgeId(Graph &G, NodeId ThisNId, AdjEdgeIdx Idx) {
+ // Swap-and-pop for fast removal.
+ // 1) Update the adj index of the edge currently at back().
+ // 2) Move last Edge down to Idx.
+ // 3) pop_back()
+ // If Idx == size() - 1 then the setAdjEdgeIdx and swap are
+ // redundant, but both operations are cheap.
+ G.getEdge(AdjEdgeIds.back()).setAdjEdgeIdx(ThisNId, Idx);
+ AdjEdgeIds[Idx] = AdjEdgeIds.back();
+ AdjEdgeIds.pop_back();
+ }
+
+ const AdjEdgeList& getAdjEdgeIds() const { return AdjEdgeIds; }
+
+ VectorPtr Costs;
+ NodeMetadata Metadata;
+ private:
+ AdjEdgeList AdjEdgeIds;
+ };
+
+ class EdgeEntry {
+ public:
+ EdgeEntry(NodeId N1Id, NodeId N2Id, MatrixPtr Costs)
+ : Costs(Costs) {
+ NIds[0] = N1Id;
+ NIds[1] = N2Id;
+ ThisEdgeAdjIdxs[0] = NodeEntry::getInvalidAdjEdgeIdx();
+ ThisEdgeAdjIdxs[1] = NodeEntry::getInvalidAdjEdgeIdx();
+ }
+
+ void invalidate() {
+ NIds[0] = NIds[1] = Graph::invalidNodeId();
+ ThisEdgeAdjIdxs[0] = ThisEdgeAdjIdxs[1] =
+ NodeEntry::getInvalidAdjEdgeIdx();
+ Costs = nullptr;
+ }
+
+ void connectToN(Graph &G, EdgeId ThisEdgeId, unsigned NIdx) {
+ assert(ThisEdgeAdjIdxs[NIdx] == NodeEntry::getInvalidAdjEdgeIdx() &&
+ "Edge already connected to NIds[NIdx].");
+ NodeEntry &N = G.getNode(NIds[NIdx]);
+ ThisEdgeAdjIdxs[NIdx] = N.addAdjEdgeId(ThisEdgeId);
+ }
+
+ void connectTo(Graph &G, EdgeId ThisEdgeId, NodeId NId) {
+ if (NId == NIds[0])
+ connectToN(G, ThisEdgeId, 0);
+ else {
+ assert(NId == NIds[1] && "Edge does not connect NId.");
+ connectToN(G, ThisEdgeId, 1);
+ }
+ }
+
+ void connect(Graph &G, EdgeId ThisEdgeId) {
+ connectToN(G, ThisEdgeId, 0);
+ connectToN(G, ThisEdgeId, 1);
+ }
+
+ void setAdjEdgeIdx(NodeId NId, typename NodeEntry::AdjEdgeIdx NewIdx) {
+ if (NId == NIds[0])
+ ThisEdgeAdjIdxs[0] = NewIdx;
+ else {
+ assert(NId == NIds[1] && "Edge not connected to NId");
+ ThisEdgeAdjIdxs[1] = NewIdx;
+ }
+ }
+
+ void disconnectFromN(Graph &G, unsigned NIdx) {
+ assert(ThisEdgeAdjIdxs[NIdx] != NodeEntry::getInvalidAdjEdgeIdx() &&
+ "Edge not connected to NIds[NIdx].");
+ NodeEntry &N = G.getNode(NIds[NIdx]);
+ N.removeAdjEdgeId(G, NIds[NIdx], ThisEdgeAdjIdxs[NIdx]);
+ ThisEdgeAdjIdxs[NIdx] = NodeEntry::getInvalidAdjEdgeIdx();
+ }
+
+ void disconnectFrom(Graph &G, NodeId NId) {
+ if (NId == NIds[0])
+ disconnectFromN(G, 0);
+ else {
+ assert(NId == NIds[1] && "Edge does not connect NId");
+ disconnectFromN(G, 1);
+ }
+ }
+
+ NodeId getN1Id() const { return NIds[0]; }
+ NodeId getN2Id() const { return NIds[1]; }
+ MatrixPtr Costs;
+ EdgeMetadata Metadata;
+ private:
+ NodeId NIds[2];
+ typename NodeEntry::AdjEdgeIdx ThisEdgeAdjIdxs[2];
+ };
+
+ // ----- MEMBERS -----
+
+ GraphMetadata Metadata;
+ CostAllocator CostAlloc;
+ SolverT *Solver;
+
+ typedef std::vector<NodeEntry> NodeVector;
+ typedef std::vector<NodeId> FreeNodeVector;
+ NodeVector Nodes;
+ FreeNodeVector FreeNodeIds;
+
+ typedef std::vector<EdgeEntry> EdgeVector;
+ typedef std::vector<EdgeId> FreeEdgeVector;
+ EdgeVector Edges;
+ FreeEdgeVector FreeEdgeIds;
+
+ // ----- INTERNAL METHODS -----
+
+ NodeEntry &getNode(NodeId NId) {
+ assert(NId < Nodes.size() && "Out of bound NodeId");
+ return Nodes[NId];
+ }
+ const NodeEntry &getNode(NodeId NId) const {
+ assert(NId < Nodes.size() && "Out of bound NodeId");
+ return Nodes[NId];
+ }
+
+ EdgeEntry& getEdge(EdgeId EId) { return Edges[EId]; }
+ const EdgeEntry& getEdge(EdgeId EId) const { return Edges[EId]; }
+
+ NodeId addConstructedNode(NodeEntry N) {
+ NodeId NId = 0;
+ if (!FreeNodeIds.empty()) {
+ NId = FreeNodeIds.back();
+ FreeNodeIds.pop_back();
+ Nodes[NId] = std::move(N);
+ } else {
+ NId = Nodes.size();
+ Nodes.push_back(std::move(N));
+ }
+ return NId;
+ }
+
+ EdgeId addConstructedEdge(EdgeEntry E) {
+ assert(findEdge(E.getN1Id(), E.getN2Id()) == invalidEdgeId() &&
+ "Attempt to add duplicate edge.");
+ EdgeId EId = 0;
+ if (!FreeEdgeIds.empty()) {
+ EId = FreeEdgeIds.back();
+ FreeEdgeIds.pop_back();
+ Edges[EId] = std::move(E);
+ } else {
+ EId = Edges.size();
+ Edges.push_back(std::move(E));
+ }
+
+ EdgeEntry &NE = getEdge(EId);
+
+ // Add the edge to the adjacency sets of its nodes.
+ NE.connect(*this, EId);
+ return EId;
+ }
+
+ Graph(const Graph &Other) {}
+ void operator=(const Graph &Other) {}
+
+ public:
+
+ typedef typename NodeEntry::AdjEdgeItr AdjEdgeItr;
+
+ class NodeItr {
+ public:
+ typedef std::forward_iterator_tag iterator_category;
+ typedef NodeId value_type;
+ typedef int difference_type;
+ typedef NodeId* pointer;
+ typedef NodeId& reference;
+
+ NodeItr(NodeId CurNId, const Graph &G)
+ : CurNId(CurNId), EndNId(G.Nodes.size()), FreeNodeIds(G.FreeNodeIds) {
+ this->CurNId = findNextInUse(CurNId); // Move to first in-use node id
+ }
+
+ bool operator==(const NodeItr &O) const { return CurNId == O.CurNId; }
+ bool operator!=(const NodeItr &O) const { return !(*this == O); }
+ NodeItr& operator++() { CurNId = findNextInUse(++CurNId); return *this; }
+ NodeId operator*() const { return CurNId; }
+
+ private:
+ NodeId findNextInUse(NodeId NId) const {
+ while (NId < EndNId &&
+ std::find(FreeNodeIds.begin(), FreeNodeIds.end(), NId) !=
+ FreeNodeIds.end()) {
+ ++NId;
+ }
+ return NId;
+ }
+
+ NodeId CurNId, EndNId;
+ const FreeNodeVector &FreeNodeIds;
+ };
+
+ class EdgeItr {
+ public:
+ EdgeItr(EdgeId CurEId, const Graph &G)
+ : CurEId(CurEId), EndEId(G.Edges.size()), FreeEdgeIds(G.FreeEdgeIds) {
+ this->CurEId = findNextInUse(CurEId); // Move to first in-use edge id
+ }
+
+ bool operator==(const EdgeItr &O) const { return CurEId == O.CurEId; }
+ bool operator!=(const EdgeItr &O) const { return !(*this == O); }
+ EdgeItr& operator++() { CurEId = findNextInUse(++CurEId); return *this; }
+ EdgeId operator*() const { return CurEId; }
+
+ private:
+ EdgeId findNextInUse(EdgeId EId) const {
+ while (EId < EndEId &&
+ std::find(FreeEdgeIds.begin(), FreeEdgeIds.end(), EId) !=
+ FreeEdgeIds.end()) {
+ ++EId;
+ }
+ return EId;
+ }
+
+ EdgeId CurEId, EndEId;
+ const FreeEdgeVector &FreeEdgeIds;
+ };
+
+ class NodeIdSet {
+ public:
+ NodeIdSet(const Graph &G) : G(G) { }
+ NodeItr begin() const { return NodeItr(0, G); }
+ NodeItr end() const { return NodeItr(G.Nodes.size(), G); }
+ bool empty() const { return G.Nodes.empty(); }
+ typename NodeVector::size_type size() const {
+ return G.Nodes.size() - G.FreeNodeIds.size();
+ }
+ private:
+ const Graph& G;
+ };
+
+ class EdgeIdSet {
+ public:
+ EdgeIdSet(const Graph &G) : G(G) { }
+ EdgeItr begin() const { return EdgeItr(0, G); }
+ EdgeItr end() const { return EdgeItr(G.Edges.size(), G); }
+ bool empty() const { return G.Edges.empty(); }
+      typename EdgeVector::size_type size() const {
+ return G.Edges.size() - G.FreeEdgeIds.size();
+ }
+ private:
+ const Graph& G;
+ };
+
+ class AdjEdgeIdSet {
+ public:
+ AdjEdgeIdSet(const NodeEntry &NE) : NE(NE) { }
+ typename NodeEntry::AdjEdgeItr begin() const {
+ return NE.getAdjEdgeIds().begin();
+ }
+ typename NodeEntry::AdjEdgeItr end() const {
+ return NE.getAdjEdgeIds().end();
+ }
+ bool empty() const { return NE.getAdjEdgeIds().empty(); }
+ typename NodeEntry::AdjEdgeList::size_type size() const {
+ return NE.getAdjEdgeIds().size();
+ }
+ private:
+ const NodeEntry &NE;
+ };
+
+ /// @brief Construct an empty PBQP graph.
+ Graph() : Solver(nullptr) {}
+
+ /// @brief Construct an empty PBQP graph with the given graph metadata.
+ Graph(GraphMetadata Metadata) : Metadata(Metadata), Solver(nullptr) {}
+
+ /// @brief Get a reference to the graph metadata.
+ GraphMetadata& getMetadata() { return Metadata; }
+
+ /// @brief Get a const-reference to the graph metadata.
+ const GraphMetadata& getMetadata() const { return Metadata; }
+
+ /// @brief Lock this graph to the given solver instance in preparation
+ /// for running the solver. This method will call solver.handleAddNode for
+ /// each node in the graph, and handleAddEdge for each edge, to give the
+    /// solver an opportunity to set up any required metadata.
+ void setSolver(SolverT &S) {
+ assert(!Solver && "Solver already set. Call unsetSolver().");
+ Solver = &S;
+ for (auto NId : nodeIds())
+ Solver->handleAddNode(NId);
+ for (auto EId : edgeIds())
+ Solver->handleAddEdge(EId);
+ }
+
+ /// @brief Release from solver instance.
+ void unsetSolver() {
+ assert(Solver && "Solver not set.");
+ Solver = nullptr;
+ }
+
+ /// @brief Add a node with the given costs.
+ /// @param Costs Cost vector for the new node.
+    /// @return Node id for the added node.
+ template <typename OtherVectorT>
+ NodeId addNode(OtherVectorT Costs) {
+ // Get cost vector from the problem domain
+ VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+ NodeId NId = addConstructedNode(NodeEntry(AllocatedCosts));
+ if (Solver)
+ Solver->handleAddNode(NId);
+ return NId;
+ }
+
+ /// @brief Add a node bypassing the cost allocator.
+ /// @param Costs Cost vector ptr for the new node (must be convertible to
+ /// VectorPtr).
+    /// @return Node id for the added node.
+ ///
+ /// This method allows for fast addition of a node whose costs don't need
+ /// to be passed through the cost allocator. The most common use case for
+ /// this is when duplicating costs from an existing node (when using a
+ /// pooling allocator). These have already been uniqued, so we can avoid
+ /// re-constructing and re-uniquing them by attaching them directly to the
+ /// new node.
+ template <typename OtherVectorPtrT>
+ NodeId addNodeBypassingCostAllocator(OtherVectorPtrT Costs) {
+ NodeId NId = addConstructedNode(NodeEntry(Costs));
+ if (Solver)
+ Solver->handleAddNode(NId);
+ return NId;
+ }
+
+ /// @brief Add an edge between the given nodes with the given costs.
+ /// @param N1Id First node.
+ /// @param N2Id Second node.
+ /// @param Costs Cost matrix for new edge.
+    /// @return Edge id for the added edge.
+    template <typename OtherMatrixT>
+    EdgeId addEdge(NodeId N1Id, NodeId N2Id, OtherMatrixT Costs) {
+ assert(getNodeCosts(N1Id).getLength() == Costs.getRows() &&
+ getNodeCosts(N2Id).getLength() == Costs.getCols() &&
+ "Matrix dimensions mismatch.");
+ // Get cost matrix from the problem domain.
+ MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+ EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, AllocatedCosts));
+ if (Solver)
+ Solver->handleAddEdge(EId);
+ return EId;
+ }
+
+ /// @brief Add an edge bypassing the cost allocator.
+ /// @param N1Id First node.
+ /// @param N2Id Second node.
+ /// @param Costs Cost matrix for new edge.
+    /// @return Edge id for the added edge.
+ ///
+ /// This method allows for fast addition of an edge whose costs don't need
+ /// to be passed through the cost allocator. The most common use case for
+ /// this is when duplicating costs from an existing edge (when using a
+ /// pooling allocator). These have already been uniqued, so we can avoid
+ /// re-constructing and re-uniquing them by attaching them directly to the
+ /// new edge.
+ template <typename OtherMatrixPtrT>
+    EdgeId addEdgeBypassingCostAllocator(NodeId N1Id, NodeId N2Id,
+ OtherMatrixPtrT Costs) {
+ assert(getNodeCosts(N1Id).getLength() == Costs->getRows() &&
+ getNodeCosts(N2Id).getLength() == Costs->getCols() &&
+ "Matrix dimensions mismatch.");
+ // Get cost matrix from the problem domain.
+ EdgeId EId = addConstructedEdge(EdgeEntry(N1Id, N2Id, Costs));
+ if (Solver)
+ Solver->handleAddEdge(EId);
+ return EId;
+ }
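+
+    // For illustration, building a tiny two-node problem (a sketch; MySolverT
+    // is a hypothetical solver traits class whose Vector and Matrix behave
+    // like PBQP::Vector and PBQP::Matrix from Math.h):
+    //
+    //   Graph<MySolverT> G;
+    //   NodeId N1 = G.addNode(Vector(2, 0));            // two options
+    //   NodeId N2 = G.addNode(Vector(2, 0));
+    //   EdgeId E = G.addEdge(N1, N2, Matrix(2, 2, 1));  // 2x2 pairwise costs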
+
+ /// @brief Returns true if the graph is empty.
+ bool empty() const { return NodeIdSet(*this).empty(); }
+
+ NodeIdSet nodeIds() const { return NodeIdSet(*this); }
+ EdgeIdSet edgeIds() const { return EdgeIdSet(*this); }
+
+ AdjEdgeIdSet adjEdgeIds(NodeId NId) { return AdjEdgeIdSet(getNode(NId)); }
+
+ /// @brief Get the number of nodes in the graph.
+ /// @return Number of nodes in the graph.
+ unsigned getNumNodes() const { return NodeIdSet(*this).size(); }
+
+ /// @brief Get the number of edges in the graph.
+ /// @return Number of edges in the graph.
+ unsigned getNumEdges() const { return EdgeIdSet(*this).size(); }
+
+ /// @brief Set a node's cost vector.
+ /// @param NId Node to update.
+ /// @param Costs New costs to set.
+ template <typename OtherVectorT>
+ void setNodeCosts(NodeId NId, OtherVectorT Costs) {
+ VectorPtr AllocatedCosts = CostAlloc.getVector(std::move(Costs));
+ if (Solver)
+ Solver->handleSetNodeCosts(NId, *AllocatedCosts);
+ getNode(NId).Costs = AllocatedCosts;
+ }
+
+ /// @brief Get a VectorPtr to a node's cost vector. Rarely useful - use
+ /// getNodeCosts where possible.
+ /// @param NId Node id.
+ /// @return VectorPtr to node cost vector.
+ ///
+ /// This method is primarily useful for duplicating costs quickly by
+ /// bypassing the cost allocator. See addNodeBypassingCostAllocator. Prefer
+ /// getNodeCosts when dealing with node cost values.
+ const VectorPtr& getNodeCostsPtr(NodeId NId) const {
+ return getNode(NId).Costs;
+ }
+
+ /// @brief Get a node's cost vector.
+ /// @param NId Node id.
+ /// @return Node cost vector.
+ const Vector& getNodeCosts(NodeId NId) const {
+ return *getNodeCostsPtr(NId);
+ }
+
+ NodeMetadata& getNodeMetadata(NodeId NId) {
+ return getNode(NId).Metadata;
+ }
+
+ const NodeMetadata& getNodeMetadata(NodeId NId) const {
+ return getNode(NId).Metadata;
+ }
+
+ typename NodeEntry::AdjEdgeList::size_type getNodeDegree(NodeId NId) const {
+ return getNode(NId).getAdjEdgeIds().size();
+ }
+
+ /// @brief Update an edge's cost matrix.
+ /// @param EId Edge id.
+ /// @param Costs New cost matrix.
+ template <typename OtherMatrixT>
+ void updateEdgeCosts(EdgeId EId, OtherMatrixT Costs) {
+ MatrixPtr AllocatedCosts = CostAlloc.getMatrix(std::move(Costs));
+ if (Solver)
+ Solver->handleUpdateCosts(EId, *AllocatedCosts);
+ getEdge(EId).Costs = AllocatedCosts;
+ }
+
+    /// @brief Get a MatrixPtr to an edge's cost matrix. Rarely useful - use
+ /// getEdgeCosts where possible.
+ /// @param EId Edge id.
+ /// @return MatrixPtr to edge cost matrix.
+ ///
+ /// This method is primarily useful for duplicating costs quickly by
+    /// bypassing the cost allocator. See addEdgeBypassingCostAllocator. Prefer
+ /// getEdgeCosts when dealing with edge cost values.
+ const MatrixPtr& getEdgeCostsPtr(EdgeId EId) const {
+ return getEdge(EId).Costs;
+ }
+
+ /// @brief Get an edge's cost matrix.
+ /// @param EId Edge id.
+ /// @return Edge cost matrix.
+ const Matrix& getEdgeCosts(EdgeId EId) const {
+ return *getEdge(EId).Costs;
+ }
+
+ EdgeMetadata& getEdgeMetadata(EdgeId EId) {
+ return getEdge(EId).Metadata;
+ }
+
+ const EdgeMetadata& getEdgeMetadata(EdgeId EId) const {
+ return getEdge(EId).Metadata;
+ }
+
+ /// @brief Get the first node connected to this edge.
+ /// @param EId Edge id.
+ /// @return The first node connected to the given edge.
+ NodeId getEdgeNode1Id(EdgeId EId) const {
+ return getEdge(EId).getN1Id();
+ }
+
+ /// @brief Get the second node connected to this edge.
+ /// @param EId Edge id.
+ /// @return The second node connected to the given edge.
+ NodeId getEdgeNode2Id(EdgeId EId) const {
+ return getEdge(EId).getN2Id();
+ }
+
+ /// @brief Get the "other" node connected to this edge.
+ /// @param EId Edge id.
+ /// @param NId Node id for the "given" node.
+    /// @return The id of the "other" node connected to this edge.
+ NodeId getEdgeOtherNodeId(EdgeId EId, NodeId NId) {
+ EdgeEntry &E = getEdge(EId);
+ if (E.getN1Id() == NId) {
+ return E.getN2Id();
+ } // else
+ return E.getN1Id();
+ }
+
+ /// @brief Get the edge connecting two nodes.
+ /// @param N1Id First node id.
+ /// @param N2Id Second node id.
+ /// @return An id for edge (N1Id, N2Id) if such an edge exists,
+ /// otherwise returns an invalid edge id.
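+    ///
+    /// Note that this scans N1Id's adjacency list, so it runs in time linear
+    /// in the degree of N1Id.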
+ EdgeId findEdge(NodeId N1Id, NodeId N2Id) {
+ for (auto AEId : adjEdgeIds(N1Id)) {
+ if ((getEdgeNode1Id(AEId) == N2Id) ||
+ (getEdgeNode2Id(AEId) == N2Id)) {
+ return AEId;
+ }
+ }
+ return invalidEdgeId();
+ }
+
+ /// @brief Remove a node from the graph.
+ /// @param NId Node id.
+ void removeNode(NodeId NId) {
+ if (Solver)
+ Solver->handleRemoveNode(NId);
+      NodeEntry &N = getNode(NId);
+      // Iterate over a copy of the adjacent edge ids: removeEdge() mutates
+      // the node's adjacency list (swap-and-pop), which would invalidate any
+      // iterators into it.
+      typename NodeEntry::AdjEdgeList AdjEdgeIds(N.getAdjEdgeIds());
+      for (EdgeId EId : AdjEdgeIds)
+        removeEdge(EId);
+ FreeNodeIds.push_back(NId);
+ }
+
+ /// @brief Disconnect an edge from the given node.
+ ///
+ /// Removes the given edge from the adjacency list of the given node.
+ /// This operation leaves the edge in an 'asymmetric' state: It will no
+ /// longer appear in an iteration over the given node's (NId's) edges, but
+ /// will appear in an iteration over the 'other', unnamed node's edges.
+ ///
+ /// This does not correspond to any normal graph operation, but exists to
+ /// support efficient PBQP graph-reduction based solvers. It is used to
+ /// 'effectively' remove the unnamed node from the graph while the solver
+    /// is performing the reduction. The solver will later call reconnectEdge
+ /// to restore the edge in the named node's adjacency list.
+ ///
+ /// Since the degree of a node is the number of connected edges,
+ /// disconnecting an edge from a node 'u' will cause the degree of 'u' to
+ /// drop by 1.
+ ///
+ /// A disconnected edge WILL still appear in an iteration over the graph
+ /// edges.
+ ///
+ /// A disconnected edge should not be removed from the graph, it should be
+ /// reconnected first.
+ ///
+ /// A disconnected edge can be reconnected by calling the reconnectEdge
+ /// method.
+ void disconnectEdge(EdgeId EId, NodeId NId) {
+ if (Solver)
+ Solver->handleDisconnectEdge(EId, NId);
+
+ EdgeEntry &E = getEdge(EId);
+ E.disconnectFrom(*this, NId);
+ }
+
+ /// @brief Convenience method to disconnect all neighbours from the given
+ /// node.
+ void disconnectAllNeighborsFromNode(NodeId NId) {
+ for (auto AEId : adjEdgeIds(NId))
+ disconnectEdge(AEId, getEdgeOtherNodeId(AEId, NId));
+ }
+
+ /// @brief Re-attach an edge to its nodes.
+ ///
+ /// Adds an edge that had been previously disconnected back into the
+ /// adjacency set of the nodes that the edge connects.
+ void reconnectEdge(EdgeId EId, NodeId NId) {
+ EdgeEntry &E = getEdge(EId);
+ E.connectTo(*this, EId, NId);
+ if (Solver)
+ Solver->handleReconnectEdge(EId, NId);
+ }
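+
+    // For illustration, the reduction pattern disconnect/reconnect supports
+    // (a sketch; solver bookkeeping omitted):
+    //
+    //   G.disconnectAllNeighborsFromNode(NId); // hide NId from its neighbors
+    //   // ... reduce the remaining graph, choose a solution for NId ...
+    //   for (auto AEId : G.adjEdgeIds(NId))
+    //     G.reconnectEdge(AEId, G.getEdgeOtherNodeId(AEId, NId));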
+
+ /// @brief Remove an edge from the graph.
+ /// @param EId Edge id.
+ void removeEdge(EdgeId EId) {
+ if (Solver)
+ Solver->handleRemoveEdge(EId);
+      EdgeEntry &E = getEdge(EId);
+      // Detach the edge from both endpoints' adjacency lists before recycling
+      // its id.
+      E.disconnectFrom(*this, E.getN1Id());
+      E.disconnectFrom(*this, E.getN2Id());
+ FreeEdgeIds.push_back(EId);
+ Edges[EId].invalidate();
+ }
+
+ /// @brief Remove all nodes and edges from the graph.
+ void clear() {
+ Nodes.clear();
+ FreeNodeIds.clear();
+ Edges.clear();
+ FreeEdgeIds.clear();
+ }
+ };
+
+} // namespace PBQP
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_GRAPH_H
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Math.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Math.h
new file mode 100644
index 0000000..2792608
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Math.h
@@ -0,0 +1,429 @@
+//===------ Math.h - PBQP Vector and Matrix classes -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_MATH_H
+#define LLVM_CODEGEN_PBQP_MATH_H
+
+#include "llvm/ADT/Hashing.h"
+#include <algorithm>
+#include <cassert>
+#include <functional>
+
+namespace llvm {
+namespace PBQP {
+
+typedef float PBQPNum;
+
+/// \brief PBQP Vector class.
+class Vector {
+ friend hash_code hash_value(const Vector &);
+public:
+
+ /// \brief Construct a PBQP vector of the given size.
+ explicit Vector(unsigned Length)
+ : Length(Length), Data(new PBQPNum[Length]) {
+ // llvm::dbgs() << "Constructing PBQP::Vector "
+ // << this << " (length " << Length << ")\n";
+ }
+
+ /// \brief Construct a PBQP vector with initializer.
+ Vector(unsigned Length, PBQPNum InitVal)
+ : Length(Length), Data(new PBQPNum[Length]) {
+ // llvm::dbgs() << "Constructing PBQP::Vector "
+ // << this << " (length " << Length << ", fill "
+ // << InitVal << ")\n";
+ std::fill(Data, Data + Length, InitVal);
+ }
+
+ /// \brief Copy construct a PBQP vector.
+ Vector(const Vector &V)
+ : Length(V.Length), Data(new PBQPNum[Length]) {
+ // llvm::dbgs() << "Copy-constructing PBQP::Vector " << this
+ // << " from PBQP::Vector " << &V << "\n";
+ std::copy(V.Data, V.Data + Length, Data);
+ }
+
+ /// \brief Move construct a PBQP vector.
+ Vector(Vector &&V)
+ : Length(V.Length), Data(V.Data) {
+ V.Length = 0;
+ V.Data = nullptr;
+ }
+
+ /// \brief Destroy this vector, return its memory.
+ ~Vector() {
+ // llvm::dbgs() << "Deleting PBQP::Vector " << this << "\n";
+ delete[] Data;
+ }
+
+ /// \brief Copy-assignment operator.
+  Vector& operator=(const Vector &V) {
+    // llvm::dbgs() << "Assigning to PBQP::Vector " << this
+    //              << " from PBQP::Vector " << &V << "\n";
+    if (this == &V)
+      return *this; // Guard against self-assignment freeing Data first.
+    delete[] Data;
+    Length = V.Length;
+    Data = new PBQPNum[Length];
+    std::copy(V.Data, V.Data + Length, Data);
+    return *this;
+  }
+
+ /// \brief Move-assignment operator.
+ Vector& operator=(Vector &&V) {
+ delete[] Data;
+ Length = V.Length;
+ Data = V.Data;
+ V.Length = 0;
+ V.Data = nullptr;
+ return *this;
+ }
+
+ /// \brief Comparison operator.
+ bool operator==(const Vector &V) const {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ if (Length != V.Length)
+ return false;
+ return std::equal(Data, Data + Length, V.Data);
+ }
+
+ /// \brief Return the length of the vector
+ unsigned getLength() const {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ return Length;
+ }
+
+ /// \brief Element access.
+ PBQPNum& operator[](unsigned Index) {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Index < Length && "Vector element access out of bounds.");
+ return Data[Index];
+ }
+
+ /// \brief Const element access.
+ const PBQPNum& operator[](unsigned Index) const {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Index < Length && "Vector element access out of bounds.");
+ return Data[Index];
+ }
+
+ /// \brief Add another vector to this one.
+ Vector& operator+=(const Vector &V) {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length == V.Length && "Vector length mismatch.");
+ std::transform(Data, Data + Length, V.Data, Data, std::plus<PBQPNum>());
+ return *this;
+ }
+
+ /// \brief Subtract another vector from this one.
+ Vector& operator-=(const Vector &V) {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ assert(Length == V.Length && "Vector length mismatch.");
+ std::transform(Data, Data + Length, V.Data, Data, std::minus<PBQPNum>());
+ return *this;
+ }
+
+ /// \brief Returns the index of the minimum value in this vector
+ unsigned minIndex() const {
+ assert(Length != 0 && Data != nullptr && "Invalid vector");
+ return std::min_element(Data, Data + Length) - Data;
+ }
+
+private:
+ unsigned Length;
+ PBQPNum *Data;
+};
+
+/// \brief Return a hash_code for the given vector.
+inline hash_code hash_value(const Vector &V) {
+ unsigned *VBegin = reinterpret_cast<unsigned*>(V.Data);
+ unsigned *VEnd = reinterpret_cast<unsigned*>(V.Data + V.Length);
+ return hash_combine(V.Length, hash_combine_range(VBegin, VEnd));
+}
+
+/// \brief Output a textual representation of the given vector on the given
+/// output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Vector &V) {
+ assert((V.getLength() != 0) && "Zero-length vector badness.");
+
+ OS << "[ " << V[0];
+ for (unsigned i = 1; i < V.getLength(); ++i)
+ OS << ", " << V[i];
+ OS << " ]";
+
+ return OS;
+}
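+
+// Example (sketch) of basic Vector arithmetic using the operations above:
+//   Vector V(3, 1.0);               // [ 1, 1, 1 ]
+//   V[1] = 0.5;                     // [ 1, 0.5, 1 ]
+//   Vector W(3, 2.0);               // [ 2, 2, 2 ]
+//   V += W;                         // [ 3, 2.5, 3 ]
+//   unsigned MinIdx = V.minIndex(); // 1 (the 2.5 entry)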
+
+/// \brief PBQP Matrix class.
+class Matrix {
+private:
+ friend hash_code hash_value(const Matrix &);
+public:
+
+ /// \brief Construct a PBQP Matrix with the given dimensions.
+ Matrix(unsigned Rows, unsigned Cols) :
+ Rows(Rows), Cols(Cols), Data(new PBQPNum[Rows * Cols]) {
+ }
+
+ /// \brief Construct a PBQP Matrix with the given dimensions and initial
+ /// value.
+ Matrix(unsigned Rows, unsigned Cols, PBQPNum InitVal)
+ : Rows(Rows), Cols(Cols), Data(new PBQPNum[Rows * Cols]) {
+ std::fill(Data, Data + (Rows * Cols), InitVal);
+ }
+
+ /// \brief Copy construct a PBQP matrix.
+ Matrix(const Matrix &M)
+ : Rows(M.Rows), Cols(M.Cols), Data(new PBQPNum[Rows * Cols]) {
+ std::copy(M.Data, M.Data + (Rows * Cols), Data);
+ }
+
+ /// \brief Move construct a PBQP matrix.
+ Matrix(Matrix &&M)
+ : Rows(M.Rows), Cols(M.Cols), Data(M.Data) {
+ M.Rows = M.Cols = 0;
+ M.Data = nullptr;
+ }
+
+ /// \brief Destroy this matrix, return its memory.
+ ~Matrix() { delete[] Data; }
+
+ /// \brief Copy-assignment operator.
+ Matrix& operator=(const Matrix &M) {
+ delete[] Data;
+ Rows = M.Rows; Cols = M.Cols;
+ Data = new PBQPNum[Rows * Cols];
+ std::copy(M.Data, M.Data + (Rows * Cols), Data);
+ return *this;
+ }
+
+ /// \brief Move-assignment operator.
+ Matrix& operator=(Matrix &&M) {
+ delete[] Data;
+ Rows = M.Rows;
+ Cols = M.Cols;
+ Data = M.Data;
+ M.Rows = M.Cols = 0;
+ M.Data = nullptr;
+ return *this;
+ }
+
+ /// \brief Comparison operator.
+ bool operator==(const Matrix &M) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ if (Rows != M.Rows || Cols != M.Cols)
+ return false;
+ return std::equal(Data, Data + (Rows * Cols), M.Data);
+ }
+
+ /// \brief Return the number of rows in this matrix.
+ unsigned getRows() const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ return Rows;
+ }
+
+ /// \brief Return the number of cols in this matrix.
+ unsigned getCols() const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ return Cols;
+ }
+
+ /// \brief Matrix element access.
+ PBQPNum* operator[](unsigned R) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(R < Rows && "Row out of bounds.");
+ return Data + (R * Cols);
+ }
+
+ /// \brief Matrix element access.
+ const PBQPNum* operator[](unsigned R) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(R < Rows && "Row out of bounds.");
+ return Data + (R * Cols);
+ }
+
+ /// \brief Returns the given row as a vector.
+ Vector getRowAsVector(unsigned R) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ Vector V(Cols);
+ for (unsigned C = 0; C < Cols; ++C)
+ V[C] = (*this)[R][C];
+ return V;
+ }
+
+ /// \brief Returns the given column as a vector.
+ Vector getColAsVector(unsigned C) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ Vector V(Rows);
+ for (unsigned R = 0; R < Rows; ++R)
+ V[R] = (*this)[R][C];
+ return V;
+ }
+
+ /// \brief Reset the matrix to the given value.
+ Matrix& reset(PBQPNum Val = 0) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ std::fill(Data, Data + (Rows * Cols), Val);
+ return *this;
+ }
+
+ /// \brief Set a single row of this matrix to the given value.
+ Matrix& setRow(unsigned R, PBQPNum Val) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(R < Rows && "Row out of bounds.");
+ std::fill(Data + (R * Cols), Data + ((R + 1) * Cols), Val);
+ return *this;
+ }
+
+ /// \brief Set a single column of this matrix to the given value.
+ Matrix& setCol(unsigned C, PBQPNum Val) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(C < Cols && "Column out of bounds.");
+ for (unsigned R = 0; R < Rows; ++R)
+ (*this)[R][C] = Val;
+ return *this;
+ }
+
+ /// \brief Matrix transpose.
+ Matrix transpose() const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ Matrix M(Cols, Rows);
+ for (unsigned r = 0; r < Rows; ++r)
+ for (unsigned c = 0; c < Cols; ++c)
+ M[c][r] = (*this)[r][c];
+ return M;
+ }
+
+ /// \brief Returns the diagonal of the matrix as a vector.
+ ///
+ /// Matrix must be square.
+ Vector diagonalize() const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows == Cols && "Attempt to diagonalize non-square matrix.");
+ Vector V(Rows);
+ for (unsigned r = 0; r < Rows; ++r)
+ V[r] = (*this)[r][r];
+ return V;
+ }
+
+ /// \brief Add the given matrix to this one.
+ Matrix& operator+=(const Matrix &M) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(Rows == M.Rows && Cols == M.Cols &&
+ "Matrix dimensions mismatch.");
+ std::transform(Data, Data + (Rows * Cols), M.Data, Data,
+ std::plus<PBQPNum>());
+ return *this;
+ }
+
+ Matrix operator+(const Matrix &M) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ Matrix Tmp(*this);
+ Tmp += M;
+ return Tmp;
+ }
+
+  /// \brief Returns the minimum of the given row.
+ PBQPNum getRowMin(unsigned R) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(R < Rows && "Row out of bounds");
+ return *std::min_element(Data + (R * Cols), Data + ((R + 1) * Cols));
+ }
+
+  /// \brief Returns the minimum of the given column.
+ PBQPNum getColMin(unsigned C) const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ PBQPNum MinElem = (*this)[0][C];
+ for (unsigned R = 1; R < Rows; ++R)
+ if ((*this)[R][C] < MinElem)
+ MinElem = (*this)[R][C];
+ return MinElem;
+ }
+
+ /// \brief Subtracts the given scalar from the elements of the given row.
+ Matrix& subFromRow(unsigned R, PBQPNum Val) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ assert(R < Rows && "Row out of bounds");
+    std::transform(Data + (R * Cols), Data + ((R + 1) * Cols),
+                   Data + (R * Cols),
+                   [Val](PBQPNum Elem) { return Elem - Val; });
+ return *this;
+ }
+
+ /// \brief Subtracts the given scalar from the elements of the given column.
+ Matrix& subFromCol(unsigned C, PBQPNum Val) {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+ for (unsigned R = 0; R < Rows; ++R)
+ (*this)[R][C] -= Val;
+ return *this;
+ }
+
+ /// \brief Returns true if this is a zero matrix.
+ bool isZero() const {
+ assert(Rows != 0 && Cols != 0 && Data != nullptr && "Invalid matrix");
+    return std::find_if(Data, Data + (Rows * Cols),
+                        [](PBQPNum Elem) { return Elem != 0; }) ==
+           Data + (Rows * Cols);
+ }
+
+private:
+ unsigned Rows, Cols;
+ PBQPNum *Data;
+};
+
+/// \brief Return a hash_code for the given matrix.
+inline hash_code hash_value(const Matrix &M) {
+ unsigned *MBegin = reinterpret_cast<unsigned*>(M.Data);
+ unsigned *MEnd = reinterpret_cast<unsigned*>(M.Data + (M.Rows * M.Cols));
+ return hash_combine(M.Rows, M.Cols, hash_combine_range(MBegin, MEnd));
+}
+
+/// \brief Output a textual representation of the given matrix on the given
+/// output stream.
+template <typename OStream>
+OStream& operator<<(OStream &OS, const Matrix &M) {
+ assert((M.getRows() != 0) && "Zero-row matrix badness.");
+ for (unsigned i = 0; i < M.getRows(); ++i)
+ OS << M.getRowAsVector(i) << "\n";
+ return OS;
+}
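+
+// Example (sketch) of basic Matrix usage:
+//   Matrix M(2, 3, 0.0);              // 2x3 zero matrix
+//   M[0][2] = 5.0;
+//   M.setRow(1, 1.0);                 // row 1 becomes [ 1, 1, 1 ]
+//   Vector R = M.getRowAsVector(0);   // [ 0, 0, 5 ]
+//   Matrix T = M.transpose();         // 3x2
+//   PBQPNum RowMin = M.getRowMin(1);  // 1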
+
+template <typename Metadata>
+class MDVector : public Vector {
+public:
+ MDVector(const Vector &v) : Vector(v), md(*this) { }
+ MDVector(Vector &&v) : Vector(std::move(v)), md(*this) { }
+ const Metadata& getMetadata() const { return md; }
+private:
+ Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDVector<Metadata> &V) {
+ return hash_value(static_cast<const Vector&>(V));
+}
+
+template <typename Metadata>
+class MDMatrix : public Matrix {
+public:
+ MDMatrix(const Matrix &m) : Matrix(m), md(*this) { }
+ MDMatrix(Matrix &&m) : Matrix(std::move(m)), md(*this) { }
+ const Metadata& getMetadata() const { return md; }
+private:
+ Metadata md;
+};
+
+template <typename Metadata>
+inline hash_code hash_value(const MDMatrix<Metadata> &M) {
+ return hash_value(static_cast<const Matrix&>(M));
+}
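+
+// Example (sketch): the Metadata parameter of MDVector/MDMatrix must be
+// constructible from the vector/matrix it annotates. A hypothetical
+// minimum-cost-tracking metadata:
+//   struct MinCost {
+//     MinCost(const Vector &V) : Value(V[V.minIndex()]) {}
+//     PBQPNum Value;
+//   };
+//   MDVector<MinCost> MV(Vector(2, 3.0));
+//   PBQPNum M = MV.getMetadata().Value;  // 3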
+
+} // namespace PBQP
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_MATH_H
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/ReductionRules.h b/contrib/llvm/include/llvm/CodeGen/PBQP/ReductionRules.h
new file mode 100644
index 0000000..d4a544b
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/ReductionRules.h
@@ -0,0 +1,221 @@
+//===----------- ReductionRules.h - Reduction Rules -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reduction Rules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+#define LLVM_CODEGEN_PBQP_REDUCTIONRULES_H
+
+#include "Graph.h"
+#include "Math.h"
+#include "Solution.h"
+
+namespace llvm {
+namespace PBQP {
+
+ /// \brief Reduce a node of degree one.
+ ///
+ /// Propagate costs from the given node, which must be of degree one, to its
+ /// neighbor. Notify the problem domain.
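+  ///
+  /// Numeric sketch: with node costs XCosts = [ 0, 2 ] on the degree-one node
+  /// and edge costs ECosts = [ [ 1, 4 ], [ 3, 0 ] ] (rows indexed by the
+  /// degree-one node's options), the neighbor's cost vector gains
+  /// [ min(1+0, 3+2), min(4+0, 0+2) ] = [ 1, 2 ], after which the edge can be
+  /// disconnected without changing the optimal solution.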
+ template <typename GraphT>
+ void applyR1(GraphT &G, typename GraphT::NodeId NId) {
+ typedef typename GraphT::NodeId NodeId;
+ typedef typename GraphT::EdgeId EdgeId;
+ typedef typename GraphT::Vector Vector;
+ typedef typename GraphT::Matrix Matrix;
+ typedef typename GraphT::RawVector RawVector;
+
+ assert(G.getNodeDegree(NId) == 1 &&
+ "R1 applied to node with degree != 1.");
+
+ EdgeId EId = *G.adjEdgeIds(NId).begin();
+ NodeId MId = G.getEdgeOtherNodeId(EId, NId);
+
+ const Matrix &ECosts = G.getEdgeCosts(EId);
+ const Vector &XCosts = G.getNodeCosts(NId);
+ RawVector YCosts = G.getNodeCosts(MId);
+
+ // Duplicate a little to avoid transposing matrices.
+ if (NId == G.getEdgeNode1Id(EId)) {
+ for (unsigned j = 0; j < YCosts.getLength(); ++j) {
+ PBQPNum Min = ECosts[0][j] + XCosts[0];
+ for (unsigned i = 1; i < XCosts.getLength(); ++i) {
+ PBQPNum C = ECosts[i][j] + XCosts[i];
+ if (C < Min)
+ Min = C;
+ }
+ YCosts[j] += Min;
+ }
+ } else {
+ for (unsigned i = 0; i < YCosts.getLength(); ++i) {
+ PBQPNum Min = ECosts[i][0] + XCosts[0];
+ for (unsigned j = 1; j < XCosts.getLength(); ++j) {
+ PBQPNum C = ECosts[i][j] + XCosts[j];
+ if (C < Min)
+ Min = C;
+ }
+ YCosts[i] += Min;
+ }
+ }
+ G.setNodeCosts(MId, YCosts);
+ G.disconnectEdge(EId, MId);
+ }
+
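+  /// \brief Reduce a node of degree two.
+  ///
+  /// Fold the costs of the given node, which must be of degree two, into a
+  /// new or existing edge between its two neighbors, then disconnect the
+  /// node's edges from those neighbors.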
+ template <typename GraphT>
+ void applyR2(GraphT &G, typename GraphT::NodeId NId) {
+ typedef typename GraphT::NodeId NodeId;
+ typedef typename GraphT::EdgeId EdgeId;
+ typedef typename GraphT::Vector Vector;
+ typedef typename GraphT::Matrix Matrix;
+ typedef typename GraphT::RawMatrix RawMatrix;
+
+ assert(G.getNodeDegree(NId) == 2 &&
+ "R2 applied to node with degree != 2.");
+
+ const Vector &XCosts = G.getNodeCosts(NId);
+
+ typename GraphT::AdjEdgeItr AEItr = G.adjEdgeIds(NId).begin();
+ EdgeId YXEId = *AEItr,
+ ZXEId = *(++AEItr);
+
+ NodeId YNId = G.getEdgeOtherNodeId(YXEId, NId),
+ ZNId = G.getEdgeOtherNodeId(ZXEId, NId);
+
+ bool FlipEdge1 = (G.getEdgeNode1Id(YXEId) == NId),
+ FlipEdge2 = (G.getEdgeNode1Id(ZXEId) == NId);
+
+ const Matrix *YXECosts = FlipEdge1 ?
+ new Matrix(G.getEdgeCosts(YXEId).transpose()) :
+ &G.getEdgeCosts(YXEId);
+
+ const Matrix *ZXECosts = FlipEdge2 ?
+ new Matrix(G.getEdgeCosts(ZXEId).transpose()) :
+ &G.getEdgeCosts(ZXEId);
+
+ unsigned XLen = XCosts.getLength(),
+ YLen = YXECosts->getRows(),
+ ZLen = ZXECosts->getRows();
+
+ RawMatrix Delta(YLen, ZLen);
+
+ for (unsigned i = 0; i < YLen; ++i) {
+ for (unsigned j = 0; j < ZLen; ++j) {
+ PBQPNum Min = (*YXECosts)[i][0] + (*ZXECosts)[j][0] + XCosts[0];
+ for (unsigned k = 1; k < XLen; ++k) {
+ PBQPNum C = (*YXECosts)[i][k] + (*ZXECosts)[j][k] + XCosts[k];
+ if (C < Min) {
+ Min = C;
+ }
+ }
+ Delta[i][j] = Min;
+ }
+ }
+
+ if (FlipEdge1)
+ delete YXECosts;
+
+ if (FlipEdge2)
+ delete ZXECosts;
+
+ EdgeId YZEId = G.findEdge(YNId, ZNId);
+
+ if (YZEId == G.invalidEdgeId()) {
+ YZEId = G.addEdge(YNId, ZNId, Delta);
+ } else {
+ const Matrix &YZECosts = G.getEdgeCosts(YZEId);
+ if (YNId == G.getEdgeNode1Id(YZEId)) {
+ G.updateEdgeCosts(YZEId, Delta + YZECosts);
+ } else {
+ G.updateEdgeCosts(YZEId, Delta.transpose() + YZECosts);
+ }
+ }
+
+ G.disconnectEdge(YXEId, YNId);
+ G.disconnectEdge(ZXEId, ZNId);
+
+ // TODO: Try to normalize newly added/modified edge.
+ }
+
+#ifndef NDEBUG
+  // Does this cost vector have any register options?
+ template <typename VectorT>
+ bool hasRegisterOptions(const VectorT &V) {
+ unsigned VL = V.getLength();
+
+  // An empty or spill-only cost vector does not provide any register option.
+ if (VL <= 1)
+ return false;
+
+    // If there are registers in the cost vector, but all of them have
+    // infinite costs, then there is no available register.
+ for (unsigned i = 1; i < VL; ++i)
+ if (V[i] != std::numeric_limits<PBQP::PBQPNum>::infinity())
+ return true;
+
+ return false;
+ }
+#endif
+
+ // \brief Find a solution to a fully reduced graph by backpropagation.
+ //
+ // Given a graph and a reduction order, pop each node from the reduction
+ // order and greedily compute a minimum solution based on the node costs, and
+ // the dependent costs due to previously solved nodes.
+ //
+ // Note - This does not return the graph to its original (pre-reduction)
+ // state: the existing solvers destructively alter the node and edge
+ // costs. Given that, the backpropagate function doesn't attempt to
+ // replace the edges either, but leaves the graph in its reduced
+ // state.
+ template <typename GraphT, typename StackT>
+ Solution backpropagate(GraphT& G, StackT stack) {
+ typedef GraphBase::NodeId NodeId;
+ typedef typename GraphT::Matrix Matrix;
+ typedef typename GraphT::RawVector RawVector;
+
+ Solution s;
+
+ while (!stack.empty()) {
+ NodeId NId = stack.back();
+ stack.pop_back();
+
+ RawVector v = G.getNodeCosts(NId);
+
+#ifndef NDEBUG
+      // Although a conservatively allocatable node can be allocated to a
+      // register, spilling it may provide a lower cost solution. Assert here
+      // that spilling is done by choice, not because there were no registers
+      // available.
+ if (G.getNodeMetadata(NId).wasConservativelyAllocatable())
+ assert(hasRegisterOptions(v) && "A conservatively allocatable node "
+ "must have available register options");
+#endif
+
+ for (auto EId : G.adjEdgeIds(NId)) {
+ const Matrix& edgeCosts = G.getEdgeCosts(EId);
+ if (NId == G.getEdgeNode1Id(EId)) {
+ NodeId mId = G.getEdgeNode2Id(EId);
+ v += edgeCosts.getColAsVector(s.getSelection(mId));
+ } else {
+ NodeId mId = G.getEdgeNode1Id(EId);
+ v += edgeCosts.getRowAsVector(s.getSelection(mId));
+ }
+ }
+
+ s.setSelection(NId, v.minIndex());
+ }
+
+ return s;
+ }
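+
+  // Sketch of a typical driver (assumes a GraphT providing nodeIds() and the
+  // degree queries used above; real solvers pick nodes in a heuristic order
+  // and revisit neighbors whose degrees drop):
+  //
+  //   std::vector<GraphBase::NodeId> Stack;
+  //   for (auto NId : G.nodeIds()) {
+  //     switch (G.getNodeDegree(NId)) {
+  //     case 1: applyR1(G, NId); break;
+  //     case 2: applyR2(G, NId); break;
+  //     default: break; // Degree 0, or RN handled heuristically.
+  //     }
+  //     Stack.push_back(NId);
+  //   }
+  //   Solution S = backpropagate(G, std::move(Stack));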
+
+} // namespace PBQP
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h b/contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h
new file mode 100644
index 0000000..a3bfaeb
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQP/Solution.h
@@ -0,0 +1,94 @@
+//===-- Solution.h ------- PBQP Solution ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// PBQP Solution class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQP_SOLUTION_H
+#define LLVM_CODEGEN_PBQP_SOLUTION_H
+
+#include "Graph.h"
+#include "Math.h"
+#include <map>
+
+namespace llvm {
+namespace PBQP {
+
+ /// \brief Represents a solution to a PBQP problem.
+ ///
+  /// To get the selection for each node in the problem, use the getSelection
+  /// method.
+ class Solution {
+ private:
+
+ typedef std::map<GraphBase::NodeId, unsigned> SelectionsMap;
+ SelectionsMap selections;
+
+ unsigned r0Reductions, r1Reductions, r2Reductions, rNReductions;
+
+ public:
+
+ /// \brief Initialise an empty solution.
+ Solution()
+ : r0Reductions(0), r1Reductions(0), r2Reductions(0), rNReductions(0) {}
+
+ /// \brief Number of nodes for which selections have been made.
+ /// @return Number of nodes for which selections have been made.
+ unsigned numNodes() const { return selections.size(); }
+
+ /// \brief Records a reduction via the R0 rule. Should be called from the
+ /// solver only.
+ void recordR0() { ++r0Reductions; }
+
+ /// \brief Returns the number of R0 reductions applied to solve the problem.
+ unsigned numR0Reductions() const { return r0Reductions; }
+
+ /// \brief Records a reduction via the R1 rule. Should be called from the
+ /// solver only.
+ void recordR1() { ++r1Reductions; }
+
+ /// \brief Returns the number of R1 reductions applied to solve the problem.
+ unsigned numR1Reductions() const { return r1Reductions; }
+
+ /// \brief Records a reduction via the R2 rule. Should be called from the
+ /// solver only.
+ void recordR2() { ++r2Reductions; }
+
+ /// \brief Returns the number of R2 reductions applied to solve the problem.
+ unsigned numR2Reductions() const { return r2Reductions; }
+
+ /// \brief Records a reduction via the RN rule. Should be called from the
+ /// solver only.
+    void recordRN() { ++rNReductions; }
+
+ /// \brief Returns the number of RN reductions applied to solve the problem.
+ unsigned numRNReductions() const { return rNReductions; }
+
+ /// \brief Set the selection for a given node.
+ /// @param nodeId Node id.
+ /// @param selection Selection for nodeId.
+ void setSelection(GraphBase::NodeId nodeId, unsigned selection) {
+ selections[nodeId] = selection;
+ }
+
+ /// \brief Get a node's selection.
+ /// @param nodeId Node id.
+    /// @return The selection for nodeId.
+ unsigned getSelection(GraphBase::NodeId nodeId) const {
+ SelectionsMap::const_iterator sItr = selections.find(nodeId);
+ assert(sItr != selections.end() && "No selection for node.");
+ return sItr->second;
+ }
+
+ };
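+
+  // Example (sketch):
+  //   Solution S;
+  //   S.setSelection(/*nodeId=*/0, /*selection=*/2);
+  //   S.recordR1();
+  //   unsigned Sel = S.getSelection(0); // 2
+  //   unsigned N = S.numNodes();        // 1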
+
+} // namespace PBQP
+} // namespace llvm
+
+#endif // LLVM_CODEGEN_PBQP_SOLUTION_H
diff --git a/contrib/llvm/include/llvm/CodeGen/PBQPRAConstraint.h b/contrib/llvm/include/llvm/CodeGen/PBQPRAConstraint.h
new file mode 100644
index 0000000..833b9ba
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PBQPRAConstraint.h
@@ -0,0 +1,69 @@
+//===-- PBQPRAConstraint.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPRAConstraint and PBQPRAConstraintList classes,
+// which describe register allocation constraints (e.g. spill costs,
+// interference, coalescing) for PBQP-based register allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PBQPRACONSTRAINT_H
+#define LLVM_CODEGEN_PBQPRACONSTRAINT_H
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace PBQP {
+namespace RegAlloc {
+// Forward declare PBQP graph class.
+class PBQPRAGraph;
+} // namespace RegAlloc
+} // namespace PBQP
+
+class LiveIntervals;
+class MachineBlockFrequencyInfo;
+class MachineFunction;
+class TargetRegisterInfo;
+
+typedef PBQP::RegAlloc::PBQPRAGraph PBQPRAGraph;
+
+/// @brief Abstract base for classes implementing PBQP register allocation
+/// constraints (e.g. Spill-costs, interference, coalescing).
+class PBQPRAConstraint {
+public:
+ virtual ~PBQPRAConstraint() = 0;
+ virtual void apply(PBQPRAGraph &G) = 0;
+private:
+ virtual void anchor();
+};
+
+/// @brief PBQP register allocation constraint composer.
+///
+/// Constraints added to this list will be applied, in the order that they are
+/// added, to the PBQP graph.
+class PBQPRAConstraintList : public PBQPRAConstraint {
+public:
+ void apply(PBQPRAGraph &G) override {
+ for (auto &C : Constraints)
+ C->apply(G);
+ }
+
+ void addConstraint(std::unique_ptr<PBQPRAConstraint> C) {
+ if (C)
+ Constraints.push_back(std::move(C));
+ }
+private:
+ std::vector<std::unique_ptr<PBQPRAConstraint>> Constraints;
+ void anchor() override;
+};
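+
+// Example (sketch), with a hypothetical user-defined constraint:
+//   class MyConstraint : public PBQPRAConstraint {
+//   public:
+//     void apply(PBQPRAGraph &G) override { /* adjust costs in G */ }
+//   };
+//   PBQPRAConstraintList CL;
+//   CL.addConstraint(std::unique_ptr<PBQPRAConstraint>(new MyConstraint()));
+//   // CL.apply(G) now runs each added constraint in insertion order.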
+
+} // namespace llvm
+
+#endif /* LLVM_CODEGEN_PBQPRACONSTRAINT_H */
diff --git a/contrib/llvm/include/llvm/CodeGen/ParallelCG.h b/contrib/llvm/include/llvm/CodeGen/ParallelCG.h
new file mode 100644
index 0000000..fa7002f
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ParallelCG.h
@@ -0,0 +1,43 @@
+//===-- llvm/CodeGen/ParallelCG.h - Parallel code generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header declares functions that can be used for parallel code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PARALLELCG_H
+#define LLVM_CODEGEN_PARALLELCG_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class Module;
+class TargetOptions;
+class raw_pwrite_stream;
+
+/// Split M into OSs.size() partitions, and generate code for each. Writes
+/// OSs.size() output files to the output streams in OSs. The resulting output
+/// files if linked together are intended to be equivalent to the single output
+/// file that would have been code generated from M.
+///
+/// \returns M if OSs.size() == 1, otherwise returns std::unique_ptr<Module>().
+std::unique_ptr<Module>
+splitCodeGen(std::unique_ptr<Module> M, ArrayRef<raw_pwrite_stream *> OSs,
+ StringRef CPU, StringRef Features, const TargetOptions &Options,
+ Reloc::Model RM = Reloc::Default,
+ CodeModel::Model CM = CodeModel::Default,
+ CodeGenOpt::Level OL = CodeGenOpt::Default,
+ TargetMachine::CodeGenFileType FT = TargetMachine::CGFT_ObjectFile);
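+
+// Example call (sketch; module creation and stream setup elided, and the
+// streams vector is assumed to hold one raw_pwrite_stream per partition):
+//   SmallVector<raw_pwrite_stream *, 4> OSs = ...;
+//   std::unique_ptr<Module> Res =
+//       splitCodeGen(std::move(M), OSs, /*CPU=*/"", /*Features=*/"",
+//                    TargetOptions());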
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/Passes.h b/contrib/llvm/include/llvm/CodeGen/Passes.h
new file mode 100644
index 0000000..f45f0ed
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/Passes.h
@@ -0,0 +1,685 @@
+//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces to access the target independent code generation
+// passes provided by the LLVM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PASSES_H
+#define LLVM_CODEGEN_PASSES_H
+
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+#include <functional>
+#include <string>
+
+namespace llvm {
+
+class MachineFunctionPass;
+class PassConfigImpl;
+class PassInfo;
+class ScheduleDAGInstrs;
+class TargetLowering;
+class TargetLoweringBase;
+class TargetRegisterClass;
+class raw_ostream;
+struct MachineSchedContext;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class PassManagerBase;
+}
+using legacy::PassManagerBase;
+
+/// Discriminated union of Pass ID types.
+///
+/// The PassConfig API prefers dealing with IDs because they are safer and more
+/// efficient. IDs decouple configuration from instantiation. This way, when a
+/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe
+/// to refer to a Pass pointer after adding it to a pass manager, which
+/// deletes redundant pass instances.
+///
+/// However, it is convenient to directly instantiate target passes with
+/// non-default ctors. These often don't have a registered PassInfo. Rather than
+/// force all target passes to implement the pass registry boilerplate, allow
+/// the PassConfig API to handle either type.
+///
+/// AnalysisID is sadly char*, so PointerIntPair won't work.
+class IdentifyingPassPtr {
+ union {
+ AnalysisID ID;
+ Pass *P;
+ };
+ bool IsInstance;
+public:
+ IdentifyingPassPtr() : P(nullptr), IsInstance(false) {}
+ IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
+ IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
+
+ bool isValid() const { return P; }
+ bool isInstance() const { return IsInstance; }
+
+ AnalysisID getID() const {
+ assert(!IsInstance && "Not a Pass ID");
+ return ID;
+ }
+ Pass *getInstance() const {
+ assert(IsInstance && "Not a Pass Instance");
+ return P;
+ }
+};
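+
+// Example (sketch): both forms are accepted wherever an IdentifyingPassPtr is
+// expected:
+//   IdentifyingPassPtr ByID(&MachineSchedulerID);  // registered pass ID
+//   IdentifyingPassPtr ByInst(createGreedyRegisterAllocator()); // instance
+//   Pass *P = ByInst.isInstance() ? ByInst.getInstance() : nullptr;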
+
+template <> struct isPodLike<IdentifyingPassPtr> {
+ static const bool value = true;
+};
+
+/// Target-Independent Code Generator Pass Configuration Options.
+///
+/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
+/// to the internals of other CodeGen passes.
+class TargetPassConfig : public ImmutablePass {
+public:
+ /// Pseudo Pass IDs. These are defined within TargetPassConfig because they
+ /// are unregistered pass IDs. They are only useful for use with
+ /// TargetPassConfig APIs to identify multiple occurrences of the same pass.
+ ///
+
+ /// EarlyTailDuplicate - A clone of the TailDuplicate pass that runs early
+ /// during codegen, on SSA form.
+ static char EarlyTailDuplicateID;
+
+ /// PostRAMachineLICM - A clone of the LICM pass that runs during late machine
+ /// optimization after regalloc.
+ static char PostRAMachineLICMID;
+
+private:
+ PassManagerBase *PM;
+ AnalysisID StartBefore, StartAfter;
+ AnalysisID StopAfter;
+ bool Started;
+ bool Stopped;
+ bool AddingMachinePasses;
+
+protected:
+ TargetMachine *TM;
+ PassConfigImpl *Impl; // Internal data structures
+ bool Initialized; // Flagged after all passes are configured.
+
+ // Target Pass Options
+ // Targets provide a default setting, user flags override.
+ //
+ bool DisableVerify;
+
+ /// Default setting for -enable-tail-merge on this target.
+ bool EnableTailMerge;
+
+public:
+ TargetPassConfig(TargetMachine *tm, PassManagerBase &pm);
+ // Dummy constructor.
+ TargetPassConfig();
+
+ ~TargetPassConfig() override;
+
+ static char ID;
+
+ /// Get the right type of TargetMachine for this target.
+ template<typename TMC> TMC &getTM() const {
+ return *static_cast<TMC*>(TM);
+ }
+
+ //
+ void setInitialized() { Initialized = true; }
+
+ CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
+
+ /// Set the StartAfter, StartBefore and StopAfter passes to allow running only
+ /// a portion of the normal code-gen pass sequence.
+ ///
+  /// If both the StartAfter and StartBefore pass IDs are zero, compilation
+  /// will begin at the normal point; otherwise, clear the Started flag to
+  /// indicate
+ /// that passes should not be added until the starting pass is seen. If the
+ /// Stop pass ID is zero, then compilation will continue to the end.
+ ///
+ /// This function expects that at least one of the StartAfter or the
+ /// StartBefore pass IDs is null.
+ void setStartStopPasses(AnalysisID StartBefore, AnalysisID StartAfter,
+ AnalysisID StopAfter) {
+ if (StartAfter)
+ assert(!StartBefore && "Start after and start before passes are given");
+ this->StartBefore = StartBefore;
+ this->StartAfter = StartAfter;
+ this->StopAfter = StopAfter;
+ Started = (StartAfter == nullptr) && (StartBefore == nullptr);
+ }
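+
+  // For example (sketch), to stop codegen after register coalescing:
+  //   TPC.setStartStopPasses(/*StartBefore=*/nullptr, /*StartAfter=*/nullptr,
+  //                          /*StopAfter=*/&RegisterCoalescerID);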
+
+ void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
+
+ bool getEnableTailMerge() const { return EnableTailMerge; }
+ void setEnableTailMerge(bool Enable) { setOpt(EnableTailMerge, Enable); }
+
+ /// Allow the target to override a specific pass without overriding the pass
+ /// pipeline. When passes are added to the standard pipeline at the
+ /// point where StandardID is expected, add TargetID in its place.
+ void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
+
+ /// Insert InsertedPassID pass after TargetPassID pass.
+ void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID,
+ bool VerifyAfter = true, bool PrintAfter = true);
+
+ /// Allow the target to enable a specific standard pass by default.
+ void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
+
+ /// Allow the target to disable a specific standard pass by default.
+ void disablePass(AnalysisID PassID) {
+ substitutePass(PassID, IdentifyingPassPtr());
+ }
+
+ /// Return the pass substituted for StandardID by the target.
+ /// If no substitution exists, return StandardID.
+ IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
+
+ /// Return true if the optimized regalloc pipeline is enabled.
+ bool getOptimizeRegAlloc() const;
+
+ /// Return true if shrink wrapping is enabled.
+ bool getEnableShrinkWrap() const;
+
+ /// Return true if the default global register allocator is in use and
+  /// has not been overridden on the command line with '-regalloc=...'
+ bool usingDefaultRegAlloc() const;
+
+ /// Add common target configurable passes that perform LLVM IR to IR
+ /// transforms following machine independent optimization.
+ virtual void addIRPasses();
+
+ /// Add passes to lower exception handling for the code generator.
+ void addPassesToHandleExceptions();
+
+ /// Add pass to prepare the LLVM IR for code generation. This should be done
+ /// before exception handling preparation passes.
+ virtual void addCodeGenPrepare();
+
+ /// Add common passes that perform LLVM IR to IR transforms in preparation for
+ /// instruction selection.
+ virtual void addISelPrepare();
+
+ /// addInstSelector - This method should install an instruction selector pass,
+ /// which converts from LLVM code to machine instructions.
+ virtual bool addInstSelector() {
+ return true;
+ }
+
+ /// Add the complete, standard set of LLVM CodeGen passes.
+ /// Fully developed targets will not generally override this.
+ virtual void addMachinePasses();
+
+ /// Create an instance of ScheduleDAGInstrs to be run within the standard
+ /// MachineScheduler pass for this function and target at the current
+ /// optimization level.
+ ///
+ /// This can also be used to plug a new MachineSchedStrategy into an instance
+ /// of the standard ScheduleDAGMI:
+ /// return new ScheduleDAGMI(C, make_unique<MyStrategy>(C), /*RemoveKillFlags=*/false)
+ ///
+ /// Return NULL to select the default (generic) machine scheduler.
+ virtual ScheduleDAGInstrs *
+ createMachineScheduler(MachineSchedContext *C) const {
+ return nullptr;
+ }
+
+ /// Similar to createMachineScheduler but used when postRA machine scheduling
+ /// is enabled.
+ virtual ScheduleDAGInstrs *
+ createPostMachineScheduler(MachineSchedContext *C) const {
+ return nullptr;
+ }
+
+protected:
+ // Helper to verify the analysis is really immutable.
+ void setOpt(bool &Opt, bool Val);
+
+ /// Methods with trivial inline returns are convenient points in the common
+ /// codegen pass pipeline where targets may insert passes. Methods with
+ /// out-of-line standard implementations are major CodeGen stages called by
+ /// addMachinePasses. Some targets may override major stages when inserting
+  /// passes is insufficient, but maintaining overridden stages is more work.
+ ///
+
+  /// addPreISel - This method should add any "last minute" LLVM->LLVM
+  /// passes (which are run just before instruction selection).
+ virtual bool addPreISel() {
+ return true;
+ }
+
+ /// addMachineSSAOptimization - Add standard passes that optimize machine
+ /// instructions in SSA form.
+ virtual void addMachineSSAOptimization();
+
+ /// Add passes that optimize instruction level parallelism for out-of-order
+ /// targets. These passes are run while the machine code is still in SSA
+ /// form, so they can use MachineTraceMetrics to control their heuristics.
+ ///
+ /// All passes added here should preserve the MachineDominatorTree,
+ /// MachineLoopInfo, and MachineTraceMetrics analyses.
+ virtual bool addILPOpts() {
+ return false;
+ }
+
+ /// This method may be implemented by targets that want to run passes
+ /// immediately before register allocation.
+ virtual void addPreRegAlloc() { }
+
+ /// createTargetRegisterAllocator - Create the register allocator pass for
+ /// this target at the current optimization level.
+ virtual FunctionPass *createTargetRegisterAllocator(bool Optimized);
+
+ /// addFastRegAlloc - Add the minimum set of target-independent passes that
+ /// are required for fast register allocation.
+ virtual void addFastRegAlloc(FunctionPass *RegAllocPass);
+
+ /// addOptimizedRegAlloc - Add passes related to register allocation.
+ /// LLVMTargetMachine provides standard regalloc passes for most targets.
+ virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
+
+ /// addPreRewrite - Add passes to the optimized register allocation pipeline
+ /// after register allocation is complete, but before virtual registers are
+ /// rewritten to physical registers.
+ ///
+ /// These passes must preserve VirtRegMap and LiveIntervals, and when running
+ /// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
+ /// When these passes run, VirtRegMap contains legal physreg assignments for
+ /// all virtual registers.
+ virtual bool addPreRewrite() {
+ return false;
+ }
+
+ /// This method may be implemented by targets that want to run passes after
+ /// register allocation pass pipeline but before prolog-epilog insertion.
+ virtual void addPostRegAlloc() { }
+
+ /// Add passes that optimize machine instructions after register allocation.
+ virtual void addMachineLateOptimization();
+
+ /// This method may be implemented by targets that want to run passes after
+ /// prolog-epilog insertion and before the second instruction scheduling pass.
+ virtual void addPreSched2() { }
+
+ /// addGCPasses - Add late codegen passes that analyze code for garbage
+ /// collection. This should return true if GC info should be printed after
+ /// these passes.
+ virtual bool addGCPasses();
+
+ /// Add standard basic block placement passes.
+ virtual void addBlockPlacement();
+
+ /// This pass may be implemented by targets that want to run passes
+ /// immediately before machine code is emitted.
+ virtual void addPreEmitPass() { }
+
+ /// Utilities for targets to add passes to the pass manager.
+ ///
+
+ /// Add a CodeGen pass at this point in the pipeline after checking overrides.
+ /// Return the pass that was added, or zero if no pass was added.
+  /// @p printAfter if true, and a machine function pass is being added, add
+  /// an extra machine printer pass afterwards.
+  /// @p verifyAfter if true, and a machine function pass is being added, add
+  /// an extra machine verification pass afterwards.
+ AnalysisID addPass(AnalysisID PassID, bool verifyAfter = true,
+ bool printAfter = true);
+
+ /// Add a pass to the PassManager if that pass is supposed to be run, as
+ /// determined by the StartAfter and StopAfter options. Takes ownership of the
+ /// pass.
+  /// @p printAfter if true, and a machine function pass is being added, add
+  /// an extra machine printer pass afterwards.
+  /// @p verifyAfter if true, and a machine function pass is being added, add
+  /// an extra machine verification pass afterwards.
+ void addPass(Pass *P, bool verifyAfter = true, bool printAfter = true);
+
+  /// addMachinePasses helper to create the target-selected or overridden
+ /// regalloc pass.
+ FunctionPass *createRegAllocPass(bool Optimized);
+
+ /// printAndVerify - Add a pass to dump then verify the machine function, if
+ /// those steps are enabled.
+ ///
+ void printAndVerify(const std::string &Banner);
+
+ /// Add a pass to print the machine function if printing is enabled.
+ void addPrintPass(const std::string &Banner);
+
+ /// Add a pass to perform basic verification of the machine function if
+ /// verification is enabled.
+ void addVerifyPass(const std::string &Banner);
+};
+} // namespace llvm
+
+/// List of target independent CodeGen pass IDs.
+namespace llvm {
+ FunctionPass *createAtomicExpandPass(const TargetMachine *TM);
+
+ /// createUnreachableBlockEliminationPass - The LLVM code generator does not
+ /// work well with unreachable basic blocks (what live ranges make sense for a
+ /// block that cannot be reached?). As such, a code generator should either
+ /// not instruction select unreachable blocks, or run this pass as its
+ /// last LLVM modifying pass to clean up blocks that are not reachable from
+ /// the entry block.
+ FunctionPass *createUnreachableBlockEliminationPass();
+
+ /// MachineFunctionPrinter pass - This pass prints out the machine function to
+ /// the given stream as a debugging tool.
+ MachineFunctionPass *
+  createMachineFunctionPrinterPass(raw_ostream &OS,
+                                   const std::string &Banner = "");
+
+  /// MIRPrinting pass - This pass prints out the LLVM IR into the given stream
+ /// using the MIR serialization format.
+ MachineFunctionPass *createPrintMIRPass(raw_ostream &OS);
+
+ /// createCodeGenPreparePass - Transform the code to expose more pattern
+ /// matching during instruction selection.
+ FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
+
+  /// AtomicExpandID -- Lowers atomic operations in terms of either cmpxchg or
+  /// load-linked/store-conditional loops.
+ extern char &AtomicExpandID;
+
+ /// MachineLoopInfo - This pass is a loop analysis pass.
+ extern char &MachineLoopInfoID;
+
+ /// MachineDominators - This pass is a machine dominators analysis pass.
+ extern char &MachineDominatorsID;
+
+  /// MachineDominanceFrontier - This pass is a machine dominance frontier
+  /// analysis pass.
+ extern char &MachineDominanceFrontierID;
+
+ /// EdgeBundles analysis - Bundle machine CFG edges.
+ extern char &EdgeBundlesID;
+
+ /// LiveVariables pass - This pass computes the set of blocks in which each
+  /// variable is live and sets machine operand kill flags.
+ extern char &LiveVariablesID;
+
+ /// PHIElimination - This pass eliminates machine instruction PHI nodes
+ /// by inserting copy instructions. This destroys SSA information, but is the
+ /// desired input for some register allocators. This pass is "required" by
+  /// these register allocators like this: AU.addRequiredID(PHIEliminationID);
+ extern char &PHIEliminationID;
+
+ /// LiveIntervals - This analysis keeps track of the live ranges of virtual
+ /// and physical registers.
+ extern char &LiveIntervalsID;
+
+ /// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
+ extern char &LiveStacksID;
+
+ /// TwoAddressInstruction - This pass reduces two-address instructions to
+ /// use two operands. This destroys SSA information but it is desired by
+ /// register allocators.
+ extern char &TwoAddressInstructionPassID;
+
+  /// ProcessImplicitDefs pass - This pass removes IMPLICIT_DEFs.
+ extern char &ProcessImplicitDefsID;
+
+ /// RegisterCoalescer - This pass merges live ranges to eliminate copies.
+ extern char &RegisterCoalescerID;
+
+ /// MachineScheduler - This pass schedules machine instructions.
+ extern char &MachineSchedulerID;
+
+ /// PostMachineScheduler - This pass schedules machine instructions postRA.
+ extern char &PostMachineSchedulerID;
+
+ /// SpillPlacement analysis. Suggest optimal placement of spill code between
+ /// basic blocks.
+ extern char &SpillPlacementID;
+
+  /// ShrinkWrap pass. Look for the best place to insert save and restore
+  /// instructions and update the MachineFunctionInfo with that information.
+ extern char &ShrinkWrapID;
+
+ /// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
+ /// assigned in VirtRegMap.
+ extern char &VirtRegRewriterID;
+
+ /// UnreachableMachineBlockElimination - This pass removes unreachable
+ /// machine basic blocks.
+ extern char &UnreachableMachineBlockElimID;
+
+ /// DeadMachineInstructionElim - This pass removes dead machine instructions.
+ extern char &DeadMachineInstructionElimID;
+
+ /// FastRegisterAllocation Pass - This pass register allocates as fast as
+ /// possible. It is best suited for debug code where live ranges are short.
+ ///
+ FunctionPass *createFastRegisterAllocator();
+
+ /// BasicRegisterAllocation Pass - This pass implements a degenerate global
+ /// register allocator using the basic regalloc framework.
+ ///
+ FunctionPass *createBasicRegisterAllocator();
+
+ /// Greedy register allocation pass - This pass implements a global register
+ /// allocator for optimized builds.
+ ///
+ FunctionPass *createGreedyRegisterAllocator();
+
+ /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
+  /// Quadratic Programming (PBQP) based register allocator.
+ ///
+ FunctionPass *createDefaultPBQPRegisterAllocator();
+
+ /// PrologEpilogCodeInserter - This pass inserts prolog and epilog code,
+ /// and eliminates abstract frame references.
+ extern char &PrologEpilogCodeInserterID;
+
+ /// ExpandPostRAPseudos - This pass expands pseudo instructions after
+ /// register allocation.
+ extern char &ExpandPostRAPseudosID;
+
+  /// PostRAScheduler - This pass performs post-register-allocation
+  /// scheduling.
+ extern char &PostRASchedulerID;
+
+ /// BranchFolding - This pass performs machine code CFG based
+ /// optimizations to delete branches to branches, eliminate branches to
+ /// successor blocks (creating fall throughs), and eliminating branches over
+ /// branches.
+ extern char &BranchFolderPassID;
+
+ /// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
+ extern char &MachineFunctionPrinterPassID;
+
+  /// MIRPrintingPass - This pass prints out the LLVM IR using the MIR
+ /// serialization format.
+ extern char &MIRPrintingPassID;
+
+ /// TailDuplicate - Duplicate blocks with unconditional branches
+ /// into tails of their predecessors.
+ extern char &TailDuplicateID;
+
+ /// MachineTraceMetrics - This pass computes critical path and CPU resource
+ /// usage in an ensemble of traces.
+ extern char &MachineTraceMetricsID;
+
+ /// EarlyIfConverter - This pass performs if-conversion on SSA form by
+ /// inserting cmov instructions.
+ extern char &EarlyIfConverterID;
+
+ /// This pass performs instruction combining using trace metrics to estimate
+ /// critical-path and resource depth.
+ extern char &MachineCombinerID;
+
+  /// StackColoring - This pass performs stack coloring and merging. It
+  /// merges disjoint allocas to reduce the stack size.
+ extern char &StackColoringID;
+
+ /// IfConverter - This pass performs machine code if conversion.
+ extern char &IfConverterID;
+
+ FunctionPass *createIfConverter(std::function<bool(const Function &)> Ftor);
+
+ /// MachineBlockPlacement - This pass places basic blocks based on branch
+ /// probabilities.
+ extern char &MachineBlockPlacementID;
+
+ /// MachineBlockPlacementStats - This pass collects statistics about the
+ /// basic block placement using branch probabilities and block frequency
+ /// information.
+ extern char &MachineBlockPlacementStatsID;
+
+ /// GCLowering Pass - Used by gc.root to perform its default lowering
+ /// operations.
+ FunctionPass *createGCLoweringPass();
+
+ /// ShadowStackGCLowering - Implements the custom lowering mechanism
+ /// used by the shadow stack GC. Only runs on functions which opt in to
+ /// the shadow stack collector.
+ FunctionPass *createShadowStackGCLoweringPass();
+
+ /// GCMachineCodeAnalysis - Target-independent pass to mark safe points
+ /// in machine code. Must be added very late during code generation, just
+ /// prior to output, and importantly after all CFG transformations (such as
+ /// branch folding).
+ extern char &GCMachineCodeAnalysisID;
+
+ /// Creates a pass to print GC metadata.
+ ///
+ FunctionPass *createGCInfoPrinter(raw_ostream &OS);
+
+ /// MachineCSE - This pass performs global CSE on machine instructions.
+ extern char &MachineCSEID;
+
+ /// ImplicitNullChecks - This pass folds null pointer checks into nearby
+ /// memory operations.
+ extern char &ImplicitNullChecksID;
+
+ /// MachineLICM - This pass performs LICM on machine instructions.
+ extern char &MachineLICMID;
+
+ /// MachineSinking - This pass performs sinking on machine instructions.
+ extern char &MachineSinkingID;
+
+ /// MachineCopyPropagation - This pass performs copy propagation on
+ /// machine instructions.
+ extern char &MachineCopyPropagationID;
+
+ /// PeepholeOptimizer - This pass performs peephole optimizations -
+ /// like extension and comparison eliminations.
+ extern char &PeepholeOptimizerID;
+
+ /// OptimizePHIs - This pass optimizes machine instruction PHIs
+ /// to take advantage of opportunities created during DAG legalization.
+ extern char &OptimizePHIsID;
+
+ /// StackSlotColoring - This pass performs stack slot coloring.
+ extern char &StackSlotColoringID;
+
+ /// \brief This pass lays out funclets contiguously.
+ extern char &FuncletLayoutID;
+
+ /// createStackProtectorPass - This pass adds stack protectors to functions.
+ ///
+ FunctionPass *createStackProtectorPass(const TargetMachine *TM);
+
+  /// createMachineVerifierPass - This pass verifies generated machine code
+ /// instructions for correctness.
+ ///
+ FunctionPass *createMachineVerifierPass(const std::string& Banner);
+
+ /// createDwarfEHPass - This pass mulches exception handling code into a form
+ /// adapted to code generation. Required if using dwarf exception handling.
+ FunctionPass *createDwarfEHPass(const TargetMachine *TM);
+
+ /// createWinEHPass - Prepares personality functions used by MSVC on Windows,
+ /// in addition to the Itanium LSDA based personalities.
+ FunctionPass *createWinEHPass(const TargetMachine *TM);
+
+ /// createSjLjEHPreparePass - This pass adapts exception handling code to use
+  /// the GCC-style builtin setjmp/longjmp (sjlj) to handle EH control flow.
+ ///
+ FunctionPass *createSjLjEHPreparePass();
+
+ /// LocalStackSlotAllocation - This pass assigns local frame indices to stack
+ /// slots relative to one another and allocates base registers to access them
+ /// when it is estimated by the target to be out of range of normal frame
+ /// pointer or stack pointer index addressing.
+ extern char &LocalStackSlotAllocationID;
+
+ /// ExpandISelPseudos - This pass expands pseudo-instructions.
+ extern char &ExpandISelPseudosID;
+
+ /// createExecutionDependencyFixPass - This pass fixes execution time
+ /// problems with dependent instructions, such as switching execution
+ /// domains to match.
+ ///
+ /// The pass will examine instructions using and defining registers in RC.
+ ///
+ FunctionPass *createExecutionDependencyFixPass(const TargetRegisterClass *RC);
+
+  /// UnpackMachineBundles - This pass unpacks machine instruction bundles.
+ extern char &UnpackMachineBundlesID;
+
+ FunctionPass *
+ createUnpackMachineBundles(std::function<bool(const Function &)> Ftor);
+
+  /// FinalizeMachineBundles - This pass finalizes machine instruction
+ /// bundles (created earlier, e.g. during pre-RA scheduling).
+ extern char &FinalizeMachineBundlesID;
+
+ /// StackMapLiveness - This pass analyses the register live-out set of
+ /// stackmap/patchpoint intrinsics and attaches the calculated information to
+ /// the intrinsic for later emission to the StackMap.
+ extern char &StackMapLivenessID;
+
+ /// LiveDebugValues pass
+ extern char &LiveDebugValuesID;
+
+ /// createJumpInstrTables - This pass creates jump-instruction tables.
+ ModulePass *createJumpInstrTablesPass();
+
+ /// createForwardControlFlowIntegrityPass - This pass adds control-flow
+ /// integrity.
+ ModulePass *createForwardControlFlowIntegrityPass();
+
+ /// InterleavedAccess Pass - This pass identifies and matches interleaved
+ /// memory accesses to target specific intrinsics.
+ ///
+ FunctionPass *createInterleavedAccessPass(const TargetMachine *TM);
+} // End llvm namespace
+
+/// Target machine pass initializer for passes with dependencies. Use with
+/// INITIALIZE_TM_PASS_END.
+#define INITIALIZE_TM_PASS_BEGIN INITIALIZE_PASS_BEGIN
+
+/// Target machine pass initializer for passes with dependencies. Use with
+/// INITIALIZE_TM_PASS_BEGIN.
+#define INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis) \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis, \
+ PassInfo::TargetMachineCtor_t(callTargetMachineCtor< passName >)); \
+ Registry.registerPass(*PI, true); \
+ return PI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+/// This initializer registers the TargetMachine constructor, so the pass being
+/// initialized can use target dependent interfaces. Please do not move this
+/// macro to be together with INITIALIZE_PASS, which is a complete target
+/// independent initializer, and we don't want to make libScalarOpts depend
+/// on libCodeGen.
+#define INITIALIZE_TM_PASS(passName, arg, name, cfg, analysis) \
+ INITIALIZE_TM_PASS_BEGIN(passName, arg, name, cfg, analysis) \
+ INITIALIZE_TM_PASS_END(passName, arg, name, cfg, analysis)
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
new file mode 100644
index 0000000..f675520
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/PseudoSourceValue.h
@@ -0,0 +1,183 @@
+//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PseudoSourceValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueMap.h"
+#include <map>
+
+namespace llvm {
+
+class MachineFrameInfo;
+class MachineMemOperand;
+class raw_ostream;
+
+raw_ostream &operator<<(raw_ostream &OS, const MachineMemOperand &MMO);
+
+/// Special value supplied for machine level alias analysis. It indicates that
+/// a memory access references the function's stack frame (e.g., a spill
+/// slot), the area below the stack frame (e.g., argument space), or the
+/// constant pool.
+class PseudoSourceValue {
+public:
+ enum PSVKind {
+ Stack,
+ GOT,
+ JumpTable,
+ ConstantPool,
+ FixedStack,
+ GlobalValueCallEntry,
+ ExternalSymbolCallEntry
+ };
+
+private:
+ PSVKind Kind;
+
+ friend class MachineMemOperand; // For printCustom().
+
+ /// Implement printing for PseudoSourceValue. This is called from
+ /// Value::print or Value's operator<<.
+ virtual void printCustom(raw_ostream &O) const;
+
+public:
+ explicit PseudoSourceValue(PSVKind Kind);
+
+ virtual ~PseudoSourceValue();
+
+ PSVKind kind() const { return Kind; }
+
+ bool isStack() const { return Kind == Stack; }
+ bool isGOT() const { return Kind == GOT; }
+ bool isConstantPool() const { return Kind == ConstantPool; }
+ bool isJumpTable() const { return Kind == JumpTable; }
+
+ /// Test whether the memory pointed to by this PseudoSourceValue has a
+ /// constant value.
+ virtual bool isConstant(const MachineFrameInfo *) const;
+
+ /// Test whether the memory pointed to by this PseudoSourceValue may also be
+ /// pointed to by an LLVM IR Value.
+ virtual bool isAliased(const MachineFrameInfo *) const;
+
+ /// Return true if the memory pointed to by this PseudoSourceValue can ever
+ /// alias an LLVM IR Value.
+ virtual bool mayAlias(const MachineFrameInfo *) const;
+};
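+
+// Example (sketch), given a MachineMemOperand *MMO and MachineFrameInfo *MFI:
+//   if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
+//     if (PSV->isConstantPool() || PSV->isJumpTable())
+//       ; // Reads from constant memory.
+//     else if (!PSV->isAliased(MFI))
+//       ; // Cannot be pointed to by an IR Value.
+//   }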
+
+/// A specialized PseudoSourceValue for holding FixedStack values, which must
+/// include a frame index.
+class FixedStackPseudoSourceValue : public PseudoSourceValue {
+ const int FI;
+
+public:
+ explicit FixedStackPseudoSourceValue(int FI)
+ : PseudoSourceValue(FixedStack), FI(FI) {}
+
+ static inline bool classof(const PseudoSourceValue *V) {
+ return V->kind() == FixedStack;
+ }
+
+ bool isConstant(const MachineFrameInfo *MFI) const override;
+
+ bool isAliased(const MachineFrameInfo *MFI) const override;
+
+ bool mayAlias(const MachineFrameInfo *) const override;
+
+ void printCustom(raw_ostream &OS) const override;
+
+ int getFrameIndex() const { return FI; }
+};
+
+class CallEntryPseudoSourceValue : public PseudoSourceValue {
+protected:
+ CallEntryPseudoSourceValue(PSVKind Kind);
+
+public:
+ bool isConstant(const MachineFrameInfo *) const override;
+ bool isAliased(const MachineFrameInfo *) const override;
+ bool mayAlias(const MachineFrameInfo *) const override;
+};
+
+/// A specialized pseudo source value for holding GlobalValue values.
+class GlobalValuePseudoSourceValue : public CallEntryPseudoSourceValue {
+ const GlobalValue *GV;
+
+public:
+ GlobalValuePseudoSourceValue(const GlobalValue *GV);
+
+ static inline bool classof(const PseudoSourceValue *V) {
+ return V->kind() == GlobalValueCallEntry;
+ }
+
+ const GlobalValue *getValue() const { return GV; }
+};
+
+/// A specialized pseudo source value for holding external symbol values.
+class ExternalSymbolPseudoSourceValue : public CallEntryPseudoSourceValue {
+ const char *ES;
+
+public:
+ ExternalSymbolPseudoSourceValue(const char *ES);
+
+ static inline bool classof(const PseudoSourceValue *V) {
+ return V->kind() == ExternalSymbolCallEntry;
+ }
+
+ const char *getSymbol() const { return ES; }
+};
+
+/// Manages creation of pseudo source values.
+class PseudoSourceValueManager {
+ const PseudoSourceValue StackPSV, GOTPSV, JumpTablePSV, ConstantPoolPSV;
+ std::map<int, std::unique_ptr<FixedStackPseudoSourceValue>> FSValues;
+ StringMap<std::unique_ptr<const ExternalSymbolPseudoSourceValue>>
+ ExternalCallEntries;
+ ValueMap<const GlobalValue *,
+ std::unique_ptr<const GlobalValuePseudoSourceValue>>
+ GlobalCallEntries;
+
+public:
+ PseudoSourceValueManager();
+
+ /// Return a pseudo source value referencing the area below the stack frame of
+ /// a function, e.g., the argument space.
+ const PseudoSourceValue *getStack();
+
+ /// Return a pseudo source value referencing the global offset table
+/// (or the like).
+ const PseudoSourceValue *getGOT();
+
+ /// Return a pseudo source value referencing the constant pool. Since constant
+ /// pools are constant, this doesn't need to identify a specific constant
+ /// pool entry.
+ const PseudoSourceValue *getConstantPool();
+
+ /// Return a pseudo source value referencing a jump table. Since jump tables
+ /// are constant, this doesn't need to identify a specific jump table.
+ const PseudoSourceValue *getJumpTable();
+
+ /// Return a pseudo source value referencing a fixed stack frame entry,
+ /// e.g., a spill slot.
+ const PseudoSourceValue *getFixedStack(int FI);
+
+ const PseudoSourceValue *getGlobalValueCallEntry(const GlobalValue *GV);
+
+ const PseudoSourceValue *getExternalSymbolCallEntry(const char *ES);
+};
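+
+// Example (sketch), assuming MF is the current MachineFunction (which owns the
+// manager), e.g. when building a memory operand for a spill slot:
+//   PseudoSourceValueManager &PSVMgr = MF.getPSVManager();
+//   const PseudoSourceValue *PSV = PSVMgr.getFixedStack(FrameIdx);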
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
new file mode 100644
index 0000000..4122811
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegAllocPBQP.h
@@ -0,0 +1,598 @@
+//===-- RegAllocPBQP.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PBQPBuilder interface, for classes which build PBQP
+// instances to represent register allocation problems, and the RegAllocPBQP
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCPBQP_H
+#define LLVM_CODEGEN_REGALLOCPBQP_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/PBQP/CostAllocator.h"
+#include "llvm/CodeGen/PBQP/ReductionRules.h"
+#include "llvm/CodeGen/PBQPRAConstraint.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+namespace PBQP {
+namespace RegAlloc {
+
+/// @brief Spill option index.
+inline unsigned getSpillOptionIdx() { return 0; }
+
+/// \brief Metadata to speed up the allocatability test.
+///
+/// Keeps track of the number of infinities in each row and column.
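+///
+/// Row and column 0 of the cost matrix hold the spill option and are skipped
+/// below; an infinite entry M[i][j] marks register option i-1 of one endpoint
+/// node and option j-1 of the other as "unsafe" along this edge.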
+class MatrixMetadata {
+private:
+ MatrixMetadata(const MatrixMetadata&);
+ void operator=(const MatrixMetadata&);
+public:
+ MatrixMetadata(const Matrix& M)
+ : WorstRow(0), WorstCol(0),
+ UnsafeRows(new bool[M.getRows() - 1]()),
+ UnsafeCols(new bool[M.getCols() - 1]()) {
+
+ unsigned* ColCounts = new unsigned[M.getCols() - 1]();
+
+ for (unsigned i = 1; i < M.getRows(); ++i) {
+ unsigned RowCount = 0;
+ for (unsigned j = 1; j < M.getCols(); ++j) {
+ if (M[i][j] == std::numeric_limits<PBQPNum>::infinity()) {
+ ++RowCount;
+ ++ColCounts[j - 1];
+ UnsafeRows[i - 1] = true;
+ UnsafeCols[j - 1] = true;
+ }
+ }
+ WorstRow = std::max(WorstRow, RowCount);
+ }
+ unsigned WorstColCountForCurRow =
+ *std::max_element(ColCounts, ColCounts + M.getCols() - 1);
+ WorstCol = std::max(WorstCol, WorstColCountForCurRow);
+ delete[] ColCounts;
+ }
+
+ unsigned getWorstRow() const { return WorstRow; }
+ unsigned getWorstCol() const { return WorstCol; }
+ const bool* getUnsafeRows() const { return UnsafeRows.get(); }
+ const bool* getUnsafeCols() const { return UnsafeCols.get(); }
+
+private:
+ unsigned WorstRow, WorstCol;
+ std::unique_ptr<bool[]> UnsafeRows;
+ std::unique_ptr<bool[]> UnsafeCols;
+};
+
+/// \brief Holds a vector of the allowed physical regs for a vreg.
+class AllowedRegVector {
+ friend hash_code hash_value(const AllowedRegVector &);
+public:
+
+ AllowedRegVector() : NumOpts(0), Opts(nullptr) {}
+
+ AllowedRegVector(const std::vector<unsigned> &OptVec)
+ : NumOpts(OptVec.size()), Opts(new unsigned[NumOpts]) {
+ std::copy(OptVec.begin(), OptVec.end(), Opts.get());
+ }
+
+ AllowedRegVector(const AllowedRegVector &Other)
+ : NumOpts(Other.NumOpts), Opts(new unsigned[NumOpts]) {
+ std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
+ }
+
+ AllowedRegVector(AllowedRegVector &&Other)
+ : NumOpts(std::move(Other.NumOpts)), Opts(std::move(Other.Opts)) {}
+
+ AllowedRegVector& operator=(const AllowedRegVector &Other) {
+ NumOpts = Other.NumOpts;
+ Opts.reset(new unsigned[NumOpts]);
+ std::copy(Other.Opts.get(), Other.Opts.get() + NumOpts, Opts.get());
+ return *this;
+ }
+
+ AllowedRegVector& operator=(AllowedRegVector &&Other) {
+ NumOpts = std::move(Other.NumOpts);
+ Opts = std::move(Other.Opts);
+ return *this;
+ }
+
+ unsigned size() const { return NumOpts; }
+ unsigned operator[](size_t I) const { return Opts[I]; }
+
+ bool operator==(const AllowedRegVector &Other) const {
+ if (NumOpts != Other.NumOpts)
+ return false;
+ return std::equal(Opts.get(), Opts.get() + NumOpts, Other.Opts.get());
+ }
+
+ bool operator!=(const AllowedRegVector &Other) const {
+ return !(*this == Other);
+ }
+
+private:
+ unsigned NumOpts;
+ std::unique_ptr<unsigned[]> Opts;
+};
+
+inline hash_code hash_value(const AllowedRegVector &OptRegs) {
+ unsigned *OStart = OptRegs.Opts.get();
+ unsigned *OEnd = OptRegs.Opts.get() + OptRegs.NumOpts;
+ return hash_combine(OptRegs.NumOpts,
+ hash_combine_range(OStart, OEnd));
+}
+
+/// \brief Holds graph-level metadata relevant to PBQP RA problems.
+class GraphMetadata {
+private:
+ typedef ValuePool<AllowedRegVector> AllowedRegVecPool;
+public:
+
+ typedef AllowedRegVecPool::PoolRef AllowedRegVecRef;
+
+ GraphMetadata(MachineFunction &MF,
+ LiveIntervals &LIS,
+ MachineBlockFrequencyInfo &MBFI)
+ : MF(MF), LIS(LIS), MBFI(MBFI) {}
+
+ MachineFunction &MF;
+ LiveIntervals &LIS;
+ MachineBlockFrequencyInfo &MBFI;
+
+ void setNodeIdForVReg(unsigned VReg, GraphBase::NodeId NId) {
+ VRegToNodeId[VReg] = NId;
+ }
+
+ GraphBase::NodeId getNodeIdForVReg(unsigned VReg) const {
+ auto VRegItr = VRegToNodeId.find(VReg);
+ if (VRegItr == VRegToNodeId.end())
+ return GraphBase::invalidNodeId();
+ return VRegItr->second;
+ }
+
+ void eraseNodeIdForVReg(unsigned VReg) {
+ VRegToNodeId.erase(VReg);
+ }
+
+ AllowedRegVecRef getAllowedRegs(AllowedRegVector Allowed) {
+ return AllowedRegVecs.getValue(std::move(Allowed));
+ }
+
+private:
+ DenseMap<unsigned, GraphBase::NodeId> VRegToNodeId;
+ AllowedRegVecPool AllowedRegVecs;
+};
+
+/// \brief Holds solver state and other metadata relevant to each PBQP RA node.
+class NodeMetadata {
+public:
+ typedef RegAlloc::AllowedRegVector AllowedRegVector;
+
+ // The node's reduction state. The order in this enum is important,
+ // as it is assumed nodes can only progress up (i.e. towards being
+ // optimally reducible) when reducing the graph.
+ typedef enum {
+ Unprocessed,
+ NotProvablyAllocatable,
+ ConservativelyAllocatable,
+ OptimallyReducible
+ } ReductionState;
+
+ NodeMetadata()
+ : RS(Unprocessed), NumOpts(0), DeniedOpts(0), OptUnsafeEdges(nullptr),
+ VReg(0)
+#ifndef NDEBUG
+ , everConservativelyAllocatable(false)
+#endif
+ {}
+
+ // FIXME: Re-implementing default behavior to work around MSVC. Remove once
+ // MSVC synthesizes move constructors properly.
+ NodeMetadata(const NodeMetadata &Other)
+ : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
+ OptUnsafeEdges(new unsigned[NumOpts]), VReg(Other.VReg),
+ AllowedRegs(Other.AllowedRegs)
+#ifndef NDEBUG
+ , everConservativelyAllocatable(Other.everConservativelyAllocatable)
+#endif
+ {
+ if (NumOpts > 0) {
+ std::copy(&Other.OptUnsafeEdges[0], &Other.OptUnsafeEdges[NumOpts],
+ &OptUnsafeEdges[0]);
+ }
+ }
+
+ // FIXME: Re-implementing default behavior to work around MSVC. Remove once
+ // MSVC synthesizes move constructors properly.
+ NodeMetadata(NodeMetadata &&Other)
+ : RS(Other.RS), NumOpts(Other.NumOpts), DeniedOpts(Other.DeniedOpts),
+ OptUnsafeEdges(std::move(Other.OptUnsafeEdges)), VReg(Other.VReg),
+ AllowedRegs(std::move(Other.AllowedRegs))
+#ifndef NDEBUG
+ , everConservativelyAllocatable(Other.everConservativelyAllocatable)
+#endif
+ {}
+
+ // FIXME: Re-implementing default behavior to work around MSVC. Remove once
+ // MSVC synthesizes move constructors properly.
+ NodeMetadata& operator=(const NodeMetadata &Other) {
+ RS = Other.RS;
+ NumOpts = Other.NumOpts;
+ DeniedOpts = Other.DeniedOpts;
+ OptUnsafeEdges.reset(new unsigned[NumOpts]);
+ std::copy(Other.OptUnsafeEdges.get(), Other.OptUnsafeEdges.get() + NumOpts,
+ OptUnsafeEdges.get());
+ VReg = Other.VReg;
+ AllowedRegs = Other.AllowedRegs;
+#ifndef NDEBUG
+ everConservativelyAllocatable = Other.everConservativelyAllocatable;
+#endif
+ return *this;
+ }
+
+ // FIXME: Re-implementing default behavior to work around MSVC. Remove once
+ // MSVC synthesizes move constructors properly.
+ NodeMetadata& operator=(NodeMetadata &&Other) {
+ RS = Other.RS;
+ NumOpts = Other.NumOpts;
+ DeniedOpts = Other.DeniedOpts;
+ OptUnsafeEdges = std::move(Other.OptUnsafeEdges);
+ VReg = Other.VReg;
+ AllowedRegs = std::move(Other.AllowedRegs);
+#ifndef NDEBUG
+ everConservativelyAllocatable = Other.everConservativelyAllocatable;
+#endif
+ return *this;
+ }
+
+ void setVReg(unsigned VReg) { this->VReg = VReg; }
+ unsigned getVReg() const { return VReg; }
+
+ void setAllowedRegs(GraphMetadata::AllowedRegVecRef AllowedRegs) {
+ this->AllowedRegs = std::move(AllowedRegs);
+ }
+ const AllowedRegVector& getAllowedRegs() const { return *AllowedRegs; }
+
+ void setup(const Vector& Costs) {
+ NumOpts = Costs.getLength() - 1;
+ OptUnsafeEdges = std::unique_ptr<unsigned[]>(new unsigned[NumOpts]());
+ }
+
+ ReductionState getReductionState() const { return RS; }
+ void setReductionState(ReductionState RS) {
+    assert(RS >= this->RS && "A node's reduction state cannot be downgraded");
+ this->RS = RS;
+
+#ifndef NDEBUG
+ // Remember this state to assert later that a non-infinite register
+ // option was available.
+ if (RS == ConservativelyAllocatable)
+ everConservativelyAllocatable = true;
+#endif
+ }
+
+
+ void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
+ DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
+ const bool* UnsafeOpts =
+ Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+ for (unsigned i = 0; i < NumOpts; ++i)
+ OptUnsafeEdges[i] += UnsafeOpts[i];
+ }
+
+ void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
+ DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
+ const bool* UnsafeOpts =
+ Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
+ for (unsigned i = 0; i < NumOpts; ++i)
+ OptUnsafeEdges[i] -= UnsafeOpts[i];
+ }
+
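+  // A node is conservatively allocatable when its neighbors cannot possibly
+  // deny every register option: either fewer options are denied than exist,
+  // or at least one option lies on no unsafe edge at all.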
+ bool isConservativelyAllocatable() const {
+ return (DeniedOpts < NumOpts) ||
+ (std::find(&OptUnsafeEdges[0], &OptUnsafeEdges[NumOpts], 0) !=
+ &OptUnsafeEdges[NumOpts]);
+ }
+
+#ifndef NDEBUG
+ bool wasConservativelyAllocatable() const {
+ return everConservativelyAllocatable;
+ }
+#endif
+
+private:
+ ReductionState RS;
+ unsigned NumOpts;
+ unsigned DeniedOpts;
+ std::unique_ptr<unsigned[]> OptUnsafeEdges;
+ unsigned VReg;
+ GraphMetadata::AllowedRegVecRef AllowedRegs;
+
+#ifndef NDEBUG
+ bool everConservativelyAllocatable;
+#endif
+};
+
+class RegAllocSolverImpl {
+private:
+ typedef MDMatrix<MatrixMetadata> RAMatrix;
+public:
+ typedef PBQP::Vector RawVector;
+ typedef PBQP::Matrix RawMatrix;
+ typedef PBQP::Vector Vector;
+ typedef RAMatrix Matrix;
+ typedef PBQP::PoolCostAllocator<Vector, Matrix> CostAllocator;
+
+ typedef GraphBase::NodeId NodeId;
+ typedef GraphBase::EdgeId EdgeId;
+
+ typedef RegAlloc::NodeMetadata NodeMetadata;
+ struct EdgeMetadata { };
+ typedef RegAlloc::GraphMetadata GraphMetadata;
+
+ typedef PBQP::Graph<RegAllocSolverImpl> Graph;
+
+ RegAllocSolverImpl(Graph &G) : G(G) {}
+
+ Solution solve() {
+ G.setSolver(*this);
+ Solution S;
+ setup();
+ S = backpropagate(G, reduce());
+ G.unsetSolver();
+ return S;
+ }
+
+ void handleAddNode(NodeId NId) {
+ assert(G.getNodeCosts(NId).getLength() > 1 &&
+ "PBQP Graph should not contain single or zero-option nodes");
+ G.getNodeMetadata(NId).setup(G.getNodeCosts(NId));
+ }
+ void handleRemoveNode(NodeId NId) {}
+ void handleSetNodeCosts(NodeId NId, const Vector& newCosts) {}
+
+ void handleAddEdge(EdgeId EId) {
+ handleReconnectEdge(EId, G.getEdgeNode1Id(EId));
+ handleReconnectEdge(EId, G.getEdgeNode2Id(EId));
+ }
+
+ void handleRemoveEdge(EdgeId EId) {
+ handleDisconnectEdge(EId, G.getEdgeNode1Id(EId));
+ handleDisconnectEdge(EId, G.getEdgeNode2Id(EId));
+ }
+
+ void handleDisconnectEdge(EdgeId EId, NodeId NId) {
+ NodeMetadata& NMd = G.getNodeMetadata(NId);
+ const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+ NMd.handleRemoveEdge(MMd, NId == G.getEdgeNode2Id(EId));
+ promote(NId, NMd);
+ }
+
+ void handleReconnectEdge(EdgeId EId, NodeId NId) {
+ NodeMetadata& NMd = G.getNodeMetadata(NId);
+ const MatrixMetadata& MMd = G.getEdgeCosts(EId).getMetadata();
+ NMd.handleAddEdge(MMd, NId == G.getEdgeNode2Id(EId));
+ }
+
+ void handleUpdateCosts(EdgeId EId, const Matrix& NewCosts) {
+ NodeId N1Id = G.getEdgeNode1Id(EId);
+ NodeId N2Id = G.getEdgeNode2Id(EId);
+ NodeMetadata& N1Md = G.getNodeMetadata(N1Id);
+ NodeMetadata& N2Md = G.getNodeMetadata(N2Id);
+ bool Transpose = N1Id != G.getEdgeNode1Id(EId);
+
+ // Metadata are computed incrementally. First, update them
+ // by removing the old cost.
+ const MatrixMetadata& OldMMd = G.getEdgeCosts(EId).getMetadata();
+ N1Md.handleRemoveEdge(OldMMd, Transpose);
+ N2Md.handleRemoveEdge(OldMMd, !Transpose);
+
+    // Now update the metadata with the new cost.
+ const MatrixMetadata& MMd = NewCosts.getMetadata();
+ N1Md.handleAddEdge(MMd, Transpose);
+ N2Md.handleAddEdge(MMd, !Transpose);
+
+ // As the metadata may have changed with the update, the nodes may have
+ // become ConservativelyAllocatable or OptimallyReducible.
+ promote(N1Id, N1Md);
+ promote(N2Id, N2Md);
+ }
+
+private:
+
+ void promote(NodeId NId, NodeMetadata& NMd) {
+ if (G.getNodeDegree(NId) == 3) {
+ // This node is becoming optimally reducible.
+ moveToOptimallyReducibleNodes(NId);
+ } else if (NMd.getReductionState() ==
+ NodeMetadata::NotProvablyAllocatable &&
+ NMd.isConservativelyAllocatable()) {
+ // This node just became conservatively allocatable.
+ moveToConservativelyAllocatableNodes(NId);
+ }
+ }
+
+ void removeFromCurrentSet(NodeId NId) {
+ switch (G.getNodeMetadata(NId).getReductionState()) {
+ case NodeMetadata::Unprocessed: break;
+ case NodeMetadata::OptimallyReducible:
+ assert(OptimallyReducibleNodes.find(NId) !=
+ OptimallyReducibleNodes.end() &&
+ "Node not in optimally reducible set.");
+ OptimallyReducibleNodes.erase(NId);
+ break;
+ case NodeMetadata::ConservativelyAllocatable:
+ assert(ConservativelyAllocatableNodes.find(NId) !=
+ ConservativelyAllocatableNodes.end() &&
+ "Node not in conservatively allocatable set.");
+ ConservativelyAllocatableNodes.erase(NId);
+ break;
+ case NodeMetadata::NotProvablyAllocatable:
+ assert(NotProvablyAllocatableNodes.find(NId) !=
+ NotProvablyAllocatableNodes.end() &&
+ "Node not in not-provably-allocatable set.");
+ NotProvablyAllocatableNodes.erase(NId);
+ break;
+ }
+ }
+
+ void moveToOptimallyReducibleNodes(NodeId NId) {
+ removeFromCurrentSet(NId);
+ OptimallyReducibleNodes.insert(NId);
+ G.getNodeMetadata(NId).setReductionState(
+ NodeMetadata::OptimallyReducible);
+ }
+
+ void moveToConservativelyAllocatableNodes(NodeId NId) {
+ removeFromCurrentSet(NId);
+ ConservativelyAllocatableNodes.insert(NId);
+ G.getNodeMetadata(NId).setReductionState(
+ NodeMetadata::ConservativelyAllocatable);
+ }
+
+ void moveToNotProvablyAllocatableNodes(NodeId NId) {
+ removeFromCurrentSet(NId);
+ NotProvablyAllocatableNodes.insert(NId);
+ G.getNodeMetadata(NId).setReductionState(
+ NodeMetadata::NotProvablyAllocatable);
+ }
+
+ void setup() {
+ // Set up worklists.
+ for (auto NId : G.nodeIds()) {
+ if (G.getNodeDegree(NId) < 3)
+ moveToOptimallyReducibleNodes(NId);
+ else if (G.getNodeMetadata(NId).isConservativelyAllocatable())
+ moveToConservativelyAllocatableNodes(NId);
+ else
+ moveToNotProvablyAllocatableNodes(NId);
+ }
+ }
+
+ // Compute a reduction order for the graph by iteratively applying PBQP
+ // reduction rules. Locally optimal rules are applied whenever possible (R0,
+ // R1, R2). If no locally-optimal rules apply then any conservatively
+ // allocatable node is reduced. Finally, if no conservatively allocatable
+ // node exists then the node with the lowest spill-cost:degree ratio is
+ // selected.
+ std::vector<GraphBase::NodeId> reduce() {
+ assert(!G.empty() && "Cannot reduce empty graph.");
+
+ typedef GraphBase::NodeId NodeId;
+ std::vector<NodeId> NodeStack;
+
+ // Consume worklists.
+ while (true) {
+ if (!OptimallyReducibleNodes.empty()) {
+ NodeSet::iterator NItr = OptimallyReducibleNodes.begin();
+ NodeId NId = *NItr;
+ OptimallyReducibleNodes.erase(NItr);
+ NodeStack.push_back(NId);
+ switch (G.getNodeDegree(NId)) {
+ case 0:
+ break;
+ case 1:
+ applyR1(G, NId);
+ break;
+ case 2:
+ applyR2(G, NId);
+ break;
+ default: llvm_unreachable("Not an optimally reducible node.");
+ }
+ } else if (!ConservativelyAllocatableNodes.empty()) {
+ // Conservatively allocatable nodes will never spill. For now just
+ // take the first node in the set and push it on the stack. When we
+        // start optimizing more heavily for register preferences, it may
+        // be better to push nodes with lower 'expected' or worst-case
+ // register costs first (since early nodes are the most
+ // constrained).
+ NodeSet::iterator NItr = ConservativelyAllocatableNodes.begin();
+ NodeId NId = *NItr;
+ ConservativelyAllocatableNodes.erase(NItr);
+ NodeStack.push_back(NId);
+ G.disconnectAllNeighborsFromNode(NId);
+
+ } else if (!NotProvablyAllocatableNodes.empty()) {
+ NodeSet::iterator NItr =
+ std::min_element(NotProvablyAllocatableNodes.begin(),
+ NotProvablyAllocatableNodes.end(),
+ SpillCostComparator(G));
+ NodeId NId = *NItr;
+ NotProvablyAllocatableNodes.erase(NItr);
+ NodeStack.push_back(NId);
+ G.disconnectAllNeighborsFromNode(NId);
+ } else
+ break;
+ }
+
+ return NodeStack;
+ }
+
+ class SpillCostComparator {
+ public:
+ SpillCostComparator(const Graph& G) : G(G) {}
+ bool operator()(NodeId N1Id, NodeId N2Id) {
+ PBQPNum N1SC = G.getNodeCosts(N1Id)[0];
+ PBQPNum N2SC = G.getNodeCosts(N2Id)[0];
+ if (N1SC == N2SC)
+ return G.getNodeDegree(N1Id) < G.getNodeDegree(N2Id);
+ return N1SC < N2SC;
+ }
+ private:
+ const Graph& G;
+ };
+
+ Graph& G;
+ typedef std::set<NodeId> NodeSet;
+ NodeSet OptimallyReducibleNodes;
+ NodeSet ConservativelyAllocatableNodes;
+ NodeSet NotProvablyAllocatableNodes;
+};
+
+class PBQPRAGraph : public PBQP::Graph<RegAllocSolverImpl> {
+private:
+ typedef PBQP::Graph<RegAllocSolverImpl> BaseT;
+public:
+ PBQPRAGraph(GraphMetadata Metadata) : BaseT(Metadata) {}
+
+ /// @brief Dump this graph to dbgs().
+ void dump() const;
+
+ /// @brief Dump this graph to an output stream.
+ /// @param OS Output stream to print on.
+ void dump(raw_ostream &OS) const;
+
+ /// @brief Print a representation of this graph in DOT format.
+ /// @param OS Output stream to print on.
+ void printDot(raw_ostream &OS) const;
+};
+
+inline Solution solve(PBQPRAGraph& G) {
+ if (G.empty())
+ return Solution();
+ RegAllocSolverImpl RegAllocSolver(G);
+ return RegAllocSolver.solve();
+}
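+
+// Illustrative usage sketch: with one node per vreg and one edge per
+// interference, the chosen option for each node is read back from the
+// Solution (option 0 is the spill option, see getSpillOptionIdx()):
+//   Solution Sol = solve(G);
+//   for (auto NId : G.nodeIds()) {
+//     unsigned VReg = G.getNodeMetadata(NId).getVReg();
+//     unsigned Option = Sol.getSelection(NId);
+//   }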
+
+} // namespace RegAlloc
+} // namespace PBQP
+
+/// @brief Create a PBQP register allocator instance.
+FunctionPass *
+createPBQPRegisterAllocator(char *customPassID = nullptr);
+
+} // namespace llvm
+
+#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */
diff --git a/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h b/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h
new file mode 100644
index 0000000..5c7e999
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegAllocRegistry.h
@@ -0,0 +1,63 @@
+//===-- llvm/CodeGen/RegAllocRegistry.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the register allocator function
+// pass registry (RegisterRegAlloc).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGALLOCREGISTRY_H
+#define LLVM_CODEGEN_REGALLOCREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterRegAlloc class - Track the registration of register allocators.
+///
+//===----------------------------------------------------------------------===//
+class RegisterRegAlloc : public MachinePassRegistryNode {
+
+public:
+
+ typedef FunctionPass *(*FunctionPassCtor)();
+
+ static MachinePassRegistry Registry;
+
+ RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C) {
+ Registry.Add(this);
+ }
+ ~RegisterRegAlloc() { Registry.Remove(this); }
+
+ // Accessors.
+ //
+ RegisterRegAlloc *getNext() const {
+ return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
+ }
+ static RegisterRegAlloc *getList() {
+ return (RegisterRegAlloc *)Registry.getList();
+ }
+ static FunctionPassCtor getDefault() {
+ return (FunctionPassCtor)Registry.getDefault();
+ }
+ static void setDefault(FunctionPassCtor C) {
+ Registry.setDefault((MachinePassCtor)C);
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+};
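+
+// Illustrative registration sketch (factory and pass names hypothetical);
+// this is how allocators become selectable via -regalloc=<name>:
+//   static FunctionPass *createMyRegAlloc() { return new MyRegAllocPass(); }
+//   static RegisterRegAlloc
+//     myRegAlloc("myalloc", "my register allocator", createMyRegAlloc);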
+
+} // end namespace llvm
+
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
new file mode 100644
index 0000000..d784dfb
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterClassInfo.h
@@ -0,0 +1,145 @@
+//===-- RegisterClassInfo.h - Dynamic Register Class Info -*- C++ -*-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RegisterClassInfo class which provides dynamic
+// information about target register classes. Callee saved and reserved
+// registers depend on calling conventions and other dynamic information, so
+// some things cannot be determined statically.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERCLASSINFO_H
+#define LLVM_CODEGEN_REGISTERCLASSINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class RegisterClassInfo {
+ struct RCInfo {
+ unsigned Tag;
+ unsigned NumRegs;
+ bool ProperSubClass;
+ uint8_t MinCost;
+ uint16_t LastCostChange;
+ std::unique_ptr<MCPhysReg[]> Order;
+
+ RCInfo()
+ : Tag(0), NumRegs(0), ProperSubClass(false), MinCost(0),
+ LastCostChange(0) {}
+
+ operator ArrayRef<MCPhysReg>() const {
+ return makeArrayRef(Order.get(), NumRegs);
+ }
+ };
+
+ // Brief cached information for each register class.
+ std::unique_ptr<RCInfo[]> RegClass;
+
+ // Tag changes whenever cached information needs to be recomputed. An RCInfo
+ // entry is valid when its tag matches.
+ unsigned Tag;
+
+ const MachineFunction *MF;
+ const TargetRegisterInfo *TRI;
+
+ // Callee saved registers of last MF. Assumed to be valid until the next
+  // runOnMachineFunction() call.
+ const MCPhysReg *CalleeSaved;
+
+  // Map register number to CalleeSaved index + 1.
+ SmallVector<uint8_t, 4> CSRNum;
+
+ // Reserved registers in the current MF.
+ BitVector Reserved;
+
+ std::unique_ptr<unsigned[]> PSetLimits;
+
+ // Compute all information about RC.
+ void compute(const TargetRegisterClass *RC) const;
+
+ // Return an up-to-date RCInfo for RC.
+ const RCInfo &get(const TargetRegisterClass *RC) const {
+ const RCInfo &RCI = RegClass[RC->getID()];
+ if (Tag != RCI.Tag)
+ compute(RC);
+ return RCI;
+ }
+
+public:
+ RegisterClassInfo();
+
+  /// runOnMachineFunction - Prepare to answer questions about MF. This must
+  /// be called before any other methods are used.
+ void runOnMachineFunction(const MachineFunction &MF);
+
+ /// getNumAllocatableRegs - Returns the number of actually allocatable
+ /// registers in RC in the current function.
+ unsigned getNumAllocatableRegs(const TargetRegisterClass *RC) const {
+ return get(RC).NumRegs;
+ }
+
+ /// getOrder - Returns the preferred allocation order for RC. The order
+ /// contains no reserved registers, and registers that alias callee saved
+ /// registers come last.
+ ArrayRef<MCPhysReg> getOrder(const TargetRegisterClass *RC) const {
+ return get(RC);
+ }
+
+ /// isProperSubClass - Returns true if RC has a legal super-class with more
+ /// allocatable registers.
+ ///
+ /// Register classes like GR32_NOSP are not proper sub-classes because %esp
+ /// is not allocatable. Similarly, tGPR is not a proper sub-class in Thumb
+ /// mode because the GPR super-class is not legal.
+ bool isProperSubClass(const TargetRegisterClass *RC) const {
+ return get(RC).ProperSubClass;
+ }
+
+ /// getLastCalleeSavedAlias - Returns the last callee saved register that
+  /// overlaps PhysReg, or 0 if PhysReg doesn't overlap a CSR.
+ unsigned getLastCalleeSavedAlias(unsigned PhysReg) const {
+ assert(TargetRegisterInfo::isPhysicalRegister(PhysReg));
+ if (unsigned N = CSRNum[PhysReg])
+ return CalleeSaved[N-1];
+ return 0;
+ }
+
+ /// Get the minimum register cost in RC's allocation order.
+ /// This is the smallest value returned by TRI->getCostPerUse(Reg) for all
+ /// the registers in getOrder(RC).
+ unsigned getMinCost(const TargetRegisterClass *RC) {
+ return get(RC).MinCost;
+ }
+
+ /// Get the position of the last cost change in getOrder(RC).
+ ///
+ /// All registers in getOrder(RC).slice(getLastCostChange(RC)) will have the
+ /// same cost according to TRI->getCostPerUse().
+ unsigned getLastCostChange(const TargetRegisterClass *RC) {
+ return get(RC).LastCostChange;
+ }
+
+ /// Get the register unit limit for the given pressure set index.
+ ///
+ /// RegisterClassInfo adjusts this limit for reserved registers.
+ unsigned getRegPressureSetLimit(unsigned Idx) const {
+ if (!PSetLimits[Idx])
+ PSetLimits[Idx] = computePSetLimit(Idx);
+ return PSetLimits[Idx];
+ }
+
+protected:
+ unsigned computePSetLimit(unsigned Idx) const;
+};
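+
+// Illustrative usage sketch from within a machine pass (names hypothetical):
+//   RegisterClassInfo RCI;            // typically a long-lived member
+//   RCI.runOnMachineFunction(MF);     // refresh cached info for this function
+//   for (MCPhysReg PhysReg : RCI.getOrder(RC)) {
+//     // try PhysReg: reserved registers are excluded and registers
+//     // aliasing callee-saved registers come last
+//   }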
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
new file mode 100644
index 0000000..987634f
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterPressure.h
@@ -0,0 +1,455 @@
+//===-- RegisterPressure.h - Dynamic Register Pressure -*- C++ -*-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RegisterPressure class which can be used to track
+// MachineInstr level register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
+#define LLVM_CODEGEN_REGISTERPRESSURE_H
+
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class LiveIntervals;
+class LiveRange;
+class RegisterClassInfo;
+class MachineInstr;
+
+/// Base class for register pressure results.
+struct RegisterPressure {
+ /// Map of max reg pressure indexed by pressure set ID, not class ID.
+ std::vector<unsigned> MaxSetPressure;
+
+ /// List of live in virtual registers or physical register units.
+ SmallVector<unsigned,8> LiveInRegs;
+ SmallVector<unsigned,8> LiveOutRegs;
+
+ void dump(const TargetRegisterInfo *TRI) const;
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopIdx and BottomIdx. During pressure computation, the maximum pressure per
+/// register pressure set is increased. Once pressure within a region is fully
+/// computed, the live-in and live-out sets are recorded.
+///
+/// This is preferable to RegionPressure when LiveIntervals are available,
+/// because delimiting regions by SlotIndex is more robust and convenient than
+/// holding block iterators. The block contents can change without invalidating
+/// the pressure result.
+struct IntervalPressure : RegisterPressure {
+ /// Record the boundary of the region being tracked.
+ SlotIndex TopIdx;
+ SlotIndex BottomIdx;
+
+ void reset();
+
+ void openTop(SlotIndex NextTop);
+
+ void openBottom(SlotIndex PrevBottom);
+};
+
+/// RegisterPressure computed within a region of instructions delimited by
+/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
+/// use when LiveIntervals are unavailable.
+struct RegionPressure : RegisterPressure {
+ /// Record the boundary of the region being tracked.
+ MachineBasicBlock::const_iterator TopPos;
+ MachineBasicBlock::const_iterator BottomPos;
+
+ void reset();
+
+ void openTop(MachineBasicBlock::const_iterator PrevTop);
+
+ void openBottom(MachineBasicBlock::const_iterator PrevBottom);
+};
+
+/// Capture a change in pressure for a single pressure set. UnitInc may be
+/// expressed in terms of upward or downward pressure depending on the client
+/// and will be dynamically adjusted for current liveness.
+///
+/// Pressure increments are tiny, typically 1-2 units, and this is only for
+/// heuristics, so we don't check UnitInc overflow. Instead, we may have a
+/// higher level assert that pressure is consistent within a region. We also
+/// effectively ignore dead defs which don't affect heuristics much.
+class PressureChange {
+ uint16_t PSetID; // ID+1. 0=Invalid.
+ int16_t UnitInc;
+public:
+ PressureChange(): PSetID(0), UnitInc(0) {}
+ PressureChange(unsigned id): PSetID(id+1), UnitInc(0) {
+ assert(id < UINT16_MAX && "PSetID overflow.");
+ }
+
+ bool isValid() const { return PSetID > 0; }
+
+ unsigned getPSet() const {
+ assert(isValid() && "invalid PressureChange");
+ return PSetID - 1;
+ }
+ // If PSetID is invalid, return UINT16_MAX to give it lowest priority.
+ unsigned getPSetOrMax() const { return (PSetID - 1) & UINT16_MAX; }
+
+ int getUnitInc() const { return UnitInc; }
+
+ void setUnitInc(int Inc) { UnitInc = Inc; }
+
+ bool operator==(const PressureChange &RHS) const {
+ return PSetID == RHS.PSetID && UnitInc == RHS.UnitInc;
+ }
+};
+
+template <> struct isPodLike<PressureChange> {
+ static const bool value = true;
+};
+
+/// List of PressureChanges in order of increasing, unique PSetID.
+///
+/// Use a small fixed number, because we can fit more PressureChanges in an
+/// empty SmallVector than ever need to be tracked per register class. If more
+/// PSets are affected, then we only track the most constrained.
+class PressureDiff {
+ // The initial design was for MaxPSets=4, but that requires PSet partitions,
+ // which are not yet implemented. (PSet partitions are equivalent PSets given
+ // the register classes actually in use within the scheduling region.)
+ enum { MaxPSets = 16 };
+
+ PressureChange PressureChanges[MaxPSets];
+
+ typedef PressureChange* iterator;
+ iterator nonconst_begin() { return &PressureChanges[0]; }
+ iterator nonconst_end() { return &PressureChanges[MaxPSets]; }
+
+public:
+ typedef const PressureChange* const_iterator;
+ const_iterator begin() const { return &PressureChanges[0]; }
+ const_iterator end() const { return &PressureChanges[MaxPSets]; }
+
+ void addPressureChange(unsigned RegUnit, bool IsDec,
+ const MachineRegisterInfo *MRI);
+
+ LLVM_DUMP_METHOD void dump(const TargetRegisterInfo &TRI) const;
+};
+
+/// Array of PressureDiffs.
+class PressureDiffs {
+ PressureDiff *PDiffArray;
+ unsigned Size;
+ unsigned Max;
+public:
+ PressureDiffs(): PDiffArray(nullptr), Size(0), Max(0) {}
+ ~PressureDiffs() { free(PDiffArray); }
+
+ void clear() { Size = 0; }
+
+ void init(unsigned N);
+
+ PressureDiff &operator[](unsigned Idx) {
+ assert(Idx < Size && "PressureDiff index out of bounds");
+ return PDiffArray[Idx];
+ }
+ const PressureDiff &operator[](unsigned Idx) const {
+ return const_cast<PressureDiffs*>(this)->operator[](Idx);
+ }
+};
+
+/// Store the effects of a change in pressure on things that the MI scheduler
+/// cares about.
+///
+/// Excess records the value of the largest difference in register units beyond
+/// the target's pressure limits across the affected pressure sets, where
+/// largest is defined as the absolute value of the difference. Negative
+/// ExcessUnits indicates a reduction in pressure that had already exceeded the
+/// target's limits.
+///
+/// CriticalMax records the largest increase in the tracker's max pressure that
+/// exceeds the critical limit for some pressure set determined by the client.
+///
+/// CurrentMax records the largest increase in the tracker's max pressure that
+/// exceeds the current limit for some pressure set determined by the client.
+struct RegPressureDelta {
+ PressureChange Excess;
+ PressureChange CriticalMax;
+ PressureChange CurrentMax;
+
+ RegPressureDelta() {}
+
+ bool operator==(const RegPressureDelta &RHS) const {
+ return Excess == RHS.Excess && CriticalMax == RHS.CriticalMax
+ && CurrentMax == RHS.CurrentMax;
+ }
+ bool operator!=(const RegPressureDelta &RHS) const {
+ return !operator==(RHS);
+ }
+};
+
+/// A set of live virtual registers and physical register units.
+///
+/// This is a wrapper around a SparseSet which deals with mapping register unit
+/// and virtual register indexes to an index usable by the sparse set.
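+///
+/// The sparse index space is laid out as [0, NumRegUnits) for physical
+/// register units, followed by NumRegUnits + virtReg2Index(Reg) for virtual
+/// registers.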
+class LiveRegSet {
+private:
+ SparseSet<unsigned> Regs;
+ unsigned NumRegUnits;
+
+ unsigned getSparseIndexFromReg(unsigned Reg) const {
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ return TargetRegisterInfo::virtReg2Index(Reg) + NumRegUnits;
+ assert(Reg < NumRegUnits);
+ return Reg;
+ }
+ unsigned getRegFromSparseIndex(unsigned SparseIndex) const {
+ if (SparseIndex >= NumRegUnits)
+ return TargetRegisterInfo::index2VirtReg(SparseIndex-NumRegUnits);
+ return SparseIndex;
+ }
+
+public:
+ void clear();
+ void init(const MachineRegisterInfo &MRI);
+
+ bool contains(unsigned Reg) const {
+ unsigned SparseIndex = getSparseIndexFromReg(Reg);
+ return Regs.count(SparseIndex);
+ }
+
+ bool insert(unsigned Reg) {
+ unsigned SparseIndex = getSparseIndexFromReg(Reg);
+ return Regs.insert(SparseIndex).second;
+ }
+
+ bool erase(unsigned Reg) {
+ unsigned SparseIndex = getSparseIndexFromReg(Reg);
+ return Regs.erase(SparseIndex);
+ }
+
+ size_t size() const {
+ return Regs.size();
+ }
+
+ template<typename ContainerT>
+ void appendTo(ContainerT &To) const {
+ for (unsigned I : Regs) {
+ unsigned Reg = getRegFromSparseIndex(I);
+ To.push_back(Reg);
+ }
+ }
+};
+
+/// Track the current register pressure at some position in the instruction
+/// stream, and remember the high water mark within the region traversed. This
+/// does not automatically consider live-through ranges. The client may
+/// independently adjust for global liveness.
+///
+/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
+/// be tracked across a larger region by storing a RegisterPressure result at
+/// each block boundary and explicitly adjusting pressure to account for block
+/// live-in and live-out register sets.
+///
+/// RegPressureTracker holds a reference to a RegisterPressure result that it
+/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
+/// is invalid until it reaches the end of the block or closeRegion() is
+/// explicitly called. Similarly, P.TopIdx is invalid during upward
+/// tracking. Changing direction has the side effect of closing region, and
+/// traversing past TopIdx or BottomIdx reopens it.
+class RegPressureTracker {
+ const MachineFunction *MF;
+ const TargetRegisterInfo *TRI;
+ const RegisterClassInfo *RCI;
+ const MachineRegisterInfo *MRI;
+ const LiveIntervals *LIS;
+
+ /// We currently only allow pressure tracking within a block.
+ const MachineBasicBlock *MBB;
+
+ /// Track the max pressure within the region traversed so far.
+ RegisterPressure &P;
+
+  /// Run in two modes depending on whether constructed with IntervalPressure
+  /// or RegionPressure. If RequireIntervals is false, LIS is ignored.
+ bool RequireIntervals;
+
+ /// True if UntiedDefs will be populated.
+ bool TrackUntiedDefs;
+
+ /// Register pressure corresponds to liveness before this instruction
+ /// iterator. It may point to the end of the block or a DebugValue rather than
+ /// an instruction.
+ MachineBasicBlock::const_iterator CurrPos;
+
+ /// Pressure map indexed by pressure set ID, not class ID.
+ std::vector<unsigned> CurrSetPressure;
+
+ /// Set of live registers.
+ LiveRegSet LiveRegs;
+
+ /// Set of vreg defs that start a live range.
+ SparseSet<unsigned, VirtReg2IndexFunctor> UntiedDefs;
+ /// Live-through pressure.
+ std::vector<unsigned> LiveThruPressure;
+
+public:
+ RegPressureTracker(IntervalPressure &rp) :
+ MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
+ RequireIntervals(true), TrackUntiedDefs(false) {}
+
+ RegPressureTracker(RegionPressure &rp) :
+ MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
+ RequireIntervals(false), TrackUntiedDefs(false) {}
+
+ void reset();
+
+ void init(const MachineFunction *mf, const RegisterClassInfo *rci,
+ const LiveIntervals *lis, const MachineBasicBlock *mbb,
+ MachineBasicBlock::const_iterator pos,
+ bool ShouldTrackUntiedDefs = false);
+
+ /// Force liveness of virtual registers or physical register
+ /// units. Particularly useful to initialize the livein/out state of the
+ /// tracker before the first call to advance/recede.
+ void addLiveRegs(ArrayRef<unsigned> Regs);
+
+ /// Get the MI position corresponding to this register pressure.
+ MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
+
+ // Reset the MI position corresponding to the register pressure. This allows
+ // schedulers to move instructions above the RegPressureTracker's
+ // CurrPos. Since the pressure is computed before CurrPos, the iterator
+ // position changes while pressure does not.
+ void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
+
+ /// Recede across the previous instruction.
+ void recede(SmallVectorImpl<unsigned> *LiveUses = nullptr,
+ PressureDiff *PDiff = nullptr);
+
+ /// Advance across the current instruction.
+ void advance();
+
+  /// Finalize the region boundaries and record live-ins and live-outs.
+ void closeRegion();
+
+ /// Initialize the LiveThru pressure set based on the untied defs found in
+ /// RPTracker.
+ void initLiveThru(const RegPressureTracker &RPTracker);
+
+ /// Copy an existing live thru pressure result.
+ void initLiveThru(ArrayRef<unsigned> PressureSet) {
+ LiveThruPressure.assign(PressureSet.begin(), PressureSet.end());
+ }
+
+ ArrayRef<unsigned> getLiveThru() const { return LiveThruPressure; }
+
+ /// Get the resulting register pressure over the traversed region.
+ /// This result is complete if closeRegion() was explicitly invoked.
+ RegisterPressure &getPressure() { return P; }
+ const RegisterPressure &getPressure() const { return P; }
+
+ /// Get the register set pressure at the current position, which may be less
+ /// than the pressure across the traversed region.
+ const std::vector<unsigned> &getRegSetPressureAtPos() const {
+ return CurrSetPressure;
+ }
+
+ bool isTopClosed() const;
+ bool isBottomClosed() const;
+
+ void closeTop();
+ void closeBottom();
+
+ /// Consider the pressure increase caused by traversing this instruction
+ /// bottom-up. Find the pressure set with the most change beyond its pressure
+ /// limit based on the tracker's current pressure, and record the number of
+ /// excess register units of that pressure set introduced by this instruction.
+ void getMaxUpwardPressureDelta(const MachineInstr *MI,
+ PressureDiff *PDiff,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureChange> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit);
+
+ void getUpwardPressureDelta(const MachineInstr *MI,
+ /*const*/ PressureDiff &PDiff,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureChange> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit) const;
+
+ /// Consider the pressure increase caused by traversing this instruction
+ /// top-down. Find the pressure set with the most change beyond its pressure
+ /// limit based on the tracker's current pressure, and record the number of
+ /// excess register units of that pressure set introduced by this instruction.
+ void getMaxDownwardPressureDelta(const MachineInstr *MI,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureChange> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit);
+
+ /// Find the pressure set with the most change beyond its pressure limit after
+ /// traversing this instruction either upward or downward depending on the
+ /// closed end of the current region.
+ void getMaxPressureDelta(const MachineInstr *MI,
+ RegPressureDelta &Delta,
+ ArrayRef<PressureChange> CriticalPSets,
+ ArrayRef<unsigned> MaxPressureLimit) {
+ if (isTopClosed())
+ return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
+ MaxPressureLimit);
+
+ assert(isBottomClosed() && "Uninitialized pressure tracker");
+ return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
+ MaxPressureLimit);
+ }
+
+ /// Get the pressure of each PSet after traversing this instruction bottom-up.
+ void getUpwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult);
+
+ /// Get the pressure of each PSet after traversing this instruction top-down.
+ void getDownwardPressure(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult);
+
+ void getPressureAfterInst(const MachineInstr *MI,
+ std::vector<unsigned> &PressureResult,
+ std::vector<unsigned> &MaxPressureResult) {
+ if (isTopClosed())
+ return getUpwardPressure(MI, PressureResult, MaxPressureResult);
+
+ assert(isBottomClosed() && "Uninitialized pressure tracker");
+ return getDownwardPressure(MI, PressureResult, MaxPressureResult);
+ }
+
+ bool hasUntiedDef(unsigned VirtReg) const {
+ return UntiedDefs.count(VirtReg);
+ }
+
+ void dump() const;
+
+protected:
+ void discoverLiveOut(unsigned Reg);
+ void discoverLiveIn(unsigned Reg);
+
+ /// \brief Get the SlotIndex for the first nondebug instruction including or
+ /// after the current position.
+ SlotIndex getCurrSlot() const;
+
+ void increaseRegPressure(ArrayRef<unsigned> Regs);
+ void decreaseRegPressure(ArrayRef<unsigned> Regs);
+
+ void bumpUpwardPressure(const MachineInstr *MI);
+ void bumpDownwardPressure(const MachineInstr *MI);
+};
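+
+// Illustrative usage sketch for a bottom-up walk of a block (surrounding
+// names are hypothetical; IntervalPressure requires LiveIntervals):
+//   IntervalPressure Pressure;
+//   RegPressureTracker RPTracker(Pressure);
+//   RPTracker.init(&MF, &RCI, LIS, &MBB, MBB.end());
+//   while (RPTracker.getPos() != MBB.begin())
+//     RPTracker.recede();
+//   RPTracker.closeRegion(); // record live-ins/outs of the traversed region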
+
+void dumpRegSetPressure(ArrayRef<unsigned> SetPressure,
+ const TargetRegisterInfo *TRI);
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
new file mode 100644
index 0000000..122c785
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RegisterScavenging.h
@@ -0,0 +1,186 @@
+//===-- RegisterScavenging.h - Machine register scavenging ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the machine register scavenger class. It can provide
+// information such as which registers are unused at any point in a machine
+// basic block.
+// It also provides a mechanism to make registers available by evicting them
+// to spill slots.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTERSCAVENGING_H
+#define LLVM_CODEGEN_REGISTERSCAVENGING_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+
+class RegScavenger {
+ const TargetRegisterInfo *TRI;
+ const TargetInstrInfo *TII;
+ MachineRegisterInfo* MRI;
+ MachineBasicBlock *MBB;
+ MachineBasicBlock::iterator MBBI;
+ unsigned NumRegUnits;
+
+ /// True if RegScavenger is currently tracking the liveness of registers.
+ bool Tracking;
+
+ /// Information on scavenged registers (held in a spill slot).
+ struct ScavengedInfo {
+ ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(nullptr) {}
+
+ /// A spill slot used for scavenging a register post register allocation.
+ int FrameIndex;
+
+ /// If non-zero, the specific register is currently being
+ /// scavenged. That is, it is spilled to this scavenging stack slot.
+ unsigned Reg;
+
+ /// The instruction that restores the scavenged register from stack.
+ const MachineInstr *Restore;
+ };
+
+ /// A vector of information on scavenged registers.
+ SmallVector<ScavengedInfo, 2> Scavenged;
+
+ /// The current state of each reg unit immediately before MBBI.
+  /// One bit per register unit. If a bit is not set, some register
+  /// containing that register unit is currently being used.
+ BitVector RegUnitsAvailable;
+
+ // These BitVectors are only used internally to forward(). They are members
+ // to avoid frequent reallocations.
+ BitVector KillRegUnits, DefRegUnits;
+ BitVector TmpRegUnits;
+
+public:
+ RegScavenger()
+ : MBB(nullptr), NumRegUnits(0), Tracking(false) {}
+
+  /// Start tracking liveness from the beginning of the given basic block.
+ void enterBasicBlock(MachineBasicBlock *mbb);
+
+ /// Move the internal MBB iterator and update register states.
+ void forward();
+
+ /// Move the internal MBB iterator and update register states until
+  /// it has processed the given iterator.
+ void forward(MachineBasicBlock::iterator I) {
+ if (!Tracking && MBB->begin() != I) forward();
+ while (MBBI != I) forward();
+ }
+
+ /// Invert the behavior of forward() on the current instruction (undo the
+ /// changes to the available registers made by forward()).
+ void unprocess();
+
+ /// Unprocess instructions until you reach the provided iterator.
+ void unprocess(MachineBasicBlock::iterator I) {
+ while (MBBI != I) unprocess();
+ }
+
+ /// Move the internal MBB iterator but do not update register states.
+ void skipTo(MachineBasicBlock::iterator I) {
+ if (I == MachineBasicBlock::iterator(nullptr))
+ Tracking = false;
+ MBBI = I;
+ }
+
+ MachineBasicBlock::iterator getCurrentPosition() const { return MBBI; }
+
+  /// Return true if the given register is currently used.
+ bool isRegUsed(unsigned Reg, bool includeReserved = true) const;
+
+ /// Return all available registers in the register class in Mask.
+ BitVector getRegsAvailable(const TargetRegisterClass *RC);
+
+ /// Find an unused register of the specified register class.
+ /// Return 0 if none is found.
+ unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
+
+ /// Add a scavenging frame index.
+ void addScavengingFrameIndex(int FI) {
+ Scavenged.push_back(ScavengedInfo(FI));
+ }
+
+ /// Query whether a frame index is a scavenging frame index.
+ bool isScavengingFrameIndex(int FI) const {
+ for (SmallVectorImpl<ScavengedInfo>::const_iterator I = Scavenged.begin(),
+ IE = Scavenged.end(); I != IE; ++I)
+ if (I->FrameIndex == FI)
+ return true;
+
+ return false;
+ }
+
+ /// Get an array of scavenging frame indices.
+ void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
+ for (SmallVectorImpl<ScavengedInfo>::const_iterator I = Scavenged.begin(),
+ IE = Scavenged.end(); I != IE; ++I)
+ if (I->FrameIndex >= 0)
+ A.push_back(I->FrameIndex);
+ }
+
+  /// Make a register of the given register class available and do the
+  /// appropriate bookkeeping. SPAdj is the stack adjustment due to the call
+  /// frame; it is passed along to eliminateFrameIndex().
+ /// Returns the scavenged register.
+ unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+ MachineBasicBlock::iterator I, int SPAdj);
+ unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
+ return scavengeRegister(RegClass, MBBI, SPAdj);
+ }
+
+ /// Tell the scavenger a register is used.
+ void setRegUsed(unsigned Reg, LaneBitmask LaneMask = ~0u);
+private:
+ /// Returns true if a register is reserved. It is never "unused".
+ bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); }
+
+  /// setUsed / setUnused - Mark the state of one or more register units.
+ ///
+ void setUsed(BitVector &RegUnits) {
+ RegUnitsAvailable.reset(RegUnits);
+ }
+ void setUnused(BitVector &RegUnits) {
+ RegUnitsAvailable |= RegUnits;
+ }
+
+  /// Process the current instruction and fill in the KillRegUnits and
+ /// DefRegUnits bit vectors.
+ void determineKillsAndDefs();
+
+ /// Add all Reg Units that Reg contains to BV.
+ void addRegUnits(BitVector &BV, unsigned Reg);
+
+  /// Return the candidate register that is unused for the longest time after
+ /// StartMI. UseMI is set to the instruction where the search stopped.
+ ///
+ /// No more than InstrLimit instructions are inspected.
+ unsigned findSurvivorReg(MachineBasicBlock::iterator StartMI,
+ BitVector &Candidates,
+ unsigned InstrLimit,
+ MachineBasicBlock::iterator &UseMI);
+
+ /// Allow resetting register state info for multiple
+ /// passes over/within the same function.
+ void initRegState();
+};
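+
+// Illustrative usage sketch, e.g. during frame-index elimination (names
+// hypothetical):
+//   RegScavenger RS;
+//   RS.enterBasicBlock(&MBB);
+//   RS.forward(MI);                     // track liveness up to MI
+//   unsigned Reg = RS.scavengeRegister(RC, MI, /*SPAdj=*/0);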
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h b/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h
new file mode 100644
index 0000000..0097e04
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ResourcePriorityQueue.h
@@ -0,0 +1,136 @@
+//===----- ResourcePriorityQueue.h - A DFA-oriented priority queue -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ResourcePriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using DFA state to
+// reduce the length of the critical path through the basic block
+// on VLIW platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+#define LLVM_CODEGEN_RESOURCEPRIORITYQUEUE_H
+
+#include "llvm/CodeGen/DFAPacketizer.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class ResourcePriorityQueue;
+
+  /// Sorting functor for the Available queue.
+ struct resource_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ ResourcePriorityQueue *PQ;
+ explicit resource_sort(ResourcePriorityQueue *pq) : PQ(pq) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ class ResourcePriorityQueue : public SchedulingPriorityQueue {
+ /// SUnits - The SUnits for the current graph.
+ std::vector<SUnit> *SUnits;
+
+ /// NumNodesSolelyBlocking - This vector contains, for every node in the
+ /// Queue, the number of nodes that the node is the sole unscheduled
+ /// predecessor for. This is used as a tie-breaker heuristic for better
+ /// mobility.
+ std::vector<unsigned> NumNodesSolelyBlocking;
+
+ /// Queue - The queue.
+ std::vector<SUnit*> Queue;
+
+ /// RegPressure - Tracking current reg pressure per register class.
+ ///
+ std::vector<unsigned> RegPressure;
+
+ /// RegLimit - Tracking the number of allocatable registers per register
+ /// class.
+ std::vector<unsigned> RegLimit;
+
+ resource_sort Picker;
+ const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
+ const TargetInstrInfo *TII;
+ const InstrItineraryData* InstrItins;
+ /// ResourcesModel - Represents VLIW state.
+    /// Not limited to VLIW targets per se, but assumes
+    /// a DFA definition by the target.
+ std::unique_ptr<DFAPacketizer> ResourcesModel;
+
+ /// Resource model - packet/bundle model. Purely
+    /// internal at this time.
+ std::vector<SUnit*> Packet;
+
+ /// Heuristics for estimating register pressure.
+ unsigned ParallelLiveRanges;
+ signed HorizontalVerticalBalance;
+
+ public:
+ ResourcePriorityQueue(SelectionDAGISel *IS);
+
+ bool isBottomUp() const override { return false; }
+
+ void initNodes(std::vector<SUnit> &sunits) override;
+
+ void addNode(const SUnit *SU) override {
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void updateNode(const SUnit *SU) override {}
+
+ void releaseState() override {
+ SUnits = nullptr;
+ }
+
+ unsigned getLatency(unsigned NodeNum) const {
+ assert(NodeNum < (*SUnits).size());
+ return (*SUnits)[NodeNum].getHeight();
+ }
+
+ unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+ assert(NodeNum < NumNodesSolelyBlocking.size());
+ return NumNodesSolelyBlocking[NodeNum];
+ }
+
+ /// Single cost function reflecting benefit of scheduling SU
+ /// in the current cycle.
+ signed SUSchedulingCost (SUnit *SU);
+
+ /// InitNumRegDefsLeft - Determine the # of regs defined by this node.
+ ///
+ void initNumRegDefsLeft(SUnit *SU);
+ void updateNumRegDefsLeft(SUnit *SU);
+ signed regPressureDelta(SUnit *SU, bool RawPressure = false);
+ signed rawRegPressureDelta (SUnit *SU, unsigned RCId);
+
+ bool empty() const override { return Queue.empty(); }
+
+ void push(SUnit *U) override;
+
+ SUnit *pop() override;
+
+ void remove(SUnit *SU) override;
+
+ /// scheduledNode - Main resource tracking point.
+ void scheduledNode(SUnit *Node) override;
+ bool isResourceAvailable(SUnit *SU);
+ void reserveResources(SUnit *SU);
+
+private:
+ void adjustPriorityOfUnscheduledPreds(SUnit *SU);
+ SUnit *getSingleUnscheduledPred(SUnit *SU);
+ unsigned numberRCValPredInSU (SUnit *SU, unsigned RCId);
+ unsigned numberRCValSuccInSU (SUnit *SU, unsigned RCId);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h b/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
new file mode 100644
index 0000000..7db0345
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -0,0 +1,427 @@
+//===-- CodeGen/RuntimeLibcalls.h - Runtime Library Calls -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enum representing the list of runtime library calls
+// the backend may emit during code generation, and also some helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
+#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+namespace RTLIB {
+ /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
+ /// the backend can emit. The various long double types cannot be merged,
+ /// because 80-bit library functions use "xf" and 128-bit use "tf".
+ ///
+ /// When adding PPCF128 functions here, note that their names generally need
+ /// to be overridden for Darwin with the xxx$LDBL128 form. See
+ /// PPCISelLowering.cpp.
+ ///
+ enum Libcall {
+ // Integer
+ SHL_I16,
+ SHL_I32,
+ SHL_I64,
+ SHL_I128,
+ SRL_I16,
+ SRL_I32,
+ SRL_I64,
+ SRL_I128,
+ SRA_I16,
+ SRA_I32,
+ SRA_I64,
+ SRA_I128,
+ MUL_I8,
+ MUL_I16,
+ MUL_I32,
+ MUL_I64,
+ MUL_I128,
+ MULO_I32,
+ MULO_I64,
+ MULO_I128,
+ SDIV_I8,
+ SDIV_I16,
+ SDIV_I32,
+ SDIV_I64,
+ SDIV_I128,
+ UDIV_I8,
+ UDIV_I16,
+ UDIV_I32,
+ UDIV_I64,
+ UDIV_I128,
+ SREM_I8,
+ SREM_I16,
+ SREM_I32,
+ SREM_I64,
+ SREM_I128,
+ UREM_I8,
+ UREM_I16,
+ UREM_I32,
+ UREM_I64,
+ UREM_I128,
+ SDIVREM_I8,
+ SDIVREM_I16,
+ SDIVREM_I32,
+ SDIVREM_I64,
+ SDIVREM_I128,
+ UDIVREM_I8,
+ UDIVREM_I16,
+ UDIVREM_I32,
+ UDIVREM_I64,
+ UDIVREM_I128,
+ NEG_I32,
+ NEG_I64,
+
+ // FLOATING POINT
+ ADD_F32,
+ ADD_F64,
+ ADD_F80,
+ ADD_F128,
+ ADD_PPCF128,
+ SUB_F32,
+ SUB_F64,
+ SUB_F80,
+ SUB_F128,
+ SUB_PPCF128,
+ MUL_F32,
+ MUL_F64,
+ MUL_F80,
+ MUL_F128,
+ MUL_PPCF128,
+ DIV_F32,
+ DIV_F64,
+ DIV_F80,
+ DIV_F128,
+ DIV_PPCF128,
+ REM_F32,
+ REM_F64,
+ REM_F80,
+ REM_F128,
+ REM_PPCF128,
+ FMA_F32,
+ FMA_F64,
+ FMA_F80,
+ FMA_F128,
+ FMA_PPCF128,
+ POWI_F32,
+ POWI_F64,
+ POWI_F80,
+ POWI_F128,
+ POWI_PPCF128,
+ SQRT_F32,
+ SQRT_F64,
+ SQRT_F80,
+ SQRT_F128,
+ SQRT_PPCF128,
+ LOG_F32,
+ LOG_F64,
+ LOG_F80,
+ LOG_F128,
+ LOG_PPCF128,
+ LOG2_F32,
+ LOG2_F64,
+ LOG2_F80,
+ LOG2_F128,
+ LOG2_PPCF128,
+ LOG10_F32,
+ LOG10_F64,
+ LOG10_F80,
+ LOG10_F128,
+ LOG10_PPCF128,
+ EXP_F32,
+ EXP_F64,
+ EXP_F80,
+ EXP_F128,
+ EXP_PPCF128,
+ EXP2_F32,
+ EXP2_F64,
+ EXP2_F80,
+ EXP2_F128,
+ EXP2_PPCF128,
+ SIN_F32,
+ SIN_F64,
+ SIN_F80,
+ SIN_F128,
+ SIN_PPCF128,
+ COS_F32,
+ COS_F64,
+ COS_F80,
+ COS_F128,
+ COS_PPCF128,
+ SINCOS_F32,
+ SINCOS_F64,
+ SINCOS_F80,
+ SINCOS_F128,
+ SINCOS_PPCF128,
+ POW_F32,
+ POW_F64,
+ POW_F80,
+ POW_F128,
+ POW_PPCF128,
+ CEIL_F32,
+ CEIL_F64,
+ CEIL_F80,
+ CEIL_F128,
+ CEIL_PPCF128,
+ TRUNC_F32,
+ TRUNC_F64,
+ TRUNC_F80,
+ TRUNC_F128,
+ TRUNC_PPCF128,
+ RINT_F32,
+ RINT_F64,
+ RINT_F80,
+ RINT_F128,
+ RINT_PPCF128,
+ NEARBYINT_F32,
+ NEARBYINT_F64,
+ NEARBYINT_F80,
+ NEARBYINT_F128,
+ NEARBYINT_PPCF128,
+ ROUND_F32,
+ ROUND_F64,
+ ROUND_F80,
+ ROUND_F128,
+ ROUND_PPCF128,
+ FLOOR_F32,
+ FLOOR_F64,
+ FLOOR_F80,
+ FLOOR_F128,
+ FLOOR_PPCF128,
+ COPYSIGN_F32,
+ COPYSIGN_F64,
+ COPYSIGN_F80,
+ COPYSIGN_F128,
+ COPYSIGN_PPCF128,
+ FMIN_F32,
+ FMIN_F64,
+ FMIN_F80,
+ FMIN_F128,
+ FMIN_PPCF128,
+ FMAX_F32,
+ FMAX_F64,
+ FMAX_F80,
+ FMAX_F128,
+ FMAX_PPCF128,
+
+ // CONVERSION
+ FPEXT_F64_F128,
+ FPEXT_F32_F128,
+ FPEXT_F32_F64,
+ FPEXT_F16_F32,
+ FPROUND_F32_F16,
+ FPROUND_F64_F16,
+ FPROUND_F80_F16,
+ FPROUND_F128_F16,
+ FPROUND_PPCF128_F16,
+ FPROUND_F64_F32,
+ FPROUND_F80_F32,
+ FPROUND_F128_F32,
+ FPROUND_PPCF128_F32,
+ FPROUND_F80_F64,
+ FPROUND_F128_F64,
+ FPROUND_PPCF128_F64,
+ FPTOSINT_F32_I32,
+ FPTOSINT_F32_I64,
+ FPTOSINT_F32_I128,
+ FPTOSINT_F64_I32,
+ FPTOSINT_F64_I64,
+ FPTOSINT_F64_I128,
+ FPTOSINT_F80_I32,
+ FPTOSINT_F80_I64,
+ FPTOSINT_F80_I128,
+ FPTOSINT_F128_I32,
+ FPTOSINT_F128_I64,
+ FPTOSINT_F128_I128,
+ FPTOSINT_PPCF128_I32,
+ FPTOSINT_PPCF128_I64,
+ FPTOSINT_PPCF128_I128,
+ FPTOUINT_F32_I32,
+ FPTOUINT_F32_I64,
+ FPTOUINT_F32_I128,
+ FPTOUINT_F64_I32,
+ FPTOUINT_F64_I64,
+ FPTOUINT_F64_I128,
+ FPTOUINT_F80_I32,
+ FPTOUINT_F80_I64,
+ FPTOUINT_F80_I128,
+ FPTOUINT_F128_I32,
+ FPTOUINT_F128_I64,
+ FPTOUINT_F128_I128,
+ FPTOUINT_PPCF128_I32,
+ FPTOUINT_PPCF128_I64,
+ FPTOUINT_PPCF128_I128,
+ SINTTOFP_I32_F32,
+ SINTTOFP_I32_F64,
+ SINTTOFP_I32_F80,
+ SINTTOFP_I32_F128,
+ SINTTOFP_I32_PPCF128,
+ SINTTOFP_I64_F32,
+ SINTTOFP_I64_F64,
+ SINTTOFP_I64_F80,
+ SINTTOFP_I64_F128,
+ SINTTOFP_I64_PPCF128,
+ SINTTOFP_I128_F32,
+ SINTTOFP_I128_F64,
+ SINTTOFP_I128_F80,
+ SINTTOFP_I128_F128,
+ SINTTOFP_I128_PPCF128,
+ UINTTOFP_I32_F32,
+ UINTTOFP_I32_F64,
+ UINTTOFP_I32_F80,
+ UINTTOFP_I32_F128,
+ UINTTOFP_I32_PPCF128,
+ UINTTOFP_I64_F32,
+ UINTTOFP_I64_F64,
+ UINTTOFP_I64_F80,
+ UINTTOFP_I64_F128,
+ UINTTOFP_I64_PPCF128,
+ UINTTOFP_I128_F32,
+ UINTTOFP_I128_F64,
+ UINTTOFP_I128_F80,
+ UINTTOFP_I128_F128,
+ UINTTOFP_I128_PPCF128,
+
+ // COMPARISON
+ OEQ_F32,
+ OEQ_F64,
+ OEQ_F128,
+ UNE_F32,
+ UNE_F64,
+ UNE_F128,
+ OGE_F32,
+ OGE_F64,
+ OGE_F128,
+ OLT_F32,
+ OLT_F64,
+ OLT_F128,
+ OLE_F32,
+ OLE_F64,
+ OLE_F128,
+ OGT_F32,
+ OGT_F64,
+ OGT_F128,
+ UO_F32,
+ UO_F64,
+ UO_F128,
+ O_F32,
+ O_F64,
+ O_F128,
+
+ // MEMORY
+ MEMCPY,
+ MEMSET,
+ MEMMOVE,
+
+ // EXCEPTION HANDLING
+ UNWIND_RESUME,
+
+ // Atomic operations ('__sync_*' family)
+ SYNC_VAL_COMPARE_AND_SWAP_1,
+ SYNC_VAL_COMPARE_AND_SWAP_2,
+ SYNC_VAL_COMPARE_AND_SWAP_4,
+ SYNC_VAL_COMPARE_AND_SWAP_8,
+ SYNC_VAL_COMPARE_AND_SWAP_16,
+ SYNC_LOCK_TEST_AND_SET_1,
+ SYNC_LOCK_TEST_AND_SET_2,
+ SYNC_LOCK_TEST_AND_SET_4,
+ SYNC_LOCK_TEST_AND_SET_8,
+ SYNC_LOCK_TEST_AND_SET_16,
+ SYNC_FETCH_AND_ADD_1,
+ SYNC_FETCH_AND_ADD_2,
+ SYNC_FETCH_AND_ADD_4,
+ SYNC_FETCH_AND_ADD_8,
+ SYNC_FETCH_AND_ADD_16,
+ SYNC_FETCH_AND_SUB_1,
+ SYNC_FETCH_AND_SUB_2,
+ SYNC_FETCH_AND_SUB_4,
+ SYNC_FETCH_AND_SUB_8,
+ SYNC_FETCH_AND_SUB_16,
+ SYNC_FETCH_AND_AND_1,
+ SYNC_FETCH_AND_AND_2,
+ SYNC_FETCH_AND_AND_4,
+ SYNC_FETCH_AND_AND_8,
+ SYNC_FETCH_AND_AND_16,
+ SYNC_FETCH_AND_OR_1,
+ SYNC_FETCH_AND_OR_2,
+ SYNC_FETCH_AND_OR_4,
+ SYNC_FETCH_AND_OR_8,
+ SYNC_FETCH_AND_OR_16,
+ SYNC_FETCH_AND_XOR_1,
+ SYNC_FETCH_AND_XOR_2,
+ SYNC_FETCH_AND_XOR_4,
+ SYNC_FETCH_AND_XOR_8,
+ SYNC_FETCH_AND_XOR_16,
+ SYNC_FETCH_AND_NAND_1,
+ SYNC_FETCH_AND_NAND_2,
+ SYNC_FETCH_AND_NAND_4,
+ SYNC_FETCH_AND_NAND_8,
+ SYNC_FETCH_AND_NAND_16,
+ SYNC_FETCH_AND_MAX_1,
+ SYNC_FETCH_AND_MAX_2,
+ SYNC_FETCH_AND_MAX_4,
+ SYNC_FETCH_AND_MAX_8,
+ SYNC_FETCH_AND_MAX_16,
+ SYNC_FETCH_AND_UMAX_1,
+ SYNC_FETCH_AND_UMAX_2,
+ SYNC_FETCH_AND_UMAX_4,
+ SYNC_FETCH_AND_UMAX_8,
+ SYNC_FETCH_AND_UMAX_16,
+ SYNC_FETCH_AND_MIN_1,
+ SYNC_FETCH_AND_MIN_2,
+ SYNC_FETCH_AND_MIN_4,
+ SYNC_FETCH_AND_MIN_8,
+ SYNC_FETCH_AND_MIN_16,
+ SYNC_FETCH_AND_UMIN_1,
+ SYNC_FETCH_AND_UMIN_2,
+ SYNC_FETCH_AND_UMIN_4,
+ SYNC_FETCH_AND_UMIN_8,
+ SYNC_FETCH_AND_UMIN_16,
+
+ // Stack Protector Fail.
+ STACKPROTECTOR_CHECK_FAIL,
+
+ UNKNOWN_LIBCALL
+ };
+
+ /// getFPEXT - Return the FPEXT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPEXT(EVT OpVT, EVT RetVT);
+
+ /// getFPROUND - Return the FPROUND_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPROUND(EVT OpVT, EVT RetVT);
+
+ /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPTOSINT(EVT OpVT, EVT RetVT);
+
+ /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPTOUINT(EVT OpVT, EVT RetVT);
+
+ /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getSINTTOFP(EVT OpVT, EVT RetVT);
+
+ /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getUINTTOFP(EVT OpVT, EVT RetVT);
+
+ /// Return the SYNC_FETCH_AND_* value for the given opcode and type, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getATOMIC(unsigned Opc, MVT VT);
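+
+ // Example (illustrative): how lowering code typically uses the getters
+ // above; the diagnostic text is a placeholder.
+ //
+ //   RTLIB::Libcall LC = RTLIB::getFPEXT(MVT::f32, MVT::f64);
+ //   if (LC == RTLIB::UNKNOWN_LIBCALL)
+ //     report_fatal_error("cannot lower fp_extend to a libcall");
+ //   // ...otherwise emit a call to the runtime routine selected by LC...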
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
new file mode 100644
index 0000000..bda9dbd
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAG.h
@@ -0,0 +1,760 @@
+//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAG class, which is used as the common
+// base class for instruction schedulers. This encapsulates the scheduling DAG,
+// which is shared between SelectionDAG and MachineInstr scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
+#define LLVM_CODEGEN_SCHEDULEDAG_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+ class SUnit;
+ class MachineConstantPool;
+ class MachineFunction;
+ class MachineRegisterInfo;
+ class MachineInstr;
+ struct MCSchedClassDesc;
+ class TargetRegisterInfo;
+ class ScheduleDAG;
+ class SDNode;
+ class TargetInstrInfo;
+ class MCInstrDesc;
+ class TargetMachine;
+ class TargetRegisterClass;
+ template<class Graph> class GraphWriter;
+
+ /// SDep - Scheduling dependency. This represents one direction of an
+ /// edge in the scheduling DAG.
+ class SDep {
+ public:
+ /// Kind - These are the different kinds of scheduling dependencies.
+ enum Kind {
+ Data, ///< Regular data dependence (aka true-dependence).
+ Anti, ///< A register anti-dependence (aka WAR).
+ Output, ///< A register output-dependence (aka WAW).
+ Order ///< Any other ordering dependency.
+ };
+
+ // Strong dependencies must be respected by the scheduler. Artificial
+ // dependencies may be removed only if they are redundant with another
+ // strong dependence.
+ //
+ // Weak dependencies may be violated by the scheduling strategy, but only if
+ // the strategy can prove it is correct to do so.
+ //
+ // Strong OrderKinds must occur before "Weak".
+ // Weak OrderKinds must occur after "Weak".
+ enum OrderKind {
+ Barrier, ///< An unknown scheduling barrier.
+ MayAliasMem, ///< Nonvolatile load/store instructions that may alias.
+ MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
+ Artificial, ///< Arbitrary strong DAG edge (no real dependence).
+ Weak, ///< Arbitrary weak DAG edge.
+ Cluster ///< Weak DAG edge linking a chain of clustered instrs.
+ };
+
+ private:
+ /// Dep - A pointer to the depending/depended-on SUnit, and an enum
+ /// indicating the kind of the dependency.
+ PointerIntPair<SUnit *, 2, Kind> Dep;
+
+ /// Contents - A union discriminated by the dependence kind.
+ union {
+ /// Reg - For Data, Anti, and Output dependencies, the associated
+ /// register. For Data dependencies that don't currently have a register
+ /// assigned, this is set to zero.
+ unsigned Reg;
+
+ /// Order - Additional information about Order dependencies.
+ unsigned OrdKind; // enum OrderKind
+ } Contents;
+
+ /// Latency - The time associated with this edge. Often this is just
+ /// the value of the Latency field of the predecessor; however, advanced
+ /// models may provide additional information about specific edges.
+ unsigned Latency;
+
+ public:
+ /// SDep - Construct a null SDep. This is only for use by container
+ /// classes which require default constructors. SUnits may not
+ /// have null SDep edges.
+ SDep() : Dep(nullptr, Data) {}
+
+ /// SDep - Construct an SDep with the specified values.
+ SDep(SUnit *S, Kind kind, unsigned Reg)
+ : Dep(S, kind), Contents() {
+ switch (kind) {
+ default:
+ llvm_unreachable("Reg given for non-register dependence!");
+ case Anti:
+ case Output:
+ assert(Reg != 0 &&
+ "SDep::Anti and SDep::Output must use a non-zero Reg!");
+ Contents.Reg = Reg;
+ Latency = 0;
+ break;
+ case Data:
+ Contents.Reg = Reg;
+ Latency = 1;
+ break;
+ }
+ }
+ SDep(SUnit *S, OrderKind kind)
+ : Dep(S, Order), Contents(), Latency(0) {
+ Contents.OrdKind = kind;
+ }
+
+ /// Return true if the specified SDep is equivalent except for latency.
+ bool overlaps(const SDep &Other) const;
+
+ bool operator==(const SDep &Other) const {
+ return overlaps(Other) && Latency == Other.Latency;
+ }
+
+ bool operator!=(const SDep &Other) const {
+ return !operator==(Other);
+ }
+
+ /// getLatency - Return the latency value for this edge, which roughly
+ /// means the minimum number of cycles that must elapse between the
+ /// predecessor and the successor, given that they have this edge
+ /// between them.
+ unsigned getLatency() const {
+ return Latency;
+ }
+
+ /// setLatency - Set the latency for this edge.
+ void setLatency(unsigned Lat) {
+ Latency = Lat;
+ }
+
+ /// getSUnit - Return the SUnit to which this edge points.
+ SUnit *getSUnit() const;
+
+ /// setSUnit - Assign the SUnit to which this edge points.
+ void setSUnit(SUnit *SU);
+
+ /// getKind - Return an enum value representing the kind of the dependence.
+ Kind getKind() const;
+
+ /// isCtrl - Shorthand for getKind() != SDep::Data.
+ bool isCtrl() const {
+ return getKind() != Data;
+ }
+
+ /// isNormalMemory - Test if this is an Order dependence between two
+ /// memory accesses where both sides of the dependence access memory
+ /// in non-volatile and fully modeled ways.
+ bool isNormalMemory() const {
+ return getKind() == Order && (Contents.OrdKind == MayAliasMem
+ || Contents.OrdKind == MustAliasMem);
+ }
+
+ /// isBarrier - Test if this is an Order dependence that is marked
+ /// as a barrier.
+ bool isBarrier() const {
+ return getKind() == Order && Contents.OrdKind == Barrier;
+ }
+
+ /// isNormalMemoryOrBarrier - Test if this could be any kind of memory
+ /// dependence.
+ bool isNormalMemoryOrBarrier() const {
+ return (isNormalMemory() || isBarrier());
+ }
+
+ /// isMustAlias - Test if this is an Order dependence that is marked
+ /// as "must alias", meaning that the SUnits at either end of the edge
+ /// have a memory dependence on a known memory location.
+ bool isMustAlias() const {
+ return getKind() == Order && Contents.OrdKind == MustAliasMem;
+ }
+
+ /// isWeak - Test if this a weak dependence. Weak dependencies are
+ /// considered DAG edges for height computation and other heuristics, but do
+ /// not force ordering. Breaking a weak edge may require the scheduler to
+ /// compensate, for example by inserting a copy.
+ bool isWeak() const {
+ return getKind() == Order && Contents.OrdKind >= Weak;
+ }
+
+ /// isArtificial - Test if this is an Order dependence that is marked
+ /// as "artificial", meaning it isn't necessary for correctness.
+ bool isArtificial() const {
+ return getKind() == Order && Contents.OrdKind == Artificial;
+ }
+
+ /// isCluster - Test if this is an Order dependence that is marked
+ /// as "cluster", meaning it is artificial and wants to be adjacent.
+ bool isCluster() const {
+ return getKind() == Order && Contents.OrdKind == Cluster;
+ }
+
+ /// isAssignedRegDep - Test if this is a Data dependence that is
+ /// associated with a register.
+ bool isAssignedRegDep() const {
+ return getKind() == Data && Contents.Reg != 0;
+ }
+
+ /// getReg - Return the register associated with this edge. This is
+ /// only valid on Data, Anti, and Output edges. On Data edges, this
+ /// value may be zero, meaning there is no associated register.
+ unsigned getReg() const {
+ assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+ "getReg called on non-register dependence edge!");
+ return Contents.Reg;
+ }
+
+ /// setReg - Assign the associated register for this edge. This is
+ /// only valid on Data, Anti, and Output edges. On Anti and Output
+ /// edges, this value must not be zero. On Data edges, the value may
+ /// be zero, which would mean that no specific register is associated
+ /// with this edge.
+ void setReg(unsigned Reg) {
+ assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+ "setReg called on non-register dependence edge!");
+ assert((getKind() != Anti || Reg != 0) &&
+ "SDep::Anti edge cannot use the zero register!");
+ assert((getKind() != Output || Reg != 0) &&
+ "SDep::Output edge cannot use the zero register!");
+ Contents.Reg = Reg;
+ }
+ };
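+
+ // Example (illustrative): the three ways an SDep is typically constructed;
+ // the SUnit pointers and PhysReg are placeholders.
+ //
+ //   SDep DataDep(DefSU, SDep::Data, /*Reg=*/0); // latency defaults to 1
+ //   SDep AntiDep(DefSU, SDep::Anti, PhysReg);   // Reg must be non-zero
+ //   SDep OrderDep(BarrierSU, SDep::Barrier);    // strong Order edge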
+
+ template <>
+ struct isPodLike<SDep> { static const bool value = true; };
+
+ /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
+ class SUnit {
+ private:
+ enum : unsigned { BoundaryID = ~0u };
+
+ SDNode *Node; // Representative node.
+ MachineInstr *Instr; // Alternatively, a MachineInstr.
+ public:
+ SUnit *OrigNode; // If not this, the node from which
+ // this node was cloned.
+ // (SD scheduling only)
+
+ const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
+
+ // Preds/Succs - The SUnits before/after us in the graph.
+ SmallVector<SDep, 4> Preds; // All SUnit predecessors.
+ SmallVector<SDep, 4> Succs; // All SUnit successors.
+
+ typedef SmallVectorImpl<SDep>::iterator pred_iterator;
+ typedef SmallVectorImpl<SDep>::iterator succ_iterator;
+ typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
+ typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
+
+ unsigned NodeNum; // Entry # of node in the node vector.
+ unsigned NodeQueueId; // Queue id of node.
+ unsigned NumPreds; // # of SDep::Data preds.
+ unsigned NumSuccs; // # of SDep::Data succs.
+ unsigned NumPredsLeft; // # of preds not scheduled.
+ unsigned NumSuccsLeft; // # of succs not scheduled.
+ unsigned WeakPredsLeft; // # of weak preds not scheduled.
+ unsigned WeakSuccsLeft; // # of weak succs not scheduled.
+ unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
+ unsigned short Latency; // Node latency.
+ bool isVRegCycle : 1; // May use and def the same vreg.
+ bool isCall : 1; // Is a function call.
+ bool isCallOp : 1; // Is a function call operand.
+ bool isTwoAddress : 1; // Is a two-address instruction.
+ bool isCommutable : 1; // Is a commutable instruction.
+ bool hasPhysRegUses : 1; // Has physreg uses.
+ bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
+ bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
+ bool isPending : 1; // True once pending.
+ bool isAvailable : 1; // True once available.
+ bool isScheduled : 1; // True once scheduled.
+ bool isScheduleHigh : 1; // True if preferable to schedule high.
+ bool isScheduleLow : 1; // True if preferable to schedule low.
+ bool isCloned : 1; // True if this node has been cloned.
+ bool isUnbuffered : 1; // Uses an unbuffered resource.
+ bool hasReservedResource : 1; // Uses a reserved resource.
+ Sched::Preference SchedulingPref; // Scheduling preference.
+
+ private:
+ bool isDepthCurrent : 1; // True if Depth is current.
+ bool isHeightCurrent : 1; // True if Height is current.
+ unsigned Depth; // Node depth.
+ unsigned Height; // Node height.
+ public:
+ unsigned TopReadyCycle; // Cycle relative to start when node is ready.
+ unsigned BotReadyCycle; // Cycle relative to end when node is ready.
+
+ const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
+ const TargetRegisterClass *CopySrcRC;
+
+ /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
+ /// an SDNode and any nodes flagged to it.
+ SUnit(SDNode *node, unsigned nodenum)
+ : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
+
+ /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
+ /// a MachineInstr.
+ SUnit(MachineInstr *instr, unsigned nodenum)
+ : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
+
+ /// SUnit - Construct a placeholder SUnit.
+ SUnit()
+ : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
+ NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
+ NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
+ NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
+ isCallOp(false), isTwoAddress(false), isCommutable(false),
+ hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isScheduleLow(false), isCloned(false),
+ isUnbuffered(false), hasReservedResource(false),
+ SchedulingPref(Sched::None), isDepthCurrent(false),
+ isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
+ BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
+
+ /// \brief Boundary nodes are placeholders for the boundary of the
+ /// scheduling region.
+ ///
+ /// BoundaryNodes can have DAG edges, including Data edges, but they do not
+ /// correspond to schedulable entities (e.g. instructions) and do not have a
+ /// valid ID. Consequently, always check for boundary nodes before accessing
+ /// an associative data structure keyed on node ID.
+ bool isBoundaryNode() const { return NodeNum == BoundaryID; }
+
+ /// setNode - Assign the representative SDNode for this SUnit.
+ /// This may be used during pre-regalloc scheduling.
+ void setNode(SDNode *N) {
+ assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
+ Node = N;
+ }
+
+ /// getNode - Return the representative SDNode for this SUnit.
+ /// This may be used during pre-regalloc scheduling.
+ SDNode *getNode() const {
+ assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
+ return Node;
+ }
+
+ /// isInstr - Return true if this SUnit refers to a machine instruction as
+ /// opposed to an SDNode.
+ bool isInstr() const { return Instr; }
+
+ /// setInstr - Assign the instruction for the SUnit.
+ /// This may be used during post-regalloc scheduling.
+ void setInstr(MachineInstr *MI) {
+ assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
+ Instr = MI;
+ }
+
+ /// getInstr - Return the representative MachineInstr for this SUnit.
+ /// This may be used during post-regalloc scheduling.
+ MachineInstr *getInstr() const {
+ assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
+ return Instr;
+ }
+
+ /// addPred - This adds the specified edge as a pred of the current node if
+ /// not already. It also adds the current node as a successor of the
+ /// specified node.
+ bool addPred(const SDep &D, bool Required = true);
+
+ /// removePred - This removes the specified edge as a pred of the current
+ /// node if it exists. It also removes the current node as a successor of
+ /// the specified node.
+ void removePred(const SDep &D);
+
+ /// getDepth - Return the depth of this node, which is the length of the
+ /// maximum path up to any node which has no predecessors.
+ unsigned getDepth() const {
+ if (!isDepthCurrent)
+ const_cast<SUnit *>(this)->ComputeDepth();
+ return Depth;
+ }
+
+ /// getHeight - Return the height of this node, which is the length of the
+ /// maximum path down to any node which has no successors.
+ unsigned getHeight() const {
+ if (!isHeightCurrent)
+ const_cast<SUnit *>(this)->ComputeHeight();
+ return Height;
+ }
+
+ /// setDepthToAtLeast - If NewDepth is greater than this node's
+ /// depth value, set it to be the new depth value. This also
+ /// recursively marks successor nodes dirty.
+ void setDepthToAtLeast(unsigned NewDepth);
+
+ /// setHeightToAtLeast - If NewHeight is greater than this node's
+ /// height value, set it to be the new height value. This also
+ /// recursively marks predecessor nodes dirty.
+ void setHeightToAtLeast(unsigned NewHeight);
+
+ /// setDepthDirty - Set a flag in this node to indicate that its
+ /// stored Depth value will require recomputation the next time
+ /// getDepth() is called.
+ void setDepthDirty();
+
+ /// setHeightDirty - Set a flag in this node to indicate that its
+ /// stored Height value will require recomputation the next time
+ /// getHeight() is called.
+ void setHeightDirty();
+
+ /// isPred - Test if node N is a predecessor of this node.
+ bool isPred(SUnit *N) {
+ for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
+ if (Preds[i].getSUnit() == N)
+ return true;
+ return false;
+ }
+
+ /// isSucc - Test if node N is a successor of this node.
+ bool isSucc(SUnit *N) {
+ for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
+ if (Succs[i].getSUnit() == N)
+ return true;
+ return false;
+ }
+
+ bool isTopReady() const {
+ return NumPredsLeft == 0;
+ }
+ bool isBottomReady() const {
+ return NumSuccsLeft == 0;
+ }
+
+ /// \brief Order this node's predecessor edges such that the critical path
+ /// edge occurs first.
+ void biasCriticalPath();
+
+ void dump(const ScheduleDAG *G) const;
+ void dumpAll(const ScheduleDAG *G) const;
+ void print(raw_ostream &O, const ScheduleDAG *G) const;
+
+ private:
+ void ComputeDepth();
+ void ComputeHeight();
+ };
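+
+ // Example (illustrative): walking a node's incoming edges; visit() stands
+ // in for scheduler-specific work.
+ //
+ //   for (const SDep &Pred : SU->Preds)
+ //     if (!Pred.isCtrl()) // data edges only
+ //       visit(Pred.getSUnit(), Pred.getLatency());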
+
+ /// Return true if the specified SDep is equivalent except for latency.
+ inline bool SDep::overlaps(const SDep &Other) const {
+ if (Dep != Other.Dep)
+ return false;
+ switch (Dep.getInt()) {
+ case Data:
+ case Anti:
+ case Output:
+ return Contents.Reg == Other.Contents.Reg;
+ case Order:
+ return Contents.OrdKind == Other.Contents.OrdKind;
+ }
+ llvm_unreachable("Invalid dependency kind!");
+ }
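+
+ // Example (illustrative): edges of the same kind, node, and register
+ // overlap even when their latencies differ.
+ //
+ //   SDep A(SU, SDep::Data, /*Reg=*/0);
+ //   SDep B = A;
+ //   B.setLatency(3);
+ //   assert(A.overlaps(B) && A != B);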
+
+ /// getSUnit - Return the SUnit to which this edge points.
+ inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }
+
+ /// setSUnit - Assign the SUnit to which this edge points.
+ inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }
+
+ /// getKind - Return an enum value representing the kind of the dependence.
+ inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
+
+ //===--------------------------------------------------------------------===//
+ /// SchedulingPriorityQueue - This interface is used to plug different
+ /// priorities computation algorithms into the list scheduler. It implements
+ /// the interface of a standard priority queue, where nodes are inserted in
+ /// arbitrary order and returned in priority order. The computation of the
+ /// priority and the representation of the queue are totally up to the
+ /// implementation to decide.
+ ///
+ class SchedulingPriorityQueue {
+ virtual void anchor();
+ unsigned CurCycle;
+ bool HasReadyFilter;
+ public:
+ SchedulingPriorityQueue(bool rf = false):
+ CurCycle(0), HasReadyFilter(rf) {}
+ virtual ~SchedulingPriorityQueue() {}
+
+ virtual bool isBottomUp() const = 0;
+
+ virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
+ virtual void addNode(const SUnit *SU) = 0;
+ virtual void updateNode(const SUnit *SU) = 0;
+ virtual void releaseState() = 0;
+
+ virtual bool empty() const = 0;
+
+ bool hasReadyFilter() const { return HasReadyFilter; }
+
+ virtual bool tracksRegPressure() const { return false; }
+
+ virtual bool isReady(SUnit *) const {
+ assert(!HasReadyFilter && "The ready filter must override isReady()");
+ return true;
+ }
+ virtual void push(SUnit *U) = 0;
+
+ void push_all(const std::vector<SUnit *> &Nodes) {
+ for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
+ E = Nodes.end(); I != E; ++I)
+ push(*I);
+ }
+
+ virtual SUnit *pop() = 0;
+
+ virtual void remove(SUnit *SU) = 0;
+
+ virtual void dump(ScheduleDAG *) const {}
+
+ /// scheduledNode - As each node is scheduled, this method is invoked. This
+ /// allows the priority function to adjust the priority of related
+ /// unscheduled nodes, for example.
+ ///
+ virtual void scheduledNode(SUnit *) {}
+
+ virtual void unscheduledNode(SUnit *) {}
+
+ void setCurCycle(unsigned Cycle) {
+ CurCycle = Cycle;
+ }
+
+ unsigned getCurCycle() const {
+ return CurCycle;
+ }
+ };
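+
+ // Example (illustrative): the smallest conforming implementation is a FIFO
+ // (assumes <vector> and <algorithm>); real queues pop by a priority
+ // heuristic instead.
+ //
+ //   class FIFOQueue : public SchedulingPriorityQueue {
+ //     std::vector<SUnit *> Queue;
+ //   public:
+ //     bool isBottomUp() const override { return false; }
+ //     void initNodes(std::vector<SUnit> &) override {}
+ //     void addNode(const SUnit *) override {}
+ //     void updateNode(const SUnit *) override {}
+ //     void releaseState() override {}
+ //     bool empty() const override { return Queue.empty(); }
+ //     void push(SUnit *U) override { Queue.push_back(U); }
+ //     SUnit *pop() override {
+ //       SUnit *Front = Queue.front();
+ //       Queue.erase(Queue.begin());
+ //       return Front;
+ //     }
+ //     void remove(SUnit *SU) override {
+ //       Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
+ //     }
+ //   };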
+
+ class ScheduleDAG {
+ public:
+ const TargetMachine &TM; // Target processor
+ const TargetInstrInfo *TII; // Target instruction information
+ const TargetRegisterInfo *TRI; // Target processor register info
+ MachineFunction &MF; // Machine function
+ MachineRegisterInfo &MRI; // Virtual/real register map
+ std::vector<SUnit> SUnits; // The scheduling units.
+ SUnit EntrySU; // Special node for the region entry.
+ SUnit ExitSU; // Special node for the region exit.
+
+#ifdef NDEBUG
+ static const bool StressSched = false;
+#else
+ bool StressSched;
+#endif
+
+ explicit ScheduleDAG(MachineFunction &mf);
+
+ virtual ~ScheduleDAG();
+
+ /// clearDAG - clear the DAG state (between regions).
+ void clearDAG();
+
+ /// getInstrDesc - Return the MCInstrDesc of this SUnit.
+ /// Return NULL for SDNodes without a machine opcode.
+ const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
+ if (SU->isInstr()) return &SU->getInstr()->getDesc();
+ return getNodeDesc(SU->getNode());
+ }
+
+ /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
+ /// using 'dot'.
+ ///
+ virtual void viewGraph(const Twine &Name, const Twine &Title);
+ virtual void viewGraph();
+
+ virtual void dumpNode(const SUnit *SU) const = 0;
+
+ /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
+ /// of the ScheduleDAG.
+ virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
+
+ /// getDAGLabel - Return a label for the region of code covered by the DAG.
+ virtual std::string getDAGName() const = 0;
+
+ /// addCustomGraphFeatures - Add custom features for a visualization of
+ /// the ScheduleDAG.
+ virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
+
+#ifndef NDEBUG
+ /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
+ /// their state is consistent. Return the number of scheduled SUnits.
+ unsigned VerifyScheduledDAG(bool isBottomUp);
+#endif
+
+ private:
+ // Return the MCInstrDesc of this SDNode or NULL.
+ const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
+ };
+
+ class SUnitIterator : public std::iterator<std::forward_iterator_tag,
+ SUnit, ptrdiff_t> {
+ SUnit *Node;
+ unsigned Operand;
+
+ SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
+ public:
+ bool operator==(const SUnitIterator& x) const {
+ return Operand == x.Operand;
+ }
+ bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
+
+ pointer operator*() const {
+ return Node->Preds[Operand].getSUnit();
+ }
+ pointer operator->() const { return operator*(); }
+
+ SUnitIterator& operator++() { // Preincrement
+ ++Operand;
+ return *this;
+ }
+ SUnitIterator operator++(int) { // Postincrement
+ SUnitIterator tmp = *this; ++*this; return tmp;
+ }
+
+ static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
+ static SUnitIterator end (SUnit *N) {
+ return SUnitIterator(N, (unsigned)N->Preds.size());
+ }
+
+ unsigned getOperand() const { return Operand; }
+ const SUnit *getNode() const { return Node; }
+ /// isCtrlDep - Test if this is not an SDep::Data dependence.
+ bool isCtrlDep() const {
+ return getSDep().isCtrl();
+ }
+ bool isArtificialDep() const {
+ return getSDep().isArtificial();
+ }
+ const SDep &getSDep() const {
+ return Node->Preds[Operand];
+ }
+ };
+
+ template <> struct GraphTraits<SUnit*> {
+ typedef SUnit NodeType;
+ typedef SUnitIterator ChildIteratorType;
+ static inline NodeType *getEntryNode(SUnit *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return SUnitIterator::begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return SUnitIterator::end(N);
+ }
+ };
+
+ template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
+ typedef std::vector<SUnit>::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(ScheduleDAG *G) {
+ return G->SUnits.begin();
+ }
+ static nodes_iterator nodes_end(ScheduleDAG *G) {
+ return G->SUnits.end();
+ }
+ };
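+
+ // Example (illustrative): the specializations above let generic graph
+ // algorithms walk the DAG. Child edges follow Preds, so a depth-first walk
+ // from ExitSU visits predecessors (assumes llvm/ADT/DepthFirstIterator.h
+ // and llvm/Support/Debug.h).
+ //
+ //   for (SUnit *SU : depth_first(&DAG->ExitSU))
+ //     dbgs() << "SU(" << SU->NodeNum << ")\n";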
+
+ /// ScheduleDAGTopologicalSort is a class that computes a topological
+ /// ordering for SUnits and provides methods for dynamically updating
+ /// the ordering as new edges are added.
+ ///
+ /// This allows a very fast implementation of IsReachable, for example.
+ ///
+ class ScheduleDAGTopologicalSort {
+ /// SUnits - A reference to the ScheduleDAG's SUnits.
+ std::vector<SUnit> &SUnits;
+ SUnit *ExitSU;
+
+ /// Index2Node - Maps topological index to the node number.
+ std::vector<int> Index2Node;
+ /// Node2Index - Maps the node number to its topological index.
+ std::vector<int> Node2Index;
+ /// Visited - a set of nodes visited during a DFS traversal.
+ BitVector Visited;
+
+ /// DFS - make a DFS traversal and mark all nodes affected by the
+ /// edge insertion. These nodes will later get new topological indexes
+ /// by means of the Shift method.
+ void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
+
+ /// Shift - reassign topological indexes for the nodes in the DAG
+ /// to preserve the topological ordering.
+ void Shift(BitVector& Visited, int LowerBound, int UpperBound);
+
+ /// Allocate - assign the topological index to the node n.
+ void Allocate(int n, int index);
+
+ public:
+ ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
+
+ /// InitDAGTopologicalSorting - create the initial topological
+ /// ordering from the DAG to be scheduled.
+ void InitDAGTopologicalSorting();
+
+ /// IsReachable - Checks if SU is reachable from TargetSU.
+ bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
+
+ /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
+ bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
+
+ /// AddPred - Updates the topological ordering to accommodate an edge
+ /// to be added from SUnit X to SUnit Y.
+ void AddPred(SUnit *Y, SUnit *X);
+
+ /// RemovePred - Updates the topological ordering to accommodate an
+ /// edge being removed: the specified node N is removed from the
+ /// predecessors of the current node M.
+ void RemovePred(SUnit *M, SUnit *N);
+
+ typedef std::vector<int>::iterator iterator;
+ typedef std::vector<int>::const_iterator const_iterator;
+ iterator begin() { return Index2Node.begin(); }
+ const_iterator begin() const { return Index2Node.begin(); }
+ iterator end() { return Index2Node.end(); }
+ const_iterator end() const { return Index2Node.end(); }
+
+ typedef std::vector<int>::reverse_iterator reverse_iterator;
+ typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
+ reverse_iterator rbegin() { return Index2Node.rbegin(); }
+ const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
+ reverse_iterator rend() { return Index2Node.rend(); }
+ const_reverse_iterator rend() const { return Index2Node.rend(); }
+ };
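+
+ // Example (illustrative): the pattern a mutating scheduler can use to add
+ // an artificial edge without creating a cycle.
+ //
+ //   ScheduleDAGTopologicalSort Topo(SUnits, &ExitSU);
+ //   Topo.InitDAGTopologicalSorting();
+ //   if (!Topo.WillCreateCycle(SU, PredSU)) {
+ //     SU->addPred(SDep(PredSU, SDep::Artificial));
+ //     Topo.AddPred(SU, PredSU);
+ //   }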
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
new file mode 100644
index 0000000..c574df0
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -0,0 +1,293 @@
+//==- ScheduleDAGInstrs.h - MachineInstr Scheduling --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAGInstrs class, which implements
+// scheduling for a MachineInstr-based dependency graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+#define LLVM_CODEGEN_SCHEDULEDAGINSTRS_H
+
+#include "llvm/ADT/SparseMultiSet.h"
+#include "llvm/ADT/SparseSet.h"
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class MachineFrameInfo;
+ class MachineLoopInfo;
+ class MachineDominatorTree;
+ class RegPressureTracker;
+ class PressureDiffs;
+
+ /// An individual mapping from virtual register number to SUnit.
+ struct VReg2SUnit {
+ unsigned VirtReg;
+ LaneBitmask LaneMask;
+ SUnit *SU;
+
+ VReg2SUnit(unsigned VReg, LaneBitmask LaneMask, SUnit *SU)
+ : VirtReg(VReg), LaneMask(LaneMask), SU(SU) {}
+
+ unsigned getSparseSetIndex() const {
+ return TargetRegisterInfo::virtReg2Index(VirtReg);
+ }
+ };
+
+ /// Mapping from virtual register to SUnit including an operand index.
+ struct VReg2SUnitOperIdx : public VReg2SUnit {
+ unsigned OperandIndex;
+
+ VReg2SUnitOperIdx(unsigned VReg, LaneBitmask LaneMask,
+ unsigned OperandIndex, SUnit *SU)
+ : VReg2SUnit(VReg, LaneMask, SU), OperandIndex(OperandIndex) {}
+ };
+
+ /// Record a physical register access.
+ /// For non-data-dependent uses, OpIdx == -1.
+ struct PhysRegSUOper {
+ SUnit *SU;
+ int OpIdx;
+ unsigned Reg;
+
+ PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
+
+ unsigned getSparseSetIndex() const { return Reg; }
+ };
+
+ /// Use a SparseMultiSet to track physical registers. Storage is only
+ /// allocated once for the pass. It can be cleared in constant time and reused
+ /// without any frees.
+ typedef SparseMultiSet<PhysRegSUOper, llvm::identity<unsigned>, uint16_t>
+ Reg2SUnitsMap;
+
+ /// Use SparseSet as a SparseMap by relying on the fact that it never
+ /// compares ValueT's, only unsigned keys. This allows the set to be cleared
+ /// between scheduling regions in constant time as long as ValueT does not
+ /// require a destructor.
+ typedef SparseSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMap;
+
+ /// Track local uses of virtual registers. These uses are gathered by the DAG
+ /// builder and may be consulted by the scheduler to avoid iterating an entire
+ /// vreg use list.
+ typedef SparseMultiSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMultiMap;
+
+ typedef SparseMultiSet<VReg2SUnitOperIdx, VirtReg2IndexFunctor>
+ VReg2SUnitOperIdxMultiMap;
+
+ /// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
+ /// MachineInstrs.
+ class ScheduleDAGInstrs : public ScheduleDAG {
+ protected:
+ const MachineLoopInfo *MLI;
+ const MachineFrameInfo *MFI;
+
+ /// TargetSchedModel provides an interface to the machine model.
+ TargetSchedModel SchedModel;
+
+ /// True if the DAG builder should remove kill flags (in preparation for
+ /// rescheduling).
+ bool RemoveKillFlags;
+
+ /// The standard DAG builder does not normally include terminators as DAG
+ /// nodes because it does not create the necessary dependencies to prevent
+ /// reordering. A specialized scheduler can override
+ /// TargetInstrInfo::isSchedulingBoundary and then enable this flag to indicate
+ /// it has taken responsibility for scheduling the terminator correctly.
+ bool CanHandleTerminators;
+
+ /// Whether lane masks should get tracked.
+ bool TrackLaneMasks;
+
+ /// State specific to the current scheduling region.
+ /// ------------------------------------------------
+
+ /// The block in which to insert instructions
+ MachineBasicBlock *BB;
+
+ /// The beginning of the range to be scheduled.
+ MachineBasicBlock::iterator RegionBegin;
+
+ /// The end of the range to be scheduled.
+ MachineBasicBlock::iterator RegionEnd;
+
+ /// Instructions in this region (distance(RegionBegin, RegionEnd)).
+ unsigned NumRegionInstrs;
+
+ /// After calling BuildSchedGraph, each machine instruction in the current
+ /// scheduling region is mapped to an SUnit.
+ DenseMap<MachineInstr*, SUnit*> MISUnitMap;
+
+ /// After calling BuildSchedGraph, each vreg used in the scheduling region
+ /// is mapped to a set of SUnits. These include all local vreg uses, not
+ /// just the uses for a singly defined vreg.
+ VReg2SUnitMultiMap VRegUses;
+
+ /// State internal to DAG building.
+ /// -------------------------------
+
+ /// Defs, Uses - Remember where defs and uses of each register are as we
+ /// iterate upward through the instructions. This is allocated here instead
+ /// of inside BuildSchedGraph to avoid the need for it to be initialized and
+ /// destructed for each block.
+ Reg2SUnitsMap Defs;
+ Reg2SUnitsMap Uses;
+
+ /// Tracks the last instruction(s) in this region defining each virtual
+ /// register. There may be multiple current definitions for a register with
+ /// disjoint lane masks.
+ VReg2SUnitMultiMap CurrentVRegDefs;
+ /// Tracks the last instructions in this region using each virtual register.
+ VReg2SUnitOperIdxMultiMap CurrentVRegUses;
+
+ /// PendingLoads - Remember where unknown loads are after the most recent
+ /// unknown store, as we iterate. As with Defs and Uses, this is here
+ /// to minimize construction/destruction.
+ std::vector<SUnit *> PendingLoads;
+
+ /// DbgValues - Remember instruction that precedes DBG_VALUE.
+ /// These are generated by buildSchedGraph but persist so they can be
+ /// referenced when emitting the final schedule.
+ typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
+ DbgValueVector;
+ DbgValueVector DbgValues;
+ MachineInstr *FirstDbgValue;
+
+ /// Set of live physical registers for updating kill flags.
+ BitVector LiveRegs;
+
+ public:
+ explicit ScheduleDAGInstrs(MachineFunction &mf,
+ const MachineLoopInfo *mli,
+ bool RemoveKillFlags = false);
+
+ ~ScheduleDAGInstrs() override {}
+
+ /// \brief Get the machine model for instruction scheduling.
+ const TargetSchedModel *getSchedModel() const { return &SchedModel; }
+
+ /// \brief Resolve and cache a resolved scheduling class for an SUnit.
+ const MCSchedClassDesc *getSchedClass(SUnit *SU) const {
+ if (!SU->SchedClass && SchedModel.hasInstrSchedModel())
+ SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr());
+ return SU->SchedClass;
+ }
+
+ /// begin - Return an iterator to the top of the current scheduling region.
+ MachineBasicBlock::iterator begin() const { return RegionBegin; }
+
+ /// end - Return an iterator to the bottom of the current scheduling region.
+ MachineBasicBlock::iterator end() const { return RegionEnd; }
+
+ /// newSUnit - Creates a new SUnit and returns a pointer to it.
+ SUnit *newSUnit(MachineInstr *MI);
+
+ /// getSUnit - Return an existing SUnit for this MI, or NULL.
+ SUnit *getSUnit(MachineInstr *MI) const;
+
+ /// startBlock - Prepare to perform scheduling in the given block.
+ virtual void startBlock(MachineBasicBlock *BB);
+
+ /// finishBlock - Clean up after scheduling in the given block.
+ virtual void finishBlock();
+
+ /// Initialize the scheduler state for the next scheduling region.
+ virtual void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs);
+
+ /// Notify that the scheduler has finished scheduling the current region.
+ virtual void exitRegion();
+
+ /// buildSchedGraph - Build SUnits from the MachineBasicBlock that we
+ /// are scheduling.
+ void buildSchedGraph(AliasAnalysis *AA,
+ RegPressureTracker *RPTracker = nullptr,
+ PressureDiffs *PDiffs = nullptr,
+ bool TrackLaneMasks = false);
+
+ /// addSchedBarrierDeps - Add dependencies from instructions in the current
+ /// list of instructions being scheduled to the scheduling barrier. We want
+ /// to make sure instructions which define registers that are either used by
+ /// the terminator or are live-out are properly scheduled. This is especially
+ /// important when the definition latency of the return value(s) is too high
+ /// to be hidden by the branch, or when the live-out registers are used by
+ /// instructions in the fallthrough block.
+ void addSchedBarrierDeps();
+
+ /// schedule - Order nodes according to selected style, filling
+ /// in the Sequence member.
+ ///
+ /// Typically, a scheduling algorithm will implement schedule() without
+ /// overriding enterRegion() or exitRegion().
+ virtual void schedule() = 0;
+
+ /// finalizeSchedule - Allow targets to perform final scheduling actions at
+ /// the level of the whole MachineFunction. By default does nothing.
+ virtual void finalizeSchedule() {}
+
+ void dumpNode(const SUnit *SU) const override;
+
+ /// Return a label for a DAG node that points to an instruction.
+ std::string getGraphNodeLabel(const SUnit *SU) const override;
+
+ /// Return a label for the region of code covered by the DAG.
+ std::string getDAGName() const override;
+
+ /// \brief Fix register kill flags that scheduling has made invalid.
+ void fixupKills(MachineBasicBlock *MBB);
+ protected:
+ void initSUnits();
+ void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx);
+ void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
+ void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
+ void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
+
+ /// \brief PostRA helper for rewriting kill flags.
+ void startBlockForKills(MachineBasicBlock *BB);
+
+ /// \brief Toggle a register operand kill flag.
+ ///
+ /// Other adjustments may be made to the instruction if necessary. Return
+ /// true if the operand has been deleted, false if not.
+ bool toggleKillFlag(MachineInstr *MI, MachineOperand &MO);
+
+ /// Returns a mask for which lanes get read/written by the given (register)
+ /// machine operand.
+ LaneBitmask getLaneMaskForMO(const MachineOperand &MO) const;
+
+ void collectVRegUses(SUnit *SU);
+ };
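+
+ // Example (illustrative): the minimal shape of a concrete scheduler. A real
+ // schedule() would reorder the instructions between RegionBegin and
+ // RegionEnd after building the graph.
+ //
+ //   class TrivialSched : public ScheduleDAGInstrs {
+ //   public:
+ //     TrivialSched(MachineFunction &MF, const MachineLoopInfo *MLI)
+ //         : ScheduleDAGInstrs(MF, MLI) {}
+ //     void schedule() override {
+ //       buildSchedGraph(/*AA=*/nullptr);
+ //       // Keep the original order: nothing else to do.
+ //     }
+ //   };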
+
+ /// newSUnit - Creates a new SUnit and returns a pointer to it.
+ inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
+#ifndef NDEBUG
+ const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
+#endif
+ SUnits.emplace_back(MI, (unsigned)SUnits.size());
+ assert((Addr == nullptr || Addr == &SUnits[0]) &&
+ "SUnits std::vector reallocated on the fly!");
+ SUnits.back().OrigNode = &SUnits.back();
+ return &SUnits.back();
+ }
+
+ /// getSUnit - Return an existing SUnit for this MI, or NULL.
+ inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
+ DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
+ if (I == MISUnitMap.end())
+ return nullptr;
+ return I->second;
+ }
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleDFS.h b/contrib/llvm/include/llvm/CodeGen/ScheduleDFS.h
new file mode 100644
index 0000000..b2108ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleDFS.h
@@ -0,0 +1,194 @@
+//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of an ILP metric for machine level instruction scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDFS_H
+#define LLVM_CODEGEN_SCHEDULEDFS_H
+
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/Support/DataTypes.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+class IntEqClasses;
+class ScheduleDAGInstrs;
+class SUnit;
+
+/// \brief Represent the ILP of the subDAG rooted at a DAG node.
+///
+/// ILPValues summarize the DAG subtree rooted at each node. ILPValues are
+/// valid for all nodes regardless of their subtree membership.
+///
+/// When computed using bottom-up DFS, this metric assumes that the DAG is a
+/// forest of trees with roots at the bottom of the schedule branching upward.
+struct ILPValue {
+ unsigned InstrCount;
+ /// Length may either correspond to depth or height, depending on direction,
+ /// and cycles or nodes depending on context.
+ unsigned Length;
+
+ ILPValue(unsigned count, unsigned length):
+ InstrCount(count), Length(length) {}
+
+ // Order by the ILP metric's value.
+ bool operator<(ILPValue RHS) const {
+ return (uint64_t)InstrCount * RHS.Length
+ < (uint64_t)Length * RHS.InstrCount;
+ }
+ bool operator>(ILPValue RHS) const {
+ return RHS < *this;
+ }
+ bool operator<=(ILPValue RHS) const {
+ return (uint64_t)InstrCount * RHS.Length
+ <= (uint64_t)Length * RHS.InstrCount;
+ }
+ bool operator>=(ILPValue RHS) const {
+ return RHS <= *this;
+ }
+
+ void print(raw_ostream &OS) const;
+
+ void dump() const;
+};
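+
+// Example (illustrative): the comparisons cross-multiply in 64 bits, so no
+// division or floating point is needed. ILPValue(6, 3) means 6 instructions
+// over a critical path of length 3, i.e. an ILP of 2.0.
+//
+//   ILPValue A(6, 3), B(4, 4);
+//   assert(B < A); // 4*3 < 4*6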
+
+/// \brief Compute the values of each DAG node for various metrics during DFS.
+class SchedDFSResult {
+ friend class SchedDFSImpl;
+
+ static const unsigned InvalidSubtreeID = ~0u;
+
+ /// \brief Per-SUnit data computed during DFS for various metrics.
+ ///
+ /// A node's SubtreeID is set to itself when it is visited to indicate that it
+ /// is the root of a subtree. Later it is set to its parent to indicate an
+ /// interior node. Finally, it is set to a representative subtree ID during
+ /// finalization.
+ struct NodeData {
+ unsigned InstrCount;
+ unsigned SubtreeID;
+
+ NodeData(): InstrCount(0), SubtreeID(InvalidSubtreeID) {}
+ };
+
+ /// \brief Per-Subtree data computed during DFS.
+ struct TreeData {
+ unsigned ParentTreeID;
+ unsigned SubInstrCount;
+
+ TreeData(): ParentTreeID(InvalidSubtreeID), SubInstrCount(0) {}
+ };
+
+ /// \brief Record a connection between subtrees and the connection level.
+ struct Connection {
+ unsigned TreeID;
+ unsigned Level;
+
+ Connection(unsigned tree, unsigned level): TreeID(tree), Level(level) {}
+ };
+
+ bool IsBottomUp;
+ unsigned SubtreeLimit;
+ /// DFS results for each SUnit in this DAG.
+ std::vector<NodeData> DFSNodeData;
+
+ // Store per-tree data indexed on tree ID.
+ SmallVector<TreeData, 16> DFSTreeData;
+
+ // For each subtree discovered during DFS, record its connections to other
+ // subtrees.
+ std::vector<SmallVector<Connection, 4> > SubtreeConnections;
+
+ /// Cache the current connection level of each subtree.
+ /// This mutable array is updated during scheduling.
+ std::vector<unsigned> SubtreeConnectLevels;
+
+public:
+ SchedDFSResult(bool IsBU, unsigned lim)
+ : IsBottomUp(IsBU), SubtreeLimit(lim) {}
+
+ /// \brief Get the node cutoff before subtrees are considered significant.
+ unsigned getSubtreeLimit() const { return SubtreeLimit; }
+
+ /// \brief Return true if this DFSResult is uninitialized.
+ ///
+ /// resize() initializes DFSResult, while compute() populates it.
+ bool empty() const { return DFSNodeData.empty(); }
+
+ /// \brief Clear the results.
+ void clear() {
+ DFSNodeData.clear();
+ DFSTreeData.clear();
+ SubtreeConnections.clear();
+ SubtreeConnectLevels.clear();
+ }
+
+ /// \brief Initialize the result data with the size of the DAG.
+ void resize(unsigned NumSUnits) {
+ DFSNodeData.resize(NumSUnits);
+ }
+
+ /// \brief Compute various metrics for the DAG with given roots.
+ void compute(ArrayRef<SUnit> SUnits);
+
+ /// \brief Get the number of instructions in the given subtree and its
+ /// children.
+ unsigned getNumInstrs(const SUnit *SU) const {
+ return DFSNodeData[SU->NodeNum].InstrCount;
+ }
+
+ /// \brief Get the number of instructions in the given subtree not including
+ /// children.
+ unsigned getNumSubInstrs(unsigned SubtreeID) const {
+ return DFSTreeData[SubtreeID].SubInstrCount;
+ }
+
+ /// \brief Get the ILP value for a DAG node.
+ ///
+ /// A leaf node has an ILP of 1/1.
+ ILPValue getILP(const SUnit *SU) const {
+ return ILPValue(DFSNodeData[SU->NodeNum].InstrCount, 1 + SU->getDepth());
+ }
+
+ /// \brief The number of subtrees detected in this DAG.
+ unsigned getNumSubtrees() const { return SubtreeConnectLevels.size(); }
+
+ /// \brief Get the ID of the subtree the given DAG node belongs to.
+ ///
+ /// For convenience, if DFSResults have not been computed yet, give everything
+ /// tree ID 0.
+ unsigned getSubtreeID(const SUnit *SU) const {
+ if (empty())
+ return 0;
+ assert(SU->NodeNum < DFSNodeData.size() && "New Node");
+ return DFSNodeData[SU->NodeNum].SubtreeID;
+ }
+
+ /// \brief Get the connection level of a subtree.
+ ///
+ /// For bottom-up trees, the connection level is the latency depth (in cycles)
+ /// of the deepest connection to another subtree.
+ unsigned getSubtreeLevel(unsigned SubtreeID) const {
+ return SubtreeConnectLevels[SubtreeID];
+ }
+
+ /// \brief Scheduler callback to update SubtreeConnectLevels when a tree is
+ /// initially scheduled.
+ void scheduleTree(unsigned SubtreeID);
+};
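+
+// Example (illustrative): the expected call sequence from a scheduler that
+// owns a std::vector<SUnit> named SUnits.
+//
+//   SchedDFSResult DFS(/*IsBU=*/true, /*lim=*/8);
+//   DFS.resize(SUnits.size());
+//   DFS.compute(SUnits);
+//   ILPValue ILP = DFS.getILP(&SUnits.front());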
+
+raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val);
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
new file mode 100644
index 0000000..8a40e72
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -0,0 +1,111 @@
+//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleHazardRecognizer class, which implements
+// hazard-avoidance heuristics for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+
+namespace llvm {
+
+class SUnit;
+
+/// HazardRecognizer - This determines whether or not an instruction can be
+/// issued this cycle, and whether or not a noop needs to be inserted to handle
+/// the hazard.
+class ScheduleHazardRecognizer {
+protected:
+ /// MaxLookAhead - Indicates the number of cycles in the scoreboard
+ /// state. Important to restore the state after backtracking. Additionally,
+ /// MaxLookAhead=0 identifies a fake recognizer, allowing the client to
+ /// bypass virtual calls. Currently the PostRA scheduler ignores it.
+ unsigned MaxLookAhead;
+
+public:
+ ScheduleHazardRecognizer(): MaxLookAhead(0) {}
+ virtual ~ScheduleHazardRecognizer();
+
+ enum HazardType {
+ NoHazard, // This instruction can be emitted at this cycle.
+ Hazard, // This instruction can't be emitted at this cycle.
+ NoopHazard // This instruction can't be emitted, and needs noops.
+ };
+
+ unsigned getMaxLookAhead() const { return MaxLookAhead; }
+
+ bool isEnabled() const { return MaxLookAhead != 0; }
+
+ /// atIssueLimit - Return true if no more instructions may be issued in this
+ /// cycle.
+ ///
+ /// FIXME: remove this once MachineScheduler is the only client.
+ virtual bool atIssueLimit() const { return false; }
+
+ /// getHazardType - Return the hazard type of emitting this node. There are
+ /// three possible results. Either:
+ /// * NoHazard: it is legal to issue this instruction on this cycle.
+ /// * Hazard: issuing this instruction would stall the machine. If some
+ /// other instruction is available, issue it first.
+ /// * NoopHazard: issuing this instruction would break the program. If
+ /// some other instruction can be issued, do so, otherwise issue a noop.
+ virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
+ return NoHazard;
+ }
+
+ /// Reset - This callback is invoked when a new block of
+ /// instructions is about to be scheduled. The hazard state should be
+ /// set to an initialized state.
+ virtual void Reset() {}
+
+ /// EmitInstruction - This callback is invoked when an instruction is
+ /// emitted, to advance the hazard state.
+ virtual void EmitInstruction(SUnit *) {}
+
+ /// PreEmitNoops - This callback is invoked prior to emitting an instruction.
+ /// It should return the number of noops to emit prior to the provided
+ /// instruction.
+ /// Note: This is only used during PostRA scheduling. EmitNoop is not called
+ /// for these noops.
+ virtual unsigned PreEmitNoops(SUnit *) {
+ return 0;
+ }
+
+ /// ShouldPreferAnother - This callback may be invoked if getHazardType
+ /// returns NoHazard. If, even though there is no hazard, it would be better to
+ /// schedule another available instruction, this callback should return true.
+ virtual bool ShouldPreferAnother(SUnit *) {
+ return false;
+ }
+
+ /// AdvanceCycle - This callback is invoked whenever the next top-down
+ /// instruction to be scheduled cannot issue in the current cycle, either
+ /// because of latency or resource conflicts. This should increment the
+ /// internal state of the hazard recognizer so that previously "Hazard"
+ /// instructions will now not be hazards.
+ virtual void AdvanceCycle() {}
+
+ /// RecedeCycle - This callback is invoked whenever the next bottom-up
+ /// instruction to be scheduled cannot issue in the current cycle, either
+ /// because of latency or resource conflicts.
+ virtual void RecedeCycle() {}
+
+ /// EmitNoop - This callback is invoked when a noop was added to the
+ /// instruction stream.
+ virtual void EmitNoop() {
+ // Default implementation: count it as a cycle.
+ AdvanceCycle();
+ }
+};
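+
+// Example (illustrative): the shape of a top-down issue loop driving a
+// recognizer; issueInstr() and emitNoop() are placeholders for scheduler
+// actions.
+//
+//   switch (HazardRec->getHazardType(SU)) {
+//   case ScheduleHazardRecognizer::NoHazard:
+//     issueInstr(SU);
+//     HazardRec->EmitInstruction(SU);
+//     break;
+//   case ScheduleHazardRecognizer::Hazard:
+//     HazardRec->AdvanceCycle(); // stall and retry next cycle
+//     break;
+//   case ScheduleHazardRecognizer::NoopHazard:
+//     emitNoop();
+//     HazardRec->EmitNoop();
+//     break;
+//   }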
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
new file mode 100644
index 0000000..a7a6227
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/SchedulerRegistry.h
@@ -0,0 +1,106 @@
+//===-- llvm/CodeGen/SchedulerRegistry.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation for instruction scheduler function
+// pass registry (RegisterScheduler).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
+#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterScheduler class - Track the registration of instruction schedulers.
+///
+//===----------------------------------------------------------------------===//
+
+class SelectionDAGISel;
+class ScheduleDAGSDNodes;
+class SelectionDAG;
+class MachineBasicBlock;
+
+class RegisterScheduler : public MachinePassRegistryNode {
+public:
+ typedef ScheduleDAGSDNodes *(*FunctionPassCtor)(SelectionDAGISel*,
+ CodeGenOpt::Level);
+
+ static MachinePassRegistry Registry;
+
+ RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
+ { Registry.Add(this); }
+ ~RegisterScheduler() { Registry.Remove(this); }
+
+
+ // Accessors.
+ //
+ RegisterScheduler *getNext() const {
+ return (RegisterScheduler *)MachinePassRegistryNode::getNext();
+ }
+ static RegisterScheduler *getList() {
+ return (RegisterScheduler *)Registry.getList();
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+};
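+
+// Example (illustrative): how a scheduler factory is registered so it can be
+// selected by name on the command line; the name, description, and factory
+// are placeholders.
+//
+//   static ScheduleDAGSDNodes *
+//   createMySched(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel);
+//
+//   static RegisterScheduler
+//     MySchedRegistry("mysched", "My experimental scheduler", createMySched);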
+
+/// createBURRListDAGScheduler - This creates a bottom up register usage
+/// reduction list scheduler.
+ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createSourceListDAGScheduler - This creates a bottom-up list scheduler
+/// that schedules nodes in source code order when possible.
+ScheduleDAGSDNodes *createSourceListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createHybridListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that makes use of latency information to avoid stalls
+/// for long latency instructions in low register pressure mode. In high
+/// register pressure mode it schedules to reduce register pressure.
+ScheduleDAGSDNodes *createHybridListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level);
+
+/// createILPListDAGScheduler - This creates a bottom up register pressure
+/// aware list scheduler that tries to increase instruction level parallelism
+/// in low register pressure mode. In high register pressure mode it schedules
+/// to reduce register pressure.
+ScheduleDAGSDNodes *createILPListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level);
+
+/// createFastDAGScheduler - This creates a "fast" scheduler.
+///
+ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createVLIWDAGScheduler - Scheduler for VLIW targets. This creates top down
+/// DFA driven list scheduler with clustering heuristic to control
+/// register pressure.
+ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createDefaultScheduler - This creates an instruction scheduler appropriate
+/// for the target.
+ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createDAGLinearizer - This creates a "no-scheduling" scheduler which
+/// linearizes the DAG using topological order.
+ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+} // end namespace llvm
+
+#endif
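
For context, this registry is what the -pre-RA-sched command-line choice hooks into: each scheduler factory registers itself through a static RegisterScheduler object. A sketch of the registration pattern, modeled on how LLVM's SelectionDAG schedulers register themselves (the variable name is illustrative):

#include "llvm/CodeGen/SchedulerRegistry.h"
using namespace llvm;

// Constructing the object adds the entry to RegisterScheduler::Registry;
// its destructor removes it again.
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);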
diff --git a/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h b/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
new file mode 100644
index 0000000..ab14c2d
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ScoreboardHazardRecognizer.h
@@ -0,0 +1,126 @@
+//=- llvm/CodeGen/ScoreboardHazardRecognizer.h - Schedule Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScoreboardHazardRecognizer class, which
+// encapsulates hazard-avoidance heuristics for scheduling, based on the
+// scheduling itineraries specified for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
+
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+
+class InstrItineraryData;
+class ScheduleDAG;
+class SUnit;
+
+class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
+ // Scoreboard to track function unit usage. Scoreboard[0] is a
+ // mask of the FUs in use in the cycle currently being
+ // scheduled. Scoreboard[1] is a mask for the next cycle. The
+ // Scoreboard is used as a circular buffer with the current cycle
+ // indicated by Head.
+ //
+ // Scoreboard always counts cycles in forward execution order. If used by a
+ // bottom-up scheduler, then the scoreboard cycles are the inverse of the
+ // scheduler's cycles.
+ class Scoreboard {
+ unsigned *Data;
+
+ // The maximum number of cycles monitored by the Scoreboard. This
+ // value is determined based on the target itineraries to ensure
+ // that all hazards can be tracked.
+ size_t Depth;
+ // Index into the Scoreboard that represents the current cycle.
+ size_t Head;
+ public:
+ Scoreboard() : Data(nullptr), Depth(0), Head(0) {}
+ ~Scoreboard() {
+ delete[] Data;
+ }
+
+ size_t getDepth() const { return Depth; }
+ unsigned& operator[](size_t idx) const {
+ // Depth is expected to be a power-of-2.
+ assert(Depth && !(Depth & (Depth - 1)) &&
+ "Scoreboard was not initialized properly!");
+
+ return Data[(Head + idx) & (Depth-1)];
+ }
+
+ void reset(size_t d = 1) {
+ if (!Data) {
+ Depth = d;
+ Data = new unsigned[Depth];
+ }
+
+ memset(Data, 0, Depth * sizeof(Data[0]));
+ Head = 0;
+ }
+
+ void advance() {
+ Head = (Head + 1) & (Depth-1);
+ }
+
+ void recede() {
+ Head = (Head - 1) & (Depth-1);
+ }
+
+ // Print the scoreboard.
+ void dump() const;
+ };
+
+#ifndef NDEBUG
+ // Support for tracing ScoreboardHazardRecognizer as a component within
+ // another module. Follows the current thread-unsafe model of tracing.
+ static const char *DebugType;
+#endif
+
+ // Itinerary data for the target.
+ const InstrItineraryData *ItinData;
+
+ const ScheduleDAG *DAG;
+
+ /// IssueWidth - Max issue per cycle. 0=Unknown.
+ unsigned IssueWidth;
+
+ /// IssueCount - Count instructions issued in this cycle.
+ unsigned IssueCount;
+
+ Scoreboard ReservedScoreboard;
+ Scoreboard RequiredScoreboard;
+
+public:
+ ScoreboardHazardRecognizer(const InstrItineraryData *ItinData,
+ const ScheduleDAG *DAG,
+ const char *ParentDebugType = "");
+
+ /// atIssueLimit - Return true if no more instructions may be issued in this
+ /// cycle.
+ bool atIssueLimit() const override;
+
+ // Stalls provides a cycle offset at which SU will be scheduled. It will be
+ // negative for bottom-up scheduling.
+ HazardType getHazardType(SUnit *SU, int Stalls) override;
+ void Reset() override;
+ void EmitInstruction(SUnit *SU) override;
+ void AdvanceCycle() override;
+ void RecedeCycle() override;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_SCOREBOARDHAZARDRECOGNIZER_H
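
The Scoreboard above leans on Depth being a power of two so that advancing and receding reduce to a single mask operation. A standalone sketch of that indexing (nothing here comes from the header beyond the arithmetic itself):

#include <cassert>
#include <cstddef>

int main() {
  const std::size_t Depth = 8; // must be a power of two
  std::size_t Head = 0;

  Head = (Head + 1) & (Depth - 1); // advance(): Head == 1
  Head = (Head - 1) & (Depth - 1); // recede():  Head == 0 again
  Head = (Head - 1) & (Depth - 1); // unsigned wrap lands on Depth - 1
  assert(Head == Depth - 1);

  // operator[] maps a logical cycle offset onto the ring the same way:
  std::size_t Slot = (Head + 3) & (Depth - 1); // (7 + 3) & 7 == 2
  assert(Slot == 2);
  return 0;
}

This is also why operator[] asserts that Depth is a power of two: a non-power-of-two depth would break the masking.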
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
new file mode 100644
index 0000000..a21e9ae
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -0,0 +1,1328 @@
+//===-- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAG class, and transitively defines the
+// SDNode class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAG_H
+#define LLVM_CODEGEN_SELECTIONDAG_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cassert>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class MachineConstantPoolValue;
+class MachineFunction;
+class MDNode;
+class SDDbgValue;
+class TargetLowering;
+class TargetSelectionDAGInfo;
+
+class SDVTListNode : public FoldingSetNode {
+ friend struct FoldingSetTrait<SDVTListNode>;
+ /// A reference to an Interned FoldingSetNodeID for this node.
+ /// The Allocator in SelectionDAG holds the data.
+ /// SDVTList contains all types which are frequently accessed in SelectionDAG.
+ /// The size of this list is not expected to be big so it won't introduce
+ /// a memory penalty.
+ FoldingSetNodeIDRef FastID;
+ const EVT *VTs;
+ unsigned int NumVTs;
+ /// The hash value for SDVTList is fixed, so cache it to avoid
+ /// hash calculation.
+ unsigned HashValue;
+public:
+ SDVTListNode(const FoldingSetNodeIDRef ID, const EVT *VT, unsigned int Num) :
+ FastID(ID), VTs(VT), NumVTs(Num) {
+ HashValue = ID.ComputeHash();
+ }
+ SDVTList getSDVTList() {
+ SDVTList result = {VTs, NumVTs};
+ return result;
+ }
+};
+
+/// Specialize FoldingSetTrait for SDVTListNode
+/// to avoid computing temp FoldingSetNodeID and hash value.
+template<> struct FoldingSetTrait<SDVTListNode> : DefaultFoldingSetTrait<SDVTListNode> {
+ static void Profile(const SDVTListNode &X, FoldingSetNodeID& ID) {
+ ID = X.FastID;
+ }
+ static bool Equals(const SDVTListNode &X, const FoldingSetNodeID &ID,
+ unsigned IDHash, FoldingSetNodeID &TempID) {
+ if (X.HashValue != IDHash)
+ return false;
+ return ID == X.FastID;
+ }
+ static unsigned ComputeHash(const SDVTListNode &X, FoldingSetNodeID &TempID) {
+ return X.HashValue;
+ }
+};
+
+template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> {
+private:
+ mutable ilist_half_node<SDNode> Sentinel;
+public:
+ SDNode *createSentinel() const {
+ return static_cast<SDNode*>(&Sentinel);
+ }
+ static void destroySentinel(SDNode *) {}
+
+ SDNode *provideInitialHead() const { return createSentinel(); }
+ SDNode *ensureHead(SDNode*) const { return createSentinel(); }
+ static void noteHead(SDNode*, SDNode*) {}
+
+ static void deleteNode(SDNode *) {
+ llvm_unreachable("ilist_traits<SDNode> shouldn't see a deleteNode call!");
+ }
+private:
+ static void createNode(const SDNode &);
+};
+
+/// Keeps track of dbg_value information through SDISel. We do
+/// not build SDNodes for these so as not to perturb the generated code;
+/// instead the info is kept off to the side in this structure. Each SDNode may
+/// have one or more associated dbg_value entries. This information is kept in
+/// DbgValMap.
+/// Byval parameters are handled separately because they don't use allocas,
+/// which busts the normal mechanism. There is good reason for handling all
+/// parameters separately: they may not have code generated for them, they
+/// should always go at the beginning of the function regardless of other code
+/// motion, and debug info for them is potentially useful even if the parameter
+/// is unused. Right now only byval parameters are handled separately.
+class SDDbgInfo {
+ BumpPtrAllocator Alloc;
+ SmallVector<SDDbgValue*, 32> DbgValues;
+ SmallVector<SDDbgValue*, 32> ByvalParmDbgValues;
+ typedef DenseMap<const SDNode*, SmallVector<SDDbgValue*, 2> > DbgValMapType;
+ DbgValMapType DbgValMap;
+
+ void operator=(const SDDbgInfo&) = delete;
+ SDDbgInfo(const SDDbgInfo&) = delete;
+public:
+ SDDbgInfo() {}
+
+ void add(SDDbgValue *V, const SDNode *Node, bool isParameter) {
+ if (isParameter)
+ ByvalParmDbgValues.push_back(V);
+ else
+ DbgValues.push_back(V);
+ if (Node)
+ DbgValMap[Node].push_back(V);
+ }
+
+ /// \brief Invalidate all DbgValues attached to the node and remove
+ /// it from the Node-to-DbgValues map.
+ void erase(const SDNode *Node);
+
+ void clear() {
+ DbgValMap.clear();
+ DbgValues.clear();
+ ByvalParmDbgValues.clear();
+ Alloc.Reset();
+ }
+
+ BumpPtrAllocator &getAlloc() { return Alloc; }
+
+ bool empty() const {
+ return DbgValues.empty() && ByvalParmDbgValues.empty();
+ }
+
+ ArrayRef<SDDbgValue*> getSDDbgValues(const SDNode *Node) {
+ DbgValMapType::iterator I = DbgValMap.find(Node);
+ if (I != DbgValMap.end())
+ return I->second;
+ return ArrayRef<SDDbgValue*>();
+ }
+
+ typedef SmallVectorImpl<SDDbgValue*>::iterator DbgIterator;
+ DbgIterator DbgBegin() { return DbgValues.begin(); }
+ DbgIterator DbgEnd() { return DbgValues.end(); }
+ DbgIterator ByvalParmDbgBegin() { return ByvalParmDbgValues.begin(); }
+ DbgIterator ByvalParmDbgEnd() { return ByvalParmDbgValues.end(); }
+};
+
+class SelectionDAG;
+void checkForCycles(const SelectionDAG *DAG, bool force = false);
+
+/// This is used to represent a portion of an LLVM function in a low-level
+/// Data Dependence DAG representation suitable for instruction selection.
+/// This DAG is constructed as the first step of instruction selection in order
+/// to allow implementation of machine specific optimizations
+/// and code simplifications.
+///
+/// The representation used by the SelectionDAG is a target-independent
+/// representation, which has some similarities to the GCC RTL representation,
+/// but is significantly simpler and more powerful, and is a graph form
+/// instead of a linear form.
+///
+class SelectionDAG {
+ const TargetMachine &TM;
+ const TargetSelectionDAGInfo *TSI;
+ const TargetLowering *TLI;
+ MachineFunction *MF;
+ LLVMContext *Context;
+ CodeGenOpt::Level OptLevel;
+
+ /// The starting token.
+ SDNode EntryNode;
+
+ /// The root of the entire DAG.
+ SDValue Root;
+
+ /// A linked list of nodes in the current DAG.
+ ilist<SDNode> AllNodes;
+
+ /// The AllocatorType for allocating SDNodes. We use
+ /// pool allocation with recycling.
+ typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
+ AlignOf<MostAlignedSDNode>::Alignment>
+ NodeAllocatorType;
+
+ /// Pool allocation for nodes.
+ NodeAllocatorType NodeAllocator;
+
+ /// This structure is used to memoize nodes, automatically performing
+ /// CSE with existing nodes when a duplicate is requested.
+ FoldingSet<SDNode> CSEMap;
+
+ /// Pool allocation for machine-opcode SDNode operands.
+ BumpPtrAllocator OperandAllocator;
+
+ /// Pool allocation for misc. objects that are created once per SelectionDAG.
+ BumpPtrAllocator Allocator;
+
+ /// Tracks dbg_value information through SDISel.
+ SDDbgInfo *DbgInfo;
+
+ uint16_t NextPersistentId = 0;
+
+public:
+ /// Clients of various APIs that cause global effects on
+ /// the DAG can optionally implement this interface. This allows the clients
+ /// to handle the various sorts of updates that happen.
+ ///
+ /// A DAGUpdateListener automatically registers itself with DAG when it is
+ /// constructed, and removes itself when destroyed in RAII fashion.
+ struct DAGUpdateListener {
+ DAGUpdateListener *const Next;
+ SelectionDAG &DAG;
+
+ explicit DAGUpdateListener(SelectionDAG &D)
+ : Next(D.UpdateListeners), DAG(D) {
+ DAG.UpdateListeners = this;
+ }
+
+ virtual ~DAGUpdateListener() {
+ assert(DAG.UpdateListeners == this &&
+ "DAGUpdateListeners must be destroyed in LIFO order");
+ DAG.UpdateListeners = Next;
+ }
+
+ /// The node N that was deleted and, if E is not null, an
+ /// equivalent node E that replaced it.
+ virtual void NodeDeleted(SDNode *N, SDNode *E);
+
+ /// The node N that was updated.
+ virtual void NodeUpdated(SDNode *N);
+ };
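
Since registration is RAII, a client that needs to observe updates just keeps a listener alive on the stack for the duration of the mutation. A minimal sketch (the CountingListener name is hypothetical):

// Counts node deletions for the scope in which it is alive.
struct CountingListener : SelectionDAG::DAGUpdateListener {
  unsigned NumDeleted = 0;
  explicit CountingListener(SelectionDAG &D) : DAGUpdateListener(D) {}
  void NodeDeleted(SDNode *N, SDNode *E) override { ++NumDeleted; }
  void NodeUpdated(SDNode *N) override {}
};

// Usage sketch: deletions triggered by the replacement are counted while
// Listener is in scope.
//   CountingListener Listener(DAG);
//   DAG.ReplaceAllUsesWith(OldVal, NewVal);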
+
+ /// When true, additional steps are taken to
+ /// ensure that getConstant() and similar functions return DAG nodes that
+ /// have legal types. This is important after type legalization since
+ /// any illegally typed nodes generated after this point will not experience
+ /// type legalization.
+ bool NewNodesMustHaveLegalTypes;
+
+private:
+ /// DAGUpdateListener is a friend so it can manipulate the listener stack.
+ friend struct DAGUpdateListener;
+
+ /// Linked list of registered DAGUpdateListener instances.
+ /// This stack is maintained by DAGUpdateListener RAII.
+ DAGUpdateListener *UpdateListeners;
+
+ /// Implementation of setSubgraphColor.
+ /// Return whether we had to truncate the search.
+ bool setSubgraphColorHelper(SDNode *N, const char *Color,
+ DenseSet<SDNode *> &visited,
+ int level, bool &printed);
+
+ void operator=(const SelectionDAG&) = delete;
+ SelectionDAG(const SelectionDAG&) = delete;
+
+public:
+ explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level);
+ ~SelectionDAG();
+
+ /// Prepare this SelectionDAG to process code in the given MachineFunction.
+ void init(MachineFunction &mf);
+
+ /// Clear state and free memory necessary to make this
+ /// SelectionDAG ready to process a new block.
+ void clear();
+
+ MachineFunction &getMachineFunction() const { return *MF; }
+ const DataLayout &getDataLayout() const { return MF->getDataLayout(); }
+ const TargetMachine &getTarget() const { return TM; }
+ const TargetSubtargetInfo &getSubtarget() const { return MF->getSubtarget(); }
+ const TargetLowering &getTargetLoweringInfo() const { return *TLI; }
+ const TargetSelectionDAGInfo &getSelectionDAGInfo() const { return *TSI; }
+ LLVMContext *getContext() const { return Context; }
+
+ /// Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
+ void viewGraph(const std::string &Title);
+ void viewGraph();
+
+#ifndef NDEBUG
+ std::map<const SDNode *, std::string> NodeGraphAttrs;
+#endif
+
+ /// Clear all previously defined node graph attributes.
+ /// Intended to be used from a debugging tool (eg. gdb).
+ void clearGraphAttrs();
+
+ /// Set graph attributes for a node. (eg. "color=red".)
+ void setGraphAttrs(const SDNode *N, const char *Attrs);
+
+ /// Get graph attributes for a node. (eg. "color=red".)
+ /// Used from getNodeAttributes.
+ const std::string getGraphAttrs(const SDNode *N) const;
+
+ /// Convenience for setting node color attribute.
+ void setGraphColor(const SDNode *N, const char *Color);
+
+ /// Convenience for setting subgraph color attribute.
+ void setSubgraphColor(SDNode *N, const char *Color);
+
+ typedef ilist<SDNode>::const_iterator allnodes_const_iterator;
+ allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
+ allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
+ typedef ilist<SDNode>::iterator allnodes_iterator;
+ allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
+ allnodes_iterator allnodes_end() { return AllNodes.end(); }
+ ilist<SDNode>::size_type allnodes_size() const {
+ return AllNodes.size();
+ }
+
+ iterator_range<allnodes_iterator> allnodes() {
+ return make_range(allnodes_begin(), allnodes_end());
+ }
+ iterator_range<allnodes_const_iterator> allnodes() const {
+ return make_range(allnodes_begin(), allnodes_end());
+ }
+
+ /// Return the root tag of the SelectionDAG.
+ const SDValue &getRoot() const { return Root; }
+
+ /// Return the token chain corresponding to the entry of the function.
+ SDValue getEntryNode() const {
+ return SDValue(const_cast<SDNode *>(&EntryNode), 0);
+ }
+
+ /// Set the current root tag of the SelectionDAG.
+ ///
+ const SDValue &setRoot(SDValue N) {
+ assert((!N.getNode() || N.getValueType() == MVT::Other) &&
+ "DAG root value is not a chain!");
+ if (N.getNode())
+ checkForCycles(N.getNode(), this);
+ Root = N;
+ if (N.getNode())
+ checkForCycles(this);
+ return Root;
+ }
+
+ /// This iterates over the nodes in the SelectionDAG, folding
+ /// certain types of nodes together, or eliminating superfluous nodes. The
+ /// Level argument controls whether Combine is allowed to produce nodes and
+ /// types that are illegal on the target.
+ void Combine(CombineLevel Level, AliasAnalysis &AA,
+ CodeGenOpt::Level OptLevel);
+
+ /// This transforms the SelectionDAG into a SelectionDAG that
+ /// only uses types natively supported by the target.
+ /// Returns "true" if it made any changes.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ bool LegalizeTypes();
+
+ /// This transforms the SelectionDAG into a SelectionDAG that is
+ /// compatible with the target instruction selector, as indicated by the
+ /// TargetLowering object.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ void Legalize();
+
+ /// \brief Transforms a SelectionDAG node and any operands to it into a node
+ /// that is compatible with the target instruction selector, as indicated by
+ /// the TargetLowering object.
+ ///
+ /// \returns true if \c N is a valid, legal node after calling this.
+ ///
+ /// This essentially runs a single recursive walk of the \c Legalize process
+ /// over the given node (and its operands). This can be used to incrementally
+ /// legalize the DAG. All of the nodes which are directly replaced,
+ /// potentially including N, are added to the output parameter \c
+ /// UpdatedNodes so that the delta to the DAG can be understood by the
+ /// caller.
+ ///
+ /// When this returns false, N has been legalized in a way that makes the
+ /// pointer passed in no longer valid. It may have even been deleted from the
+ /// DAG, and so it shouldn't be used further. When this returns true, the
+ /// N passed in is a legal node, and can be immediately processed as such.
+ /// This may still have done some work on the DAG, and will still populate
+ /// UpdatedNodes with any new nodes replacing those originally in the DAG.
+ bool LegalizeOp(SDNode *N, SmallSetVector<SDNode *, 16> &UpdatedNodes);
+
+ /// This transforms the SelectionDAG into a SelectionDAG
+ /// that only uses vector math operations supported by the target. This is
+ /// necessary as a separate step from Legalize because unrolling a vector
+ /// operation can introduce illegal types, which requires running
+ /// LegalizeTypes again.
+ ///
+ /// This returns true if it made any changes; in that case, LegalizeTypes
+ /// is called again before Legalize.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ bool LegalizeVectors();
+
+ /// This method deletes all unreachable nodes in the SelectionDAG.
+ void RemoveDeadNodes();
+
+ /// Remove the specified node from the system. This node must
+ /// have no referrers.
+ void DeleteNode(SDNode *N);
+
+ /// Return an SDVTList that represents the list of values specified.
+ SDVTList getVTList(EVT VT);
+ SDVTList getVTList(EVT VT1, EVT VT2);
+ SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3);
+ SDVTList getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4);
+ SDVTList getVTList(ArrayRef<EVT> VTs);
+
+ //===--------------------------------------------------------------------===//
+ // Node creation methods.
+ //
+ SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget = false,
+ bool isOpaque = false);
+ SDValue getConstant(const APInt &Val, SDLoc DL, EVT VT, bool isTarget = false,
+ bool isOpaque = false);
+ SDValue getConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
+ bool isTarget = false, bool isOpaque = false);
+ SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget = false);
+ SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT,
+ bool isOpaque = false) {
+ return getConstant(Val, DL, VT, true, isOpaque);
+ }
+ SDValue getTargetConstant(const APInt &Val, SDLoc DL, EVT VT,
+ bool isOpaque = false) {
+ return getConstant(Val, DL, VT, true, isOpaque);
+ }
+ SDValue getTargetConstant(const ConstantInt &Val, SDLoc DL, EVT VT,
+ bool isOpaque = false) {
+ return getConstant(Val, DL, VT, true, isOpaque);
+ }
+ // The forms below that take a double should only be used for simple
+ // constants that can be exactly represented in VT. No checks are made.
+ SDValue getConstantFP(double Val, SDLoc DL, EVT VT, bool isTarget = false);
+ SDValue getConstantFP(const APFloat& Val, SDLoc DL, EVT VT,
+ bool isTarget = false);
+ SDValue getConstantFP(const ConstantFP &CF, SDLoc DL, EVT VT,
+ bool isTarget = false);
+ SDValue getTargetConstantFP(double Val, SDLoc DL, EVT VT) {
+ return getConstantFP(Val, DL, VT, true);
+ }
+ SDValue getTargetConstantFP(const APFloat& Val, SDLoc DL, EVT VT) {
+ return getConstantFP(Val, DL, VT, true);
+ }
+ SDValue getTargetConstantFP(const ConstantFP &Val, SDLoc DL, EVT VT) {
+ return getConstantFP(Val, DL, VT, true);
+ }
+ SDValue getGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
+ int64_t offset = 0, bool isTargetGA = false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT,
+ int64_t offset = 0,
+ unsigned char TargetFlags = 0) {
+ return getGlobalAddress(GV, DL, VT, offset, true, TargetFlags);
+ }
+ SDValue getFrameIndex(int FI, EVT VT, bool isTarget = false);
+ SDValue getTargetFrameIndex(int FI, EVT VT) {
+ return getFrameIndex(FI, VT, true);
+ }
+ SDValue getJumpTable(int JTI, EVT VT, bool isTarget = false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags = 0) {
+ return getJumpTable(JTI, VT, true, TargetFlags);
+ }
+ SDValue getConstantPool(const Constant *C, EVT VT,
+ unsigned Align = 0, int Offs = 0, bool isT=false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetConstantPool(const Constant *C, EVT VT,
+ unsigned Align = 0, int Offset = 0,
+ unsigned char TargetFlags = 0) {
+ return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+ }
+ SDValue getConstantPool(MachineConstantPoolValue *C, EVT VT,
+ unsigned Align = 0, int Offs = 0, bool isT=false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetConstantPool(MachineConstantPoolValue *C,
+ EVT VT, unsigned Align = 0,
+ int Offset = 0, unsigned char TargetFlags=0) {
+ return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
+ }
+ SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
+ unsigned char TargetFlags = 0);
+ // When generating a branch to a BB, we don't in general know enough
+ // to provide debug info for the BB at that time, so keep this one around.
+ SDValue getBasicBlock(MachineBasicBlock *MBB);
+ SDValue getBasicBlock(MachineBasicBlock *MBB, SDLoc dl);
+ SDValue getExternalSymbol(const char *Sym, EVT VT);
+ SDValue getExternalSymbol(const char *Sym, SDLoc dl, EVT VT);
+ SDValue getTargetExternalSymbol(const char *Sym, EVT VT,
+ unsigned char TargetFlags = 0);
+ SDValue getMCSymbol(MCSymbol *Sym, EVT VT);
+
+ SDValue getValueType(EVT);
+ SDValue getRegister(unsigned Reg, EVT VT);
+ SDValue getRegisterMask(const uint32_t *RegMask);
+ SDValue getEHLabel(SDLoc dl, SDValue Root, MCSymbol *Label);
+ SDValue getBlockAddress(const BlockAddress *BA, EVT VT,
+ int64_t Offset = 0, bool isTarget = false,
+ unsigned char TargetFlags = 0);
+ SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT,
+ int64_t Offset = 0,
+ unsigned char TargetFlags = 0) {
+ return getBlockAddress(BA, VT, Offset, true, TargetFlags);
+ }
+
+ SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N) {
+ return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
+ getRegister(Reg, N.getValueType()), N);
+ }
+
+ // This version of the getCopyToReg method takes an extra operand, which
+ // indicates that there is potentially an incoming glue value (if Glue is not
+ // null) and that there should be a glue result.
+ SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N,
+ SDValue Glue) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Glue };
+ return getNode(ISD::CopyToReg, dl, VTs,
+ makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+ }
+
+ // Similar to the last getCopyToReg() except that parameter Reg is an SDValue.
+ SDValue getCopyToReg(SDValue Chain, SDLoc dl, SDValue Reg, SDValue N,
+ SDValue Glue) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, Reg, N, Glue };
+ return getNode(ISD::CopyToReg, dl, VTs,
+ makeArrayRef(Ops, Glue.getNode() ? 4 : 3));
+ }
+
+ SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT) {
+ SDVTList VTs = getVTList(VT, MVT::Other);
+ SDValue Ops[] = { Chain, getRegister(Reg, VT) };
+ return getNode(ISD::CopyFromReg, dl, VTs, Ops);
+ }
+
+ // This version of the getCopyFromReg method takes an extra operand, which
+ // indicates that there is potentially an incoming glue value (if Glue is not
+ // null) and that there should be a glue result.
+ SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT,
+ SDValue Glue) {
+ SDVTList VTs = getVTList(VT, MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, getRegister(Reg, VT), Glue };
+ return getNode(ISD::CopyFromReg, dl, VTs,
+ makeArrayRef(Ops, Glue.getNode() ? 3 : 2));
+ }
+
+ SDValue getCondCode(ISD::CondCode Cond);
+
+ /// Returns the ConvertRndSat node. Note: Avoid using this node because it
+ /// may disappear in the future and most targets don't support it.
+ SDValue getConvertRndSat(EVT VT, SDLoc dl, SDValue Val, SDValue DTy,
+ SDValue STy,
+ SDValue Rnd, SDValue Sat, ISD::CvtCode Code);
+
+ /// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
+ /// which must be a vector type, must match the number of mask elements
+ /// NumElts. An integer mask element equal to -1 is treated as undefined.
+ SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
+ const int *MaskElts);
+ SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2,
+ ArrayRef<int> MaskElts) {
+ assert(VT.getVectorNumElements() == MaskElts.size() &&
+ "Must have the same number of vector elements as mask elements!");
+ return getVectorShuffle(VT, dl, N1, N2, MaskElts.data());
+ }
+
+ /// \brief Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to
+ /// the shuffle node in input but with swapped operands.
+ ///
+ /// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
+ SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
+
+ /// Convert Op, which must be of integer type, to the
+ /// integer type VT, by either any-extending or truncating it.
+ SDValue getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Convert Op, which must be of integer type, to the
+ /// integer type VT, by either sign-extending or truncating it.
+ SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Convert Op, which must be of integer type, to the
+ /// integer type VT, by either zero-extending or truncating it.
+ SDValue getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Return the expression required to zero extend the Op
+ /// value assuming it was the smaller SrcTy value.
+ SDValue getZeroExtendInReg(SDValue Op, SDLoc DL, EVT SrcTy);
+
+ /// Return an operation which will any-extend the low lanes of the operand
+ /// into the specified vector type. For example,
+ /// this can convert a v16i8 into a v4i32 by any-extending the low four
+ /// lanes of the operand from i8 to i32.
+ SDValue getAnyExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Return an operation which will sign extend the low lanes of the operand
+ /// into the specified vector type. For example,
+ /// this can convert a v16i8 into a v4i32 by sign extending the low four
+ /// lanes of the operand from i8 to i32.
+ SDValue getSignExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Return an operation which will zero extend the low lanes of the operand
+ /// into the specified vector type. For example,
+ /// this can convert a v16i8 into a v4i32 by zero extending the low four
+ /// lanes of the operand from i8 to i32.
+ SDValue getZeroExtendVectorInReg(SDValue Op, SDLoc DL, EVT VT);
+
+ /// Convert Op, which must be of integer type, to the integer type VT,
+ /// by using an extension appropriate for the target's
+ /// BooleanContent for type OpVT or truncating it.
+ SDValue getBoolExtOrTrunc(SDValue Op, SDLoc SL, EVT VT, EVT OpVT);
+
+ /// Create a bitwise NOT operation as (XOR Val, -1).
+ SDValue getNOT(SDLoc DL, SDValue Val, EVT VT);
+
+ /// \brief Create a logical NOT operation as (XOR Val, BooleanOne).
+ SDValue getLogicalNOT(SDLoc DL, SDValue Val, EVT VT);
+
+ /// Return a new CALLSEQ_START node, which always must have a glue result
+ /// (to ensure it's not CSE'd). CALLSEQ_START does not have a useful SDLoc.
+ SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Glue);
+ SDValue Ops[] = { Chain, Op };
+ return getNode(ISD::CALLSEQ_START, DL, VTs, Ops);
+ }
+
+ /// Return a new CALLSEQ_END node, which always must have a
+ /// glue result (to ensure it's not CSE'd).
+ /// CALLSEQ_END does not have a useful SDLoc.
+ SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue InGlue, SDLoc DL) {
+ SDVTList NodeTys = getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 4> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Op1);
+ Ops.push_back(Op2);
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
+ return getNode(ISD::CALLSEQ_END, DL, NodeTys, Ops);
+ }
+
+ /// Return an UNDEF node. UNDEF does not have a useful SDLoc.
+ SDValue getUNDEF(EVT VT) {
+ return getNode(ISD::UNDEF, SDLoc(), VT);
+ }
+
+ /// Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
+ SDValue getGLOBAL_OFFSET_TABLE(EVT VT) {
+ return getNode(ISD::GLOBAL_OFFSET_TABLE, SDLoc(), VT);
+ }
+
+ /// Gets or creates the specified node.
+ ///
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
+ ArrayRef<SDUse> Ops);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT,
+ ArrayRef<SDValue> Ops, const SDNodeFlags *Flags = nullptr);
+ SDValue getNode(unsigned Opcode, SDLoc DL, ArrayRef<EVT> ResultTys,
+ ArrayRef<SDValue> Ops);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
+ ArrayRef<SDValue> Ops);
+
+ // Specialize based on number of operands.
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
+ const SDNodeFlags *Flags = nullptr);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
+ SDValue N3);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
+ SDValue N3, SDValue N4);
+ SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, SDValue N1, SDValue N2,
+ SDValue N3, SDValue N4, SDValue N5);
+
+ // Specialize again based on number of operands for nodes with a VTList
+ // rather than a single VT.
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
+ SDValue N2);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
+ SDValue N2, SDValue N3);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
+ SDValue N2, SDValue N3, SDValue N4);
+ SDValue getNode(unsigned Opcode, SDLoc DL, SDVTList VTs, SDValue N1,
+ SDValue N2, SDValue N3, SDValue N4, SDValue N5);
+
+ /// Compute a TokenFactor to force all the incoming stack arguments to be
+ /// loaded from the stack. This is used in tail call lowering to protect
+ /// stack arguments from being clobbered.
+ SDValue getStackArgumentTokenFactor(SDValue Chain);
+
+ SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVol, bool AlwaysInline,
+ bool isTailCall, MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
+
+ SDValue getMemmove(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo);
+
+ SDValue getMemset(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool isVol, bool isTailCall,
+ MachinePointerInfo DstPtrInfo);
+
+ /// Helper function to make it easier to build SetCC's if you just
+ /// have an ISD::CondCode instead of an SDValue.
+ ///
+ SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS,
+ ISD::CondCode Cond) {
+ assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
+ "Cannot compare scalars to vectors");
+ assert(LHS.getValueType().isVector() == VT.isVector() &&
+ "Cannot compare scalars to vectors");
+ assert(Cond != ISD::SETCC_INVALID &&
+ "Cannot create a setCC of an invalid node.");
+ return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+ }
+
+ /// Helper function to make it easier to build Select's if you just
+ /// have operands and don't want to check for vector.
+ SDValue getSelect(SDLoc DL, EVT VT, SDValue Cond,
+ SDValue LHS, SDValue RHS) {
+ assert(LHS.getValueType() == RHS.getValueType() &&
+ "Cannot use select on differing types");
+ assert(VT.isVector() == LHS.getValueType().isVector() &&
+ "Cannot mix vectors and scalars");
+ return getNode(Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT,
+ DL, VT, Cond, LHS, RHS);
+ }
+
+ /// Helper function to make it easier to build SelectCC's if you
+ /// just have an ISD::CondCode instead of an SDValue.
+ ///
+ SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS,
+ SDValue True, SDValue False, ISD::CondCode Cond) {
+ return getNode(ISD::SELECT_CC, DL, True.getValueType(),
+ LHS, RHS, True, False, getCondCode(Cond));
+ }
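
Taken together, these helpers let a combine or lowering routine phrase a compare-and-pick idiom in two calls. A sketch, assuming DAG, DL, and two same-typed integer SDValues A and B are in scope; MVT::i1 is used as the compare result type for brevity, where real code would ask TargetLowering for the target's setcc result type:

// unsigned max(A, B) as a setcc feeding a select
SDValue Cond = DAG.getSetCC(DL, MVT::i1, A, B, ISD::SETUGT); // A >u B
SDValue Max  = DAG.getSelect(DL, A.getValueType(), Cond, A, B);

getSelect switches to ISD::VSELECT automatically when Cond is a vector, which is exactly the scalar/vector consistency the assertions above enforce.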
+
+ /// VAArg produces a result and token chain, and takes a pointer
+ /// and a source value as input.
+ SDValue getVAArg(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
+ SDValue SV, unsigned Align);
+
+ /// Gets a node for an atomic cmpxchg op. There are two
+ /// valid Opcodes. ISD::ATOMIC_CMP_SWAP produces the value loaded and a
+ /// chain result. ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS produces the value loaded,
+ /// a success flag (initially i1), and a chain.
+ SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
+ SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
+ MachinePointerInfo PtrInfo, unsigned Alignment,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
+ SDValue getAtomicCmpSwap(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTs,
+ SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
+ MachineMemOperand *MMO,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
+
+ /// Gets a node for an atomic op, produces result (if relevant)
+ /// and chain and takes 2 operands.
+ SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
+ SDValue Ptr, SDValue Val, const Value *PtrVal,
+ unsigned Alignment, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope);
+ SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDValue Chain,
+ SDValue Ptr, SDValue Val, MachineMemOperand *MMO,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope);
+
+ /// Gets a node for an atomic op, produces result and chain and
+ /// takes 1 operand.
+ SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, EVT VT,
+ SDValue Chain, SDValue Ptr, MachineMemOperand *MMO,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope);
+
+ /// Gets a node for an atomic op, produces result and chain and takes N
+ /// operands.
+ SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
+ SDValue getAtomic(unsigned Opcode, SDLoc dl, EVT MemVT, SDVTList VTList,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope);
+
+ /// Creates a MemIntrinsicNode that may produce a
+ /// result and takes a list of operands. Opcode may be INTRINSIC_VOID,
+ /// INTRINSIC_W_CHAIN, or a target-specific opcode with a value not
+ /// less than FIRST_TARGET_MEMORY_OPCODE.
+ SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
+ ArrayRef<SDValue> Ops,
+ EVT MemVT, MachinePointerInfo PtrInfo,
+ unsigned Align = 0, bool Vol = false,
+ bool ReadMem = true, bool WriteMem = true,
+ unsigned Size = 0);
+
+ SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList,
+ ArrayRef<SDValue> Ops,
+ EVT MemVT, MachineMemOperand *MMO);
+
+ /// Create a MERGE_VALUES node from the given operands.
+ SDValue getMergeValues(ArrayRef<SDValue> Ops, SDLoc dl);
+
+ /// Loads are not normal binary operators: their result type is not
+ /// determined by their operands, and they produce a value AND a token chain.
+ ///
+ SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
+ MachinePointerInfo PtrInfo, bool isVolatile,
+ bool isNonTemporal, bool isInvariant, unsigned Alignment,
+ const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr);
+ SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
+ MachineMemOperand *MMO);
+ SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
+ SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo,
+ EVT MemVT, bool isVolatile,
+ bool isNonTemporal, bool isInvariant, unsigned Alignment,
+ const AAMDNodes &AAInfo = AAMDNodes());
+ SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT,
+ SDValue Chain, SDValue Ptr, EVT MemVT,
+ MachineMemOperand *MMO);
+ SDValue getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base,
+ SDValue Offset, ISD::MemIndexedMode AM);
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, SDLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
+ MachinePointerInfo PtrInfo, EVT MemVT,
+ bool isVolatile, bool isNonTemporal, bool isInvariant,
+ unsigned Alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+ const MDNode *Ranges = nullptr);
+ SDValue getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
+ EVT VT, SDLoc dl,
+ SDValue Chain, SDValue Ptr, SDValue Offset,
+ EVT MemVT, MachineMemOperand *MMO);
+
+ /// Helper function to build ISD::STORE nodes.
+ SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
+ MachinePointerInfo PtrInfo, bool isVolatile,
+ bool isNonTemporal, unsigned Alignment,
+ const AAMDNodes &AAInfo = AAMDNodes());
+ SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
+ MachineMemOperand *MMO);
+ SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
+ MachinePointerInfo PtrInfo, EVT TVT,
+ bool isNonTemporal, bool isVolatile,
+ unsigned Alignment,
+ const AAMDNodes &AAInfo = AAMDNodes());
+ SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr,
+ EVT TVT, MachineMemOperand *MMO);
+ SDValue getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base,
+ SDValue Offset, ISD::MemIndexedMode AM);
+
+ SDValue getMaskedLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr,
+ SDValue Mask, SDValue Src0, EVT MemVT,
+ MachineMemOperand *MMO, ISD::LoadExtType);
+ SDValue getMaskedStore(SDValue Chain, SDLoc dl, SDValue Val,
+ SDValue Ptr, SDValue Mask, EVT MemVT,
+ MachineMemOperand *MMO, bool IsTrunc);
+ SDValue getMaskedGather(SDVTList VTs, EVT VT, SDLoc dl,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+ SDValue getMaskedScatter(SDVTList VTs, EVT VT, SDLoc dl,
+ ArrayRef<SDValue> Ops, MachineMemOperand *MMO);
+ /// Construct a node to track a Value* through the backend.
+ SDValue getSrcValue(const Value *v);
+
+ /// Return an MDNodeSDNode which holds an MDNode.
+ SDValue getMDNode(const MDNode *MD);
+
+ /// Return a bitcast using the SDLoc of the value operand, and casting to the
+ /// provided type. Use getNode to set a custom SDLoc.
+ SDValue getBitcast(EVT VT, SDValue V);
+
+ /// Return an AddrSpaceCastSDNode.
+ SDValue getAddrSpaceCast(SDLoc dl, EVT VT, SDValue Ptr,
+ unsigned SrcAS, unsigned DestAS);
+
+ /// Return the specified value casted to
+ /// the target's desired shift amount type.
+ SDValue getShiftAmountOperand(EVT LHSTy, SDValue Op);
+
+ /// Expand the specified \c ISD::VAARG node as the Legalize pass would.
+ SDValue expandVAArg(SDNode *Node);
+
+ /// Expand the specified \c ISD::VACOPY node as the Legalize pass would.
+ SDValue expandVACopy(SDNode *Node);
+
+ /// *Mutate* the specified node in-place to have the
+ /// specified operands. If the resultant node already exists in the DAG,
+ /// this does not modify the specified node, instead it returns the node that
+ /// already exists. If the resultant node does not exist in the DAG, the
+ /// input node is returned. As a degenerate case, if you specify the same
+ /// input operands as the node already has, the input node is returned.
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+ SDValue Op3);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+ SDValue Op3, SDValue Op4);
+ SDNode *UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
+ SDValue Op3, SDValue Op4, SDValue Op5);
+ SDNode *UpdateNodeOperands(SDNode *N, ArrayRef<SDValue> Ops);
+
+ /// These are used for target selectors to *mutate* the
+ /// specified node to have the specified return type, Target opcode, and
+ /// operands. Note that target opcodes are stored as
+ /// ~TargetOpcode in the node opcode field. The resultant node is returned.
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT, SDValue Op1);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ SDValue Op1, SDValue Op2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT,
+ ArrayRef<SDValue> Ops);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1, EVT VT2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, ArrayRef<SDValue> Ops);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, EVT VT3, ArrayRef<SDValue> Ops);
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, EVT VT1,
+ EVT VT2, EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, SDValue Op1);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, SDValue Op1, SDValue Op2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, EVT VT1,
+ EVT VT2, EVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
+ ArrayRef<SDValue> Ops);
+
+ /// This *mutates* the specified node to have the specified
+ /// return type, opcode, and operands.
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
+ ArrayRef<SDValue> Ops);
+
+ /// These are used for target selectors to create a new node
+ /// with specified return type(s), MachineInstr opcode, and operands.
+ ///
+ /// Note that getMachineNode returns the resultant node. If there is already
+ /// a node of the specified opcode and operands, it returns that node instead
+ /// of the current one.
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
+ SDValue Op1);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
+ SDValue Op1, SDValue Op2);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT,
+ ArrayRef<SDValue> Ops);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ SDValue Op1);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ SDValue Op1, SDValue Op2);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ ArrayRef<SDValue> Ops);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ EVT VT3, SDValue Op1, SDValue Op2);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ EVT VT3, SDValue Op1, SDValue Op2,
+ SDValue Op3);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ EVT VT3, ArrayRef<SDValue> Ops);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, EVT VT1, EVT VT2,
+ EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl,
+ ArrayRef<EVT> ResultTys,
+ ArrayRef<SDValue> Ops);
+ MachineSDNode *getMachineNode(unsigned Opcode, SDLoc dl, SDVTList VTs,
+ ArrayRef<SDValue> Ops);
+
+ /// A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
+ SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT,
+ SDValue Operand);
+
+ /// A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
+ SDValue getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT,
+ SDValue Operand, SDValue Subreg);
+
+ /// Get the specified node if it's already available, or else return NULL.
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
+ const SDNodeFlags *Flags = nullptr);
+
+ /// Creates a SDDbgValue node.
+ SDDbgValue *getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R,
+ bool IsIndirect, uint64_t Off, DebugLoc DL,
+ unsigned O);
+
+ /// Constant
+ SDDbgValue *getConstantDbgValue(MDNode *Var, MDNode *Expr, const Value *C,
+ uint64_t Off, DebugLoc DL, unsigned O);
+
+ /// FrameIndex
+ SDDbgValue *getFrameIndexDbgValue(MDNode *Var, MDNode *Expr, unsigned FI,
+ uint64_t Off, DebugLoc DL, unsigned O);
+
+ /// Remove the specified node from the system. If any of its
+ /// operands then becomes dead, remove them as well. Inform UpdateListener
+ /// for each node deleted.
+ void RemoveDeadNode(SDNode *N);
+
+ /// This method deletes the unreachable nodes in the
+ /// given list, and any nodes that become unreachable as a result.
+ void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
+
+ /// Modify anything using 'From' to use 'To' instead.
+ /// This can cause recursive merging of nodes in the DAG. Use the first
+ /// version if 'From' is known to have a single result, use the second
+ /// if you have two nodes with identical results (or if 'To' has a superset
+ /// of the results of 'From'), use the third otherwise.
+ ///
+ /// These methods all take an optional UpdateListener, which (if not null) is
+ /// informed about nodes that are deleted and modified due to recursive
+ /// changes in the dag.
+ ///
+ /// These functions only replace all existing uses. It's possible that as
+ /// these replacements are being performed, CSE may cause the From node
+ /// to be given new uses. These new uses of From are left in place, and
+ /// not automatically transferred to To.
+ ///
+ void ReplaceAllUsesWith(SDValue From, SDValue To);
+ void ReplaceAllUsesWith(SDNode *From, SDNode *To);
+ void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
+
+ /// Replace any uses of From with To, leaving
+ /// uses of other values produced by From.Val alone.
+ void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
+
+ /// Like ReplaceAllUsesOfValueWith, but for multiple values at once.
+ /// This correctly handles the case where
+ /// there is an overlap between the From values and the To values.
+ void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
+ unsigned Num);
+
+ /// Topological-sort the AllNodes list and assign a unique node id for each
+ /// node in the DAG based on their topological order. Returns the number of
+ /// nodes.
+ unsigned AssignTopologicalOrder();
+
+ /// Move node N in the AllNodes list to be immediately
+ /// before the given iterator Position. This may be used to update the
+ /// topological ordering when the list of nodes is modified.
+ void RepositionNode(allnodes_iterator Position, SDNode *N) {
+ AllNodes.insert(Position, AllNodes.remove(N));
+ }
+
+ /// Returns true if the opcode is a commutative binary operation.
+ static bool isCommutativeBinOp(unsigned Opcode) {
+ // FIXME: This should get its info from the td file, so that we can include
+ // target info.
+ switch (Opcode) {
+ case ISD::ADD:
+ case ISD::SMIN:
+ case ISD::SMAX:
+ case ISD::UMIN:
+ case ISD::UMAX:
+ case ISD::MUL:
+ case ISD::MULHU:
+ case ISD::MULHS:
+ case ISD::SMUL_LOHI:
+ case ISD::UMUL_LOHI:
+ case ISD::FADD:
+ case ISD::FMUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ case ISD::SADDO:
+ case ISD::UADDO:
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::FMINNUM:
+ case ISD::FMAXNUM:
+ case ISD::FMINNAN:
+ case ISD::FMAXNAN:
+ return true;
+ default: return false;
+ }
+ }
+
+ /// Returns an APFloat semantics tag appropriate for the given type. If VT is
+ /// a vector type, the element semantics are returned.
+ static const fltSemantics &EVTToAPFloatSemantics(EVT VT) {
+ switch (VT.getScalarType().getSimpleVT().SimpleTy) {
+ default: llvm_unreachable("Unknown FP format");
+ case MVT::f16: return APFloat::IEEEhalf;
+ case MVT::f32: return APFloat::IEEEsingle;
+ case MVT::f64: return APFloat::IEEEdouble;
+ case MVT::f80: return APFloat::x87DoubleExtended;
+ case MVT::f128: return APFloat::IEEEquad;
+ case MVT::ppcf128: return APFloat::PPCDoubleDouble;
+ }
+ }
+
+ /// Add a dbg_value SDNode. If SD is non-null that means the
+ /// value is produced by SD.
+ void AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter);
+
+ /// Get the debug values which reference the given SDNode.
+ ArrayRef<SDDbgValue*> GetDbgValues(const SDNode* SD) {
+ return DbgInfo->getSDDbgValues(SD);
+ }
+
+ /// Transfer SDDbgValues.
+ void TransferDbgValues(SDValue From, SDValue To);
+
+ /// Return true if there are any SDDbgValue nodes associated
+ /// with this SelectionDAG.
+ bool hasDebugValues() const { return !DbgInfo->empty(); }
+
+ SDDbgInfo::DbgIterator DbgBegin() { return DbgInfo->DbgBegin(); }
+ SDDbgInfo::DbgIterator DbgEnd() { return DbgInfo->DbgEnd(); }
+ SDDbgInfo::DbgIterator ByvalParmDbgBegin() {
+ return DbgInfo->ByvalParmDbgBegin();
+ }
+ SDDbgInfo::DbgIterator ByvalParmDbgEnd() {
+ return DbgInfo->ByvalParmDbgEnd();
+ }
+
+ void dump() const;
+
+ /// Create a stack temporary, suitable for holding the
+ /// specified value type. If minAlign is specified, the slot size will have
+ /// at least that alignment.
+ SDValue CreateStackTemporary(EVT VT, unsigned minAlign = 1);
+
+ /// Create a stack temporary suitable for holding
+ /// either of the specified value types.
+ SDValue CreateStackTemporary(EVT VT1, EVT VT2);
+
+ SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
+ SDNode *Cst1, SDNode *Cst2);
+
+ SDValue FoldConstantArithmetic(unsigned Opcode, SDLoc DL, EVT VT,
+ const ConstantSDNode *Cst1,
+ const ConstantSDNode *Cst2);
+
+ SDValue FoldConstantVectorArithmetic(unsigned Opcode, SDLoc DL,
+ EVT VT, ArrayRef<SDValue> Ops,
+ const SDNodeFlags *Flags = nullptr);
+
+ /// Constant fold a setcc to true or false.
+ SDValue FoldSetCC(EVT VT, SDValue N1,
+ SDValue N2, ISD::CondCode Cond, SDLoc dl);
+
+ /// Return true if the sign bit of Op is known to be zero.
+ /// We use this predicate to simplify operations downstream.
+ bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
+
+ /// Return true if 'Op & Mask' is known to be zero. We
+ /// use this predicate to simplify operations downstream. Op and Mask are
+ /// known to be the same type.
+ bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
+ const;
+
+ /// Determine which bits of Op are known to be either zero or one and return
+ /// them in the KnownZero/KnownOne bitsets. Targets can implement the
+ /// computeKnownBitsForTargetNode method in the TargetLowering class to allow
+ /// target nodes to be understood.
+ void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
+ unsigned Depth = 0) const;
+
+ /// Return the number of times the sign bit of the
+ /// register is replicated into the other bits. We know that at least 1 bit
+ /// is always equal to the sign bit (itself), but other cases can give us
+ /// information. For example, immediately after an "SRA X, 2", we know that
+ /// the top 3 bits are all equal to each other, so we return 3. Targets can
+ /// implement the ComputeNumSignBitsForTarget method in the TargetLowering
+ /// class to allow target nodes to be understood.
+ unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
+
+ /// Return true if the specified operand is an
+ /// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
+ /// ISD::OR with a ConstantSDNode that is guaranteed to have the same
+ /// semantics as an ADD. This handles the equivalence:
+ /// X|Cst == X+Cst iff X&Cst = 0.
+ bool isBaseWithConstantOffset(SDValue Op) const;
+
+ /// Test whether the given SDValue is known to never be NaN.
+ bool isKnownNeverNaN(SDValue Op) const;
+
+ /// Test whether the given SDValue is known to never be
+ /// positive or negative Zero.
+ bool isKnownNeverZero(SDValue Op) const;
+
+ /// Test whether two SDValues are known to compare equal. This
+ /// is true if they are the same value, or if one is negative zero and the
+ /// other positive zero.
+ bool isEqualTo(SDValue A, SDValue B) const;
+
+ /// Return true if A and B have no common bits set. As an example, this can
+ /// allow an 'add' to be transformed into an 'or'.
+ bool haveNoCommonBitsSet(SDValue A, SDValue B) const;
+
+ /// Utility function used by legalize and lowering to
+ /// "unroll" a vector operation by splitting out the scalars and operating
+ /// on each element individually. If ResNE is 0, fully unroll the vector
+ /// op. If ResNE is less than the width of the vector op, unroll up to ResNE.
+ /// If ResNE is greater than the width of the vector op, unroll the
+ /// vector op and fill the end of the resulting vector with UNDEFS.
+ SDValue UnrollVectorOp(SDNode *N, unsigned ResNE = 0);
+
+ /// Return true if LD is loading 'Bytes' bytes from a location that is 'Dist'
+ /// units away from the location that the 'Base' load is loading from.
+ bool isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
+ unsigned Bytes, int Dist) const;
+
+ /// Infer alignment of a load / store address. Return 0 if
+ /// it cannot be inferred.
+ unsigned InferPtrAlignment(SDValue Ptr) const;
+
+ /// Compute the VTs needed for the low/hi parts of a type
+ /// which is split (or expanded) into two not necessarily identical pieces.
+ std::pair<EVT, EVT> GetSplitDestVTs(const EVT &VT) const;
+
+ /// Split the vector with EXTRACT_SUBVECTOR using the provided
+ /// VTs and return the low/high part.
+ std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL,
+ const EVT &LoVT, const EVT &HiVT);
+
+ /// Split the vector with EXTRACT_SUBVECTOR and return the low/high part.
+ std::pair<SDValue, SDValue> SplitVector(const SDValue &N, const SDLoc &DL) {
+ EVT LoVT, HiVT;
+ std::tie(LoVT, HiVT) = GetSplitDestVTs(N.getValueType());
+ return SplitVector(N, DL, LoVT, HiVT);
+ }
+
+ /// Split the node's operand with EXTRACT_SUBVECTOR and
+ /// return the low/high part.
+ std::pair<SDValue, SDValue> SplitVectorOperand(const SDNode *N, unsigned OpNo)
+ {
+ return SplitVector(N->getOperand(OpNo), SDLoc(N));
+ }
+
+ /// Extract Count elements of the vector Op, starting at index Start, and
+ /// append them to Args. If Count is 0, all elements from Start to the end
+ /// are extracted.
+ void ExtractVectorElements(SDValue Op, SmallVectorImpl<SDValue> &Args,
+ unsigned Start = 0, unsigned Count = 0);
+
+ unsigned getEVTAlignment(EVT MemoryVT) const;
+
+private:
+ void InsertNode(SDNode *N);
+ bool RemoveNodeFromCSEMaps(SDNode *N);
+ void AddModifiedNodeToCSEMaps(SDNode *N);
+ SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
+ SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
+ void *&InsertPos);
+ SDNode *FindModifiedNodeSlot(SDNode *N, ArrayRef<SDValue> Ops,
+ void *&InsertPos);
+ SDNode *UpdadeSDLocOnMergedSDNode(SDNode *N, SDLoc loc);
+
+ void DeleteNodeNotInCSEMaps(SDNode *N);
+ void DeallocateNode(SDNode *N);
+
+ void allnodes_clear();
+
+ BinarySDNode *GetBinarySDNode(unsigned Opcode, SDLoc DL, SDVTList VTs,
+ SDValue N1, SDValue N2,
+ const SDNodeFlags *Flags = nullptr);
+
+ /// Look up the node specified by ID in CSEMap. If it exists, return it. If
+ /// not, return the insertion token that will make insertion faster. This
+ /// overload is for nodes other than Constant or ConstantFP, use the other one
+ /// for those.
+ SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+ /// Look up the node specified by ID in CSEMap. If it exists, return it. If
+ /// not, return the insertion token that will make insertion faster. Performs
+ /// additional processing for constant nodes.
+ SDNode *FindNodeOrInsertPos(const FoldingSetNodeID &ID, DebugLoc DL,
+ void *&InsertPos);
+
+ /// List of non-single value types.
+ FoldingSet<SDVTListNode> VTListMap;
+
+ /// Maps to auto-CSE operations.
+ std::vector<CondCodeSDNode*> CondCodeNodes;
+
+ std::vector<SDNode*> ValueTypeNodes;
+ std::map<EVT, SDNode*, EVT::compareRawBits> ExtendedValueTypeNodes;
+ StringMap<SDNode*> ExternalSymbols;
+
+ std::map<std::pair<std::string, unsigned char>,SDNode*> TargetExternalSymbols;
+ DenseMap<MCSymbol *, SDNode *> MCSymbols;
+};
+
+template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
+ typedef SelectionDAG::allnodes_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(SelectionDAG *G) {
+ return G->allnodes_begin();
+ }
+ static nodes_iterator nodes_end(SelectionDAG *G) {
+ return G->allnodes_end();
+ }
+};
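+
+// With these traits, generic graph utilities can walk every node in a DAG,
+// e.g. (illustrative; assumes a SelectionDAG &DAG in scope):
+//   unsigned NumNodes = 0;
+//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
+//                                        E = DAG.allnodes_end();
+//        I != E; ++I)
+//     ++NumNodes;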
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
new file mode 100644
index 0000000..a011e4c
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGISel.h
@@ -0,0 +1,306 @@
+//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SelectionDAGISel class, which is used as the common
+// base class for SelectionDAG-based instruction selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGISEL_H
+#define LLVM_CODEGEN_SELECTIONDAGISEL_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+namespace llvm {
+ class FastISel;
+ class SelectionDAGBuilder;
+ class SDValue;
+ class MachineRegisterInfo;
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class TargetLowering;
+ class TargetLibraryInfo;
+ class FunctionLoweringInfo;
+ class ScheduleHazardRecognizer;
+ class GCFunctionInfo;
+ class ScheduleDAGSDNodes;
+ class LoadInst;
+
+/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
+/// pattern-matching instruction selectors.
+class SelectionDAGISel : public MachineFunctionPass {
+public:
+ TargetMachine &TM;
+ const TargetLibraryInfo *LibInfo;
+ FunctionLoweringInfo *FuncInfo;
+ MachineFunction *MF;
+ MachineRegisterInfo *RegInfo;
+ SelectionDAG *CurDAG;
+ SelectionDAGBuilder *SDB;
+ AliasAnalysis *AA;
+ GCFunctionInfo *GFI;
+ CodeGenOpt::Level OptLevel;
+ const TargetInstrInfo *TII;
+ const TargetLowering *TLI;
+
+ static char ID;
+
+ explicit SelectionDAGISel(TargetMachine &tm,
+ CodeGenOpt::Level OL = CodeGenOpt::Default);
+ ~SelectionDAGISel() override;
+
+ const TargetLowering *getTargetLowering() const { return TLI; }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ virtual void EmitFunctionEntryCode() {}
+
+ /// PreprocessISelDAG - This hook allows targets to hack on the graph before
+ /// instruction selection starts.
+ virtual void PreprocessISelDAG() {}
+
+ /// PostprocessISelDAG() - This hook allows the target to hack on the graph
+ /// right after selection.
+ virtual void PostprocessISelDAG() {}
+
+ /// Select - Main hook targets implement to select a node.
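+ /// A minimal override sketch (illustrative; "MyTargetDAGToDAGISel" and the
+ /// tblgen-generated SelectCode are assumed names, not part of this header):
+ /// \code
+ ///   SDNode *MyTargetDAGToDAGISel::Select(SDNode *N) {
+ ///     if (N->isMachineOpcode()) {
+ ///       N->setNodeId(-1);
+ ///       return nullptr; // Already selected.
+ ///     }
+ ///     return SelectCode(N); // Generated matcher-table driver.
+ ///   }
+ /// \endcode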
+ virtual SDNode *Select(SDNode *N) = 0;
+
+ /// SelectInlineAsmMemoryOperand - Select the specified address as a target
+ /// addressing mode, according to the specified constraint. If this does
+ /// not match or is not implemented, return true. The resultant operands
+ /// (which will appear in the machine instruction) should be added to the
+ /// OutOps vector.
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) {
+ return true;
+ }
+
+ /// IsProfitableToFold - Returns true if it's profitable to fold the specific
+ /// operand node N of U during instruction selection that starts at Root.
+ virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
+
+ /// IsLegalToFold - Returns true if the specific operand node N of
+ /// U can be folded during instruction selection that starts at Root.
+ /// FIXME: This is a static member function because the MSP430 and X86
+ /// targets use it during isel. This could become a proper member.
+ static bool IsLegalToFold(SDValue N, SDNode *U, SDNode *Root,
+ CodeGenOpt::Level OptLevel,
+ bool IgnoreChains = false);
+
+ // Opcodes used by the DAG state machine:
+ enum BuiltinOpcodes {
+ OPC_Scope,
+ OPC_RecordNode,
+ OPC_RecordChild0, OPC_RecordChild1, OPC_RecordChild2, OPC_RecordChild3,
+ OPC_RecordChild4, OPC_RecordChild5, OPC_RecordChild6, OPC_RecordChild7,
+ OPC_RecordMemRef,
+ OPC_CaptureGlueInput,
+ OPC_MoveChild,
+ OPC_MoveParent,
+ OPC_CheckSame,
+ OPC_CheckChild0Same, OPC_CheckChild1Same,
+ OPC_CheckChild2Same, OPC_CheckChild3Same,
+ OPC_CheckPatternPredicate,
+ OPC_CheckPredicate,
+ OPC_CheckOpcode,
+ OPC_SwitchOpcode,
+ OPC_CheckType,
+ OPC_SwitchType,
+ OPC_CheckChild0Type, OPC_CheckChild1Type, OPC_CheckChild2Type,
+ OPC_CheckChild3Type, OPC_CheckChild4Type, OPC_CheckChild5Type,
+ OPC_CheckChild6Type, OPC_CheckChild7Type,
+ OPC_CheckInteger,
+ OPC_CheckChild0Integer, OPC_CheckChild1Integer, OPC_CheckChild2Integer,
+ OPC_CheckChild3Integer, OPC_CheckChild4Integer,
+ OPC_CheckCondCode,
+ OPC_CheckValueType,
+ OPC_CheckComplexPat,
+ OPC_CheckAndImm, OPC_CheckOrImm,
+ OPC_CheckFoldableChainNode,
+
+ OPC_EmitInteger,
+ OPC_EmitRegister,
+ OPC_EmitRegister2,
+ OPC_EmitConvertToTarget,
+ OPC_EmitMergeInputChains,
+ OPC_EmitMergeInputChains1_0,
+ OPC_EmitMergeInputChains1_1,
+ OPC_EmitCopyToReg,
+ OPC_EmitNodeXForm,
+ OPC_EmitNode,
+ OPC_MorphNodeTo,
+ OPC_MarkGlueResults,
+ OPC_CompleteMatch
+ };
+
+ enum {
+ OPFL_None = 0, // Node has no chain or glue input and isn't variadic.
+ OPFL_Chain = 1, // Node has a chain input.
+ OPFL_GlueInput = 2, // Node has a glue input.
+ OPFL_GlueOutput = 4, // Node has a glue output.
+ OPFL_MemRefs = 8, // Node gets accumulated MemRefs.
+ OPFL_Variadic0 = 1<<4, // Node is variadic, root has 0 fixed inputs.
+ OPFL_Variadic1 = 2<<4, // Node is variadic, root has 1 fixed input.
+ OPFL_Variadic2 = 3<<4, // Node is variadic, root has 2 fixed inputs.
+ OPFL_Variadic3 = 4<<4, // Node is variadic, root has 3 fixed inputs.
+ OPFL_Variadic4 = 5<<4, // Node is variadic, root has 4 fixed inputs.
+ OPFL_Variadic5 = 6<<4, // Node is variadic, root has 5 fixed inputs.
+ OPFL_Variadic6 = 7<<4, // Node is variadic, root has 6 fixed inputs.
+
+ OPFL_VariadicInfo = OPFL_Variadic6
+ };
+
+ /// getNumFixedFromVariadicInfo - Transform an EmitNode flags word into the
+ /// number of fixed arity values that should be skipped when copying from the
+ /// root.
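+ /// For example, a flags word containing OPFL_Variadic2 (3<<4) yields
+ /// ((3 << 4) >> 4) - 1 == 2 fixed operands.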
+ static inline int getNumFixedFromVariadicInfo(unsigned Flags) {
+ return ((Flags&OPFL_VariadicInfo) >> 4)-1;
+ }
+
+
+protected:
+ /// DAGSize - Size of DAG being instruction selected.
+ ///
+ unsigned DAGSize;
+
+ /// ReplaceUses - replace all uses of the old node F with the use
+ /// of the new node T.
+ void ReplaceUses(SDValue F, SDValue T) {
+ CurDAG->ReplaceAllUsesOfValueWith(F, T);
+ }
+
+ /// ReplaceUses - replace all uses of the old nodes F with the use
+ /// of the new nodes T.
+ void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
+ CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
+ }
+
+ /// ReplaceUses - replace all uses of the old node F with the use
+ /// of the new node T.
+ void ReplaceUses(SDNode *F, SDNode *T) {
+ CurDAG->ReplaceAllUsesWith(F, T);
+ }
+
+
+ /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
+ /// by tblgen. Others should not call it.
+ void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops, SDLoc DL);
+
+
+public:
+ // Calls to these predicates are generated by tblgen.
+ bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
+ int64_t DesiredMaskS) const;
+ bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
+ int64_t DesiredMaskS) const;
+
+
+ /// CheckPatternPredicate - This function is generated by tblgen in the
+ /// target. It runs the specified pattern predicate and returns true if it
+ /// succeeds or false if it fails. The number is a private implementation
+ /// detail to the code tblgen produces.
+ virtual bool CheckPatternPredicate(unsigned PredNo) const {
+ llvm_unreachable("Tblgen should generate the implementation of this!");
+ }
+
+ /// CheckNodePredicate - This function is generated by tblgen in the target.
+ /// It runs node predicate number PredNo and returns true if it succeeds or
+ /// false if it fails. The number is a private implementation
+ /// detail to the code tblgen produces.
+ virtual bool CheckNodePredicate(SDNode *N, unsigned PredNo) const {
+ llvm_unreachable("Tblgen should generate the implementation of this!");
+ }
+
+ virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,
+ unsigned PatternNo,
+ SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {
+ llvm_unreachable("Tblgen should generate the implementation of this!");
+ }
+
+ virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {
+ llvm_unreachable("Tblgen should generate this!");
+ }
+
+ SDNode *SelectCodeCommon(SDNode *NodeToMatch,
+ const unsigned char *MatcherTable,
+ unsigned TableSize);
+
+ /// \brief Return true if complex patterns for this target can mutate the
+ /// DAG.
+ virtual bool ComplexPatternFuncMutatesDAG() const {
+ return false;
+ }
+
+private:
+
+ // Calls to these functions are generated by tblgen.
+ SDNode *Select_INLINEASM(SDNode *N);
+ SDNode *Select_READ_REGISTER(SDNode *N);
+ SDNode *Select_WRITE_REGISTER(SDNode *N);
+ SDNode *Select_UNDEF(SDNode *N);
+ void CannotYetSelect(SDNode *N);
+
+private:
+ void DoInstructionSelection();
+ SDNode *MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTs,
+ ArrayRef<SDValue> Ops, unsigned EmitNodeInfo);
+
+ /// Prepares the landing pad to take incoming values or do other EH
+ /// personality specific tasks. Returns true if the block should be
+ /// instruction selected, false if no code should be emitted for it.
+ bool PrepareEHLandingPad();
+
+ /// \brief Perform instruction selection on all basic blocks in the function.
+ void SelectAllBasicBlocks(const Function &Fn);
+
+ /// \brief Perform instruction selection on a single basic block, for
+ /// instructions between \p Begin and \p End. \p HadTailCall will be set
+ /// to true if a call in the block was translated as a tail call.
+ void SelectBasicBlock(BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End,
+ bool &HadTailCall);
+ void FinishBasicBlock();
+
+ void CodeGenAndEmitDAG();
+
+ /// \brief Generate instructions for lowering the incoming arguments of the
+ /// given function.
+ void LowerArguments(const Function &F);
+
+ void ComputeLiveOutVRegInfo();
+
+ /// Create the scheduler. If a specific scheduler was specified
+ /// via the SchedulerRegistry, use it; otherwise select the
+ /// one preferred by the target.
+ ///
+ ScheduleDAGSDNodes *CreateScheduler();
+
+ /// OpcodeOffset - This is a cache used to dispatch efficiently into isel
+ /// state machines that start with an OPC_SwitchOpcode node.
+ std::vector<unsigned> OpcodeOffset;
+
+ void UpdateChainsAndGlue(SDNode *NodeToMatch, SDValue InputChain,
+ const SmallVectorImpl<SDNode*> &ChainNodesMatched,
+ SDValue InputGlue, const SmallVectorImpl<SDNode*> &F,
+ bool isMorphNodeTo);
+
+};
+
+} // end namespace llvm
+
+#endif /* LLVM_CODEGEN_SELECTIONDAGISEL_H */
diff --git a/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
new file mode 100644
index 0000000..23816bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -0,0 +1,2323 @@
+//===-- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SDNode class and derived classes, which are used to
+// represent the nodes and operations present in a SelectionDAG. These nodes
+// and operations are machine code level operations, with some similarities to
+// the GCC RTL representation.
+//
+// Clients should include the SelectionDAG.h file instead of this file directly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
+#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include <cassert>
+
+namespace llvm {
+
+class SelectionDAG;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class SDNode;
+class BinaryWithFlagsSDNode;
+class Value;
+class MCSymbol;
+template <typename T> struct DenseMapInfo;
+template <typename T> struct simplify_type;
+template <typename T> struct ilist_traits;
+
+void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
+ bool force = false);
+
+/// This represents a list of ValueType's that has been intern'd by
+/// a SelectionDAG. Instances of this simple value class are returned by
+/// SelectionDAG::getVTList(...).
+///
+struct SDVTList {
+ const EVT *VTs;
+ unsigned int NumVTs;
+};
+
+namespace ISD {
+ /// Node predicates
+
+ /// Return true if the specified node is a
+ /// BUILD_VECTOR where all of the elements are ~0 or undef.
+ bool isBuildVectorAllOnes(const SDNode *N);
+
+ /// Return true if the specified node is a
+ /// BUILD_VECTOR where all of the elements are 0 or undef.
+ bool isBuildVectorAllZeros(const SDNode *N);
+
+ /// \brief Return true if the specified node is a BUILD_VECTOR node of
+ /// all ConstantSDNode or undef.
+ bool isBuildVectorOfConstantSDNodes(const SDNode *N);
+
+ /// \brief Return true if the specified node is a BUILD_VECTOR node of
+ /// all ConstantFPSDNode or undef.
+ bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
+
+ /// Return true if the node has at least one operand
+ /// and all operands of the specified node are ISD::UNDEF.
+ bool allOperandsUndef(const SDNode *N);
+} // end llvm::ISD namespace
+
+//===----------------------------------------------------------------------===//
+/// Unlike LLVM values, Selection DAG nodes may return multiple
+/// values as the result of a computation. Many nodes return multiple values,
+/// from loads (which define a token and a return value) to ADDC (which returns
+/// a result and a carry value), to calls (which may return an arbitrary number
+/// of values).
+///
+/// As such, each use of a SelectionDAG computation must indicate the node that
+/// computes it as well as which return value to use from that node. This pair
+/// of information is represented with the SDValue value type.
+///
+class SDValue {
+ friend struct DenseMapInfo<SDValue>;
+
+ SDNode *Node; // The node defining the value we are using.
+ unsigned ResNo; // Which return value of the node we are using.
+public:
+ SDValue() : Node(nullptr), ResNo(0) {}
+ SDValue(SDNode *node, unsigned resno);
+
+ /// get the index which selects a specific result in the SDNode
+ unsigned getResNo() const { return ResNo; }
+
+ /// get the SDNode which holds the desired result
+ SDNode *getNode() const { return Node; }
+
+ /// set the SDNode
+ void setNode(SDNode *N) { Node = N; }
+
+ inline SDNode *operator->() const { return Node; }
+
+ bool operator==(const SDValue &O) const {
+ return Node == O.Node && ResNo == O.ResNo;
+ }
+ bool operator!=(const SDValue &O) const {
+ return !operator==(O);
+ }
+ bool operator<(const SDValue &O) const {
+ return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
+ }
+ explicit operator bool() const {
+ return Node != nullptr;
+ }
+
+ SDValue getValue(unsigned R) const {
+ return SDValue(Node, R);
+ }
+
+ /// Return true if this node is an operand of N.
+ bool isOperandOf(const SDNode *N) const;
+
+ /// Return the ValueType of the referenced return value.
+ inline EVT getValueType() const;
+
+ /// Return the simple ValueType of the referenced return value.
+ MVT getSimpleValueType() const {
+ return getValueType().getSimpleVT();
+ }
+
+ /// Returns the size of the value in bits.
+ unsigned getValueSizeInBits() const {
+ return getValueType().getSizeInBits();
+ }
+
+ unsigned getScalarValueSizeInBits() const {
+ return getValueType().getScalarType().getSizeInBits();
+ }
+
+ // Forwarding methods - These forward to the corresponding methods in SDNode.
+ inline unsigned getOpcode() const;
+ inline unsigned getNumOperands() const;
+ inline const SDValue &getOperand(unsigned i) const;
+ inline uint64_t getConstantOperandVal(unsigned i) const;
+ inline bool isTargetMemoryOpcode() const;
+ inline bool isTargetOpcode() const;
+ inline bool isMachineOpcode() const;
+ inline bool isUndef() const;
+ inline unsigned getMachineOpcode() const;
+ inline const DebugLoc &getDebugLoc() const;
+ inline void dump() const;
+ inline void dumpr() const;
+
+ /// Return true if this operand (which must be a chain) reaches the
+ /// specified operand without crossing any side-effecting instructions.
+ /// In practice, this looks through token factors and non-volatile loads.
+ /// In order to remain efficient, this only
+ /// looks a couple of nodes in, it does not do an exhaustive search.
+ bool reachesChainWithoutSideEffects(SDValue Dest,
+ unsigned Depth = 2) const;
+
+ /// Return true if there are no nodes using value ResNo of Node.
+ inline bool use_empty() const;
+
+ /// Return true if there is exactly one node using value ResNo of Node.
+ inline bool hasOneUse() const;
+};
+
+
+template<> struct DenseMapInfo<SDValue> {
+ static inline SDValue getEmptyKey() {
+ SDValue V;
+ V.ResNo = -1U;
+ return V;
+ }
+ static inline SDValue getTombstoneKey() {
+ SDValue V;
+ V.ResNo = -2U;
+ return V;
+ }
+ static unsigned getHashValue(const SDValue &Val) {
+ return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
+ (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
+ }
+ static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
+ return LHS == RHS;
+ }
+};
+template <> struct isPodLike<SDValue> { static const bool value = true; };
+
+
+/// Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
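+///
+/// This enables, e.g. (illustrative; assumes an SDValue V in scope):
+/// \code
+///   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(V))
+///     uint64_t Imm = C->getZExtValue();
+/// \endcode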
+template<> struct simplify_type<SDValue> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(SDValue &Val) {
+ return Val.getNode();
+ }
+};
+template<> struct simplify_type<const SDValue> {
+ typedef /*const*/ SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(const SDValue &Val) {
+ return Val.getNode();
+ }
+};
+
+/// Represents a use of a SDNode. This class holds an SDValue,
+/// which records the SDNode being used and the result number, a
+/// pointer to the SDNode using the value, and Next and Prev pointers,
+/// which link together all the uses of an SDNode.
+///
+class SDUse {
+ /// Val - The value being used.
+ SDValue Val;
+ /// User - The user of this value.
+ SDNode *User;
+ /// Prev, Next - Pointers to the uses list of the SDNode referred by
+ /// this operand.
+ SDUse **Prev, *Next;
+
+ SDUse(const SDUse &U) = delete;
+ void operator=(const SDUse &U) = delete;
+
+public:
+ SDUse() : Val(), User(nullptr), Prev(nullptr), Next(nullptr) {}
+
+ /// Normally SDUse will just implicitly convert to an SDValue that it holds.
+ operator const SDValue&() const { return Val; }
+
+ /// If implicit conversion to SDValue doesn't work, the get() method returns
+ /// the SDValue.
+ const SDValue &get() const { return Val; }
+
+ /// This returns the SDNode that contains this Use.
+ SDNode *getUser() { return User; }
+
+ /// Get the next SDUse in the use list.
+ SDUse *getNext() const { return Next; }
+
+ /// Convenience function for get().getNode().
+ SDNode *getNode() const { return Val.getNode(); }
+ /// Convenience function for get().getResNo().
+ unsigned getResNo() const { return Val.getResNo(); }
+ /// Convenience function for get().getValueType().
+ EVT getValueType() const { return Val.getValueType(); }
+
+ /// Convenience function for get().operator==
+ bool operator==(const SDValue &V) const {
+ return Val == V;
+ }
+
+ /// Convenience function for get().operator!=
+ bool operator!=(const SDValue &V) const {
+ return Val != V;
+ }
+
+ /// Convenience function for get().operator<
+ bool operator<(const SDValue &V) const {
+ return Val < V;
+ }
+
+private:
+ friend class SelectionDAG;
+ friend class SDNode;
+
+ void setUser(SDNode *p) { User = p; }
+
+ /// Remove this use from its existing use list, assign it the
+ /// given value, and add it to the new value's node's use list.
+ inline void set(const SDValue &V);
+ /// Like set, but only supports initializing a newly-allocated
+ /// SDUse with a non-null value.
+ inline void setInitial(const SDValue &V);
+ /// Like set, but only sets the Node portion of the value,
+ /// leaving the ResNo portion unmodified.
+ inline void setNode(SDNode *N);
+
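+ /// Link this use at the front of a doubly-linked use list. Prev points at
+ /// the previous element's Next field (or the list head), which makes
+ /// removeFromList O(1) without a back-pointer to the owning node.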
+ void addToList(SDUse **List) {
+ Next = *List;
+ if (Next) Next->Prev = &Next;
+ Prev = List;
+ *List = this;
+ }
+
+ void removeFromList() {
+ *Prev = Next;
+ if (Next) Next->Prev = Prev;
+ }
+};
+
+/// simplify_type specializations - Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDUse> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(SDUse &Val) {
+ return Val.getNode();
+ }
+};
+
+/// These are IR-level optimization flags that may be propagated to SDNodes.
+/// TODO: This data structure should be shared by the IR optimizer and the
+/// backend.
+struct SDNodeFlags {
+private:
+ bool NoUnsignedWrap : 1;
+ bool NoSignedWrap : 1;
+ bool Exact : 1;
+ bool UnsafeAlgebra : 1;
+ bool NoNaNs : 1;
+ bool NoInfs : 1;
+ bool NoSignedZeros : 1;
+ bool AllowReciprocal : 1;
+
+public:
+ /// Default constructor turns off all optimization flags.
+ SDNodeFlags() {
+ NoUnsignedWrap = false;
+ NoSignedWrap = false;
+ Exact = false;
+ UnsafeAlgebra = false;
+ NoNaNs = false;
+ NoInfs = false;
+ NoSignedZeros = false;
+ AllowReciprocal = false;
+ }
+
+ // These are mutators for each flag.
+ void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
+ void setNoSignedWrap(bool b) { NoSignedWrap = b; }
+ void setExact(bool b) { Exact = b; }
+ void setUnsafeAlgebra(bool b) { UnsafeAlgebra = b; }
+ void setNoNaNs(bool b) { NoNaNs = b; }
+ void setNoInfs(bool b) { NoInfs = b; }
+ void setNoSignedZeros(bool b) { NoSignedZeros = b; }
+ void setAllowReciprocal(bool b) { AllowReciprocal = b; }
+
+ // These are accessors for each flag.
+ bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
+ bool hasNoSignedWrap() const { return NoSignedWrap; }
+ bool hasExact() const { return Exact; }
+ bool hasUnsafeAlgebra() const { return UnsafeAlgebra; }
+ bool hasNoNaNs() const { return NoNaNs; }
+ bool hasNoInfs() const { return NoInfs; }
+ bool hasNoSignedZeros() const { return NoSignedZeros; }
+ bool hasAllowReciprocal() const { return AllowReciprocal; }
+
+ /// Return a raw encoding of the flags.
+ /// This function should only be used to add data to the NodeID value.
+ unsigned getRawFlags() const {
+ return (NoUnsignedWrap << 0) | (NoSignedWrap << 1) | (Exact << 2) |
+ (UnsafeAlgebra << 3) | (NoNaNs << 4) | (NoInfs << 5) |
+ (NoSignedZeros << 6) | (AllowReciprocal << 7);
+ }
+};
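+
+// A minimal sketch of configuring flags (illustrative):
+//   SDNodeFlags Flags;
+//   Flags.setNoSignedWrap(true);
+//   Flags.setExact(true);
+//   // Flags.getRawFlags() == (1 << 1) | (1 << 2) == 0x6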
+
+/// Represents one node in the SelectionDAG.
+///
+class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
+private:
+ /// The operation that this node performs.
+ int16_t NodeType;
+
+ /// This is true if OperandList was new[]'d. If true,
+ /// then they will be delete[]'d when the node is destroyed.
+ uint16_t OperandsNeedDelete : 1;
+
+ /// This tracks whether this node has one or more dbg_value
+ /// nodes corresponding to it.
+ uint16_t HasDebugValue : 1;
+
+protected:
+ /// This member is defined by this class, but is not used for
+ /// anything. Subclasses can use it to hold whatever state they find useful.
+ /// This field is initialized to zero by the ctor.
+ uint16_t SubclassData : 14;
+
+private:
+ /// Unique id per SDNode in the DAG.
+ int NodeId;
+
+ /// The values that are used by this operation.
+ SDUse *OperandList;
+
+ /// The types of the values this node defines. SDNode's may
+ /// define multiple values simultaneously.
+ const EVT *ValueList;
+
+ /// List of uses for this SDNode.
+ SDUse *UseList;
+
+ /// The number of entries in the Operand/Value list.
+ unsigned short NumOperands, NumValues;
+
+ // The ordering of the SDNodes. It roughly corresponds to the ordering of the
+ // original LLVM instructions.
+ // This is used for turning off scheduling, because we'll forgo
+ // the normal scheduling algorithms and output the instructions according to
+ // this ordering.
+ unsigned IROrder;
+
+ /// Source line information.
+ DebugLoc debugLoc;
+
+ /// Return a pointer to the specified value type.
+ static const EVT *getValueTypeList(EVT VT);
+
+ friend class SelectionDAG;
+ friend struct ilist_traits<SDNode>;
+
+public:
+ /// Unique and persistent id per SDNode in the DAG.
+ /// Used for debug printing.
+ uint16_t PersistentId;
+
+ //===--------------------------------------------------------------------===//
+ // Accessors
+ //
+
+ /// Return the SelectionDAG opcode value for this node. For
+ /// pre-isel nodes (those for which isMachineOpcode returns false), these
+ /// are the opcode values in the ISD and <target>ISD namespaces. For
+ /// post-isel opcodes, see getMachineOpcode.
+ unsigned getOpcode() const { return (unsigned short)NodeType; }
+
+ /// Test if this node has a target-specific opcode (in the
+ /// \<target\>ISD namespace).
+ bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
+
+ /// Test if this node has a target-specific
+ /// memory-referencing opcode (in the \<target\>ISD namespace and
+ /// greater than FIRST_TARGET_MEMORY_OPCODE).
+ bool isTargetMemoryOpcode() const {
+ return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
+ }
+
+ /// Return true if this node is an ISD::UNDEF node.
+ bool isUndef() const { return NodeType == ISD::UNDEF; }
+
+ /// Test if this node is a memory intrinsic (with valid pointer information).
+ /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
+ /// non-memory intrinsics (with chains) that are not really instances of
+ /// MemSDNode. For such nodes, we need some extra state to determine the
+ /// proper classof relationship.
+ bool isMemIntrinsic() const {
+ return (NodeType == ISD::INTRINSIC_W_CHAIN ||
+ NodeType == ISD::INTRINSIC_VOID) && ((SubclassData >> 13) & 1);
+ }
+
+ /// Test if this node has a post-isel opcode, directly
+ /// corresponding to a MachineInstr opcode.
+ bool isMachineOpcode() const { return NodeType < 0; }
+
+ /// This may only be called if isMachineOpcode returns
+ /// true. It returns the MachineInstr opcode value that the node's opcode
+ /// corresponds to.
+ unsigned getMachineOpcode() const {
+ assert(isMachineOpcode() && "Not a MachineInstr opcode!");
+ return ~NodeType;
+ }
+
+ /// Get the HasDebugValue bit.
+ bool getHasDebugValue() const { return HasDebugValue; }
+
+ /// Set the HasDebugValue bit.
+ void setHasDebugValue(bool b) { HasDebugValue = b; }
+
+ /// Return true if there are no uses of this node.
+ bool use_empty() const { return UseList == nullptr; }
+
+ /// Return true if there is exactly one use of this node.
+ bool hasOneUse() const {
+ return !use_empty() && std::next(use_begin()) == use_end();
+ }
+
+ /// Return the number of uses of this node. This method takes
+ /// time proportional to the number of uses.
+ size_t use_size() const { return std::distance(use_begin(), use_end()); }
+
+ /// Return the unique node id.
+ int getNodeId() const { return NodeId; }
+
+ /// Set unique node id.
+ void setNodeId(int Id) { NodeId = Id; }
+
+ /// Return the node ordering.
+ unsigned getIROrder() const { return IROrder; }
+
+ /// Set the node ordering.
+ void setIROrder(unsigned Order) { IROrder = Order; }
+
+ /// Return the source location info.
+ const DebugLoc &getDebugLoc() const { return debugLoc; }
+
+ /// Set source location info. Try to avoid this, putting
+ /// it in the constructor is preferable.
+ void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
+
+ /// This class provides iterator support for SDUse
+ /// operands that use a specific SDNode.
+ class use_iterator
+ : public std::iterator<std::forward_iterator_tag, SDUse, ptrdiff_t> {
+ SDUse *Op;
+ explicit use_iterator(SDUse *op) : Op(op) {
+ }
+ friend class SDNode;
+ public:
+ typedef std::iterator<std::forward_iterator_tag,
+ SDUse, ptrdiff_t>::reference reference;
+ typedef std::iterator<std::forward_iterator_tag,
+ SDUse, ptrdiff_t>::pointer pointer;
+
+ use_iterator(const use_iterator &I) : Op(I.Op) {}
+ use_iterator() : Op(nullptr) {}
+
+ bool operator==(const use_iterator &x) const {
+ return Op == x.Op;
+ }
+ bool operator!=(const use_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// Return true if this iterator is at the end of the use list.
+ bool atEnd() const { return Op == nullptr; }
+
+ // Iterator traversal: forward iteration only.
+ use_iterator &operator++() { // Preincrement
+ assert(Op && "Cannot increment end iterator!");
+ Op = Op->getNext();
+ return *this;
+ }
+
+ use_iterator operator++(int) { // Postincrement
+ use_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ /// Retrieve a pointer to the current user node.
+ SDNode *operator*() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op->getUser();
+ }
+
+ SDNode *operator->() const { return operator*(); }
+
+ SDUse &getUse() const { return *Op; }
+
+ /// Retrieve the operand # of this use in its user.
+ unsigned getOperandNo() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return (unsigned)(Op - Op->getUser()->OperandList);
+ }
+ };
+
+ /// Provide iteration support to walk over all uses of an SDNode.
+ use_iterator use_begin() const {
+ return use_iterator(UseList);
+ }
+
+ static use_iterator use_end() { return use_iterator(nullptr); }
+
+ inline iterator_range<use_iterator> uses() {
+ return make_range(use_begin(), use_end());
+ }
+ inline iterator_range<use_iterator> uses() const {
+ return make_range(use_begin(), use_end());
+ }
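+
+ // Typical traversal sketch (illustrative; assumes an SDNode *N in scope):
+ //   for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
+ //        UI != UE; ++UI) {
+ //     SDNode *User = *UI;                // The node using a value of N.
+ //     unsigned OpNo = UI.getOperandNo(); // Which operand of User is N.
+ //   }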
+
+ /// Return true if there are exactly NUses uses of the indicated value.
+ /// This method ignores uses of other values defined by this operation.
+ bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
+
+ /// Return true if there are any uses of the indicated value.
+ /// This method ignores uses of other values defined by this operation.
+ bool hasAnyUseOfValue(unsigned Value) const;
+
+ /// Return true if this node is the only use of N.
+ bool isOnlyUserOf(const SDNode *N) const;
+
+ /// Return true if this node is an operand of N.
+ bool isOperandOf(const SDNode *N) const;
+
+ /// Return true if this node is a predecessor of N.
+ /// NOTE: Implemented on top of hasPredecessor and every bit as
+ /// expensive. Use carefully.
+ bool isPredecessorOf(const SDNode *N) const {
+ return N->hasPredecessor(this);
+ }
+
+ /// Return true if N is a predecessor of this node.
+ /// N is either an operand of this node, or can be reached by recursively
+ /// traversing up the operands.
+ /// NOTE: This is an expensive method. Use it carefully.
+ bool hasPredecessor(const SDNode *N) const;
+
+ /// Return true if N is a predecessor of this node.
+ /// N is either an operand of this node, or can be reached by recursively
+ /// traversing up the operands.
+ /// In this helper the Visited and worklist sets are held externally to
+ /// cache predecessors over multiple invocations. If you want to test for
+ /// multiple predecessors this method is preferable to multiple calls to
+ /// hasPredecessor. Be sure to clear Visited and Worklist if the DAG
+ /// changes.
+ /// NOTE: This is still very expensive. Use carefully.
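+ ///
+ /// Caching sketch (illustrative; assumes SDNode pointers N, A, B in scope):
+ /// \code
+ ///   SmallPtrSet<const SDNode *, 16> Visited;
+ ///   SmallVector<const SDNode *, 16> Worklist;
+ ///   bool AReaches = N->hasPredecessorHelper(A, Visited, Worklist);
+ ///   bool BReaches = N->hasPredecessorHelper(B, Visited, Worklist);
+ /// \endcode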
+ bool hasPredecessorHelper(const SDNode *N,
+ SmallPtrSetImpl<const SDNode *> &Visited,
+ SmallVectorImpl<const SDNode *> &Worklist) const;
+
+ /// Return the number of values used by this operation.
+ unsigned getNumOperands() const { return NumOperands; }
+
+ /// Helper method returns the integer value of a ConstantSDNode operand.
+ uint64_t getConstantOperandVal(unsigned Num) const;
+
+ const SDValue &getOperand(unsigned Num) const {
+ assert(Num < NumOperands && "Invalid child # of SDNode!");
+ return OperandList[Num];
+ }
+
+ typedef SDUse* op_iterator;
+ op_iterator op_begin() const { return OperandList; }
+ op_iterator op_end() const { return OperandList+NumOperands; }
+ ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
+
+ /// Iterator for directly iterating over the operand SDValue's.
+ struct value_op_iterator
+ : iterator_adaptor_base<value_op_iterator, op_iterator,
+ std::random_access_iterator_tag, SDValue,
+ ptrdiff_t, value_op_iterator *,
+ value_op_iterator *> {
+ explicit value_op_iterator(SDUse *U = nullptr)
+ : iterator_adaptor_base(U) {}
+
+ const SDValue &operator*() const { return I->get(); }
+ };
+
+ iterator_range<value_op_iterator> op_values() const {
+ return make_range(value_op_iterator(op_begin()),
+ value_op_iterator(op_end()));
+ }
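+
+ // Range-based iteration sketch (illustrative; assumes an SDNode *N):
+ //   for (const SDValue &Op : N->op_values())
+ //     assert(Op.getNode() && "operands are never null");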
+
+ SDVTList getVTList() const {
+ SDVTList X = { ValueList, NumValues };
+ return X;
+ }
+
+ /// If this node has a glue operand, return the node
+ /// to which the glue operand points. Otherwise return NULL.
+ SDNode *getGluedNode() const {
+ if (getNumOperands() != 0 &&
+ getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
+ return getOperand(getNumOperands()-1).getNode();
+ return nullptr;
+ }
+
+ /// If this node has a glue value with a user, return
+ /// the user (there is at most one). Otherwise return NULL.
+ SDNode *getGluedUser() const {
+ for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
+ if (UI.getUse().get().getValueType() == MVT::Glue)
+ return *UI;
+ return nullptr;
+ }
+
+ /// This could be defined as a virtual function and implemented more simply
+ /// and directly, but it is not, in order to avoid creating a vtable for
+ /// this class.
+ const SDNodeFlags *getFlags() const;
+
+ /// Return the number of values defined/returned by this operator.
+ unsigned getNumValues() const { return NumValues; }
+
+ /// Return the type of a specified result.
+ EVT getValueType(unsigned ResNo) const {
+ assert(ResNo < NumValues && "Illegal result number!");
+ return ValueList[ResNo];
+ }
+
+ /// Return the type of a specified result as a simple type.
+ MVT getSimpleValueType(unsigned ResNo) const {
+ return getValueType(ResNo).getSimpleVT();
+ }
+
+ /// Returns MVT::getSizeInBits(getValueType(ResNo)).
+ unsigned getValueSizeInBits(unsigned ResNo) const {
+ return getValueType(ResNo).getSizeInBits();
+ }
+
+ typedef const EVT* value_iterator;
+ value_iterator value_begin() const { return ValueList; }
+ value_iterator value_end() const { return ValueList+NumValues; }
+
+ /// Return the opcode of this operation for printing.
+ std::string getOperationName(const SelectionDAG *G = nullptr) const;
+ static const char* getIndexedModeName(ISD::MemIndexedMode AM);
+ void print_types(raw_ostream &OS, const SelectionDAG *G) const;
+ void print_details(raw_ostream &OS, const SelectionDAG *G) const;
+ void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+ void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
+
+ /// Print a SelectionDAG node and all children down to
+ /// the leaves. The given SelectionDAG allows target-specific nodes
+ /// to be printed in human-readable form. Unlike printr, this will
+ /// print the whole DAG, including children that appear multiple
+ /// times.
+ ///
+ void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
+
+ /// Print a SelectionDAG node and children up to
+ /// depth "depth." The given SelectionDAG allows target-specific
+ /// nodes to be printed in human-readable form. Unlike printr, this
+ /// will print children that appear multiple times wherever they are
+ /// used.
+ ///
+ void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
+ unsigned depth = 100) const;
+
+
+ /// Dump this node, for debugging.
+ void dump() const;
+
+ /// Dump (recursively) this node and its use-def subgraph.
+ void dumpr() const;
+
+ /// Dump this node, for debugging.
+ /// The given SelectionDAG allows target-specific nodes to be printed
+ /// in human-readable form.
+ void dump(const SelectionDAG *G) const;
+
+ /// Dump (recursively) this node and its use-def subgraph.
+ /// The given SelectionDAG allows target-specific nodes to be printed
+ /// in human-readable form.
+ void dumpr(const SelectionDAG *G) const;
+
+ /// printrFull to dbgs(). The given SelectionDAG allows
+ /// target-specific nodes to be printed in human-readable form.
+ /// Unlike dumpr, this will print the whole DAG, including children
+ /// that appear multiple times.
+ void dumprFull(const SelectionDAG *G = nullptr) const;
+
+ /// printrWithDepth to dbgs(). The given
+ /// SelectionDAG allows target-specific nodes to be printed in
+ /// human-readable form. Unlike dumpr, this will print children
+ /// that appear multiple times wherever they are used.
+ ///
+ void dumprWithDepth(const SelectionDAG *G = nullptr,
+ unsigned depth = 100) const;
+
+ /// Gather unique data for the node.
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// This method should only be used by the SDUse class.
+ void addUse(SDUse &U) { U.addToList(&UseList); }
+
+protected:
+ static SDVTList getSDVTList(EVT VT) {
+ SDVTList Ret = { getValueTypeList(VT), 1 };
+ return Ret;
+ }
+
+ SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ ArrayRef<SDValue> Ops)
+ : NodeType(Opc), OperandsNeedDelete(true), HasDebugValue(false),
+ SubclassData(0), NodeId(-1),
+ OperandList(Ops.size() ? new SDUse[Ops.size()] : nullptr),
+ ValueList(VTs.VTs), UseList(nullptr), NumOperands(Ops.size()),
+ NumValues(VTs.NumVTs), IROrder(Order), debugLoc(std::move(dl)) {
+ assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+ assert(NumOperands == Ops.size() &&
+ "NumOperands wasn't wide enough for its operands!");
+ assert(NumValues == VTs.NumVTs &&
+ "NumValues wasn't wide enough for its operands!");
+ for (unsigned i = 0; i != Ops.size(); ++i) {
+ assert(OperandList && "no operands available");
+ OperandList[i].setUser(this);
+ OperandList[i].setInitial(Ops[i]);
+ }
+ checkForCycles(this);
+ }
+
+ /// This constructor adds no operands itself; operands can be
+ /// set later with InitOperands.
+ SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
+ : NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false),
+ SubclassData(0), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs),
+ UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs),
+ IROrder(Order), debugLoc(std::move(dl)) {
+ assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
+ assert(NumValues == VTs.NumVTs &&
+ "NumValues wasn't wide enough for its operands!");
+ }
+
+ /// Initialize the operands list of this with 1 operand.
+ void InitOperands(SDUse *Ops, const SDValue &Op0) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ NumOperands = 1;
+ OperandList = Ops;
+ checkForCycles(this);
+ }
+
+ /// Initialize the operands list of this with 2 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ NumOperands = 2;
+ OperandList = Ops;
+ checkForCycles(this);
+ }
+
+ /// Initialize the operands list of this with 3 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
+ const SDValue &Op2) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ Ops[2].setUser(this);
+ Ops[2].setInitial(Op2);
+ NumOperands = 3;
+ OperandList = Ops;
+ checkForCycles(this);
+ }
+
+ /// Initialize the operands list of this with 4 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
+ const SDValue &Op2, const SDValue &Op3) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ Ops[2].setUser(this);
+ Ops[2].setInitial(Op2);
+ Ops[3].setUser(this);
+ Ops[3].setInitial(Op3);
+ NumOperands = 4;
+ OperandList = Ops;
+ checkForCycles(this);
+ }
+
+ /// Initialize the operands list of this with N operands.
+ void InitOperands(SDUse *Ops, const SDValue *Vals, unsigned N) {
+ for (unsigned i = 0; i != N; ++i) {
+ Ops[i].setUser(this);
+ Ops[i].setInitial(Vals[i]);
+ }
+ NumOperands = N;
+ assert(NumOperands == N &&
+ "NumOperands wasn't wide enough for its operands!");
+ OperandList = Ops;
+ checkForCycles(this);
+ }
+
+ /// Release the operands and set this node to have zero operands.
+ void DropOperands();
+};
+
+/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
+/// into SDNode creation functions.
+/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
+/// from the original Instruction, and IROrder is the ordinal position of
+/// the instruction.
+/// When an SDNode is created after the initial DAG is built, both DebugLoc and
+/// the IROrder are propagated from the original SDNode.
+/// So SDLoc class provides two constructors besides the default one, one to
+/// be used by the DAGBuilder, the other to be used by others.
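+///
+/// A typical use when creating replacement nodes (illustrative; assumes a
+/// SelectionDAG &DAG and an SDNode *N in scope):
+/// \code
+///   SDLoc DL(N); // Inherit N's DebugLoc and IR order.
+///   SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
+/// \endcode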
+class SDLoc {
+private:
+ // Ptr could be used for either Instruction* or SDNode*. It is used for
+ // Instruction* if IROrder is not -1.
+ const void *Ptr;
+ int IROrder;
+
+public:
+ SDLoc() : Ptr(nullptr), IROrder(0) {}
+ SDLoc(const SDNode *N) : Ptr(N), IROrder(-1) {
+ assert(N && "null SDNode");
+ }
+ SDLoc(const SDValue V) : Ptr(V.getNode()), IROrder(-1) {
+ assert(Ptr && "null SDNode");
+ }
+ SDLoc(const Instruction *I, int Order) : Ptr(I), IROrder(Order) {
+ assert(Order >= 0 && "bad IROrder");
+ }
+ unsigned getIROrder() {
+ if (IROrder >= 0 || Ptr == nullptr) {
+ return (unsigned)IROrder;
+ }
+ const SDNode *N = (const SDNode*)(Ptr);
+ return N->getIROrder();
+ }
+ DebugLoc getDebugLoc() {
+ if (!Ptr) {
+ return DebugLoc();
+ }
+ if (IROrder >= 0) {
+ const Instruction *I = (const Instruction*)(Ptr);
+ return I->getDebugLoc();
+ }
+ const SDNode *N = (const SDNode*)(Ptr);
+ return N->getDebugLoc();
+ }
+};
+
+
+// Define inline functions from the SDValue class.
+
+inline SDValue::SDValue(SDNode *node, unsigned resno)
+ : Node(node), ResNo(resno) {
+ assert((!Node || ResNo < Node->getNumValues()) &&
+ "Invalid result number for the given node!");
+ assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.");
+}
+
+inline unsigned SDValue::getOpcode() const {
+ return Node->getOpcode();
+}
+inline EVT SDValue::getValueType() const {
+ return Node->getValueType(ResNo);
+}
+inline unsigned SDValue::getNumOperands() const {
+ return Node->getNumOperands();
+}
+inline const SDValue &SDValue::getOperand(unsigned i) const {
+ return Node->getOperand(i);
+}
+inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
+ return Node->getConstantOperandVal(i);
+}
+inline bool SDValue::isTargetOpcode() const {
+ return Node->isTargetOpcode();
+}
+inline bool SDValue::isTargetMemoryOpcode() const {
+ return Node->isTargetMemoryOpcode();
+}
+inline bool SDValue::isMachineOpcode() const {
+ return Node->isMachineOpcode();
+}
+inline unsigned SDValue::getMachineOpcode() const {
+ return Node->getMachineOpcode();
+}
+inline bool SDValue::isUndef() const {
+ return Node->isUndef();
+}
+inline bool SDValue::use_empty() const {
+ return !Node->hasAnyUseOfValue(ResNo);
+}
+inline bool SDValue::hasOneUse() const {
+ return Node->hasNUsesOfValue(1, ResNo);
+}
+inline const DebugLoc &SDValue::getDebugLoc() const {
+ return Node->getDebugLoc();
+}
+inline void SDValue::dump() const {
+ return Node->dump();
+}
+inline void SDValue::dumpr() const {
+ return Node->dumpr();
+}
+// Define inline functions from the SDUse class.
+
+inline void SDUse::set(const SDValue &V) {
+ if (Val.getNode()) removeFromList();
+ Val = V;
+ if (V.getNode()) V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setInitial(const SDValue &V) {
+ Val = V;
+ V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setNode(SDNode *N) {
+ if (Val.getNode()) removeFromList();
+ Val.setNode(N);
+ if (N) N->addUse(*this);
+}
+
+/// This class is used for single-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class UnarySDNode : public SDNode {
+ SDUse Op;
+public:
+ UnarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ SDValue X)
+ : SDNode(Opc, Order, dl, VTs) {
+ InitOperands(&Op, X);
+ }
+};
+
+/// This class is used for two-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class BinarySDNode : public SDNode {
+ SDUse Ops[2];
+public:
+ BinarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ SDValue X, SDValue Y)
+ : SDNode(Opc, Order, dl, VTs) {
+ InitOperands(Ops, X, Y);
+ }
+};
+
+/// Returns true if the opcode is a binary operation with flags.
+static bool isBinOpWithFlags(unsigned Opcode) {
+ switch (Opcode) {
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SRA:
+ case ISD::SRL:
+ case ISD::MUL:
+ case ISD::ADD:
+ case ISD::SUB:
+ case ISD::SHL:
+ case ISD::FADD:
+ case ISD::FDIV:
+ case ISD::FMUL:
+ case ISD::FREM:
+ case ISD::FSUB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/// This class is an extension of BinarySDNode
+/// used from those opcodes that have associated extra flags.
+class BinaryWithFlagsSDNode : public BinarySDNode {
+public:
+ SDNodeFlags Flags;
+ BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ SDValue X, SDValue Y, const SDNodeFlags &NodeFlags)
+ : BinarySDNode(Opc, Order, dl, VTs, X, Y), Flags(NodeFlags) {}
+ static bool classof(const SDNode *N) {
+ return isBinOpWithFlags(N->getOpcode());
+ }
+};
+
+/// This class is used for three-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class TernarySDNode : public SDNode {
+ SDUse Ops[3];
+public:
+ TernarySDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ SDValue X, SDValue Y, SDValue Z)
+ : SDNode(Opc, Order, dl, VTs) {
+ InitOperands(Ops, X, Y, Z);
+ }
+};
+
+
+/// This class is used to form a handle around another node that
+/// is persistent and is updated across invocations of replaceAllUsesWith on its
+/// operand. This node should be directly created by end-users and not added to
+/// the AllNodes list.
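+///
+/// Usage sketch (illustrative; "mutateDAG" stands in for any call that may
+/// CSE or replace nodes):
+/// \code
+///   HandleSDNode Handle(Chain);
+///   mutateDAG();                           // May RAUW nodes, incl. Chain.
+///   SDValue NewChain = Handle.getValue();  // Reflects any replacement.
+/// \endcode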
+class HandleSDNode : public SDNode {
+ SDUse Op;
+public:
+ explicit HandleSDNode(SDValue X)
+ : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
+ // HandleSDNodes are never inserted into the DAG, so they won't be
+ // auto-numbered. Use ID 65535 as a sentinel.
+ PersistentId = 0xffff;
+ InitOperands(&Op, X);
+ }
+ ~HandleSDNode();
+ const SDValue &getValue() const { return Op; }
+};
+
+class AddrSpaceCastSDNode : public UnarySDNode {
+private:
+ unsigned SrcAddrSpace;
+ unsigned DestAddrSpace;
+
+public:
+ AddrSpaceCastSDNode(unsigned Order, DebugLoc dl, EVT VT, SDValue X,
+ unsigned SrcAS, unsigned DestAS);
+
+ unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
+ unsigned getDestAddressSpace() const { return DestAddrSpace; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ADDRSPACECAST;
+ }
+};
+
+/// This is an abstract virtual class for memory operations.
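+///
+/// Query sketch (illustrative; assumes an SDNode *N in scope):
+/// \code
+///   if (MemSDNode *M = dyn_cast<MemSDNode>(N)) {
+///     EVT MemVT = M->getMemoryVT(); // Type of the in-memory value.
+///     bool Vol = M->isVolatile();   // Encoded in SubclassData from the MMO.
+///   }
+/// \endcode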
+class MemSDNode : public SDNode {
+private:
+ // VT of in-memory value.
+ EVT MemoryVT;
+
+protected:
+ /// Memory reference information.
+ MachineMemOperand *MMO;
+
+public:
+ MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ EVT MemoryVT, MachineMemOperand *MMO);
+
+ MemSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ ArrayRef<SDValue> Ops, EVT MemoryVT, MachineMemOperand *MMO);
+
+ bool readMem() const { return MMO->isLoad(); }
+ bool writeMem() const { return MMO->isStore(); }
+
+ /// Returns the alignment of the memory access.
+ unsigned getOriginalAlignment() const {
+ return MMO->getBaseAlignment();
+ }
+ unsigned getAlignment() const {
+ return MMO->getAlignment();
+ }
+
+ /// Return the SubclassData value, which contains an
+ /// encoding of the volatile flag, as well as bits used by subclasses. This
+ /// function should only be used to compute a FoldingSetNodeID value.
+ unsigned getRawSubclassData() const {
+ return SubclassData;
+ }
+
+ // We access subclass data here so that we can check consistency
+ // with MachineMemOperand information.
+ bool isVolatile() const { return (SubclassData >> 5) & 1; }
+ bool isNonTemporal() const { return (SubclassData >> 6) & 1; }
+ bool isInvariant() const { return (SubclassData >> 7) & 1; }
+
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((SubclassData >> 8) & 15);
+ }
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope((SubclassData >> 12) & 1);
+ }
+
+ // Returns the offset from the location of the access.
+ int64_t getSrcValueOffset() const { return MMO->getOffset(); }
+
+ /// Returns the AA info that describes the dereference.
+ AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
+
+ /// Returns the Ranges that describes the dereference.
+ const MDNode *getRanges() const { return MMO->getRanges(); }
+
+ /// Return the type of the in-memory value.
+ EVT getMemoryVT() const { return MemoryVT; }
+
+ /// Return a MachineMemOperand object describing the memory
+ /// reference performed by operation.
+ MachineMemOperand *getMemOperand() const { return MMO; }
+
+ const MachinePointerInfo &getPointerInfo() const {
+ return MMO->getPointerInfo();
+ }
+
+ /// Return the address space for the associated pointer
+ unsigned getAddressSpace() const {
+ return getPointerInfo().getAddrSpace();
+ }
+
+ /// Update this MemSDNode's MachineMemOperand information
+ /// to reflect the alignment of NewMMO, if it has a greater alignment.
+ /// This must only be used when the new alignment applies to all users of
+ /// this MachineMemOperand.
+ void refineAlignment(const MachineMemOperand *NewMMO) {
+ MMO->refineAlignment(NewMMO);
+ }
+
+ const SDValue &getChain() const { return getOperand(0); }
+ const SDValue &getBasePtr() const {
+ return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const SDNode *N) {
+ // For some targets, we lower some target intrinsics to a MemIntrinsicNode
+ // with either an intrinsic or a target opcode.
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE ||
+ N->getOpcode() == ISD::PREFETCH ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD ||
+ N->getOpcode() == ISD::ATOMIC_STORE ||
+ N->getOpcode() == ISD::MLOAD ||
+ N->getOpcode() == ISD::MSTORE ||
+ N->getOpcode() == ISD::MGATHER ||
+ N->getOpcode() == ISD::MSCATTER ||
+ N->isMemIntrinsic() ||
+ N->isTargetMemoryOpcode();
+ }
+};
+
+/// This is an SDNode representing atomic operations.
+class AtomicSDNode : public MemSDNode {
+ SDUse Ops[4];
+
+ /// For cmpxchg instructions, the ordering requirements when a store does not
+ /// occur.
+ AtomicOrdering FailureOrdering;
+
+ void InitAtomic(AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope) {
+ // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
+ assert((SuccessOrdering & 15) == SuccessOrdering &&
+ "Ordering may not require more than 4 bits!");
+ assert((FailureOrdering & 15) == FailureOrdering &&
+ "Ordering may not require more than 4 bits!");
+ assert((SynchScope & 1) == SynchScope &&
+ "SynchScope may not require more than 1 bit!");
+ SubclassData |= SuccessOrdering << 8;
+ SubclassData |= SynchScope << 12;
+ this->FailureOrdering = FailureOrdering;
+ assert(getSuccessOrdering() == SuccessOrdering &&
+ "Ordering encoding error!");
+ assert(getFailureOrdering() == FailureOrdering &&
+ "Ordering encoding error!");
+ assert(getSynchScope() == SynchScope && "Synch-scope encoding error!");
+ }
+
+public:
+ // Opc: opcode for atomic
+ // VTL: value type list
+ // Chain: memory chain for operand
+ // Ptr: address to update as an SDValue
+ // Cmp: compare value
+ // Swp: swap value
+ // SrcVal: address to update as a Value (used for MemOperand)
+ // Align: alignment of memory
+ AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
+ EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp,
+ MachineMemOperand *MMO, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope)
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
+ InitAtomic(Ordering, Ordering, SynchScope);
+ InitOperands(Ops, Chain, Ptr, Cmp, Swp);
+ }
+ AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
+ EVT MemVT,
+ SDValue Chain, SDValue Ptr,
+ SDValue Val, MachineMemOperand *MMO,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope)
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
+ InitAtomic(Ordering, Ordering, SynchScope);
+ InitOperands(Ops, Chain, Ptr, Val);
+ }
+ AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL,
+ EVT MemVT,
+ SDValue Chain, SDValue Ptr,
+ MachineMemOperand *MMO,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope)
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
+ InitAtomic(Ordering, Ordering, SynchScope);
+ InitOperands(Ops, Chain, Ptr);
+ }
+ AtomicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTL, EVT MemVT,
+ const SDValue* AllOps, SDUse *DynOps, unsigned NumOps,
+ MachineMemOperand *MMO,
+ AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope)
+ : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
+ InitAtomic(SuccessOrdering, FailureOrdering, SynchScope);
+ assert((DynOps || NumOps <= array_lengthof(Ops)) &&
+ "Too many ops for internal storage!");
+ InitOperands(DynOps ? DynOps : Ops, AllOps, NumOps);
+ }
+
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getVal() const { return getOperand(2); }
+
+ AtomicOrdering getSuccessOrdering() const {
+ return getOrdering();
+ }
+
+ // Not quite enough room in SubclassData for everything, so failure gets its
+ // own field.
+ AtomicOrdering getFailureOrdering() const {
+ return FailureOrdering;
+ }
+
+ bool isCompareAndSwap() const {
+ unsigned Op = getOpcode();
+ return Op == ISD::ATOMIC_CMP_SWAP || Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD ||
+ N->getOpcode() == ISD::ATOMIC_STORE;
+ }
+};
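+
+// A minimal usage sketch of the accessors above, as they are typically
+// combined when inspecting cmpxchg nodes; the helper name is hypothetical.
+inline bool isSeqCstCmpXchg(const SDNode *N) {
+  const AtomicSDNode *A = dyn_cast<AtomicSDNode>(N);
+  return A && A->isCompareAndSwap() &&
+         A->getSuccessOrdering() == SequentiallyConsistent &&
+         A->getFailureOrdering() == SequentiallyConsistent;
+}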
+
+/// This SDNode is used for target intrinsics that touch
+/// memory and need an associated MachineMemOperand. Its opcode may be
+/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
+/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
+class MemIntrinsicSDNode : public MemSDNode {
+public:
+ MemIntrinsicSDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs,
+ ArrayRef<SDValue> Ops, EVT MemoryVT,
+ MachineMemOperand *MMO)
+ : MemSDNode(Opc, Order, dl, VTs, Ops, MemoryVT, MMO) {
+ SubclassData |= 1u << 13;
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const SDNode *N) {
+ // We lower some target intrinsics to their target opcode early, so a
+ // node with a target opcode can also be of this class.
+ return N->isMemIntrinsic() ||
+ N->getOpcode() == ISD::PREFETCH ||
+ N->isTargetMemoryOpcode();
+ }
+};
+
+/// This SDNode is used to implement the code generator
+/// support for the LLVM IR shufflevector instruction. It combines elements
+/// from two input vectors into a new vector, with the selection and
+/// ordering of elements determined by an array of integers, referred to as
+/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
+/// refer to elements from the LHS input, and indices from N to 2N-1 refer to
+/// elements from the RHS input.
+/// An index of -1 is treated as undef, such that the code generator may put
+/// any value in the corresponding element of the result.
+class ShuffleVectorSDNode : public SDNode {
+ SDUse Ops[2];
+
+ // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
+ // is freed when the SelectionDAG object is destroyed.
+ const int *Mask;
+protected:
+ friend class SelectionDAG;
+ ShuffleVectorSDNode(EVT VT, unsigned Order, DebugLoc dl, SDValue N1,
+ SDValue N2, const int *M)
+ : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {
+ InitOperands(Ops, N1, N2);
+ }
+public:
+
+ ArrayRef<int> getMask() const {
+ EVT VT = getValueType(0);
+ return makeArrayRef(Mask, VT.getVectorNumElements());
+ }
+ int getMaskElt(unsigned Idx) const {
+ assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
+ return Mask[Idx];
+ }
+
+ bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
+ int getSplatIndex() const {
+ assert(isSplat() && "Cannot get splat index for non-splat!");
+ EVT VT = getValueType(0);
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
+ if (Mask[i] >= 0)
+ return Mask[i];
+ }
+ llvm_unreachable("Splat with all undef indices?");
+ }
+ static bool isSplatMask(const int *Mask, EVT VT);
+
+ /// Change values in a shuffle permute mask assuming
+ /// the two vector operands have swapped position.
+ static void commuteMask(SmallVectorImpl<int> &Mask) {
+ unsigned NumElems = Mask.size();
+ for (unsigned i = 0; i != NumElems; ++i) {
+ int idx = Mask[i];
+ if (idx < 0)
+ continue;
+ else if (idx < (int)NumElems)
+ Mask[i] = idx + NumElems;
+ else
+ Mask[i] = idx - NumElems;
+ }
+ }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::VECTOR_SHUFFLE;
+ }
+};
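+
+// A minimal usage sketch: the mask convention described above makes LHS/RHS
+// queries straightforward; the helper name is hypothetical.
+inline bool shuffleReadsOnlyLHS(const ShuffleVectorSDNode *SVN) {
+  int NumElts = (int)SVN->getValueType(0).getVectorNumElements();
+  for (int M : SVN->getMask())
+    if (M >= NumElts) // indices N..2N-1 select from the RHS input
+      return false;
+  return true; // only LHS elements (0..N-1) or undef (-1) are referenced
+}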
+
+class ConstantSDNode : public SDNode {
+ const ConstantInt *Value;
+ friend class SelectionDAG;
+ ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val,
+ DebugLoc DL, EVT VT)
+ : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant,
+ 0, DL, getSDVTList(VT)), Value(val) {
+ SubclassData |= (uint16_t)isOpaque;
+ }
+public:
+
+ const ConstantInt *getConstantIntValue() const { return Value; }
+ const APInt &getAPIntValue() const { return Value->getValue(); }
+ uint64_t getZExtValue() const { return Value->getZExtValue(); }
+ int64_t getSExtValue() const { return Value->getSExtValue(); }
+
+ bool isOne() const { return Value->isOne(); }
+ bool isNullValue() const { return Value->isNullValue(); }
+ bool isAllOnesValue() const { return Value->isAllOnesValue(); }
+
+ bool isOpaque() const { return SubclassData & 1; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::Constant ||
+ N->getOpcode() == ISD::TargetConstant;
+ }
+};
+
+class ConstantFPSDNode : public SDNode {
+ const ConstantFP *Value;
+ friend class SelectionDAG;
+ ConstantFPSDNode(bool isTarget, const ConstantFP *val, DebugLoc DL, EVT VT)
+ : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP,
+ 0, DL, getSDVTList(VT)), Value(val) {
+ }
+public:
+
+ const APFloat& getValueAPF() const { return Value->getValueAPF(); }
+ const ConstantFP *getConstantFPValue() const { return Value; }
+
+ /// Return true if the value is positive or negative zero.
+ bool isZero() const { return Value->isZero(); }
+
+ /// Return true if the value is a NaN.
+ bool isNaN() const { return Value->isNaN(); }
+
+ /// Return true if the value is an infinity.
+ bool isInfinity() const { return Value->isInfinity(); }
+
+ /// Return true if the value is negative.
+ bool isNegative() const { return Value->isNegative(); }
+
+ /// We don't rely on operator== working on double values, as it returns true
+ /// for things that are clearly not equal, like -0.0 and 0.0. As such, this
+ /// method can be used to do an exact bit-for-bit comparison of two
+ /// floating point values.
+ ///
+ /// We leave the version with the double argument here because it's just so
+ /// convenient to write "2.0" and the like. Without this function we'd have
+ /// to duplicate its logic everywhere it's called.
+ bool isExactlyValue(double V) const {
+ bool ignored;
+ APFloat Tmp(V);
+ Tmp.convert(Value->getValueAPF().getSemantics(),
+ APFloat::rmNearestTiesToEven, &ignored);
+ return isExactlyValue(Tmp);
+ }
+ bool isExactlyValue(const APFloat& V) const;
+
+ static bool isValueValidForType(EVT VT, const APFloat& Val);
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ConstantFP ||
+ N->getOpcode() == ISD::TargetConstantFP;
+ }
+};
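+
+// A minimal usage sketch: isExactlyValue is a bit-exact test, so -0.0 does
+// not match +0.0 here; the helper name is hypothetical.
+inline bool isExactFPOne(const SDNode *N) {
+  const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N);
+  return CFP && CFP->isExactlyValue(1.0);
+}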
+
+/// Returns true if \p V is a constant integer zero.
+bool isNullConstant(SDValue V);
+/// Returns true if \p V is an FP constant with a value of positive zero.
+bool isNullFPConstant(SDValue V);
+/// Returns true if \p V is an integer constant with all bits set.
+bool isAllOnesConstant(SDValue V);
+/// Returns true if \p V is a constant integer one.
+bool isOneConstant(SDValue V);
+
+class GlobalAddressSDNode : public SDNode {
+ const GlobalValue *TheGlobal;
+ int64_t Offset;
+ unsigned char TargetFlags;
+ friend class SelectionDAG;
+ GlobalAddressSDNode(unsigned Opc, unsigned Order, DebugLoc DL,
+ const GlobalValue *GA, EVT VT, int64_t o,
+ unsigned char TargetFlags);
+public:
+
+ const GlobalValue *getGlobal() const { return TheGlobal; }
+ int64_t getOffset() const { return Offset; }
+ unsigned char getTargetFlags() const { return TargetFlags; }
+ // Return the address space this GlobalAddress belongs to.
+ unsigned getAddressSpace() const;
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::GlobalAddress ||
+ N->getOpcode() == ISD::TargetGlobalAddress ||
+ N->getOpcode() == ISD::GlobalTLSAddress ||
+ N->getOpcode() == ISD::TargetGlobalTLSAddress;
+ }
+};
+
+class FrameIndexSDNode : public SDNode {
+ int FI;
+ friend class SelectionDAG;
+ FrameIndexSDNode(int fi, EVT VT, bool isTarg)
+ : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
+ 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
+ }
+public:
+
+ int getIndex() const { return FI; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::FrameIndex ||
+ N->getOpcode() == ISD::TargetFrameIndex;
+ }
+};
+
+class JumpTableSDNode : public SDNode {
+ int JTI;
+ unsigned char TargetFlags;
+ friend class SelectionDAG;
+ JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned char TF)
+ : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
+ 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
+ }
+public:
+
+ int getIndex() const { return JTI; }
+ unsigned char getTargetFlags() const { return TargetFlags; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::JumpTable ||
+ N->getOpcode() == ISD::TargetJumpTable;
+ }
+};
+
+class ConstantPoolSDNode : public SDNode {
+ union {
+ const Constant *ConstVal;
+ MachineConstantPoolValue *MachineCPVal;
+ } Val;
+ int Offset; // It's a MachineConstantPoolValue if top bit is set.
+ unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
+ unsigned char TargetFlags;
+ friend class SelectionDAG;
+ ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
+ unsigned Align, unsigned char TF)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+ DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+ TargetFlags(TF) {
+ assert(Offset >= 0 && "Offset is too large");
+ Val.ConstVal = c;
+ }
+ ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
+ EVT VT, int o, unsigned Align, unsigned char TF)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
+ DebugLoc(), getSDVTList(VT)), Offset(o), Alignment(Align),
+ TargetFlags(TF) {
+ assert(Offset >= 0 && "Offset is too large");
+ Val.MachineCPVal = v;
+ Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+ }
+public:
+
+ bool isMachineConstantPoolEntry() const {
+ return Offset < 0;
+ }
+
+ const Constant *getConstVal() const {
+ assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
+ return Val.ConstVal;
+ }
+
+ MachineConstantPoolValue *getMachineCPVal() const {
+ assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
+ return Val.MachineCPVal;
+ }
+
+ int getOffset() const {
+ return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
+ }
+
+ // Return the alignment of this constant pool object, which is either 0 (for
+ // default alignment) or the desired value.
+ unsigned getAlignment() const { return Alignment; }
+ unsigned char getTargetFlags() const { return TargetFlags; }
+
+ Type *getType() const;
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ConstantPool ||
+ N->getOpcode() == ISD::TargetConstantPool;
+ }
+};
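+
+// A minimal usage sketch: because the sign bit of Offset discriminates the
+// union, clients branch on isMachineConstantPoolEntry() before touching it;
+// the helper name is hypothetical.
+inline bool cpEntriesMatch(const ConstantPoolSDNode *A,
+                           const ConstantPoolSDNode *B) {
+  if (A->isMachineConstantPoolEntry() || B->isMachineConstantPoolEntry())
+    return false; // machine CP values need a target-specific comparison
+  return A->getConstVal() == B->getConstVal() &&
+         A->getOffset() == B->getOffset();
+}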
+
+/// Completely target-dependent object reference.
+class TargetIndexSDNode : public SDNode {
+ unsigned char TargetFlags;
+ int Index;
+ int64_t Offset;
+ friend class SelectionDAG;
+public:
+
+ TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
+ : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
+ TargetFlags(TF), Index(Idx), Offset(Ofs) {}
+
+ unsigned char getTargetFlags() const { return TargetFlags; }
+ int getIndex() const { return Index; }
+ int64_t getOffset() const { return Offset; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::TargetIndex;
+ }
+};
+
+class BasicBlockSDNode : public SDNode {
+ MachineBasicBlock *MBB;
+ friend class SelectionDAG;
+ /// Debug info is meaningful and potentially useful here, but we create
+ /// blocks out of order when they're jumped to, which makes it a bit
+ /// harder. Let's see if we need it first.
+ explicit BasicBlockSDNode(MachineBasicBlock *mbb)
+ : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
+ {}
+public:
+
+ MachineBasicBlock *getBasicBlock() const { return MBB; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::BasicBlock;
+ }
+};
+
+/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
+class BuildVectorSDNode : public SDNode {
+ // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
+ explicit BuildVectorSDNode() = delete;
+public:
+ /// Check if this is a constant splat, and if so, find the
+ /// smallest element size that splats the vector. If MinSplatBits is
+ /// nonzero, the element size must be at least that large. Note that the
+ /// splat element may be the entire vector (i.e., a one element vector).
+ /// Returns the splat element value in SplatValue. Any undefined bits in
+ /// that value are zero, and the corresponding bits in the SplatUndef mask
+ /// are set. The SplatBitSize value is set to the splat element size in
+ /// bits. HasAnyUndefs is set to true if any bits in the vector are
+ /// undefined. isBigEndian describes the endianness of the target.
+ bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
+ unsigned &SplatBitSize, bool &HasAnyUndefs,
+ unsigned MinSplatBits = 0,
+ bool isBigEndian = false) const;
+
+ /// \brief Returns the splatted value or a null value if this is not a splat.
+ ///
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
+
+ /// \brief Returns the splatted constant or null if this is not a constant
+ /// splat.
+ ///
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ ConstantSDNode *
+ getConstantSplatNode(BitVector *UndefElements = nullptr) const;
+
+ /// \brief Returns the splatted constant FP or null if this is not a constant
+ /// FP splat.
+ ///
+ /// If passed a non-null UndefElements bitvector, it will resize it to match
+ /// the vector width and set the bits where elements are undef.
+ ConstantFPSDNode *
+ getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
+
+ /// \brief If this is a constant FP splat and the splatted constant FP is an
+/// exact power of 2, return the log base 2 integer value. Otherwise,
+ /// return -1.
+ ///
+ /// The BitWidth specifies the necessary bit precision.
+ int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
+ uint32_t BitWidth) const;
+
+ bool isConstant() const;
+
+ static inline bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::BUILD_VECTOR;
+ }
+};
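+
+// A minimal usage sketch of isConstantSplat, requiring at least byte-sized
+// splat elements; the helper name and its IsBigEndian parameter are
+// assumptions of this sketch.
+inline bool isSplatOfAllOnes(const BuildVectorSDNode *BV, bool IsBigEndian) {
+  APInt SplatValue, SplatUndef;
+  unsigned SplatBitSize;
+  bool HasAnyUndefs;
+  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+                             HasAnyUndefs, /*MinSplatBits=*/8, IsBigEndian) &&
+         SplatValue.isAllOnesValue();
+}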
+
+/// An SDNode that holds an arbitrary LLVM IR Value. This is
+/// used when the SelectionDAG needs to make a simple reference to something
+/// in the LLVM IR representation.
+///
+class SrcValueSDNode : public SDNode {
+ const Value *V;
+ friend class SelectionDAG;
+ /// Create a SrcValue for a general value.
+ explicit SrcValueSDNode(const Value *v)
+ : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
+
+public:
+ /// Return the contained Value.
+ const Value *getValue() const { return V; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::SRCVALUE;
+ }
+};
+
+class MDNodeSDNode : public SDNode {
+ const MDNode *MD;
+ friend class SelectionDAG;
+ explicit MDNodeSDNode(const MDNode *md)
+ : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
+ {}
+public:
+
+ const MDNode *getMD() const { return MD; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MDNODE_SDNODE;
+ }
+};
+
+class RegisterSDNode : public SDNode {
+ unsigned Reg;
+ friend class SelectionDAG;
+ RegisterSDNode(unsigned reg, EVT VT)
+ : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {
+ }
+public:
+
+ unsigned getReg() const { return Reg; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::Register;
+ }
+};
+
+class RegisterMaskSDNode : public SDNode {
+ // The memory for RegMask is not owned by the node.
+ const uint32_t *RegMask;
+ friend class SelectionDAG;
+ RegisterMaskSDNode(const uint32_t *mask)
+ : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
+ RegMask(mask) {}
+public:
+
+ const uint32_t *getRegMask() const { return RegMask; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::RegisterMask;
+ }
+};
+
+class BlockAddressSDNode : public SDNode {
+ const BlockAddress *BA;
+ int64_t Offset;
+ unsigned char TargetFlags;
+ friend class SelectionDAG;
+ BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
+ int64_t o, unsigned char Flags)
+ : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
+ BA(ba), Offset(o), TargetFlags(Flags) {
+ }
+public:
+ const BlockAddress *getBlockAddress() const { return BA; }
+ int64_t getOffset() const { return Offset; }
+ unsigned char getTargetFlags() const { return TargetFlags; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::BlockAddress ||
+ N->getOpcode() == ISD::TargetBlockAddress;
+ }
+};
+
+class EHLabelSDNode : public SDNode {
+ SDUse Chain;
+ MCSymbol *Label;
+ friend class SelectionDAG;
+ EHLabelSDNode(unsigned Order, DebugLoc dl, SDValue ch, MCSymbol *L)
+ : SDNode(ISD::EH_LABEL, Order, dl, getSDVTList(MVT::Other)), Label(L) {
+ InitOperands(&Chain, ch);
+ }
+public:
+ MCSymbol *getLabel() const { return Label; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::EH_LABEL;
+ }
+};
+
+class ExternalSymbolSDNode : public SDNode {
+ const char *Symbol;
+ unsigned char TargetFlags;
+
+ friend class SelectionDAG;
+ ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned char TF, EVT VT)
+ : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
+ 0, DebugLoc(), getSDVTList(VT)), Symbol(Sym), TargetFlags(TF) {
+ }
+public:
+
+ const char *getSymbol() const { return Symbol; }
+ unsigned char getTargetFlags() const { return TargetFlags; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ExternalSymbol ||
+ N->getOpcode() == ISD::TargetExternalSymbol;
+ }
+};
+
+class MCSymbolSDNode : public SDNode {
+ MCSymbol *Symbol;
+
+ friend class SelectionDAG;
+ MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
+ : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
+
+public:
+ MCSymbol *getMCSymbol() const { return Symbol; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MCSymbol;
+ }
+};
+
+class CondCodeSDNode : public SDNode {
+ ISD::CondCode Condition;
+ friend class SelectionDAG;
+ explicit CondCodeSDNode(ISD::CondCode Cond)
+ : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+ Condition(Cond) {
+ }
+public:
+
+ ISD::CondCode get() const { return Condition; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::CONDCODE;
+ }
+};
+
+/// NOTE: avoid using this node, as it may disappear in the
+/// future and most targets don't support it.
+class CvtRndSatSDNode : public SDNode {
+ ISD::CvtCode CvtCode;
+ friend class SelectionDAG;
+ explicit CvtRndSatSDNode(EVT VT, unsigned Order, DebugLoc dl,
+ ArrayRef<SDValue> Ops, ISD::CvtCode Code)
+ : SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT), Ops),
+ CvtCode(Code) {
+ assert(Ops.size() == 5 && "wrong number of operands");
+ }
+public:
+ ISD::CvtCode getCvtCode() const { return CvtCode; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::CONVERT_RNDSAT;
+ }
+};
+
+/// This class is used to represent EVT's, which are used
+/// to parameterize some operations.
+class VTSDNode : public SDNode {
+ EVT ValueType;
+ friend class SelectionDAG;
+ explicit VTSDNode(EVT VT)
+ : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
+ ValueType(VT) {
+ }
+public:
+
+ EVT getVT() const { return ValueType; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::VALUETYPE;
+ }
+};
+
+/// Base class for LoadSDNode and StoreSDNode
+class LSBaseSDNode : public MemSDNode {
+ /// Operand array for load and store.
+ ///
+ /// \note Keeping this array in the base class captures the common
+ /// functionality shared between LoadSDNode and StoreSDNode.
+ SDUse Ops[4];
+public:
+ LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
+ SDValue *Operands, unsigned numOperands,
+ SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
+ MachineMemOperand *MMO)
+ : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
+ SubclassData |= AM << 2;
+ assert(getAddressingMode() == AM && "MemIndexedMode encoding error!");
+ InitOperands(Ops, Operands, numOperands);
+ assert((getOffset().getOpcode() == ISD::UNDEF || isIndexed()) &&
+ "Only indexed loads and stores have a non-undef offset operand");
+ }
+
+ const SDValue &getOffset() const {
+ return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
+ }
+
+ /// Return the addressing mode for this load or store:
+ /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
+ ISD::MemIndexedMode getAddressingMode() const {
+ return ISD::MemIndexedMode((SubclassData >> 2) & 7);
+ }
+
+ /// Return true if this is a pre/post inc/dec load/store.
+ bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
+
+ /// Return true if this is NOT a pre/post inc/dec load/store.
+ bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE;
+ }
+};
+
+/// This class is used to represent ISD::LOAD nodes.
+class LoadSDNode : public LSBaseSDNode {
+ friend class SelectionDAG;
+ LoadSDNode(SDValue *ChainPtrOff, unsigned Order, DebugLoc dl, SDVTList VTs,
+ ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
+ MachineMemOperand *MMO)
+ : LSBaseSDNode(ISD::LOAD, Order, dl, ChainPtrOff, 3, VTs, AM, MemVT, MMO) {
+ SubclassData |= (unsigned short)ETy;
+ assert(getExtensionType() == ETy && "LoadExtType encoding error!");
+ assert(readMem() && "Load MachineMemOperand is not a load!");
+ assert(!writeMem() && "Load MachineMemOperand is a store!");
+ }
+public:
+
+ /// Return whether this is a plain node,
+ /// or one of the varieties of value-extending loads.
+ ISD::LoadExtType getExtensionType() const {
+ return ISD::LoadExtType(SubclassData & 3);
+ }
+
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getOffset() const { return getOperand(2); }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LOAD;
+ }
+};
+
+/// This class is used to represent ISD::STORE nodes.
+class StoreSDNode : public LSBaseSDNode {
+ friend class SelectionDAG;
+ StoreSDNode(SDValue *ChainValuePtrOff, unsigned Order, DebugLoc dl,
+ SDVTList VTs, ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
+ MachineMemOperand *MMO)
+ : LSBaseSDNode(ISD::STORE, Order, dl, ChainValuePtrOff, 4,
+ VTs, AM, MemVT, MMO) {
+ SubclassData |= (unsigned short)isTrunc;
+ assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
+ assert(!readMem() && "Store MachineMemOperand is a load!");
+ assert(writeMem() && "Store MachineMemOperand is not a store!");
+ }
+public:
+
+ /// Return true if the op does a truncation before store.
+ /// For integers this is the same as doing a TRUNCATE and storing the result.
+ /// For floats, it is the same as doing an FP_ROUND and storing the result.
+ bool isTruncatingStore() const { return SubclassData & 1; }
+
+ const SDValue &getValue() const { return getOperand(1); }
+ const SDValue &getBasePtr() const { return getOperand(2); }
+ const SDValue &getOffset() const { return getOperand(3); }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::STORE;
+ }
+};
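+
+// A minimal usage sketch combining the LSBaseSDNode and StoreSDNode queries
+// above; the ISD::is* predicates near the end of this file provide the same
+// style of checks. The helper name is hypothetical.
+inline bool isSimpleTruncStore(const SDNode *N) {
+  const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
+  return St && St->isTruncatingStore() && St->isUnindexed();
+}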
+
+/// This base class is used to represent MLOAD and MSTORE nodes
+class MaskedLoadStoreSDNode : public MemSDNode {
+ // Operands
+ SDUse Ops[4];
+public:
+ friend class SelectionDAG;
+ MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
+ SDValue *Operands, unsigned numOperands, SDVTList VTs,
+ EVT MemVT, MachineMemOperand *MMO)
+ : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
+ InitOperands(Ops, Operands, numOperands);
+ }
+
+ // In both nodes the address is Op1 and the mask is Op2:
+ //   MaskedLoadSDNode  (Chain, ptr, mask, src0), src0 is a passthru value
+ //   MaskedStoreSDNode (Chain, ptr, mask, data)
+ // The mask is a vector of i1 elements.
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getMask() const { return getOperand(2); }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MLOAD ||
+ N->getOpcode() == ISD::MSTORE;
+ }
+};
+
+/// This class is used to represent an MLOAD node
+class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
+public:
+ friend class SelectionDAG;
+ MaskedLoadSDNode(unsigned Order, DebugLoc dl, SDValue *Operands,
+ unsigned numOperands, SDVTList VTs, ISD::LoadExtType ETy,
+ EVT MemVT, MachineMemOperand *MMO)
+ : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, Operands, numOperands,
+ VTs, MemVT, MMO) {
+ SubclassData |= (unsigned short)ETy;
+ }
+
+ ISD::LoadExtType getExtensionType() const {
+ return ISD::LoadExtType(SubclassData & 3);
+ }
+ const SDValue &getSrc0() const { return getOperand(3); }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MLOAD;
+ }
+};
+
+/// This class is used to represent an MSTORE node
+class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
+
+public:
+ friend class SelectionDAG;
+ MaskedStoreSDNode(unsigned Order, DebugLoc dl, SDValue *Operands,
+ unsigned numOperands, SDVTList VTs, bool isTrunc, EVT MemVT,
+ MachineMemOperand *MMO)
+ : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, Operands, numOperands,
+ VTs, MemVT, MMO) {
+ SubclassData |= (unsigned short)isTrunc;
+ }
+ /// Return true if the op does a truncation before store.
+ /// For integers this is the same as doing a TRUNCATE and storing the result.
+ /// For floats, it is the same as doing an FP_ROUND and storing the result.
+ bool isTruncatingStore() const { return SubclassData & 1; }
+
+ const SDValue &getValue() const { return getOperand(3); }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MSTORE;
+ }
+};
+
+/// This is a base class used to represent
+/// MGATHER and MSCATTER nodes
+///
+class MaskedGatherScatterSDNode : public MemSDNode {
+ // Operands
+ SDUse Ops[5];
+public:
+ friend class SelectionDAG;
+ MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order, DebugLoc dl,
+ ArrayRef<SDValue> Operands, SDVTList VTs, EVT MemVT,
+ MachineMemOperand *MMO)
+ : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
+ assert(Operands.size() == 5 && "Incompatible number of operands");
+ InitOperands(Ops, Operands.data(), Operands.size());
+ }
+
+ // In both nodes the mask is Op2, the base address is Op3, and the index is
+ // Op4. Op1 is the passthru value (gather) or the value to store (scatter):
+ //   MaskedGatherSDNode  (Chain, src0, mask, base, index)
+ //   MaskedScatterSDNode (Chain, value, mask, base, index)
+ // The mask is a vector of i1 elements.
+ const SDValue &getBasePtr() const { return getOperand(3); }
+ const SDValue &getIndex() const { return getOperand(4); }
+ const SDValue &getMask() const { return getOperand(2); }
+ const SDValue &getValue() const { return getOperand(1); }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MGATHER ||
+ N->getOpcode() == ISD::MSCATTER;
+ }
+};
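+
+// A minimal usage sketch: with the operand layout documented above, the
+// per-lane address is (base + index), guarded lane-by-lane by the mask; the
+// helper name is hypothetical.
+inline bool gatherScatterLanesMatch(const MaskedGatherScatterSDNode *N) {
+  unsigned MaskLanes = N->getMask().getValueType().getVectorNumElements();
+  unsigned IndexLanes = N->getIndex().getValueType().getVectorNumElements();
+  return MaskLanes == IndexLanes;
+}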
+
+/// This class is used to represent an MGATHER node
+///
+class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
+public:
+ friend class SelectionDAG;
+ MaskedGatherSDNode(unsigned Order, DebugLoc dl, ArrayRef<SDValue> Operands,
+ SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
+ : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, Operands, VTs, MemVT,
+ MMO) {
+ assert(getValue().getValueType() == getValueType(0) &&
+ "Incompatible type of the PassThru value in MaskedGatherSDNode");
+ assert(getMask().getValueType().getVectorNumElements() ==
+ getValueType(0).getVectorNumElements() &&
+ "Vector width mismatch between mask and data");
+ assert(getIndex().getValueType().getVectorNumElements() ==
+ getValueType(0).getVectorNumElements() &&
+ "Vector width mismatch between index and data");
+ }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MGATHER;
+ }
+};
+
+/// This class is used to represent an MSCATTER node
+///
+class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
+
+public:
+ friend class SelectionDAG;
+ MaskedScatterSDNode(unsigned Order, DebugLoc dl,ArrayRef<SDValue> Operands,
+ SDVTList VTs, EVT MemVT, MachineMemOperand *MMO)
+ : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, Operands, VTs, MemVT,
+ MMO) {
+ assert(getMask().getValueType().getVectorNumElements() ==
+ getValue().getValueType().getVectorNumElements() &&
+ "Vector width mismatch between mask and data");
+ assert(getIndex().getValueType().getVectorNumElements() ==
+ getValue().getValueType().getVectorNumElements() &&
+ "Vector width mismatch between index and data");
+ }
+
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MSCATTER;
+ }
+};
+
+/// An SDNode that represents everything that will be needed
+/// to construct a MachineInstr. These nodes are created during the
+/// instruction selection proper phase.
+class MachineSDNode : public SDNode {
+public:
+ typedef MachineMemOperand **mmo_iterator;
+
+private:
+ friend class SelectionDAG;
+ MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc DL, SDVTList VTs)
+ : SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
+
+ /// Operands for this instruction, if they fit here. If
+ /// they don't, this field is unused.
+ SDUse LocalOperands[4];
+
+ /// Memory reference descriptions for this instruction.
+ mmo_iterator MemRefs;
+ mmo_iterator MemRefsEnd;
+
+public:
+ mmo_iterator memoperands_begin() const { return MemRefs; }
+ mmo_iterator memoperands_end() const { return MemRefsEnd; }
+ bool memoperands_empty() const { return MemRefsEnd == MemRefs; }
+
+ /// Assign this MachineSDNode's memory reference descriptor
+ /// list. This does not transfer ownership.
+ void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
+ for (mmo_iterator MMI = NewMemRefs, MME = NewMemRefsEnd; MMI != MME; ++MMI)
+ assert(*MMI && "Null mem ref detected!");
+ MemRefs = NewMemRefs;
+ MemRefsEnd = NewMemRefsEnd;
+ }
+
+ static bool classof(const SDNode *N) {
+ return N->isMachineOpcode();
+ }
+};
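+
+// A minimal usage sketch: since setMemRefs does not take ownership, the
+// array must outlive the node. The usual pattern allocates it from the
+// MachineFunction (assuming its allocateMemRefsArray interface):
+//   MachineSDNode::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
+//   MemRefs[0] = MMO;
+//   MN->setMemRefs(MemRefs, MemRefs + 1);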
+
+class SDNodeIterator : public std::iterator<std::forward_iterator_tag,
+ SDNode, ptrdiff_t> {
+ const SDNode *Node;
+ unsigned Operand;
+
+ SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+public:
+ bool operator==(const SDNodeIterator& x) const {
+ return Operand == x.Operand;
+ }
+ bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
+
+ pointer operator*() const {
+ return Node->getOperand(Operand).getNode();
+ }
+ pointer operator->() const { return operator*(); }
+
+ SDNodeIterator& operator++() { // Preincrement
+ ++Operand;
+ return *this;
+ }
+ SDNodeIterator operator++(int) { // Postincrement
+ SDNodeIterator tmp = *this; ++*this; return tmp;
+ }
+ size_t operator-(SDNodeIterator Other) const {
+ assert(Node == Other.Node &&
+ "Cannot compare iterators of two different nodes!");
+ return Operand - Other.Operand;
+ }
+
+ static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
+ static SDNodeIterator end (const SDNode *N) {
+ return SDNodeIterator(N, N->getNumOperands());
+ }
+
+ unsigned getOperand() const { return Operand; }
+ const SDNode *getNode() const { return Node; }
+};
+
+template <> struct GraphTraits<SDNode*> {
+ typedef SDNode NodeType;
+ typedef SDNodeIterator ChildIteratorType;
+ static inline NodeType *getEntryNode(SDNode *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return SDNodeIterator::begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return SDNodeIterator::end(N);
+ }
+};
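+
+// A minimal usage sketch: with GraphTraits defined, the generic graph
+// algorithms in llvm/ADT apply directly, e.g. (assuming
+// llvm/ADT/DepthFirstIterator.h is included):
+//   for (SDNode *M : depth_first(N))
+//     ...; // visits N and everything reachable through its operands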
+
+/// The largest SDNode class.
+typedef MaskedGatherScatterSDNode LargestSDNode;
+
+/// The SDNode class with the greatest alignment requirement.
+typedef GlobalAddressSDNode MostAlignedSDNode;
+
+namespace ISD {
+ /// Returns true if the specified node is a non-extending and unindexed load.
+ inline bool isNormalLoad(const SDNode *N) {
+ const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
+ return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
+ Ld->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// Returns true if the specified node is a non-extending load.
+ inline bool isNON_EXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+ }
+
+ /// Returns true if the specified node is an EXTLOAD.
+ inline bool isEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+ }
+
+ /// Returns true if the specified node is a SEXTLOAD.
+ inline bool isSEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+ }
+
+ /// Returns true if the specified node is a ZEXTLOAD.
+ inline bool isZEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+ }
+
+ /// Returns true if the specified node is an unindexed load.
+ inline bool isUNINDEXEDLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// Returns true if the specified node is a non-truncating
+ /// and unindexed store.
+ inline bool isNormalStore(const SDNode *N) {
+ const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
+ return St && !St->isTruncatingStore() &&
+ St->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// Returns true if the specified node is a non-truncating store.
+ inline bool isNON_TRUNCStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
+ }
+
+ /// Returns true if the specified node is a truncating store.
+ inline bool isTRUNCStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
+ }
+
+ /// Returns true if the specified node is an unindexed store.
+ inline bool isUNINDEXEDStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) &&
+ cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+ }
+}
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
new file mode 100644
index 0000000..7b621be
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/SlotIndexes.h
@@ -0,0 +1,717 @@
+//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements SlotIndex and related classes. The purpose of SlotIndex
+// is to describe a position at which a register can become live, or cease to
+// be live.
+//
+// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
+// is held by LiveIntervals and provides the real numbering. This allows
+// LiveIntervals to perform largely transparent renumbering.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SLOTINDEXES_H
+#define LLVM_CODEGEN_SLOTINDEXES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntervalMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBundle.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+ /// This class represents an entry in the slot index list held in the
+ /// SlotIndexes pass. It should not be used directly. See the
+ /// SlotIndex & SlotIndexes classes for the public interface to this
+ /// information.
+ class IndexListEntry : public ilist_node<IndexListEntry> {
+ MachineInstr *mi;
+ unsigned index;
+
+ public:
+
+ IndexListEntry(MachineInstr *mi, unsigned index) : mi(mi), index(index) {}
+
+ MachineInstr* getInstr() const { return mi; }
+ void setInstr(MachineInstr *mi) {
+ this->mi = mi;
+ }
+
+ unsigned getIndex() const { return index; }
+ void setIndex(unsigned index) {
+ this->index = index;
+ }
+
+#ifdef EXPENSIVE_CHECKS
+ // When EXPENSIVE_CHECKS is defined, "erased" index list entries will
+ // actually be moved to a "graveyard" list, and have their pointers
+ // poisoned, so that dangling SlotIndex access can be reliably detected.
+ void setPoison() {
+ intptr_t tmp = reinterpret_cast<intptr_t>(mi);
+ assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
+ tmp |= 0x1;
+ mi = reinterpret_cast<MachineInstr*>(tmp);
+ }
+
+ bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
+#endif // EXPENSIVE_CHECKS
+
+ };
+
+ template <>
+ struct ilist_traits<IndexListEntry> : public ilist_default_traits<IndexListEntry> {
+ private:
+ mutable ilist_half_node<IndexListEntry> Sentinel;
+ public:
+ IndexListEntry *createSentinel() const {
+ return static_cast<IndexListEntry*>(&Sentinel);
+ }
+ void destroySentinel(IndexListEntry *) const {}
+
+ IndexListEntry *provideInitialHead() const { return createSentinel(); }
+ IndexListEntry *ensureHead(IndexListEntry*) const { return createSentinel(); }
+ static void noteHead(IndexListEntry*, IndexListEntry*) {}
+ void deleteNode(IndexListEntry *N) {}
+
+ private:
+ void createNode(const IndexListEntry &);
+ };
+
+ /// SlotIndex - An opaque wrapper around machine indexes.
+ class SlotIndex {
+ friend class SlotIndexes;
+
+ enum Slot {
+ /// Basic block boundary. Used for live ranges entering and leaving a
+ /// block without being live in the layout neighbor. Also used as the
+ /// def slot of PHI-defs.
+ Slot_Block,
+
+ /// Early-clobber register use/def slot. A live range defined at
+ /// Slot_EarlyClobber interferes with normal live ranges killed at
+ /// Slot_Register. Also used as the kill slot for live ranges tied to an
+ /// early-clobber def.
+ Slot_EarlyClobber,
+
+ /// Normal register use/def slot. Normal instructions kill and define
+ /// register live ranges at this slot.
+ Slot_Register,
+
+ /// Dead def kill point. Kill slot for a live range that is defined by
+ /// the same instruction (Slot_Register or Slot_EarlyClobber), but isn't
+ /// used anywhere.
+ Slot_Dead,
+
+ Slot_Count
+ };
+
+ PointerIntPair<IndexListEntry*, 2, unsigned> lie;
+
+ SlotIndex(IndexListEntry *entry, unsigned slot)
+ : lie(entry, slot) {}
+
+ IndexListEntry* listEntry() const {
+ assert(isValid() && "Attempt to compare reserved index.");
+#ifdef EXPENSIVE_CHECKS
+ assert(!lie.getPointer()->isPoisoned() &&
+ "Attempt to access deleted list-entry.");
+#endif // EXPENSIVE_CHECKS
+ return lie.getPointer();
+ }
+
+ unsigned getIndex() const {
+ return listEntry()->getIndex() | getSlot();
+ }
+
+ /// Returns the slot for this SlotIndex.
+ Slot getSlot() const {
+ return static_cast<Slot>(lie.getInt());
+ }
+
+ public:
+ enum {
+ /// The default distance between instructions as returned by distance().
+ /// This may vary as instructions are inserted and removed.
+ InstrDist = 4 * Slot_Count
+ };
+
+ /// Construct an invalid index.
+ SlotIndex() : lie(nullptr, 0) {}
+
+ // Construct a new slot index from the given one, and set the slot.
+ SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
+ assert(lie.getPointer() != nullptr &&
+ "Attempt to construct index with 0 pointer.");
+ }
+
+ /// Returns true if this is a valid index. Invalid indices do
+ /// not point into an index table, and cannot be compared.
+ bool isValid() const {
+ return lie.getPointer();
+ }
+
+ /// Return true for a valid index.
+ explicit operator bool() const { return isValid(); }
+
+ /// Print this index to the given raw_ostream.
+ void print(raw_ostream &os) const;
+
+ /// Dump this index to stderr.
+ void dump() const;
+
+ /// Compare two SlotIndex objects for equality.
+ bool operator==(SlotIndex other) const {
+ return lie == other.lie;
+ }
+ /// Compare two SlotIndex objects for inequality.
+ bool operator!=(SlotIndex other) const {
+ return lie != other.lie;
+ }
+
+ /// Compare two SlotIndex objects. Return true if the first index
+ /// is strictly lower than the second.
+ bool operator<(SlotIndex other) const {
+ return getIndex() < other.getIndex();
+ }
+ /// Compare two SlotIndex objects. Return true if the first index
+ /// is lower than, or equal to, the second.
+ bool operator<=(SlotIndex other) const {
+ return getIndex() <= other.getIndex();
+ }
+
+ /// Compare two SlotIndex objects. Return true if the first index
+ /// is greater than the second.
+ bool operator>(SlotIndex other) const {
+ return getIndex() > other.getIndex();
+ }
+
+ /// Compare two SlotIndex objects. Return true if the first index
+ /// is greater than, or equal to, the second.
+ bool operator>=(SlotIndex other) const {
+ return getIndex() >= other.getIndex();
+ }
+
+ /// isSameInstr - Return true if A and B refer to the same instruction.
+ static bool isSameInstr(SlotIndex A, SlotIndex B) {
+ return A.lie.getPointer() == B.lie.getPointer();
+ }
+
+ /// isEarlierInstr - Return true if A refers to an instruction earlier than
+ /// B. This is equivalent to A < B && !isSameInstr(A, B).
+ static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
+ return A.listEntry()->getIndex() < B.listEntry()->getIndex();
+ }
+
+ /// Return the distance from this index to the given one.
+ int distance(SlotIndex other) const {
+ return other.getIndex() - getIndex();
+ }
+
+ /// Return the scaled distance from this index to the given one, where all
+ /// slots on the same instruction have zero distance.
+ int getInstrDistance(SlotIndex other) const {
+ return (other.listEntry()->getIndex() - listEntry()->getIndex())
+ / Slot_Count;
+ }
+
+ /// isBlock - Returns true if this is a block boundary slot.
+ bool isBlock() const { return getSlot() == Slot_Block; }
+
+ /// isEarlyClobber - Returns true if this is an early-clobber slot.
+ bool isEarlyClobber() const { return getSlot() == Slot_EarlyClobber; }
+
+ /// isRegister - Returns true if this is a normal register use/def slot.
+ /// Note that early-clobber slots may also be used for uses and defs.
+ bool isRegister() const { return getSlot() == Slot_Register; }
+
+ /// isDead - Returns true if this is a dead def kill slot.
+ bool isDead() const { return getSlot() == Slot_Dead; }
+
+ /// Returns the base index associated with this index. The base index
+ /// is the one associated with the Slot_Block slot for the instruction
+ /// pointed to by this index.
+ SlotIndex getBaseIndex() const {
+ return SlotIndex(listEntry(), Slot_Block);
+ }
+
+ /// Returns the boundary index associated with this index. The boundary
+ /// index is the one associated with the Slot_Dead slot for the instruction
+ /// pointed to by this index.
+ SlotIndex getBoundaryIndex() const {
+ return SlotIndex(listEntry(), Slot_Dead);
+ }
+
+ /// Returns the register use/def slot in the current instruction for a
+ /// normal or early-clobber def.
+ SlotIndex getRegSlot(bool EC = false) const {
+ return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
+ }
+
+ /// Returns the dead def kill slot for the current instruction.
+ SlotIndex getDeadSlot() const {
+ return SlotIndex(listEntry(), Slot_Dead);
+ }
+
+ /// Returns the next slot in the index list. This could be either the
+ /// next slot for the instruction pointed to by this index or, if this
+ /// index is a Slot_Dead, the first slot for the next instruction.
+ /// WARNING: This method is considerably more expensive than the methods
+ /// that return specific slots (getRegSlot(), etc.). If you can, please
+ /// use one of those methods.
+ SlotIndex getNextSlot() const {
+ Slot s = getSlot();
+ if (s == Slot_Dead) {
+ return SlotIndex(&*++listEntry()->getIterator(), Slot_Block);
+ }
+ return SlotIndex(listEntry(), s + 1);
+ }
+
+ /// Returns the next index. This is the index corresponding to this
+ /// index's slot, but for the next instruction.
+ SlotIndex getNextIndex() const {
+ return SlotIndex(&*++listEntry()->getIterator(), getSlot());
+ }
+
+ /// Returns the previous slot in the index list. This could be either the
+ /// previous slot for the instruction pointed to by this index or, if this
+ /// index is a Slot_Block, the last slot for the previous instruction.
+ /// WARNING: This method is considerably more expensive than the methods
+ /// that return specific slots (getRegSlot(), etc.). If you can, please
+ /// use one of those methods.
+ SlotIndex getPrevSlot() const {
+ Slot s = getSlot();
+ if (s == Slot_Block) {
+ return SlotIndex(&*--listEntry()->getIterator(), Slot_Dead);
+ }
+ return SlotIndex(listEntry(), s - 1);
+ }
+
+ /// Returns the previous index. This is the index corresponding to this
+ /// index's slot, but for the previous instruction.
+ SlotIndex getPrevIndex() const {
+ return SlotIndex(&*--listEntry()->getIterator(), getSlot());
+ }
+
+ };
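+
+  // A minimal usage sketch: for any valid index the four slots of one
+  // instruction are strictly ordered, which is what makes early-clobber
+  // interference modelling work; the helper name is hypothetical.
+  inline bool slotOrderHolds(SlotIndex Idx) {
+    return Idx.getBaseIndex() < Idx.getRegSlot(/*EC=*/true) &&
+           Idx.getRegSlot(/*EC=*/true) < Idx.getRegSlot() &&
+           Idx.getRegSlot() < Idx.getDeadSlot();
+  }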
+
+ template <> struct isPodLike<SlotIndex> { static const bool value = true; };
+
+ inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
+ li.print(os);
+ return os;
+ }
+
+ typedef std::pair<SlotIndex, MachineBasicBlock*> IdxMBBPair;
+
+ inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
+ return V < IM.first;
+ }
+
+ inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
+ return IM.first < V;
+ }
+
+ struct Idx2MBBCompare {
+ bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
+ return LHS.first < RHS.first;
+ }
+ };
+
+ /// SlotIndexes pass.
+ ///
+ /// This pass assigns indexes to each instruction.
+ class SlotIndexes : public MachineFunctionPass {
+ private:
+ // IndexListEntry allocator.
+ BumpPtrAllocator ileAllocator;
+
+ typedef ilist<IndexListEntry> IndexList;
+ IndexList indexList;
+
+#ifdef EXPENSIVE_CHECKS
+ IndexList graveyardList;
+#endif // EXPENSIVE_CHECKS
+
+ MachineFunction *mf;
+
+ typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
+ Mi2IndexMap mi2iMap;
+
+ /// MBBRanges - Map MBB number to (start, stop) indexes.
+ SmallVector<std::pair<SlotIndex, SlotIndex>, 8> MBBRanges;
+
+ /// Idx2MBBMap - Sorted list of pairs of index of first instruction
+ /// and MBB id.
+ SmallVector<IdxMBBPair, 8> idx2MBBMap;
+
+ IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
+ IndexListEntry *entry =
+ static_cast<IndexListEntry*>(
+ ileAllocator.Allocate(sizeof(IndexListEntry),
+ alignOf<IndexListEntry>()));
+
+ new (entry) IndexListEntry(mi, index);
+
+ return entry;
+ }
+
+ /// Renumber locally after inserting curItr.
+ void renumberIndexes(IndexList::iterator curItr);
+
+ public:
+ static char ID;
+
+ SlotIndexes() : MachineFunctionPass(ID) {
+ initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
+ }
+
+ ~SlotIndexes() {
+ // The indexList's nodes are all allocated in the BumpPtrAllocator.
+ indexList.clearAndLeakNodesUnsafely();
+ }
+
+ void getAnalysisUsage(AnalysisUsage &au) const override;
+ void releaseMemory() override;
+
+ bool runOnMachineFunction(MachineFunction &fn) override;
+
+ /// Dump the indexes.
+ void dump() const;
+
+ /// Renumber the index list, providing space for new instructions.
+ void renumberIndexes();
+
+ /// Repair indexes after adding and removing instructions.
+ void repairIndexesInRange(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator Begin,
+ MachineBasicBlock::iterator End);
+
+ /// Returns the zero index for this analysis.
+ SlotIndex getZeroIndex() {
+ assert(indexList.front().getIndex() == 0 && "First index is not 0?");
+ return SlotIndex(&indexList.front(), 0);
+ }
+
+ /// Returns the base index of the last slot in this analysis.
+ SlotIndex getLastIndex() {
+ return SlotIndex(&indexList.back(), 0);
+ }
+
+ /// Returns true if the given machine instr is mapped to an index,
+ /// otherwise returns false.
+ bool hasIndex(const MachineInstr *instr) const {
+ return mi2iMap.count(instr);
+ }
+
+ /// Returns the base index for the given instruction.
+ SlotIndex getInstructionIndex(const MachineInstr *MI) const {
+ // Instructions inside a bundle have the same number as the bundle itself.
+ Mi2IndexMap::const_iterator itr = mi2iMap.find(getBundleStart(MI));
+ assert(itr != mi2iMap.end() && "Instruction not found in maps.");
+ return itr->second;
+ }
+
+ /// Returns the instruction for the given index, or null if the given
+ /// index has no instruction associated with it.
+ MachineInstr* getInstructionFromIndex(SlotIndex index) const {
+ return index.isValid() ? index.listEntry()->getInstr() : nullptr;
+ }
+
+ /// Returns the next non-null index, if one exists.
+ /// Otherwise returns getLastIndex().
+ SlotIndex getNextNonNullIndex(SlotIndex Index) {
+ IndexList::iterator I = Index.listEntry()->getIterator();
+ IndexList::iterator E = indexList.end();
+ while (++I != E)
+ if (I->getInstr())
+ return SlotIndex(&*I, Index.getSlot());
+ // We reached the end of the function.
+ return getLastIndex();
+ }
+
+ /// getIndexBefore - Returns the index of the last indexed instruction
+ /// before MI, or the start index of its basic block.
+ /// MI is not required to have an index.
+ SlotIndex getIndexBefore(const MachineInstr *MI) const {
+ const MachineBasicBlock *MBB = MI->getParent();
+ assert(MBB && "MI must be inserted in a basic block");
+ MachineBasicBlock::const_iterator I = MI, B = MBB->begin();
+ for (;;) {
+ if (I == B)
+ return getMBBStartIdx(MBB);
+ --I;
+ Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
+ if (MapItr != mi2iMap.end())
+ return MapItr->second;
+ }
+ }
+
+ /// getIndexAfter - Returns the index of the first indexed instruction
+ /// after MI, or the end index of its basic block.
+ /// MI is not required to have an index.
+ SlotIndex getIndexAfter(const MachineInstr *MI) const {
+ const MachineBasicBlock *MBB = MI->getParent();
+ assert(MBB && "MI must be inserted in a basic block");
+ MachineBasicBlock::const_iterator I = MI, E = MBB->end();
+ for (;;) {
+ ++I;
+ if (I == E)
+ return getMBBEndIdx(MBB);
+ Mi2IndexMap::const_iterator MapItr = mi2iMap.find(I);
+ if (MapItr != mi2iMap.end())
+ return MapItr->second;
+ }
+ }
+
+ /// Return the (start,end) range of the given basic block number.
+ const std::pair<SlotIndex, SlotIndex> &
+ getMBBRange(unsigned Num) const {
+ return MBBRanges[Num];
+ }
+
+ /// Return the (start,end) range of the given basic block.
+ const std::pair<SlotIndex, SlotIndex> &
+ getMBBRange(const MachineBasicBlock *MBB) const {
+ return getMBBRange(MBB->getNumber());
+ }
+
+ /// Returns the first index in the given basic block number.
+ SlotIndex getMBBStartIdx(unsigned Num) const {
+ return getMBBRange(Num).first;
+ }
+
+ /// Returns the first index in the given basic block.
+ SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
+ return getMBBRange(mbb).first;
+ }
+
+ /// Returns the last index in the given basic block number.
+ SlotIndex getMBBEndIdx(unsigned Num) const {
+ return getMBBRange(Num).second;
+ }
+
+ /// Returns the last index in the given basic block.
+ SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
+ return getMBBRange(mbb).second;
+ }
+
+ /// Iterator over the idx2MBBMap (sorted pairs of the slot index at which a
+ /// basic block begins and the basic block itself).
+ typedef SmallVectorImpl<IdxMBBPair>::const_iterator MBBIndexIterator;
+ /// Move iterator to the next IdxMBBPair where the SlotIndex is greater or
+ /// equal to \p To.
+ MBBIndexIterator advanceMBBIndex(MBBIndexIterator I, SlotIndex To) const {
+ return std::lower_bound(I, idx2MBBMap.end(), To);
+ }
+ /// Get an iterator pointing to the IdxMBBPair with the smallest SlotIndex
+ /// that is greater or equal to \p Idx.
+ MBBIndexIterator findMBBIndex(SlotIndex Idx) const {
+ return advanceMBBIndex(idx2MBBMap.begin(), Idx);
+ }
+ /// Returns an iterator for the beginning of the idx2MBBMap.
+ MBBIndexIterator MBBIndexBegin() const {
+ return idx2MBBMap.begin();
+ }
+ /// Return an iterator for the end of the idx2MBBMap.
+ MBBIndexIterator MBBIndexEnd() const {
+ return idx2MBBMap.end();
+ }
+
+ /// Returns the basic block which the given index falls in.
+ MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
+ if (MachineInstr *MI = getInstructionFromIndex(index))
+ return MI->getParent();
+
+ MBBIndexIterator I = findMBBIndex(index);
+ // Take the pair containing the index
+ MBBIndexIterator J =
+ ((I != MBBIndexEnd() && I->first > index) ||
+ (I == MBBIndexEnd() && !idx2MBBMap.empty())) ? std::prev(I) : I;
+
+ assert(J != MBBIndexEnd() && J->first <= index &&
+ index < getMBBEndIdx(J->second) &&
+ "index does not correspond to an MBB");
+ return J->second;
+ }
+
+ /// Returns the MBB covering the given range, or null if the range covers
+ /// more than one basic block.
+ MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {
+
+ assert(start < end && "Backwards ranges not allowed.");
+ MBBIndexIterator itr = findMBBIndex(start);
+ if (itr == MBBIndexEnd()) {
+ itr = std::prev(itr);
+ return itr->second;
+ }
+
+ // Check that we don't cross the boundary into this block.
+ if (itr->first < end)
+ return nullptr;
+
+ itr = std::prev(itr);
+
+ if (itr->first <= start)
+ return itr->second;
+
+ return nullptr;
+ }
+
+ /// Insert the given machine instruction into the mapping. Returns the
+ /// assigned index.
+ /// If Late is set and there are null indexes between mi's neighboring
+ /// instructions, create the new index after the null indexes instead of
+ /// before them.
+ SlotIndex insertMachineInstrInMaps(MachineInstr *mi, bool Late = false) {
+ assert(!mi->isInsideBundle() &&
+ "Instructions inside bundles should use bundle start's slot.");
+ assert(mi2iMap.find(mi) == mi2iMap.end() && "Instr already indexed.");
+ // Numbering DBG_VALUE instructions could cause code generation to be
+ // affected by debug information.
+ assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
+
+ assert(mi->getParent() != nullptr && "Instr must be added to function.");
+
+ // Get the entries where mi should be inserted.
+ IndexList::iterator prevItr, nextItr;
+ if (Late) {
+ // Insert mi's index immediately before the following instruction.
+ nextItr = getIndexAfter(mi).listEntry()->getIterator();
+ prevItr = std::prev(nextItr);
+ } else {
+ // Insert mi's index immediately after the preceding instruction.
+ prevItr = getIndexBefore(mi).listEntry()->getIterator();
+ nextItr = std::next(prevItr);
+ }
+
+ // Get a number for the new instr, or 0 if there's no room currently.
+ // In the latter case we'll force a renumber later.
+ unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
+ unsigned newNumber = prevItr->getIndex() + dist;
+
+ // Insert a new list entry for mi.
+ IndexList::iterator newItr =
+ indexList.insert(nextItr, createEntry(mi, newNumber));
+
+ // Renumber locally if we need to.
+ if (dist == 0)
+ renumberIndexes(newItr);
+
+ SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
+ mi2iMap.insert(std::make_pair(mi, newIndex));
+ return newIndex;
+ }
+
+ /// Remove the given machine instruction from the mapping.
+ void removeMachineInstrFromMaps(MachineInstr *mi) {
+ // remove index -> MachineInstr and
+ // MachineInstr -> index mappings
+ Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
+ if (mi2iItr != mi2iMap.end()) {
+ IndexListEntry *miEntry(mi2iItr->second.listEntry());
+ assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
+ // FIXME: Eventually we want to actually delete these indexes.
+ miEntry->setInstr(nullptr);
+ mi2iMap.erase(mi2iItr);
+ }
+ }
+
+ /// ReplaceMachineInstrInMaps - Replace a machine instruction with a new one
+ /// in the maps used by the register allocator.
+ void replaceMachineInstrInMaps(MachineInstr *mi, MachineInstr *newMI) {
+ Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
+ if (mi2iItr == mi2iMap.end())
+ return;
+ SlotIndex replaceBaseIndex = mi2iItr->second;
+ IndexListEntry *miEntry(replaceBaseIndex.listEntry());
+ assert(miEntry->getInstr() == mi &&
+ "Mismatched instruction in index tables.");
+ miEntry->setInstr(newMI);
+ mi2iMap.erase(mi2iItr);
+ mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
+ }
+
+ /// Add the given MachineBasicBlock into the maps.
+ void insertMBBInMaps(MachineBasicBlock *mbb) {
+ MachineFunction::iterator nextMBB =
+ std::next(MachineFunction::iterator(mbb));
+
+ IndexListEntry *startEntry = nullptr;
+ IndexListEntry *endEntry = nullptr;
+ IndexList::iterator newItr;
+ if (nextMBB == mbb->getParent()->end()) {
+ startEntry = &indexList.back();
+ endEntry = createEntry(nullptr, 0);
+ newItr = indexList.insertAfter(startEntry->getIterator(), endEntry);
+ } else {
+ startEntry = createEntry(nullptr, 0);
+ endEntry = getMBBStartIdx(&*nextMBB).listEntry();
+ newItr = indexList.insert(endEntry->getIterator(), startEntry);
+ }
+
+ SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
+ SlotIndex endIdx(endEntry, SlotIndex::Slot_Block);
+
+ MachineFunction::iterator prevMBB(mbb);
+ assert(prevMBB != mbb->getParent()->end() &&
+ "Can't insert a new block at the beginning of a function.");
+ --prevMBB;
+ MBBRanges[prevMBB->getNumber()].second = startIdx;
+
+ assert(unsigned(mbb->getNumber()) == MBBRanges.size() &&
+ "Blocks must be added in order");
+ MBBRanges.push_back(std::make_pair(startIdx, endIdx));
+ idx2MBBMap.push_back(IdxMBBPair(startIdx, mbb));
+
+ renumberIndexes(newItr);
+ std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
+ }
+
+ /// \brief Free the resources that were required to maintain a SlotIndex.
+ ///
+ /// Once an index is no longer needed (for instance because the instruction
+ /// at that index has been moved), the resources required to maintain the
+ /// index can be relinquished to reduce memory use and improve renumbering
+ /// performance. Any remaining SlotIndex objects that point to the same
+ /// index are left 'dangling' (much the same as a dangling pointer to a
+ /// freed object) and should not be accessed, except to destruct them.
+ ///
+ /// Like dangling pointers, access to dangling SlotIndexes can cause
+ /// painful-to-track-down bugs, especially if the memory for the index
+ /// previously pointed to has been re-used. To detect dangling SlotIndex
+ /// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
+ /// be retained in a graveyard instead of being freed. Operations on indexes
+ /// in the graveyard will trigger an assertion.
+ void eraseIndex(SlotIndex index) {
+ IndexListEntry *entry = index.listEntry();
+#ifdef EXPENSIVE_CHECKS
+ indexList.remove(entry);
+ graveyardList.push_back(entry);
+ entry->setPoison();
+#else
+ indexList.erase(entry);
+#endif
+ }
+
+ };
+
+
+ // Specialize IntervalMapInfo for half-open slot index intervals.
+ template <>
+ struct IntervalMapInfo<SlotIndex> : IntervalMapHalfOpenInfo<SlotIndex> {
+ };
+
+}
+
+#endif // LLVM_CODEGEN_SLOTINDEXES_H
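The insertion/removal API above is easiest to see in use. A minimal sketch,
assuming a pass already holds a SlotIndexes reference; the helper name
reindexAfterMove is hypothetical, not part of this header:

    #include "llvm/CodeGen/SlotIndexes.h"

    using namespace llvm;

    // Hypothetical helper: after an instruction has been moved, drop its
    // stale index, then re-insert it at its new position. Late=true places
    // the new index after any null entries left by erased neighbours.
    static SlotIndex reindexAfterMove(SlotIndexes &SI, MachineInstr *MI) {
      SI.removeMachineInstrFromMaps(MI); // old entry is nulled out
      return SI.insertMachineInstrInMaps(MI, /*Late=*/true);
    }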
diff --git a/contrib/llvm/include/llvm/CodeGen/StackMaps.h b/contrib/llvm/include/llvm/CodeGen/StackMaps.h
new file mode 100644
index 0000000..972a616
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/StackMaps.h
@@ -0,0 +1,257 @@
+//===------------------- StackMaps.h - StackMaps ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKMAPS_H
+#define LLVM_CODEGEN_STACKMAPS_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Debug.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class AsmPrinter;
+class MCExpr;
+class MCStreamer;
+
+/// \brief MI-level patchpoint operands.
+///
+/// MI patchpoint operations take the form:
+/// [<def>], <id>, <numBytes>, <target>, <numArgs>, <cc>, ...
+///
+/// IR patchpoint intrinsics do not have the <cc> operand because calling
+/// convention is part of the subclass data.
+///
+/// SD patchpoint nodes do not have a def operand because it is part of the
+/// SDValue.
+///
+/// Patchpoints following the anyregcc convention are handled specially. For
+/// these, the stack map also records the location of the return value and
+/// arguments.
+class PatchPointOpers {
+public:
+ /// Enumerate the meta operands.
+ enum { IDPos, NBytesPos, TargetPos, NArgPos, CCPos, MetaEnd };
+
+private:
+ const MachineInstr *MI;
+ bool HasDef;
+ bool IsAnyReg;
+
+public:
+ explicit PatchPointOpers(const MachineInstr *MI);
+
+ bool isAnyReg() const { return IsAnyReg; }
+ bool hasDef() const { return HasDef; }
+
+ unsigned getMetaIdx(unsigned Pos = 0) const {
+ assert(Pos < MetaEnd && "Meta operand index out of range.");
+ return (HasDef ? 1 : 0) + Pos;
+ }
+
+ const MachineOperand &getMetaOper(unsigned Pos) {
+ return MI->getOperand(getMetaIdx(Pos));
+ }
+
+ unsigned getArgIdx() const { return getMetaIdx() + MetaEnd; }
+
+ /// Get the operand index of the variable list of non-argument operands.
+ /// These hold the "live state".
+ unsigned getVarIdx() const {
+ return getMetaIdx() + MetaEnd +
+ MI->getOperand(getMetaIdx(NArgPos)).getImm();
+ }
+
+ /// Get the index at which stack map locations will be recorded.
+ /// Arguments are not recorded unless the anyregcc convention is used.
+ unsigned getStackMapStartIdx() const {
+ if (IsAnyReg)
+ return getArgIdx();
+ return getVarIdx();
+ }
+
+ /// \brief Get the next scratch register operand index.
+ unsigned getNextScratchIdx(unsigned StartIdx = 0) const;
+};
+
+/// MI-level Statepoint operands
+///
+/// Statepoint operands take the form:
+///   <id>, <num patch bytes>, <num call arguments>, <call target>,
+/// [call arguments], <StackMaps::ConstantOp>, <calling convention>,
+/// <StackMaps::ConstantOp>, <statepoint flags>,
+/// <StackMaps::ConstantOp>, <num other args>, [other args],
+/// [gc values]
+class StatepointOpers {
+private:
+  // These values are absolute offsets into the operands of the statepoint
+ // instruction.
+ enum { IDPos, NBytesPos, NCallArgsPos, CallTargetPos, MetaEnd };
+
+  // These values are relative offsets from the start of the statepoint meta
+ // arguments (i.e. the end of the call arguments).
+ enum { CCOffset = 1, FlagsOffset = 3, NumVMSArgsOffset = 5 };
+
+public:
+ explicit StatepointOpers(const MachineInstr *MI) : MI(MI) {}
+
+  /// Get the starting index of the non-call-related arguments
+ /// (calling convention, statepoint flags, vm state and gc state).
+ unsigned getVarIdx() const {
+ return MI->getOperand(NCallArgsPos).getImm() + MetaEnd;
+ }
+
+ /// Return the ID for the given statepoint.
+ uint64_t getID() const { return MI->getOperand(IDPos).getImm(); }
+
+ /// Return the number of patchable bytes the given statepoint should emit.
+ uint32_t getNumPatchBytes() const {
+ return MI->getOperand(NBytesPos).getImm();
+ }
+
+ /// Returns the target of the underlying call.
+ const MachineOperand &getCallTarget() const {
+ return MI->getOperand(CallTargetPos);
+ }
+
+private:
+ const MachineInstr *MI;
+};
+
+class StackMaps {
+public:
+ struct Location {
+ enum LocationType {
+ Unprocessed,
+ Register,
+ Direct,
+ Indirect,
+ Constant,
+ ConstantIndex
+ };
+ LocationType Type;
+ unsigned Size;
+ unsigned Reg;
+ int64_t Offset;
+ Location() : Type(Unprocessed), Size(0), Reg(0), Offset(0) {}
+ Location(LocationType Type, unsigned Size, unsigned Reg, int64_t Offset)
+ : Type(Type), Size(Size), Reg(Reg), Offset(Offset) {}
+ };
+
+ struct LiveOutReg {
+ unsigned short Reg;
+ unsigned short DwarfRegNum;
+ unsigned short Size;
+
+ LiveOutReg() : Reg(0), DwarfRegNum(0), Size(0) {}
+ LiveOutReg(unsigned short Reg, unsigned short DwarfRegNum,
+ unsigned short Size)
+ : Reg(Reg), DwarfRegNum(DwarfRegNum), Size(Size) {}
+ };
+
+ // OpTypes are used to encode information about the following logical
+ // operand (which may consist of several MachineOperands) for the
+ // OpParser.
+ typedef enum { DirectMemRefOp, IndirectMemRefOp, ConstantOp } OpType;
+
+ StackMaps(AsmPrinter &AP);
+
+ void reset() {
+ CSInfos.clear();
+ ConstPool.clear();
+ FnStackSize.clear();
+ }
+
+ /// \brief Generate a stackmap record for a stackmap instruction.
+ ///
+ /// MI must be a raw STACKMAP, not a PATCHPOINT.
+ void recordStackMap(const MachineInstr &MI);
+
+ /// \brief Generate a stackmap record for a patchpoint instruction.
+ void recordPatchPoint(const MachineInstr &MI);
+
+ /// \brief Generate a stackmap record for a statepoint instruction.
+ void recordStatepoint(const MachineInstr &MI);
+
+ /// If there is any stack map data, create a stack map section and serialize
+ /// the map info into it. This clears the stack map data structures
+ /// afterwards.
+ void serializeToStackMapSection();
+
+private:
+ static const char *WSMP;
+ typedef SmallVector<Location, 8> LocationVec;
+ typedef SmallVector<LiveOutReg, 8> LiveOutVec;
+ typedef MapVector<uint64_t, uint64_t> ConstantPool;
+ typedef MapVector<const MCSymbol *, uint64_t> FnStackSizeMap;
+
+ struct CallsiteInfo {
+ const MCExpr *CSOffsetExpr;
+ uint64_t ID;
+ LocationVec Locations;
+ LiveOutVec LiveOuts;
+ CallsiteInfo() : CSOffsetExpr(nullptr), ID(0) {}
+ CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
+ LocationVec &&Locations, LiveOutVec &&LiveOuts)
+ : CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(std::move(Locations)),
+ LiveOuts(std::move(LiveOuts)) {}
+ };
+
+ typedef std::vector<CallsiteInfo> CallsiteInfoList;
+
+ AsmPrinter &AP;
+ CallsiteInfoList CSInfos;
+ ConstantPool ConstPool;
+ FnStackSizeMap FnStackSize;
+
+ MachineInstr::const_mop_iterator
+ parseOperand(MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
+ LiveOutVec &LiveOuts) const;
+
+ /// \brief Create a live-out register record for the given register @p Reg.
+ LiveOutReg createLiveOutReg(unsigned Reg,
+ const TargetRegisterInfo *TRI) const;
+
+ /// \brief Parse the register live-out mask and return a vector of live-out
+ /// registers that need to be recorded in the stackmap.
+ LiveOutVec parseRegisterLiveOutMask(const uint32_t *Mask) const;
+
+ /// This should be called by the MC lowering code _immediately_ before
+ /// lowering the MI to an MCInst. It records where the operands for the
+ /// instruction are stored, and outputs a label to record the offset of
+ /// the call from the start of the text section. In special cases (e.g. AnyReg
+ /// calling convention) the return register is also recorded if requested.
+ void recordStackMapOpers(const MachineInstr &MI, uint64_t ID,
+ MachineInstr::const_mop_iterator MOI,
+ MachineInstr::const_mop_iterator MOE,
+ bool recordResult = false);
+
+ /// \brief Emit the stackmap header.
+ void emitStackmapHeader(MCStreamer &OS);
+
+ /// \brief Emit the function frame record for each function.
+ void emitFunctionFrameRecords(MCStreamer &OS);
+
+ /// \brief Emit the constant pool.
+ void emitConstantPoolEntries(MCStreamer &OS);
+
+ /// \brief Emit the callsite info for each stackmap/patchpoint intrinsic call.
+ void emitCallsiteEntries(MCStreamer &OS);
+
+ void print(raw_ostream &OS);
+ void debug() { print(dbgs()); }
+};
+}
+
+#endif
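A minimal sketch of decoding the meta operands enumerated above; MI is
assumed to be a lowered PATCHPOINT, and describePatchPoint is a hypothetical
helper, not part of this header:

    #include "llvm/CodeGen/StackMaps.h"

    using namespace llvm;

    // Pull the <id> and <numBytes> meta operands off a patchpoint using the
    // positions enumerated in PatchPointOpers.
    static void describePatchPoint(const MachineInstr *MI) {
      PatchPointOpers Opers(MI);
      uint64_t ID = Opers.getMetaOper(PatchPointOpers::IDPos).getImm();
      int64_t NumBytes = Opers.getMetaOper(PatchPointOpers::NBytesPos).getImm();
      // Stack map locations start after the call arguments unless anyregcc
      // is in use, in which case the arguments are recorded too.
      unsigned FirstLoc = Opers.getStackMapStartIdx();
      (void)ID; (void)NumBytes; (void)FirstLoc;
    }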
diff --git a/contrib/llvm/include/llvm/CodeGen/StackProtector.h b/contrib/llvm/include/llvm/CodeGen/StackProtector.h
new file mode 100644
index 0000000..8cef85c
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/StackProtector.h
@@ -0,0 +1,129 @@
+//===-- StackProtector.h - Stack Protector Insertion ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass inserts stack protectors into functions which need them. A variable
+// with a random value in it is stored onto the stack before the local variables
+// are allocated. Upon exiting the block, the stored value is checked. If it's
+// changed, then there was some sort of violation and the program aborts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKPROTECTOR_H
+#define LLVM_CODEGEN_STACKPROTECTOR_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetLowering.h"
+
+namespace llvm {
+class Function;
+class Module;
+class PHINode;
+
+class StackProtector : public FunctionPass {
+public:
+ /// SSPLayoutKind. Stack Smashing Protection (SSP) rules require that
+  /// vulnerable stack allocations are located close to the stack protector.
+ enum SSPLayoutKind {
+ SSPLK_None, ///< Did not trigger a stack protector. No effect on data
+ ///< layout.
+ SSPLK_LargeArray, ///< Array or nested array >= SSP-buffer-size. Closest
+ ///< to the stack protector.
+ SSPLK_SmallArray, ///< Array or nested array < SSP-buffer-size. 2nd closest
+ ///< to the stack protector.
+ SSPLK_AddrOf ///< The address of this allocation is exposed and
+ ///< triggered protection. 3rd closest to the protector.
+ };
+
+ /// A mapping of AllocaInsts to their required SSP layout.
+ typedef ValueMap<const AllocaInst *, SSPLayoutKind> SSPLayoutMap;
+
+private:
+ const TargetMachine *TM;
+
+ /// TLI - Keep a pointer of a TargetLowering to consult for determining
+ /// target type sizes.
+ const TargetLoweringBase *TLI;
+ const Triple Trip;
+
+ Function *F;
+ Module *M;
+
+ DominatorTree *DT;
+
+ /// Layout - Mapping of allocations to the required SSPLayoutKind.
+ /// StackProtector analysis will update this map when determining if an
+ /// AllocaInst triggers a stack protector.
+ SSPLayoutMap Layout;
+
+ /// \brief The minimum size of buffers that will receive stack smashing
+  /// protection when -fstack-protector is used.
+ unsigned SSPBufferSize;
+
+ /// VisitedPHIs - The set of PHI nodes visited when determining
+ /// if a variable's reference has been taken. This set
+ /// is maintained to ensure we don't visit the same PHI node multiple
+ /// times.
+ SmallPtrSet<const PHINode *, 16> VisitedPHIs;
+
+ /// InsertStackProtectors - Insert code into the prologue and epilogue of
+ /// the function.
+ ///
+ /// - The prologue code loads and stores the stack guard onto the stack.
+ /// - The epilogue checks the value stored in the prologue against the
+ /// original value. It calls __stack_chk_fail if they differ.
+ bool InsertStackProtectors();
+
+ /// CreateFailBB - Create a basic block to jump to when the stack protector
+ /// check fails.
+ BasicBlock *CreateFailBB();
+
+ /// ContainsProtectableArray - Check whether the type either is an array or
+ /// contains an array of sufficient size so that we need stack protectors
+ /// for it.
+ /// \param [out] IsLarge is set to true if a protectable array is found and
+ /// it is "large" ( >= ssp-buffer-size). In the case of a structure with
+ /// multiple arrays, this gets set if any of them is large.
+ bool ContainsProtectableArray(Type *Ty, bool &IsLarge, bool Strong = false,
+ bool InStruct = false) const;
+
+ /// \brief Check whether a stack allocation has its address taken.
+ bool HasAddressTaken(const Instruction *AI);
+
+ /// RequiresStackProtector - Check whether or not this function needs a
+ /// stack protector based upon the stack protector level.
+ bool RequiresStackProtector();
+
+public:
+ static char ID; // Pass identification, replacement for typeid.
+ StackProtector()
+ : FunctionPass(ID), TM(nullptr), TLI(nullptr), SSPBufferSize(0) {
+ initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+ }
+ StackProtector(const TargetMachine *TM)
+ : FunctionPass(ID), TM(TM), TLI(nullptr), Trip(TM->getTargetTriple()),
+ SSPBufferSize(8) {
+ initializeStackProtectorPass(*PassRegistry::getPassRegistry());
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addPreserved<DominatorTreeWrapperPass>();
+ }
+
+ SSPLayoutKind getSSPLayout(const AllocaInst *AI) const;
+ void adjustForColoring(const AllocaInst *From, const AllocaInst *To);
+
+ bool runOnFunction(Function &Fn) override;
+};
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_STACKPROTECTOR_H
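A sketch of consuming this analysis from a later pass, assuming
StackProtector has already populated its layout map; wantsSlotNearGuard is a
hypothetical helper:

    #include "llvm/CodeGen/StackProtector.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Ask whether an alloca should be placed closest to the stack guard.
    static bool wantsSlotNearGuard(const StackProtector &SP,
                                   const AllocaInst *AI) {
      // SSPLK_LargeArray allocations sit closest to the stack protector.
      return SP.getSSPLayout(AI) == StackProtector::SSPLK_LargeArray;
    }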
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
new file mode 100644
index 0000000..2f13791
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -0,0 +1,168 @@
+//==-- llvm/CodeGen/TargetLoweringObjectFileImpl.h - Object Info -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes used to handle lowerings specific to common
+// object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+#define LLVM_CODEGEN_TARGETLOWERINGOBJECTFILEIMPL_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+
+namespace llvm {
+ class MachineModuleInfo;
+ class Mangler;
+ class MCAsmInfo;
+ class MCExpr;
+ class MCSection;
+ class MCSectionMachO;
+ class MCSymbol;
+ class MCContext;
+ class GlobalValue;
+ class TargetMachine;
+
+
+class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
+ bool UseInitArray;
+ mutable unsigned NextUniqueID = 0;
+
+public:
+ TargetLoweringObjectFileELF() : UseInitArray(false) {}
+
+ ~TargetLoweringObjectFileELF() override {}
+
+ void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
+ const MCSymbol *Sym) const override;
+
+ /// Given a constant with the SectionKind, return a section that it should be
+ /// placed in.
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C) const override;
+
+ MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+ const Function &F) const override;
+
+ /// Return an MCExpr to use for a reference to the specified type info global
+ /// variable from exception handling information.
+ const MCExpr *
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
+
+ // The symbol that gets passed to .cfi_personality.
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI) const override;
+
+ void InitializeELF(bool UseInitArray_);
+ MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const override;
+ MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const override;
+};
+
+
+
+class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
+public:
+ ~TargetLoweringObjectFileMachO() override {}
+ TargetLoweringObjectFileMachO();
+
+ /// Emit the module flags that specify the garbage collection information.
+ void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
+ Mangler &Mang, const TargetMachine &TM) const override;
+
+ MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ MCSection *getSectionForConstant(const DataLayout &DL, SectionKind Kind,
+ const Constant *C) const override;
+
+ /// The mach-o version of this method defaults to returning a stub reference.
+ const MCExpr *
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
+
+ // The symbol that gets passed to .cfi_personality.
+ MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI) const override;
+
+ /// Get MachO PC relative GOT entry relocation
+ const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+ const MCValue &MV, int64_t Offset,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
+
+ void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+ Mangler &Mang, const TargetMachine &TM) const override;
+};
+
+
+
+class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
+public:
+ ~TargetLoweringObjectFileCOFF() override {}
+
+ MCSection *getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ MCSection *SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+ Mangler &Mang, const TargetMachine &TM) const override;
+
+ MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ const TargetMachine &TM) const override;
+
+ /// Emit Obj-C garbage collection and linker options. Only linker option
+ /// emission is implemented for COFF.
+ void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
+ Mangler &Mang, const TargetMachine &TM) const override;
+
+ MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const override;
+ MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const override;
+
+ void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
+ const Mangler &Mang) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h b/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h
new file mode 100644
index 0000000..81054ab
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/TargetSchedule.h
@@ -0,0 +1,190 @@
+//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper around MCSchedModel that allows the interface to
+// benefit from information currently only available in TargetInstrInfo.
+// Ideally, the scheduling interface would be fully defined in the MC layer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
+#define LLVM_CODEGEN_TARGETSCHEDULE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/MCSchedule.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+
+namespace llvm {
+
+class TargetRegisterInfo;
+class TargetSubtargetInfo;
+class TargetInstrInfo;
+class MachineInstr;
+
+/// Provide an instruction scheduling machine model to CodeGen passes.
+class TargetSchedModel {
+ // For efficiency, hold a copy of the statically defined MCSchedModel for this
+ // processor.
+ MCSchedModel SchedModel;
+ InstrItineraryData InstrItins;
+ const TargetSubtargetInfo *STI;
+ const TargetInstrInfo *TII;
+
+ SmallVector<unsigned, 16> ResourceFactors;
+ unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
+ unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
+
+ unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;
+
+public:
+  TargetSchedModel()
+      : SchedModel(MCSchedModel::GetDefaultSchedModel()), STI(nullptr),
+        TII(nullptr) {}
+
+ /// \brief Initialize the machine model for instruction scheduling.
+ ///
+ /// The machine model API keeps a copy of the top-level MCSchedModel table
+ /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
+ /// dynamic properties.
+ void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
+ const TargetInstrInfo *tii);
+
+ /// Return the MCSchedClassDesc for this instruction.
+ const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;
+
+ /// \brief TargetInstrInfo getter.
+ const TargetInstrInfo *getInstrInfo() const { return TII; }
+
+ /// \brief Return true if this machine model includes an instruction-level
+ /// scheduling model.
+ ///
+  /// This is more detailed than the coarse-grain IssueWidth and default
+ /// latency properties, but separate from the per-cycle itinerary data.
+ bool hasInstrSchedModel() const;
+
+ const MCSchedModel *getMCSchedModel() const { return &SchedModel; }
+
+ /// \brief Return true if this machine model includes cycle-to-cycle itinerary
+ /// data.
+ ///
+ /// This models scheduling at each stage in the processor pipeline.
+ bool hasInstrItineraries() const;
+
+ const InstrItineraryData *getInstrItineraries() const {
+ if (hasInstrItineraries())
+ return &InstrItins;
+ return nullptr;
+ }
+
+ /// \brief Return true if this machine model includes an instruction-level
+ /// scheduling model or cycle-to-cycle itinerary data.
+ bool hasInstrSchedModelOrItineraries() const {
+ return hasInstrSchedModel() || hasInstrItineraries();
+ }
+
+ /// \brief Identify the processor corresponding to the current subtarget.
+ unsigned getProcessorID() const { return SchedModel.getProcessorID(); }
+
+ /// \brief Maximum number of micro-ops that may be scheduled per cycle.
+ unsigned getIssueWidth() const { return SchedModel.IssueWidth; }
+
+ /// \brief Return the number of issue slots required for this MI.
+ unsigned getNumMicroOps(const MachineInstr *MI,
+ const MCSchedClassDesc *SC = nullptr) const;
+
+ /// \brief Get the number of kinds of resources for this target.
+ unsigned getNumProcResourceKinds() const {
+ return SchedModel.getNumProcResourceKinds();
+ }
+
+ /// \brief Get a processor resource by ID for convenience.
+ const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
+ return SchedModel.getProcResource(PIdx);
+ }
+
+#ifndef NDEBUG
+ const char *getResourceName(unsigned PIdx) const {
+ if (!PIdx)
+ return "MOps";
+ return SchedModel.getProcResource(PIdx)->Name;
+ }
+#endif
+
+ typedef const MCWriteProcResEntry *ProcResIter;
+
+  /// \brief Get an iterator into the processor resources consumed by this
+  /// scheduling class.
+ ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
+ // The subtarget holds a single resource table for all processors.
+ return STI->getWriteProcResBegin(SC);
+ }
+ ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
+ return STI->getWriteProcResEnd(SC);
+ }
+
+ /// \brief Multiply the number of units consumed for a resource by this factor
+ /// to normalize it relative to other resources.
+ unsigned getResourceFactor(unsigned ResIdx) const {
+ return ResourceFactors[ResIdx];
+ }
+
+ /// \brief Multiply number of micro-ops by this factor to normalize it
+ /// relative to other resources.
+ unsigned getMicroOpFactor() const {
+ return MicroOpFactor;
+ }
+
+ /// \brief Multiply cycle count by this factor to normalize it relative to
+ /// other resources. This is the number of resource units per cycle.
+ unsigned getLatencyFactor() const {
+ return ResourceLCM;
+ }
+
+ /// \brief Number of micro-ops that may be buffered for OOO execution.
+ unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }
+
+ /// \brief Number of resource units that may be buffered for OOO execution.
+ /// \return The buffer size in resource units or -1 for unlimited.
+ int getResourceBufferSize(unsigned PIdx) const {
+ return SchedModel.getProcResource(PIdx)->BufferSize;
+ }
+
+ /// \brief Compute operand latency based on the available machine model.
+ ///
+ /// Compute and return the latency of the given data dependent def and use
+ /// when the operand indices are already known. UseMI may be NULL for an
+ /// unknown user.
+ unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
+ const MachineInstr *UseMI, unsigned UseOperIdx)
+ const;
+
+ /// \brief Compute the instruction latency based on the available machine
+ /// model.
+ ///
+ /// Compute and return the expected latency of this instruction independent of
+ /// a particular use. computeOperandLatency is the preferred API, but this is
+ /// occasionally useful to help estimate instruction cost.
+ ///
+ /// If UseDefaultDefLatency is false and no new machine sched model is
+ /// present this method falls back to TII->getInstrLatency with an empty
+ /// instruction itinerary (this is so we preserve the previous behavior of the
+ /// if converter after moving it to TargetSchedModel).
+ unsigned computeInstrLatency(const MachineInstr *MI,
+ bool UseDefaultDefLatency = true) const;
+ unsigned computeInstrLatency(unsigned Opcode) const;
+
+ /// \brief Output dependency latency of a pair of defs of the same register.
+ ///
+ /// This is typically one cycle.
+ unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *DepMI) const;
+};
+
+} // namespace llvm
+
+#endif
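A sketch of the latency queries above, assuming the model was initialized
via init() as CodeGen passes do; estimateDefUseLatency is a hypothetical
helper:

    #include "llvm/CodeGen/TargetSchedule.h"

    using namespace llvm;

    // Prefer the per-operand latency when any detailed model (sched model
    // or itineraries) exists, else fall back to a per-instruction estimate.
    static unsigned estimateDefUseLatency(const TargetSchedModel &SchedModel,
                                          const MachineInstr *DefMI,
                                          unsigned DefOp,
                                          const MachineInstr *UseMI,
                                          unsigned UseOp) {
      if (SchedModel.hasInstrSchedModelOrItineraries())
        return SchedModel.computeOperandLatency(DefMI, DefOp, UseMI, UseOp);
      return SchedModel.computeInstrLatency(DefMI);
    }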
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.h b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
new file mode 100644
index 0000000..929eb88
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.h
@@ -0,0 +1,386 @@
+//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of low-level target independent types that values
+// in the code generator can have. This allows the target specific behavior
+// of instructions to be described to target independent passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VALUETYPES_H
+#define LLVM_CODEGEN_VALUETYPES_H
+
+#include "llvm/CodeGen/MachineValueType.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+
+ class LLVMContext;
+ class Type;
+
+ /// EVT - Extended Value Type. Capable of holding value types which are not
+ /// native for any processor (such as the i12345 type), as well as the types
+  /// an MVT can represent.
+ struct EVT {
+ private:
+ MVT V;
+ Type *LLVMTy;
+
+ public:
+ LLVM_CONSTEXPR EVT() : V(MVT::INVALID_SIMPLE_VALUE_TYPE), LLVMTy(nullptr) {}
+ LLVM_CONSTEXPR EVT(MVT::SimpleValueType SVT) : V(SVT), LLVMTy(nullptr) {}
+ LLVM_CONSTEXPR EVT(MVT S) : V(S), LLVMTy(nullptr) {}
+
+ bool operator==(EVT VT) const {
+ return !(*this != VT);
+ }
+ bool operator!=(EVT VT) const {
+ if (V.SimpleTy != VT.V.SimpleTy)
+ return true;
+ if (V.SimpleTy < 0)
+ return LLVMTy != VT.LLVMTy;
+ return false;
+ }
+
+ /// getFloatingPointVT - Returns the EVT that represents a floating point
+ /// type with the given number of bits. There are two floating point types
+ /// with 128 bits - this returns f128 rather than ppcf128.
+ static EVT getFloatingPointVT(unsigned BitWidth) {
+ return MVT::getFloatingPointVT(BitWidth);
+ }
+
+ /// getIntegerVT - Returns the EVT that represents an integer with the given
+ /// number of bits.
+ static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
+ MVT M = MVT::getIntegerVT(BitWidth);
+ if (M.SimpleTy >= 0)
+ return M;
+ return getExtendedIntegerVT(Context, BitWidth);
+ }
+
+    /// getVectorVT - Returns the EVT that represents a vector with NumElements
+    /// elements, where each element is of type VT.
+ static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements) {
+ MVT M = MVT::getVectorVT(VT.V, NumElements);
+ if (M.SimpleTy >= 0)
+ return M;
+ return getExtendedVectorVT(Context, VT, NumElements);
+ }
+
+ /// changeVectorElementTypeToInteger - Return a vector with the same number
+ /// of elements as this vector, but with the element type converted to an
+ /// integer type with the same bitwidth.
+ EVT changeVectorElementTypeToInteger() const {
+ if (!isSimple())
+ return changeExtendedVectorElementTypeToInteger();
+ MVT EltTy = getSimpleVT().getVectorElementType();
+ unsigned BitWidth = EltTy.getSizeInBits();
+ MVT IntTy = MVT::getIntegerVT(BitWidth);
+ MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements());
+ assert(VecTy.SimpleTy >= 0 &&
+ "Simple vector VT not representable by simple integer vector VT!");
+ return VecTy;
+ }
+
+ /// Return the type converted to an equivalently sized integer or vector
+ /// with integer element type. Similar to changeVectorElementTypeToInteger,
+ /// but also handles scalars.
+ EVT changeTypeToInteger() {
+ if (isVector())
+ return changeVectorElementTypeToInteger();
+
+ if (isSimple())
+ return MVT::getIntegerVT(getSizeInBits());
+
+ return changeExtendedTypeToInteger();
+ }
+
+ /// isSimple - Test if the given EVT is simple (as opposed to being
+ /// extended).
+ bool isSimple() const {
+ return V.SimpleTy >= 0;
+ }
+
+ /// isExtended - Test if the given EVT is extended (as opposed to
+ /// being simple).
+ bool isExtended() const {
+ return !isSimple();
+ }
+
+ /// isFloatingPoint - Return true if this is a FP, or a vector FP type.
+ bool isFloatingPoint() const {
+ return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
+ }
+
+ /// isInteger - Return true if this is an integer, or a vector integer type.
+ bool isInteger() const {
+ return isSimple() ? V.isInteger() : isExtendedInteger();
+ }
+
+ /// isVector - Return true if this is a vector value type.
+ bool isVector() const {
+ return isSimple() ? V.isVector() : isExtendedVector();
+ }
+
+ /// is16BitVector - Return true if this is a 16-bit vector type.
+ bool is16BitVector() const {
+ return isSimple() ? V.is16BitVector() : isExtended16BitVector();
+ }
+
+ /// is32BitVector - Return true if this is a 32-bit vector type.
+ bool is32BitVector() const {
+ return isSimple() ? V.is32BitVector() : isExtended32BitVector();
+ }
+
+ /// is64BitVector - Return true if this is a 64-bit vector type.
+ bool is64BitVector() const {
+ return isSimple() ? V.is64BitVector() : isExtended64BitVector();
+ }
+
+ /// is128BitVector - Return true if this is a 128-bit vector type.
+ bool is128BitVector() const {
+ return isSimple() ? V.is128BitVector() : isExtended128BitVector();
+ }
+
+ /// is256BitVector - Return true if this is a 256-bit vector type.
+ bool is256BitVector() const {
+ return isSimple() ? V.is256BitVector() : isExtended256BitVector();
+ }
+
+ /// is512BitVector - Return true if this is a 512-bit vector type.
+ bool is512BitVector() const {
+ return isSimple() ? V.is512BitVector() : isExtended512BitVector();
+ }
+
+ /// is1024BitVector - Return true if this is a 1024-bit vector type.
+ bool is1024BitVector() const {
+ return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
+ }
+
+ /// is2048BitVector - Return true if this is a 2048-bit vector type.
+ bool is2048BitVector() const {
+ return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
+ }
+
+ /// isOverloaded - Return true if this is an overloaded type for TableGen.
+ bool isOverloaded() const {
+ return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
+ }
+
+ /// isByteSized - Return true if the bit size is a multiple of 8.
+ bool isByteSized() const {
+ return (getSizeInBits() & 7) == 0;
+ }
+
+ /// isRound - Return true if the size is a power-of-two number of bytes.
+ bool isRound() const {
+ unsigned BitSize = getSizeInBits();
+ return BitSize >= 8 && !(BitSize & (BitSize - 1));
+ }
+
+ /// bitsEq - Return true if this has the same number of bits as VT.
+ bool bitsEq(EVT VT) const {
+ if (EVT::operator==(VT)) return true;
+ return getSizeInBits() == VT.getSizeInBits();
+ }
+
+ /// bitsGT - Return true if this has more bits than VT.
+ bool bitsGT(EVT VT) const {
+ if (EVT::operator==(VT)) return false;
+ return getSizeInBits() > VT.getSizeInBits();
+ }
+
+    /// bitsGE - Return true if this has no fewer bits than VT.
+ bool bitsGE(EVT VT) const {
+ if (EVT::operator==(VT)) return true;
+ return getSizeInBits() >= VT.getSizeInBits();
+ }
+
+    /// bitsLT - Return true if this has fewer bits than VT.
+ bool bitsLT(EVT VT) const {
+ if (EVT::operator==(VT)) return false;
+ return getSizeInBits() < VT.getSizeInBits();
+ }
+
+ /// bitsLE - Return true if this has no more bits than VT.
+ bool bitsLE(EVT VT) const {
+ if (EVT::operator==(VT)) return true;
+ return getSizeInBits() <= VT.getSizeInBits();
+ }
+
+
+ /// getSimpleVT - Return the SimpleValueType held in the specified
+ /// simple EVT.
+ MVT getSimpleVT() const {
+ assert(isSimple() && "Expected a SimpleValueType!");
+ return V;
+ }
+
+ /// getScalarType - If this is a vector type, return the element type,
+ /// otherwise return this.
+ EVT getScalarType() const {
+ return isVector() ? getVectorElementType() : *this;
+ }
+
+ /// getVectorElementType - Given a vector type, return the type of
+ /// each element.
+ EVT getVectorElementType() const {
+ assert(isVector() && "Invalid vector type!");
+ if (isSimple())
+ return V.getVectorElementType();
+ return getExtendedVectorElementType();
+ }
+
+ /// getVectorNumElements - Given a vector type, return the number of
+ /// elements it contains.
+ unsigned getVectorNumElements() const {
+ assert(isVector() && "Invalid vector type!");
+ if (isSimple())
+ return V.getVectorNumElements();
+ return getExtendedVectorNumElements();
+ }
+
+ /// getSizeInBits - Return the size of the specified value type in bits.
+ unsigned getSizeInBits() const {
+ if (isSimple())
+ return V.getSizeInBits();
+ return getExtendedSizeInBits();
+ }
+
+ unsigned getScalarSizeInBits() const {
+ return getScalarType().getSizeInBits();
+ }
+
+ /// getStoreSize - Return the number of bytes overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSize() const {
+ return (getSizeInBits() + 7) / 8;
+ }
+
+ /// getStoreSizeInBits - Return the number of bits overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSizeInBits() const {
+ return getStoreSize() * 8;
+ }
+
+ /// getRoundIntegerType - Rounds the bit-width of the given integer EVT up
+ /// to the nearest power of two (and at least to eight), and returns the
+ /// integer EVT with that number of bits.
+ EVT getRoundIntegerType(LLVMContext &Context) const {
+ assert(isInteger() && !isVector() && "Invalid integer type!");
+ unsigned BitWidth = getSizeInBits();
+ if (BitWidth <= 8)
+ return EVT(MVT::i8);
+ return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
+ }
+
+ /// getHalfSizedIntegerVT - Finds the smallest simple value type that is
+ /// greater than or equal to half the width of this EVT. If no simple
+ /// value type can be found, an extended integer value type of half the
+ /// size (rounded up) is returned.
+ EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
+ assert(isInteger() && !isVector() && "Invalid integer type!");
+ unsigned EVTSize = getSizeInBits();
+ for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
+ IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
+ EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
+ if (HalfVT.getSizeInBits() * 2 >= EVTSize)
+ return HalfVT;
+ }
+ return getIntegerVT(Context, (EVTSize + 1) / 2);
+ }
+
+ /// \brief Return a VT for an integer vector type with the size of the
+    /// elements doubled. The type returned may be an extended type.
+ EVT widenIntegerVectorElementType(LLVMContext &Context) const {
+ EVT EltVT = getVectorElementType();
+ EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
+ return EVT::getVectorVT(Context, EltVT, getVectorNumElements());
+ }
+
+    /// isPow2VectorType - Returns true if the number of elements in the
+    /// vector is a power of 2.
+ bool isPow2VectorType() const {
+ unsigned NElts = getVectorNumElements();
+ return !(NElts & (NElts - 1));
+ }
+
+ /// getPow2VectorType - Widens the length of the given vector EVT up to
+ /// the nearest power of 2 and returns that type.
+ EVT getPow2VectorType(LLVMContext &Context) const {
+      if (!isPow2VectorType()) {
+        unsigned NElts = getVectorNumElements();
+        unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+        return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts);
+      }
+      return *this;
+    }
+
+ /// getEVTString - This function returns value type as a string,
+ /// e.g. "i32".
+ std::string getEVTString() const;
+
+ /// getTypeForEVT - This method returns an LLVM type corresponding to the
+ /// specified EVT. For integer types, this returns an unsigned type. Note
+ /// that this will abort for types that cannot be represented.
+ Type *getTypeForEVT(LLVMContext &Context) const;
+
+ /// getEVT - Return the value type corresponding to the specified type.
+ /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
+ /// types are returned as Other, otherwise they are invalid.
+ static EVT getEVT(Type *Ty, bool HandleUnknown = false);
+
+ intptr_t getRawBits() const {
+ if (isSimple())
+ return V.SimpleTy;
+ else
+ return (intptr_t)(LLVMTy);
+ }
+
+ /// compareRawBits - A meaningless but well-behaved order, useful for
+ /// constructing containers.
+ struct compareRawBits {
+ bool operator()(EVT L, EVT R) const {
+ if (L.V.SimpleTy == R.V.SimpleTy)
+ return L.LLVMTy < R.LLVMTy;
+ else
+ return L.V.SimpleTy < R.V.SimpleTy;
+ }
+ };
+
+ private:
+ // Methods for handling the Extended-type case in functions above.
+ // These are all out-of-line to prevent users of this header file
+ // from having a dependency on Type.h.
+ EVT changeExtendedTypeToInteger() const;
+ EVT changeExtendedVectorElementTypeToInteger() const;
+ static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
+ static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
+ unsigned NumElements);
+ bool isExtendedFloatingPoint() const LLVM_READONLY;
+ bool isExtendedInteger() const LLVM_READONLY;
+ bool isExtendedVector() const LLVM_READONLY;
+ bool isExtended16BitVector() const LLVM_READONLY;
+ bool isExtended32BitVector() const LLVM_READONLY;
+ bool isExtended64BitVector() const LLVM_READONLY;
+ bool isExtended128BitVector() const LLVM_READONLY;
+ bool isExtended256BitVector() const LLVM_READONLY;
+ bool isExtended512BitVector() const LLVM_READONLY;
+ bool isExtended1024BitVector() const LLVM_READONLY;
+ bool isExtended2048BitVector() const LLVM_READONLY;
+ EVT getExtendedVectorElementType() const;
+ unsigned getExtendedVectorNumElements() const LLVM_READONLY;
+ unsigned getExtendedSizeInBits() const;
+ };
+
+} // End llvm namespace
+
+#endif
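A small sketch of the EVT factory methods above: widths with no simple MVT
equivalent come back as extended EVTs; legalishIntegerVT is a hypothetical
helper:

    #include "llvm/CodeGen/ValueTypes.h"

    using namespace llvm;

    // Integer widths with no MVT (e.g. i12345) yield extended EVTs, which
    // getRoundIntegerType rounds up to a power-of-two bit width (min 8).
    static EVT legalishIntegerVT(LLVMContext &Ctx, unsigned Bits) {
      EVT VT = EVT::getIntegerVT(Ctx, Bits);
      return VT.isSimple() ? VT : VT.getRoundIntegerType(Ctx);
    }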
diff --git a/contrib/llvm/include/llvm/CodeGen/ValueTypes.td b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
new file mode 100644
index 0000000..f29ec42
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/ValueTypes.td
@@ -0,0 +1,119 @@
+//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Value types - These values correspond to the register types defined in the
+// ValueTypes.h file. If you update anything here, you must update it there as
+// well!
+//
+//===----------------------------------------------------------------------===//
+
+class ValueType<int size, int value> {
+ string Namespace = "MVT";
+ int Size = size;
+ int Value = value;
+}
+
+def OtherVT: ValueType<0 , 0>; // "Other" value
+def i1 : ValueType<1 , 1>; // One bit boolean value
+def i8 : ValueType<8 , 2>; // 8-bit integer value
+def i16 : ValueType<16 , 3>; // 16-bit integer value
+def i32 : ValueType<32 , 4>; // 32-bit integer value
+def i64 : ValueType<64 , 5>; // 64-bit integer value
+def i128 : ValueType<128, 6>; // 128-bit integer value
+def f16 : ValueType<16 , 7>; // 16-bit floating point value
+def f32 : ValueType<32 , 8>; // 32-bit floating point value
+def f64 : ValueType<64 , 9>; // 64-bit floating point value
+def f80 : ValueType<80 , 10>; // 80-bit floating point value
+def f128 : ValueType<128, 11>; // 128-bit floating point value
+def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value
+
+def v2i1 : ValueType<2 , 13>; // 2 x i1 vector value
+def v4i1 : ValueType<4 , 14>; // 4 x i1 vector value
+def v8i1 : ValueType<8 , 15>; // 8 x i1 vector value
+def v16i1 : ValueType<16, 16>; // 16 x i1 vector value
+def v32i1 : ValueType<32 , 17>; // 32 x i1 vector value
+def v64i1 : ValueType<64 , 18>; // 64 x i1 vector value
+def v512i1 : ValueType<512, 19>; // 512 x i1 vector value
+def v1024i1: ValueType<1024,20>; // 1024 x i1 vector value
+
+def v1i8 : ValueType<8, 21>; // 1 x i8 vector value
+def v2i8 : ValueType<16 , 22>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 23>; // 4 x i8 vector value
+def v8i8 : ValueType<64 , 24>; // 8 x i8 vector value
+def v16i8 : ValueType<128, 25>; // 16 x i8 vector value
+def v32i8 : ValueType<256, 26>; // 32 x i8 vector value
+def v64i8 : ValueType<512, 27>; // 64 x i8 vector value
+def v128i8 : ValueType<1024,28>; //128 x i8 vector value
+def v256i8 : ValueType<2048,29>; //256 x i8 vector value
+
+def v1i16 : ValueType<16 , 30>; // 1 x i16 vector value
+def v2i16 : ValueType<32 , 31>; // 2 x i16 vector value
+def v4i16 : ValueType<64 , 32>; // 4 x i16 vector value
+def v8i16 : ValueType<128, 33>; // 8 x i16 vector value
+def v16i16 : ValueType<256, 34>; // 16 x i16 vector value
+def v32i16 : ValueType<512, 35>; // 32 x i16 vector value
+def v64i16 : ValueType<1024,36>; // 64 x i16 vector value
+def v128i16: ValueType<2048,37>; //128 x i16 vector value
+
+def v1i32 : ValueType<32 , 38>; // 1 x i32 vector value
+def v2i32 : ValueType<64 , 39>; // 2 x i32 vector value
+def v4i32 : ValueType<128, 40>; // 4 x i32 vector value
+def v8i32 : ValueType<256, 41>; // 8 x i32 vector value
+def v16i32 : ValueType<512, 42>; // 16 x i32 vector value
+def v32i32 : ValueType<1024,43>; // 32 x i32 vector value
+def v64i32 : ValueType<2048,44>; // 64 x i32 vector value
+
+def v1i64 : ValueType<64 , 45>; // 1 x i64 vector value
+def v2i64 : ValueType<128, 46>; // 2 x i64 vector value
+def v4i64 : ValueType<256, 47>; // 4 x i64 vector value
+def v8i64 : ValueType<512, 48>; // 8 x i64 vector value
+def v16i64 : ValueType<1024,49>; // 16 x i64 vector value
+def v32i64 : ValueType<2048,50>; // 32 x i64 vector value
+
+def v1i128 : ValueType<128, 51>; // 1 x i128 vector value
+
+def v2f16 : ValueType<32 , 52>; // 2 x f16 vector value
+def v4f16 : ValueType<64 , 53>; // 4 x f16 vector value
+def v8f16 : ValueType<128, 54>; // 8 x f16 vector value
+def v1f32 : ValueType<32 , 55>; // 1 x f32 vector value
+def v2f32 : ValueType<64 , 56>; // 2 x f32 vector value
+def v4f32 : ValueType<128, 57>; // 4 x f32 vector value
+def v8f32 : ValueType<256, 58>; // 8 x f32 vector value
+def v16f32 : ValueType<512, 59>; // 16 x f32 vector value
+def v1f64 : ValueType<64, 60>; // 1 x f64 vector value
+def v2f64 : ValueType<128, 61>; // 2 x f64 vector value
+def v4f64 : ValueType<256, 62>; // 4 x f64 vector value
+def v8f64 : ValueType<512, 63>; // 8 x f64 vector value
+
+
+def x86mmx : ValueType<64 , 64>; // X86 MMX value
+def FlagVT : ValueType<0 , 65>; // Pre-RA sched glue
+def isVoid : ValueType<0 , 66>; // Produces no value
+def untyped: ValueType<8 , 67>; // Produces an untyped value
+def token : ValueType<0 , 249>; // TokenTy
+def MetadataVT: ValueType<0, 250>; // Metadata
+
+// Pseudo valuetype mapped to the current pointer size to any address space.
+// Should only be used in TableGen.
+def iPTRAny : ValueType<0, 251>;
+
+// Pseudo valuetype to represent "vector of any size"
+def vAny : ValueType<0 , 252>;
+
+// Pseudo valuetype to represent "float of any format"
+def fAny : ValueType<0 , 253>;
+
+// Pseudo valuetype to represent "integer of any bit width"
+def iAny : ValueType<0 , 254>;
+
+// Pseudo valuetype mapped to the current pointer size.
+def iPTR : ValueType<0 , 255>;
+
+// Pseudo valuetype to represent "any type of any size".
+def Any : ValueType<0 , 256>;
diff --git a/contrib/llvm/include/llvm/CodeGen/VirtRegMap.h b/contrib/llvm/include/llvm/CodeGen/VirtRegMap.h
new file mode 100644
index 0000000..d7e9209
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/VirtRegMap.h
@@ -0,0 +1,190 @@
+//===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a virtual register map. This maps virtual registers to
+// physical registers and virtual registers to stack slots. It is created and
+// updated by a register allocator and then used by a machine code rewriter that
+// adds spill code and rewrites virtual into physical register references.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VIRTREGMAP_H
+#define LLVM_CODEGEN_VIRTREGMAP_H
+
+#include "llvm/ADT/IndexedMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+ class MachineInstr;
+ class MachineFunction;
+ class MachineRegisterInfo;
+ class TargetInstrInfo;
+ class raw_ostream;
+ class SlotIndexes;
+
+ class VirtRegMap : public MachineFunctionPass {
+ public:
+ enum {
+ NO_PHYS_REG = 0,
+ NO_STACK_SLOT = (1L << 30)-1,
+ MAX_STACK_SLOT = (1L << 18)-1
+ };
+
+ private:
+ MachineRegisterInfo *MRI;
+ const TargetInstrInfo *TII;
+ const TargetRegisterInfo *TRI;
+ MachineFunction *MF;
+
+ /// Virt2PhysMap - This is a virtual to physical register
+ /// mapping. Each virtual register is required to have an entry in
+ /// it; even spilled virtual registers (the register mapped to a
+ /// spilled register is the temporary used to load it from the
+ /// stack).
+ IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
+
+    /// Virt2StackSlotMap - This is a virtual register to stack slot
+ /// mapping. Each spilled virtual register has an entry in it
+ /// which corresponds to the stack slot this register is spilled
+ /// at.
+ IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
+
+    /// Virt2SplitMap - This is a virtual register to split virtual register
+ /// mapping.
+ IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
+
+ /// createSpillSlot - Allocate a spill slot for RC from MFI.
+ unsigned createSpillSlot(const TargetRegisterClass *RC);
+
+ VirtRegMap(const VirtRegMap&) = delete;
+ void operator=(const VirtRegMap&) = delete;
+
+ public:
+ static char ID;
+ VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
+ Virt2StackSlotMap(NO_STACK_SLOT), Virt2SplitMap(0) { }
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
+
+ MachineFunction &getMachineFunction() const {
+ assert(MF && "getMachineFunction called before runOnMachineFunction");
+ return *MF;
+ }
+
+ MachineRegisterInfo &getRegInfo() const { return *MRI; }
+ const TargetRegisterInfo &getTargetRegInfo() const { return *TRI; }
+
+ void grow();
+
+ /// @brief returns true if the specified virtual register is
+ /// mapped to a physical register
+ bool hasPhys(unsigned virtReg) const {
+ return getPhys(virtReg) != NO_PHYS_REG;
+ }
+
+ /// @brief returns the physical register mapped to the specified
+ /// virtual register
+ unsigned getPhys(unsigned virtReg) const {
+ assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+ return Virt2PhysMap[virtReg];
+ }
+
+ /// @brief creates a mapping for the specified virtual register to
+ /// the specified physical register
+ void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
+ TargetRegisterInfo::isPhysicalRegister(physReg));
+ assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
+ "attempt to assign physical register to already mapped "
+ "virtual register");
+ Virt2PhysMap[virtReg] = physReg;
+ }
+
+    /// @brief clears the specified virtual register's physical
+ /// register mapping
+ void clearVirt(unsigned virtReg) {
+ assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+ assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
+ "attempt to clear a not assigned virtual register");
+ Virt2PhysMap[virtReg] = NO_PHYS_REG;
+ }
+
+ /// @brief clears all virtual to physical register mappings
+ void clearAllVirt() {
+ Virt2PhysMap.clear();
+ grow();
+ }
+
+ /// @brief returns true if VirtReg is assigned to its preferred physreg.
+ bool hasPreferredPhys(unsigned VirtReg);
+
+ /// @brief returns true if VirtReg has a known preferred register.
+ /// This returns false if VirtReg has a preference that is a virtual
+ /// register that hasn't been assigned yet.
+ bool hasKnownPreference(unsigned VirtReg);
+
+ /// @brief records virtReg is a split live interval from SReg.
+ void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
+ Virt2SplitMap[virtReg] = SReg;
+ }
+
+ /// @brief returns the live interval virtReg is split from.
+ unsigned getPreSplitReg(unsigned virtReg) const {
+ return Virt2SplitMap[virtReg];
+ }
+
+ /// getOriginal - Return the original virtual register that VirtReg descends
+ /// from through splitting.
+ /// A register that was not created by splitting is its own original.
+ /// This operation is idempotent.
+ unsigned getOriginal(unsigned VirtReg) const {
+ unsigned Orig = getPreSplitReg(VirtReg);
+ return Orig ? Orig : VirtReg;
+ }
+
+ /// @brief returns true if the specified virtual register is not
+ /// mapped to a stack slot or rematerialized.
+ bool isAssignedReg(unsigned virtReg) const {
+ if (getStackSlot(virtReg) == NO_STACK_SLOT)
+ return true;
+ // Split register can be assigned a physical register as well as a
+ // stack slot or remat id.
+ return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
+ }
+
+ /// @brief returns the stack slot mapped to the specified virtual
+ /// register
+ int getStackSlot(unsigned virtReg) const {
+ assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+ return Virt2StackSlotMap[virtReg];
+ }
+
+    /// @brief create a mapping for the specified virtual register to
+ /// the next available stack slot
+ int assignVirt2StackSlot(unsigned virtReg);
+ /// @brief create a mapping for the specified virtual register to
+ /// the specified stack slot
+ void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
+
+ void print(raw_ostream &OS, const Module* M = nullptr) const override;
+ void dump() const;
+ };
+
+ inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
+ VRM.print(OS);
+ return OS;
+ }
+} // End llvm namespace
+
+#endif
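A minimal sketch of the query API above as a rewriter would use it; both
helper names are hypothetical:

    #include "llvm/CodeGen/VirtRegMap.h"

    using namespace llvm;

    // A virtual register may be mapped to a physreg, spilled to a stack
    // slot, or both (split registers).
    static unsigned physOrZero(const VirtRegMap &VRM, unsigned VirtReg) {
      return VRM.hasPhys(VirtReg) ? VRM.getPhys(VirtReg) : 0u;
    }

    static bool isSpilled(const VirtRegMap &VRM, unsigned VirtReg) {
      return VRM.getStackSlot(VirtReg) != VirtRegMap::NO_STACK_SLOT;
    }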
diff --git a/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h b/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h
new file mode 100644
index 0000000..70d558f
--- /dev/null
+++ b/contrib/llvm/include/llvm/CodeGen/WinEHFuncInfo.h
@@ -0,0 +1,130 @@
+//===-- llvm/CodeGen/WinEHFuncInfo.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Data structures and associated state for Windows exception handling schemes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_WINEHFUNCINFO_H
+#define LLVM_CODEGEN_WINEHFUNCINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/TinyPtrVector.h"
+
+namespace llvm {
+class AllocaInst;
+class BasicBlock;
+class CatchReturnInst;
+class Constant;
+class FuncletPadInst;
+class Function;
+class GlobalVariable;
+class InvokeInst;
+class Instruction;
+class IntrinsicInst;
+class LandingPadInst;
+class MCExpr;
+class MCSymbol;
+class MachineBasicBlock;
+class Value;
+
+// The following structs represent the .xdata tables for various
+// Windows-related EH personalities.
+
+typedef PointerUnion<const BasicBlock *, MachineBasicBlock *> MBBOrBasicBlock;
+
+struct CxxUnwindMapEntry {
+ int ToState;
+ MBBOrBasicBlock Cleanup;
+};
+
+/// Similar to CxxUnwindMapEntry, but supports SEH filters.
+struct SEHUnwindMapEntry {
+ /// If unwinding continues through this handler, transition to the handler at
+ /// this state. This indexes into SEHUnwindMap.
+ int ToState = -1;
+
+ bool IsFinally = false;
+
+ /// Holds the filter expression function.
+ const Function *Filter = nullptr;
+
+ /// Holds the __except or __finally basic block.
+ MBBOrBasicBlock Handler;
+};
+
+struct WinEHHandlerType {
+ int Adjectives;
+ /// The CatchObj starts out life as an LLVM alloca and is eventually turned
+ /// into a frame index.
+ union {
+ const AllocaInst *Alloca;
+ int FrameIndex;
+ } CatchObj = {};
+ GlobalVariable *TypeDescriptor;
+ MBBOrBasicBlock Handler;
+};
+
+struct WinEHTryBlockMapEntry {
+ int TryLow = -1;
+ int TryHigh = -1;
+ int CatchHigh = -1;
+ SmallVector<WinEHHandlerType, 1> HandlerArray;
+};
+
+enum class ClrHandlerType { Catch, Finally, Fault, Filter };
+
+struct ClrEHUnwindMapEntry {
+ MBBOrBasicBlock Handler;
+ uint32_t TypeToken;
+ int Parent;
+ ClrHandlerType HandlerType;
+};
+
+struct WinEHFuncInfo {
+ DenseMap<const Instruction *, int> EHPadStateMap;
+ DenseMap<const FuncletPadInst *, int> FuncletBaseStateMap;
+ DenseMap<const InvokeInst *, int> InvokeStateMap;
+ DenseMap<const CatchReturnInst *, const BasicBlock *>
+ CatchRetSuccessorColorMap;
+ DenseMap<MCSymbol *, std::pair<int, MCSymbol *>> LabelToStateMap;
+ SmallVector<CxxUnwindMapEntry, 4> CxxUnwindMap;
+ SmallVector<WinEHTryBlockMapEntry, 4> TryBlockMap;
+ SmallVector<SEHUnwindMapEntry, 4> SEHUnwindMap;
+ SmallVector<ClrEHUnwindMapEntry, 4> ClrEHUnwindMap;
+ int UnwindHelpFrameIdx = INT_MAX;
+ int PSPSymFrameIdx = INT_MAX;
+
+ int getLastStateNumber() const { return CxxUnwindMap.size() - 1; }
+
+ void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin,
+ MCSymbol *InvokeEnd);
+
+ int EHRegNodeFrameIndex = INT_MAX;
+ int EHRegNodeEndOffset = INT_MAX;
+ int SEHSetFrameOffset = INT_MAX;
+
+ WinEHFuncInfo();
+};
+
+/// Analyze the IR in ParentFn and it's handlers to build WinEHFuncInfo, which
+/// describes the state numbers and tables used by __CxxFrameHandler3. This
+/// analysis assumes that WinEHPrepare has already been run.
+void calculateWinCXXEHStateNumbers(const Function *ParentFn,
+ WinEHFuncInfo &FuncInfo);
+
+void calculateSEHStateNumbers(const Function *ParentFn,
+ WinEHFuncInfo &FuncInfo);
+
+void calculateClrEHStateNumbers(const Function *Fn, WinEHFuncInfo &FuncInfo);
+
+void calculateCatchReturnSuccessorColors(const Function *Fn,
+ WinEHFuncInfo &FuncInfo);
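+
+// A minimal usage sketch of the entry points above, assuming a parent
+// function ParentFn that WinEHPrepare has already processed:
+//
+// \code
+//   WinEHFuncInfo FuncInfo;
+//   calculateWinCXXEHStateNumbers(&ParentFn, FuncInfo);
+//   int MaxState = FuncInfo.getLastStateNumber();
+// \endcode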
+}
+#endif // LLVM_CODEGEN_WINEHFUNCINFO_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
new file mode 100644
index 0000000..7728120
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -0,0 +1,367 @@
+//===- CodeView.h -----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
+#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEW_H
+
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+enum class CallingConvention : uint8_t {
+ NearC = 0x00, // near right to left push, caller pops stack
+ FarC = 0x01, // far right to left push, caller pops stack
+ NearPascal = 0x02, // near left to right push, callee pops stack
+ FarPascal = 0x03, // far left to right push, callee pops stack
+ NearFast = 0x04, // near left to right push with regs, callee pops stack
+ FarFast = 0x05, // far left to right push with regs, callee pops stack
+ NearStdCall = 0x07, // near standard call
+ FarStdCall = 0x08, // far standard call
+ NearSysCall = 0x09, // near sys call
+ FarSysCall = 0x0a, // far sys call
+ ThisCall = 0x0b, // this call (this passed in register)
+ MipsCall = 0x0c, // Mips call
+ Generic = 0x0d, // Generic call sequence
+ AlphaCall = 0x0e, // Alpha call
+ PpcCall = 0x0f, // PPC call
+ SHCall = 0x10, // Hitachi SuperH call
+ ArmCall = 0x11, // ARM call
+ AM33Call = 0x12, // AM33 call
+ TriCall = 0x13, // TriCore Call
+ SH5Call = 0x14, // Hitachi SuperH-5 call
+ M32RCall = 0x15, // M32R Call
+ ClrCall = 0x16, // clr call
+ Inline =
+ 0x17, // Marker for routines always inlined and thus lacking a convention
+ NearVector = 0x18 // near left to right push with regs, callee pops stack
+};
+
+enum class ClassOptions : uint16_t {
+ None = 0x0000,
+ Packed = 0x0001,
+ HasConstructorOrDestructor = 0x0002,
+ HasOverloadedOperator = 0x0004,
+ Nested = 0x0008,
+ ContainsNestedClass = 0x0010,
+ HasOverloadedAssignmentOperator = 0x0020,
+ HasConversionOperator = 0x0040,
+ ForwardReference = 0x0080,
+ Scoped = 0x0100,
+ HasUniqueName = 0x0200,
+ Sealed = 0x0400,
+ Intrinsic = 0x2000
+};
+
+inline ClassOptions operator|(ClassOptions a, ClassOptions b) {
+ return static_cast<ClassOptions>(static_cast<uint16_t>(a) |
+ static_cast<uint16_t>(b));
+}
+
+inline ClassOptions operator&(ClassOptions a, ClassOptions b) {
+ return static_cast<ClassOptions>(static_cast<uint16_t>(a) &
+ static_cast<uint16_t>(b));
+}
+
+inline ClassOptions operator~(ClassOptions a) {
+ return static_cast<ClassOptions>(~static_cast<uint16_t>(a));
+}
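+
+// A brief usage sketch: the overloads above let the scoped enum act as a
+// bit mask without a cast at every use site, e.g.:
+//
+// \code
+//   ClassOptions CO = ClassOptions::Packed | ClassOptions::HasUniqueName;
+//   if ((CO & ClassOptions::HasUniqueName) != ClassOptions::None)
+//     ; // the HasUniqueName bit is set
+// \endcode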
+
+enum class FrameProcedureOptions : uint32_t {
+ None = 0x00000000,
+ HasAlloca = 0x00000001,
+ HasSetJmp = 0x00000002,
+ HasLongJmp = 0x00000004,
+ HasInlineAssembly = 0x00000008,
+ HasExceptionHandling = 0x00000010,
+ MarkedInline = 0x00000020,
+ HasStructuredExceptionHandling = 0x00000040,
+ Naked = 0x00000080,
+ SecurityChecks = 0x00000100,
+ AsynchronousExceptionHandling = 0x00000200,
+ NoStackOrderingForSecurityChecks = 0x00000400,
+ Inlined = 0x00000800,
+ StrictSecurityChecks = 0x00001000,
+ SafeBuffers = 0x00002000,
+ ProfileGuidedOptimization = 0x00040000,
+ ValidProfileCounts = 0x00080000,
+ OptimizedForSpeed = 0x00100000,
+ GuardCfg = 0x00200000,
+ GuardCfw = 0x00400000
+};
+
+inline FrameProcedureOptions operator|(FrameProcedureOptions a,
+ FrameProcedureOptions b) {
+ return static_cast<FrameProcedureOptions>(static_cast<uint32_t>(a) |
+ static_cast<uint32_t>(b));
+}
+
+inline FrameProcedureOptions operator&(FrameProcedureOptions a,
+ FrameProcedureOptions b) {
+ return static_cast<FrameProcedureOptions>(static_cast<uint32_t>(a) &
+ static_cast<uint32_t>(b));
+}
+
+inline FrameProcedureOptions operator~(FrameProcedureOptions a) {
+ return static_cast<FrameProcedureOptions>(~static_cast<uint32_t>(a));
+}
+
+enum class FunctionOptions : uint8_t {
+ None = 0x00,
+ CxxReturnUdt = 0x01,
+ Constructor = 0x02,
+ ConstructorWithVirtualBases = 0x04
+};
+
+inline FunctionOptions operator|(FunctionOptions a, FunctionOptions b) {
+ return static_cast<FunctionOptions>(static_cast<uint8_t>(a) |
+ static_cast<uint8_t>(b));
+}
+
+inline FunctionOptions operator&(FunctionOptions a, FunctionOptions b) {
+ return static_cast<FunctionOptions>(static_cast<uint8_t>(a) &
+ static_cast<uint8_t>(b));
+}
+
+inline FunctionOptions operator~(FunctionOptions a) {
+ return static_cast<FunctionOptions>(~static_cast<uint8_t>(a));
+}
+
+enum class HfaKind : uint8_t {
+ None = 0x00,
+ Float = 0x01,
+ Double = 0x02,
+ Other = 0x03
+};
+
+enum class MemberAccess : uint8_t {
+ None = 0,
+ Private = 1,
+ Protected = 2,
+ Public = 3
+};
+
+enum class MethodKind : uint8_t {
+ Vanilla = 0x00,
+ Virtual = 0x01,
+ Static = 0x02,
+ Friend = 0x03,
+ IntroducingVirtual = 0x04,
+ PureVirtual = 0x05,
+ PureIntroducingVirtual = 0x06
+};
+
+enum class MethodOptions : uint16_t {
+ None = 0x0000,
+ Pseudo = 0x0020,
+ CompilerGenerated = 0x0100,
+ Sealed = 0x0200
+};
+
+inline MethodOptions operator|(MethodOptions a, MethodOptions b) {
+ return static_cast<MethodOptions>(static_cast<uint16_t>(a) |
+ static_cast<uint16_t>(b));
+}
+
+inline MethodOptions operator&(MethodOptions a, MethodOptions b) {
+ return static_cast<MethodOptions>(static_cast<uint16_t>(a) &
+ static_cast<uint16_t>(b));
+}
+
+inline MethodOptions operator~(MethodOptions a) {
+ return static_cast<MethodOptions>(~static_cast<uint16_t>(a));
+}
+
+enum class ModifierOptions : uint16_t {
+ None = 0x0000,
+ Const = 0x0001,
+ Volatile = 0x0002,
+ Unaligned = 0x0004
+};
+
+inline ModifierOptions operator|(ModifierOptions a, ModifierOptions b) {
+ return static_cast<ModifierOptions>(static_cast<uint16_t>(a) |
+ static_cast<uint16_t>(b));
+}
+
+inline ModifierOptions operator&(ModifierOptions a, ModifierOptions b) {
+ return static_cast<ModifierOptions>(static_cast<uint16_t>(a) &
+ static_cast<uint16_t>(b));
+}
+
+inline ModifierOptions operator~(ModifierOptions a) {
+ return static_cast<ModifierOptions>(~static_cast<uint16_t>(a));
+}
+
+enum class ModuleSubstreamKind : uint32_t {
+ Symbols = 0xf1,
+ Lines = 0xf2,
+ StringTable = 0xf3,
+ FileChecksums = 0xf4,
+ FrameData = 0xf5,
+ InlineeLines = 0xf6,
+ CrossScopeImports = 0xf7,
+ CrossScopeExports = 0xf8
+};
+
+enum class PointerKind : uint8_t {
+ Near16 = 0x00, // 16 bit pointer
+ Far16 = 0x01, // 16:16 far pointer
+ Huge16 = 0x02, // 16:16 huge pointer
+ BasedOnSegment = 0x03, // based on segment
+ BasedOnValue = 0x04, // based on value of base
+ BasedOnSegmentValue = 0x05, // based on segment value of base
+ BasedOnAddress = 0x06, // based on address of base
+ BasedOnSegmentAddress = 0x07, // based on segment address of base
+ BasedOnType = 0x08, // based on type
+ BasedOnSelf = 0x09, // based on self
+ Near32 = 0x0a, // 32 bit pointer
+ Far32 = 0x0b, // 16:32 pointer
+ Near64 = 0x0c // 64 bit pointer
+};
+
+enum class PointerMode : uint8_t {
+ Pointer = 0x00, // "normal" pointer
+ LValueReference = 0x01, // "old" reference
+ PointerToDataMember = 0x02, // pointer to data member
+ PointerToMemberFunction = 0x03, // pointer to member function
+ RValueReference = 0x04 // r-value reference
+};
+
+enum class PointerOptions : uint32_t {
+ None = 0x00000000,
+ Flat32 = 0x00000100,
+ Volatile = 0x00000200,
+ Const = 0x00000400,
+ Unaligned = 0x00000800,
+ Restrict = 0x00001000,
+ WinRTSmartPointer = 0x00080000
+};
+
+inline PointerOptions operator|(PointerOptions a, PointerOptions b) {
+ return static_cast<PointerOptions>(static_cast<uint32_t>(a) |
+ static_cast<uint32_t>(b));
+}
+
+inline PointerOptions operator&(PointerOptions a, PointerOptions b) {
+ return static_cast<PointerOptions>(static_cast<uint32_t>(a) &
+ static_cast<uint32_t>(b));
+}
+
+inline PointerOptions operator~(PointerOptions a) {
+ return static_cast<PointerOptions>(~static_cast<uint32_t>(a));
+}
+
+enum class PointerToMemberRepresentation : uint16_t {
+ Unknown = 0x00, // not specified (pre VC8)
+ SingleInheritanceData = 0x01, // member data, single inheritance
+ MultipleInheritanceData = 0x02, // member data, multiple inheritance
+ VirtualInheritanceData = 0x03, // member data, virtual inheritance
+ GeneralData = 0x04, // member data, most general
+ SingleInheritanceFunction = 0x05, // member function, single inheritance
+ MultipleInheritanceFunction = 0x06, // member function, multiple inheritance
+ VirtualInheritanceFunction = 0x07, // member function, virtual inheritance
+ GeneralFunction = 0x08 // member function, most general
+};
+
+enum class TypeRecordKind : uint16_t {
+ None = 0,
+
+ VirtualTableShape = 0x000a,
+ Label = 0x000e,
+ EndPrecompiledHeader = 0x0014,
+
+ Modifier = 0x1001,
+ Pointer = 0x1002,
+ Procedure = 0x1008,
+ MemberFunction = 0x1009,
+
+ Oem = 0x100f,
+ Oem2 = 0x1011,
+
+ ArgumentList = 0x1201,
+ FieldList = 0x1203,
+ BitField = 0x1205,
+ MethodList = 0x1206,
+
+ BaseClass = 0x1400,
+ VirtualBaseClass = 0x1401,
+ IndirectVirtualBaseClass = 0x1402,
+ Index = 0x1404,
+ VirtualFunctionTablePointer = 0x1409,
+
+ Enumerate = 0x1502,
+ Array = 0x1503,
+ Class = 0x1504,
+ Structure = 0x1505,
+ Union = 0x1506,
+ Enum = 0x1507,
+ Alias = 0x150a,
+ Member = 0x150d,
+ StaticMember = 0x150e,
+ Method = 0x150f,
+ NestedType = 0x1510,
+ OneMethod = 0x1511,
+ VirtualFunctionTable = 0x151d,
+
+ FunctionId = 0x1601,
+ MemberFunctionId = 0x1602,
+ BuildInfo = 0x1603,
+ SubstringList = 0x1604,
+ StringId = 0x1605,
+ UdtSourceLine = 0x1606,
+
+ SByte = 0x8000,
+ Int16 = 0x8001,
+ UInt16 = 0x8002,
+ Int32 = 0x8003,
+ UInt32 = 0x8004,
+ Single = 0x8005,
+ Double = 0x8006,
+ Float80 = 0x8007,
+ Float128 = 0x8008,
+ Int64 = 0x8009,
+ UInt64 = 0x800a,
+ Float48 = 0x800b,
+ Complex32 = 0x800c,
+ Complex64 = 0x800d,
+ Complex80 = 0x800e,
+ Complex128 = 0x800f,
+ VarString = 0x8010,
+
+ Int128 = 0x8017,
+ UInt128 = 0x8018,
+
+ Decimal = 0x8019,
+ Date = 0x801a,
+ Utf8String = 0x801b,
+
+ Float16 = 0x801c
+};
+
+enum class VirtualTableSlotKind : uint8_t {
+ Near16 = 0x00,
+ Far16 = 0x01,
+ This = 0x02,
+ Outer = 0x03,
+ Meta = 0x04,
+ Near = 0x05,
+ Far = 0x06
+};
+
+enum class WindowsRTClassKind : uint8_t {
+ None = 0x00,
+ RefClass = 0x01,
+ ValueClass = 0x02,
+ Interface = 0x03
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewOStream.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewOStream.h
new file mode 100644
index 0000000..14d057a
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CodeViewOStream.h
@@ -0,0 +1,39 @@
+//===- CodeViewOStream.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_CODEVIEWOSTREAM_H
+#define LLVM_DEBUGINFO_CODEVIEW_CODEVIEWOSTREAM_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+namespace llvm {
+namespace codeview {
+
+template <typename Writer> class CodeViewOStream {
+private:
+ CodeViewOStream(const CodeViewOStream &) = delete;
+ CodeViewOStream &operator=(const CodeViewOStream &) = delete;
+
+public:
+ typedef typename Writer::LabelType LabelType;
+
+public:
+ explicit CodeViewOStream(Writer &W);
+
+private:
+ uint64_t size() const { return W.tell(); }
+
+private:
+ Writer &W;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h
new file mode 100644
index 0000000..1ed6248
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/FieldListRecordBuilder.h
@@ -0,0 +1,78 @@
+//===- FieldListRecordBuilder.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_FIELDLISTRECORDBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_FIELDLISTRECORDBUILDER_H
+
+#include "llvm/DebugInfo/CodeView/ListRecordBuilder.h"
+
+namespace llvm {
+namespace codeview {
+
+class MethodInfo {
+public:
+ MethodInfo() : Access(), Kind(), Options(), Type(), VTableSlotOffset(-1) {}
+
+ MethodInfo(MemberAccess Access, MethodKind Kind, MethodOptions Options,
+ TypeIndex Type, int32_t VTableSlotOffset)
+ : Access(Access), Kind(Kind), Options(Options), Type(Type),
+ VTableSlotOffset(VTableSlotOffset) {}
+
+ MemberAccess getAccess() const { return Access; }
+ MethodKind getKind() const { return Kind; }
+ MethodOptions getOptions() const { return Options; }
+ TypeIndex getType() const { return Type; }
+ int32_t getVTableSlotOffset() const { return VTableSlotOffset; }
+
+private:
+ MemberAccess Access;
+ MethodKind Kind;
+ MethodOptions Options;
+ TypeIndex Type;
+ int32_t VTableSlotOffset;
+};
+
+class FieldListRecordBuilder : public ListRecordBuilder {
+private:
+ FieldListRecordBuilder(const FieldListRecordBuilder &) = delete;
+ void operator=(const FieldListRecordBuilder &) = delete;
+
+public:
+ FieldListRecordBuilder();
+
+ void writeBaseClass(MemberAccess Access, TypeIndex Type, uint64_t Offset);
+ void writeEnumerate(MemberAccess Access, uint64_t Value, StringRef Name);
+ void writeIndirectVirtualBaseClass(MemberAccess Access, TypeIndex Type,
+ TypeIndex VirtualBasePointerType,
+ int64_t VirtualBasePointerOffset,
+ uint64_t SlotIndex);
+ void writeMember(MemberAccess Access, TypeIndex Type, uint64_t Offset,
+ StringRef Name);
+ void writeOneMethod(MemberAccess Access, MethodKind Kind,
+ MethodOptions Options, TypeIndex Type,
+ int32_t VTableSlotOffset, StringRef Name);
+ void writeOneMethod(const MethodInfo &Method, StringRef Name);
+ void writeMethod(uint16_t OverloadCount, TypeIndex MethodList,
+ StringRef Name);
+ void writeNestedType(TypeIndex Type, StringRef Name);
+ void writeStaticMember(MemberAccess Access, TypeIndex Type, StringRef Name);
+ void writeVirtualBaseClass(MemberAccess Access, TypeIndex Type,
+ TypeIndex VirtualBasePointerType,
+ int64_t VirtualBasePointerOffset,
+ uint64_t SlotIndex);
+ void writeVirtualBaseClass(TypeRecordKind Kind, MemberAccess Access,
+ TypeIndex Type, TypeIndex VirtualBasePointerType,
+ int64_t VirtualBasePointerOffset,
+ uint64_t SlotIndex);
+ void writeVirtualFunctionTablePointer(TypeIndex Type);
+};
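+
+// A minimal usage sketch, assuming the field list of a struct with two
+// public 32-bit data members at offsets 0 and 4:
+//
+// \code
+//   FieldListRecordBuilder FLRB;
+//   FLRB.writeMember(MemberAccess::Public, TypeIndex::Int32(), 0, "X");
+//   FLRB.writeMember(MemberAccess::Public, TypeIndex::Int32(), 4, "Y");
+//   // The accumulated bytes are then committed through
+//   // TypeTableBuilder::writeFieldList(FLRB).
+// \endcode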
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/FunctionId.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/FunctionId.h
new file mode 100644
index 0000000..1af3da8
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/FunctionId.h
@@ -0,0 +1,56 @@
+//===- FunctionId.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+#define LLVM_DEBUGINFO_CODEVIEW_FUNCTIONID_H
+
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+class FunctionId {
+public:
+ FunctionId() : Index(0) {}
+
+ explicit FunctionId(uint32_t Index) : Index(Index) {}
+
+ uint32_t getIndex() const { return Index; }
+
+private:
+ uint32_t Index;
+};
+
+inline bool operator==(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() == B.getIndex();
+}
+
+inline bool operator!=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() != B.getIndex();
+}
+
+inline bool operator<(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() < B.getIndex();
+}
+
+inline bool operator<=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() <= B.getIndex();
+}
+
+inline bool operator>(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() > B.getIndex();
+}
+
+inline bool operator>=(const FunctionId &A, const FunctionId &B) {
+ return A.getIndex() >= B.getIndex();
+}
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/Line.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/Line.h
new file mode 100644
index 0000000..a7cdbda
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/Line.h
@@ -0,0 +1,124 @@
+//===- Line.h ---------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_LINE_H
+#define LLVM_DEBUGINFO_CODEVIEW_LINE_H
+
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+class LineInfo {
+public:
+ static const uint32_t AlwaysStepIntoLineNumber = 0xfeefee;
+ static const uint32_t NeverStepIntoLineNumber = 0xf00f00;
+
+private:
+ static const uint32_t StartLineMask = 0x00ffffff;
+ static const uint32_t EndLineDeltaMask = 0x7f000000;
+ static const int EndLineDeltaShift = 24;
+ static const uint32_t StatementFlag = 0x80000000u;
+
+public:
+ LineInfo(uint32_t StartLine, uint32_t EndLine, bool IsStatement);
+
+ uint32_t getStartLine() const { return LineData & StartLineMask; }
+
+ uint32_t getLineDelta() const {
+ return (LineData & EndLineDeltaMask) >> EndLineDeltaShift;
+ }
+
+ uint32_t getEndLine() const { return getStartLine() + getLineDelta(); }
+
+ bool isStatement() const { return (LineData & StatementFlag) != 0; }
+
+ uint32_t getRawData() const { return LineData; }
+
+ bool isAlwaysStepInto() const {
+ return getStartLine() == AlwaysStepIntoLineNumber;
+ }
+
+ bool isNeverStepInto() const {
+ return getStartLine() == NeverStepIntoLineNumber;
+ }
+
+private:
+ uint32_t LineData;
+};
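+
+// A brief sketch of the packing above: the start line lives in the low 24
+// bits and the end-line delta in the next 7 bits, so a statement spanning
+// lines 10-12 round-trips as:
+//
+// \code
+//   LineInfo LI(10, 12, /*IsStatement=*/true);
+//   LI.getStartLine(); // 10
+//   LI.getLineDelta(); // 2
+//   LI.getEndLine();   // 12
+// \endcode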
+
+class ColumnInfo {
+private:
+ static const uint32_t StartColumnMask = 0x0000ffffu;
+ static const uint32_t EndColumnMask = 0xffff0000u;
+ static const int EndColumnShift = 16;
+
+public:
+ ColumnInfo(uint16_t StartColumn, uint16_t EndColumn) {
+ ColumnData =
+ (static_cast<uint32_t>(StartColumn) & StartColumnMask) |
+ ((static_cast<uint32_t>(EndColumn) << EndColumnShift) & EndColumnMask);
+ }
+
+ uint16_t getStartColumn() const {
+ return static_cast<uint16_t>(ColumnData & StartColumnMask);
+ }
+
+ uint16_t getEndColumn() const {
+ return static_cast<uint16_t>((ColumnData & EndColumnMask) >>
+ EndColumnShift);
+ }
+
+ uint32_t getRawData() const { return ColumnData; }
+
+private:
+ uint32_t ColumnData;
+};
+
+class Line {
+private:
+ int32_t CodeOffset;
+ LineInfo LineInf;
+ ColumnInfo ColumnInf;
+
+public:
+ Line(int32_t CodeOffset, uint32_t StartLine, uint32_t EndLine,
+ uint16_t StartColumn, uint16_t EndColumn, bool IsStatement)
+ : CodeOffset(CodeOffset), LineInf(StartLine, EndLine, IsStatement),
+ ColumnInf(StartColumn, EndColumn) {}
+
+ Line(int32_t CodeOffset, LineInfo LineInf, ColumnInfo ColumnInf)
+ : CodeOffset(CodeOffset), LineInf(LineInf), ColumnInf(ColumnInf) {}
+
+ LineInfo getLineInfo() const { return LineInf; }
+
+ ColumnInfo getColumnInfo() const { return ColumnInf; }
+
+ int32_t getCodeOffset() const { return CodeOffset; }
+
+ uint32_t getStartLine() const { return LineInf.getStartLine(); }
+
+ uint32_t getLineDelta() const { return LineInf.getLineDelta(); }
+
+ uint32_t getEndLine() const { return LineInf.getEndLine(); }
+
+ uint16_t getStartColumn() const { return ColumnInf.getStartColumn(); }
+
+ uint16_t getEndColumn() const { return ColumnInf.getEndColumn(); }
+
+ bool isStatement() const { return LineInf.isStatement(); }
+
+ bool isAlwaysStepInto() const { return LineInf.isAlwaysStepInto(); }
+
+ bool isNeverStepInto() const { return LineInf.isNeverStepInto(); }
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h
new file mode 100644
index 0000000..df0a2e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/ListRecordBuilder.h
@@ -0,0 +1,43 @@
+//===- ListRecordBuilder.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_LISTRECORDBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_LISTRECORDBUILDER_H
+
+#include "llvm/DebugInfo/CodeView/TypeRecordBuilder.h"
+
+namespace llvm {
+namespace codeview {
+
+class ListRecordBuilder {
+private:
+ ListRecordBuilder(const ListRecordBuilder &) = delete;
+ ListRecordBuilder &operator=(const ListRecordBuilder &) = delete;
+
+protected:
+ const int MethodKindShift = 2;
+
+ explicit ListRecordBuilder(TypeRecordKind Kind);
+
+public:
+ llvm::StringRef str() { return Builder.str(); }
+
+protected:
+ void finishSubRecord();
+
+ TypeRecordBuilder &getBuilder() { return Builder; }
+
+private:
+ TypeRecordBuilder Builder;
+ SmallVector<size_t, 4> ContinuationOffsets;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h
new file mode 100644
index 0000000..5bfe2a0
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/MemoryTypeTableBuilder.h
@@ -0,0 +1,68 @@
+//===- MemoryTypeTableBuilder.h ---------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MEMORYTYPETABLEBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_MEMORYTYPETABLEBUILDER_H
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
+#include <functional>
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+namespace llvm {
+namespace codeview {
+
+class MemoryTypeTableBuilder : public TypeTableBuilder {
+public:
+ class Record {
+ public:
+ explicit Record(llvm::StringRef RData);
+
+ const char *data() const { return Data.get(); }
+ uint16_t size() const { return Size; }
+
+ private:
+ uint16_t Size;
+ std::unique_ptr<char[]> Data;
+ };
+
+private:
+ class RecordHash : std::unary_function<llvm::StringRef, size_t> {
+ public:
+ size_t operator()(llvm::StringRef Val) const {
+ return static_cast<size_t>(llvm::hash_value(Val));
+ }
+ };
+
+public:
+ MemoryTypeTableBuilder() {}
+
+ template <typename TFunc> void ForEachRecord(TFunc Func) {
+ uint32_t Index = TypeIndex::FirstNonSimpleIndex;
+
+ for (const std::unique_ptr<Record> &R : Records) {
+ Func(TypeIndex(Index), R.get());
+ ++Index;
+ }
+ }
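+
+ // A minimal usage sketch, assuming Builder is a MemoryTypeTableBuilder and
+ // emitBytes is a hypothetical serialization callback: visit each record
+ // together with its assigned index.
+ //
+ // \code
+ //   Builder.ForEachRecord([&](TypeIndex TI, Record *R) {
+ //     emitBytes(TI, StringRef(R->data(), R->size()));
+ //   });
+ // \endcode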
+
+private:
+ virtual TypeIndex writeRecord(llvm::StringRef Data) override;
+
+private:
+ std::vector<std::unique_ptr<Record>> Records;
+ std::unordered_map<llvm::StringRef, TypeIndex, RecordHash> HashedRecords;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h
new file mode 100644
index 0000000..faa404d
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/MethodListRecordBuilder.h
@@ -0,0 +1,35 @@
+//===- MethodListRecordBuilder.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_METHODLISTRECORDBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_METHODLISTRECORDBUILDER_H
+
+#include "llvm/DebugInfo/CodeView/ListRecordBuilder.h"
+
+namespace llvm {
+namespace codeview {
+
+class MethodInfo;
+
+class MethodListRecordBuilder : public ListRecordBuilder {
+private:
+ MethodListRecordBuilder(const MethodListRecordBuilder &) = delete;
+ MethodListRecordBuilder &operator=(const MethodListRecordBuilder &) = delete;
+
+public:
+ MethodListRecordBuilder();
+
+ void writeMethod(MemberAccess Access, MethodKind Kind, MethodOptions Options,
+ TypeIndex Type, int32_t VTableSlotOffset);
+ void writeMethod(const MethodInfo &Method);
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h
new file mode 100644
index 0000000..d3a541b
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -0,0 +1,176 @@
+//===- TypeIndex.h ----------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEX_H
+
+#include <cassert>
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+enum class SimpleTypeKind : uint32_t {
+ None = 0x0000, // uncharacterized type (no type)
+ Void = 0x0003, // void
+ NotTranslated = 0x0007, // type not translated by cvpack
+ HResult = 0x0008, // OLE/COM HRESULT
+
+ SignedCharacter = 0x0010, // 8 bit signed
+ UnsignedCharacter = 0x0020, // 8 bit unsigned
+ NarrowCharacter = 0x0070, // really a char
+ WideCharacter = 0x0071, // wide char
+
+ SByte = 0x0068, // 8 bit signed int
+ Byte = 0x0069, // 8 bit unsigned int
+ Int16Short = 0x0011, // 16 bit signed
+ UInt16Short = 0x0021, // 16 bit unsigned
+ Int16 = 0x0072, // 16 bit signed int
+ UInt16 = 0x0073, // 16 bit unsigned int
+ Int32Long = 0x0012, // 32 bit signed
+ UInt32Long = 0x0022, // 32 bit unsigned
+ Int32 = 0x0074, // 32 bit signed int
+ UInt32 = 0x0075, // 32 bit unsigned int
+ Int64Quad = 0x0013, // 64 bit signed
+ UInt64Quad = 0x0023, // 64 bit unsigned
+ Int64 = 0x0076, // 64 bit signed int
+ UInt64 = 0x0077, // 64 bit unsigned int
+ Int128 = 0x0078, // 128 bit signed int
+ UInt128 = 0x0079, // 128 bit unsigned int
+
+ Float16 = 0x0046, // 16 bit real
+ Float32 = 0x0040, // 32 bit real
+ Float32PartialPrecision = 0x0045, // 32 bit PP real
+ Float48 = 0x0044, // 48 bit real
+ Float64 = 0x0041, // 64 bit real
+ Float80 = 0x0042, // 80 bit real
+ Float128 = 0x0043, // 128 bit real
+
+ Complex32 = 0x0050, // 32 bit complex
+ Complex64 = 0x0051, // 64 bit complex
+ Complex80 = 0x0052, // 80 bit complex
+ Complex128 = 0x0053, // 128 bit complex
+
+ Boolean8 = 0x0030, // 8 bit boolean
+ Boolean16 = 0x0031, // 16 bit boolean
+ Boolean32 = 0x0032, // 32 bit boolean
+ Boolean64 = 0x0033 // 64 bit boolean
+};
+
+enum class SimpleTypeMode : uint32_t {
+ Direct = 0x00000000, // Not a pointer
+ NearPointer = 0x00000100, // Near pointer
+ FarPointer = 0x00000200, // Far pointer
+ HugePointer = 0x00000300, // Huge pointer
+ NearPointer32 = 0x00000400, // 32 bit near pointer
+ FarPointer32 = 0x00000500, // 32 bit far pointer
+ NearPointer64 = 0x00000600, // 64 bit near pointer
+ NearPointer128 = 0x00000700 // 128 bit near pointer
+};
+
+class TypeIndex {
+public:
+ static const uint32_t FirstNonSimpleIndex = 0x1000;
+ static const uint32_t SimpleKindMask = 0x000000ff;
+ static const uint32_t SimpleModeMask = 0x00000700;
+
+public:
+ TypeIndex() : Index(0) {}
+ explicit TypeIndex(uint32_t Index) : Index(Index) {}
+ explicit TypeIndex(SimpleTypeKind Kind)
+ : Index(static_cast<uint32_t>(Kind)) {}
+ TypeIndex(SimpleTypeKind Kind, SimpleTypeMode Mode)
+ : Index(static_cast<uint32_t>(Kind) | static_cast<uint32_t>(Mode)) {}
+
+ uint32_t getIndex() const { return Index; }
+ bool isSimple() const { return Index < FirstNonSimpleIndex; }
+
+ SimpleTypeKind getSimpleKind() const {
+ assert(isSimple());
+ return static_cast<SimpleTypeKind>(Index & SimpleKindMask);
+ }
+
+ SimpleTypeMode getSimpleMode() const {
+ assert(isSimple());
+ return static_cast<SimpleTypeMode>(Index & SimpleModeMask);
+ }
+
+ static TypeIndex Void() { return TypeIndex(SimpleTypeKind::Void); }
+ static TypeIndex VoidPointer32() {
+ return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer32);
+ }
+ static TypeIndex VoidPointer64() {
+ return TypeIndex(SimpleTypeKind::Void, SimpleTypeMode::NearPointer64);
+ }
+
+ static TypeIndex SignedCharacter() {
+ return TypeIndex(SimpleTypeKind::SignedCharacter);
+ }
+ static TypeIndex UnsignedCharacter() {
+ return TypeIndex(SimpleTypeKind::UnsignedCharacter);
+ }
+ static TypeIndex NarrowCharacter() {
+ return TypeIndex(SimpleTypeKind::NarrowCharacter);
+ }
+ static TypeIndex WideCharacter() {
+ return TypeIndex(SimpleTypeKind::WideCharacter);
+ }
+ static TypeIndex Int16Short() {
+ return TypeIndex(SimpleTypeKind::Int16Short);
+ }
+ static TypeIndex UInt16Short() {
+ return TypeIndex(SimpleTypeKind::UInt16Short);
+ }
+ static TypeIndex Int32() { return TypeIndex(SimpleTypeKind::Int32); }
+ static TypeIndex UInt32() { return TypeIndex(SimpleTypeKind::UInt32); }
+ static TypeIndex Int32Long() { return TypeIndex(SimpleTypeKind::Int32Long); }
+ static TypeIndex UInt32Long() {
+ return TypeIndex(SimpleTypeKind::UInt32Long);
+ }
+ static TypeIndex Int64() { return TypeIndex(SimpleTypeKind::Int64); }
+ static TypeIndex UInt64() { return TypeIndex(SimpleTypeKind::UInt64); }
+ static TypeIndex Int64Quad() { return TypeIndex(SimpleTypeKind::Int64Quad); }
+ static TypeIndex UInt64Quad() {
+ return TypeIndex(SimpleTypeKind::UInt64Quad);
+ }
+
+ static TypeIndex Float32() { return TypeIndex(SimpleTypeKind::Float32); }
+ static TypeIndex Float64() { return TypeIndex(SimpleTypeKind::Float64); }
+
+private:
+ uint32_t Index;
+};
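+
+// A brief sketch of the simple-index encoding: the kind occupies the low
+// byte and the mode the next three bits, so a 64-bit near pointer to void
+// composes as:
+//
+// \code
+//   TypeIndex TI(SimpleTypeKind::Void, SimpleTypeMode::NearPointer64);
+//   TI.isSimple();      // true (0x0603 < FirstNonSimpleIndex)
+//   TI.getSimpleKind(); // SimpleTypeKind::Void
+//   TI.getIndex() == TypeIndex::VoidPointer64().getIndex(); // true
+// \endcode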
+
+inline bool operator==(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() == B.getIndex();
+}
+
+inline bool operator!=(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() != B.getIndex();
+}
+
+inline bool operator<(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() < B.getIndex();
+}
+
+inline bool operator<=(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() <= B.getIndex();
+}
+
+inline bool operator>(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() > B.getIndex();
+}
+
+inline bool operator>=(const TypeIndex &A, const TypeIndex &B) {
+ return A.getIndex() >= B.getIndex();
+}
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
new file mode 100644
index 0000000..21755f5
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecord.h
@@ -0,0 +1,270 @@
+//===- TypeRecord.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORD_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include <cinttypes>
+
+namespace llvm {
+namespace codeview {
+
+class TypeRecord {
+protected:
+ explicit TypeRecord(TypeRecordKind Kind) : Kind(Kind) {}
+
+public:
+ TypeRecordKind getKind() const { return Kind; }
+
+private:
+ TypeRecordKind Kind;
+};
+
+class ModifierRecord : public TypeRecord {
+public:
+ ModifierRecord(TypeIndex ModifiedType, ModifierOptions Options)
+ : TypeRecord(TypeRecordKind::Modifier), ModifiedType(ModifiedType),
+ Options(Options) {}
+
+ TypeIndex getModifiedType() const { return ModifiedType; }
+ ModifierOptions getOptions() const { return Options; }
+
+private:
+ TypeIndex ModifiedType;
+ ModifierOptions Options;
+};
+
+class ProcedureRecord : public TypeRecord {
+public:
+ ProcedureRecord(TypeIndex ReturnType, CallingConvention CallConv,
+ FunctionOptions Options, uint16_t ParameterCount,
+ TypeIndex ArgumentList)
+ : TypeRecord(TypeRecordKind::Procedure), ReturnType(ReturnType),
+ CallConv(CallConv), Options(Options), ParameterCount(ParameterCount),
+ ArgumentList(ArgumentList) {}
+
+ TypeIndex getReturnType() const { return ReturnType; }
+ CallingConvention getCallConv() const { return CallConv; }
+ FunctionOptions getOptions() const { return Options; }
+ uint16_t getParameterCount() const { return ParameterCount; }
+ TypeIndex getArgumentList() const { return ArgumentList; }
+
+private:
+ TypeIndex ReturnType;
+ CallingConvention CallConv;
+ FunctionOptions Options;
+ uint16_t ParameterCount;
+ TypeIndex ArgumentList;
+};
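+
+// A minimal construction sketch, assuming ArgListTI already indexes an
+// argument list holding a single int parameter; this describes `int f(int)`:
+//
+// \code
+//   ProcedureRecord PR(TypeIndex::Int32(), CallingConvention::NearC,
+//                      FunctionOptions::None, /*ParameterCount=*/1,
+//                      ArgListTI);
+// \endcode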
+
+class MemberFunctionRecord : public TypeRecord {
+public:
+ MemberFunctionRecord(TypeIndex ReturnType, TypeIndex ClassType,
+ TypeIndex ThisType, CallingConvention CallConv,
+ FunctionOptions Options, uint16_t ParameterCount,
+ TypeIndex ArgumentList, int32_t ThisPointerAdjustment)
+ : TypeRecord(TypeRecordKind::MemberFunction), ReturnType(ReturnType),
+ ClassType(ClassType), ThisType(ThisType), CallConv(CallConv),
+ Options(Options), ParameterCount(ParameterCount),
+ ArgumentList(ArgumentList),
+ ThisPointerAdjustment(ThisPointerAdjustment) {}
+
+ TypeIndex getReturnType() const { return ReturnType; }
+ TypeIndex getClassType() const { return ClassType; }
+ TypeIndex getThisType() const { return ThisType; }
+ CallingConvention getCallConv() const { return CallConv; }
+ FunctionOptions getOptions() const { return Options; }
+ uint16_t getParameterCount() const { return ParameterCount; }
+ TypeIndex getArgumentList() const { return ArgumentList; }
+ int32_t getThisPointerAdjustment() const { return ThisPointerAdjustment; }
+
+private:
+ TypeIndex ReturnType;
+ TypeIndex ClassType;
+ TypeIndex ThisType;
+ CallingConvention CallConv;
+ FunctionOptions Options;
+ uint16_t ParameterCount;
+ TypeIndex ArgumentList;
+ int32_t ThisPointerAdjustment;
+};
+
+class ArgumentListRecord : public TypeRecord {
+public:
+ explicit ArgumentListRecord(llvm::ArrayRef<TypeIndex> ArgumentTypes)
+ : TypeRecord(TypeRecordKind::ArgumentList), ArgumentTypes(ArgumentTypes) {
+ }
+
+ llvm::ArrayRef<TypeIndex> getArgumentTypes() const { return ArgumentTypes; }
+
+private:
+ llvm::ArrayRef<TypeIndex> ArgumentTypes;
+};
+
+class PointerRecordBase : public TypeRecord {
+public:
+ PointerRecordBase(TypeIndex ReferentType, PointerKind Kind, PointerMode Mode,
+ PointerOptions Options, uint8_t Size)
+ : TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
+ PtrKind(Kind), Mode(Mode), Options(Options), Size(Size) {}
+
+ TypeIndex getReferentType() const { return ReferentType; }
+ PointerKind getPointerKind() const { return PtrKind; }
+ PointerMode getMode() const { return Mode; }
+ PointerOptions getOptions() const { return Options; }
+ uint8_t getSize() const { return Size; }
+
+private:
+ TypeIndex ReferentType;
+ PointerKind PtrKind;
+ PointerMode Mode;
+ PointerOptions Options;
+ uint8_t Size;
+};
+
+class PointerRecord : public PointerRecordBase {
+public:
+ PointerRecord(TypeIndex ReferentType, PointerKind Kind, PointerMode Mode,
+ PointerOptions Options, uint8_t Size)
+ : PointerRecordBase(ReferentType, Kind, Mode, Options, Size) {}
+};
+
+class PointerToMemberRecord : public PointerRecordBase {
+public:
+ PointerToMemberRecord(TypeIndex ReferentType, PointerKind Kind,
+ PointerMode Mode, PointerOptions Options, uint8_t Size,
+ TypeIndex ContainingType,
+ PointerToMemberRepresentation Representation)
+ : PointerRecordBase(ReferentType, Kind, Mode, Options, Size),
+ ContainingType(ContainingType), Representation(Representation) {}
+
+ TypeIndex getContainingType() const { return ContainingType; }
+ PointerToMemberRepresentation getRepresentation() const {
+ return Representation;
+ }
+
+private:
+ TypeIndex ContainingType;
+ PointerToMemberRepresentation Representation;
+};
+
+class ArrayRecord : public TypeRecord {
+public:
+ ArrayRecord(TypeIndex ElementType, TypeIndex IndexType, uint64_t Size,
+ llvm::StringRef Name)
+ : TypeRecord(TypeRecordKind::Array), ElementType(ElementType),
+ IndexType(IndexType), Size(Size), Name(Name) {}
+
+ TypeIndex getElementType() const { return ElementType; }
+ TypeIndex getIndexType() const { return IndexType; }
+ uint64_t getSize() const { return Size; }
+ llvm::StringRef getName() const { return Name; }
+
+private:
+ TypeIndex ElementType;
+ TypeIndex IndexType;
+ uint64_t Size;
+ llvm::StringRef Name;
+};
+
+class TagRecord : public TypeRecord {
+protected:
+ TagRecord(TypeRecordKind Kind, uint16_t MemberCount, ClassOptions Options,
+ TypeIndex FieldList, StringRef Name, StringRef UniqueName)
+ : TypeRecord(Kind), MemberCount(MemberCount), Options(Options),
+ FieldList(FieldList), Name(Name), UniqueName(UniqueName) {}
+
+public:
+ uint16_t getMemberCount() const { return MemberCount; }
+ ClassOptions getOptions() const { return Options; }
+ TypeIndex getFieldList() const { return FieldList; }
+ StringRef getName() const { return Name; }
+ StringRef getUniqueName() const { return UniqueName; }
+
+private:
+ uint16_t MemberCount;
+ ClassOptions Options;
+ TypeIndex FieldList;
+ StringRef Name;
+ StringRef UniqueName;
+};
+
+class AggregateRecord : public TagRecord {
+public:
+ AggregateRecord(TypeRecordKind Kind, uint16_t MemberCount,
+ ClassOptions Options, HfaKind Hfa,
+ WindowsRTClassKind WinRTKind, TypeIndex FieldList,
+ TypeIndex DerivationList, TypeIndex VTableShape,
+ uint64_t Size, StringRef Name, StringRef UniqueName)
+ : TagRecord(Kind, MemberCount, Options, FieldList, Name, UniqueName),
+ Hfa(Hfa), WinRTKind(WinRTKind), DerivationList(DerivationList),
+ VTableShape(VTableShape), Size(Size) {}
+
+ HfaKind getHfa() const { return Hfa; }
+ WindowsRTClassKind getWinRTKind() const { return WinRTKind; }
+ TypeIndex getDerivationList() const { return DerivationList; }
+ TypeIndex getVTableShape() const { return VTableShape; }
+ uint64_t getSize() const { return Size; }
+
+private:
+ HfaKind Hfa;
+ WindowsRTClassKind WinRTKind;
+ TypeIndex DerivationList;
+ TypeIndex VTableShape;
+ uint64_t Size;
+};
+
+class EnumRecord : public TagRecord {
+public:
+ EnumRecord(uint16_t MemberCount, ClassOptions Options, TypeIndex FieldList,
+ StringRef Name, StringRef UniqueName, TypeIndex UnderlyingType)
+ : TagRecord(TypeRecordKind::Enum, MemberCount, Options, FieldList, Name,
+ UniqueName),
+ UnderlyingType(UnderlyingType) {}
+
+ TypeIndex getUnderlyingType() const { return UnderlyingType; }
+
+private:
+ TypeIndex UnderlyingType;
+};
+
+class BitFieldRecord : public TypeRecord {
+public:
+ BitFieldRecord(TypeIndex Type, uint8_t BitSize, uint8_t BitOffset)
+ : TypeRecord(TypeRecordKind::BitField), Type(Type), BitSize(BitSize),
+ BitOffset(BitOffset) {}
+
+ TypeIndex getType() const { return Type; }
+ uint8_t getBitOffset() const { return BitOffset; }
+ uint8_t getBitSize() const { return BitSize; }
+
+private:
+ TypeIndex Type;
+ uint8_t BitSize;
+ uint8_t BitOffset;
+};
+
+class VirtualTableShapeRecord : public TypeRecord {
+public:
+ explicit VirtualTableShapeRecord(ArrayRef<VirtualTableSlotKind> Slots)
+ : TypeRecord(TypeRecordKind::VirtualTableShape), Slots(Slots) {}
+
+ ArrayRef<VirtualTableSlotKind> getSlots() const { return Slots; }
+
+private:
+ ArrayRef<VirtualTableSlotKind> Slots;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
new file mode 100644
index 0000000..1f48cf7
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeRecordBuilder.h
@@ -0,0 +1,57 @@
+//===- TypeRecordBuilder.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPERECORDBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPERECORDBUILDER_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace codeview {
+
+class TypeRecordBuilder {
+private:
+ TypeRecordBuilder(const TypeRecordBuilder &) = delete;
+ TypeRecordBuilder &operator=(const TypeRecordBuilder &) = delete;
+
+public:
+ explicit TypeRecordBuilder(TypeRecordKind Kind);
+
+ void writeUInt8(uint8_t Value);
+ void writeInt16(int16_t Value);
+ void writeUInt16(uint16_t Value);
+ void writeInt32(int32_t Value);
+ void writeUInt32(uint32_t Value);
+ void writeInt64(int64_t Value);
+ void writeUInt64(uint64_t Value);
+ void writeTypeIndex(TypeIndex TypeInd);
+ void writeTypeRecordKind(TypeRecordKind Kind);
+ void writeEncodedInteger(int64_t Value);
+ void writeEncodedSignedInteger(int64_t Value);
+ void writeEncodedUnsignedInteger(uint64_t Value);
+ void writeNullTerminatedString(const char *Value);
+ void writeNullTerminatedString(StringRef Value);
+
+ llvm::StringRef str();
+
+ uint64_t size() const { return Stream.tell(); }
+
+private:
+ llvm::SmallVector<char, 256> Buffer;
+ llvm::raw_svector_ostream Stream;
+ llvm::support::endian::Writer<llvm::support::endianness::little> Writer;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
new file mode 100644
index 0000000..9de110e
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeSymbolEmitter.h
@@ -0,0 +1,37 @@
+//===- TypeSymbolEmitter.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPESYMBOLEMITTER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+
+namespace llvm {
+namespace codeview {
+
+class TypeSymbolEmitter {
+private:
+ TypeSymbolEmitter(const TypeSymbolEmitter &) = delete;
+ TypeSymbolEmitter &operator=(const TypeSymbolEmitter &) = delete;
+
+protected:
+ TypeSymbolEmitter() {}
+
+public:
+ virtual ~TypeSymbolEmitter() {}
+
+public:
+ virtual void writeUserDefinedType(TypeIndex TI, StringRef Name) = 0;
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
new file mode 100644
index 0000000..2c950e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/TypeTableBuilder.h
@@ -0,0 +1,60 @@
+//===- TypeTableBuilder.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
+#define LLVM_DEBUGINFO_CODEVIEW_TYPETABLEBUILDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeRecord.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+namespace codeview {
+
+class FieldListRecordBuilder;
+class MethodListRecordBuilder;
+class TypeRecordBuilder;
+
+class TypeTableBuilder {
+private:
+ TypeTableBuilder(const TypeTableBuilder &) = delete;
+ TypeTableBuilder &operator=(const TypeTableBuilder &) = delete;
+
+protected:
+ TypeTableBuilder();
+
+public:
+ virtual ~TypeTableBuilder();
+
+public:
+ TypeIndex writeModifier(const ModifierRecord &Record);
+ TypeIndex writeProcedure(const ProcedureRecord &Record);
+ TypeIndex writeMemberFunction(const MemberFunctionRecord &Record);
+ TypeIndex writeArgumentList(const ArgumentListRecord &Record);
+ TypeIndex writeRecord(TypeRecordBuilder &builder);
+ TypeIndex writePointer(const PointerRecord &Record);
+ TypeIndex writePointerToMember(const PointerToMemberRecord &Record);
+ TypeIndex writeArray(const ArrayRecord &Record);
+ TypeIndex writeAggregate(const AggregateRecord &Record);
+ TypeIndex writeEnum(const EnumRecord &Record);
+ TypeIndex writeBitField(const BitFieldRecord &Record);
+ TypeIndex writeVirtualTableShape(const VirtualTableShapeRecord &Record);
+
+ TypeIndex writeFieldList(FieldListRecordBuilder &FieldList);
+ TypeIndex writeMethodList(MethodListRecordBuilder &MethodList);
+
+private:
+ virtual TypeIndex writeRecord(llvm::StringRef record) = 0;
+};
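+
+// A minimal usage sketch through a concrete subclass (for example the
+// MemoryTypeTableBuilder declared elsewhere in this directory), writing a
+// `const int` modifier record:
+//
+// \code
+//   MemoryTypeTableBuilder Table;
+//   TypeIndex ConstInt = Table.writeModifier(
+//       ModifierRecord(TypeIndex::Int32(), ModifierOptions::Const));
+// \endcode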
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/DIContext.h b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
new file mode 100644
index 0000000..6659a97
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DIContext.h
@@ -0,0 +1,198 @@
+//===-- DIContext.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DIContext, an abstract data structure that holds
+// debug information data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DICONTEXT_H
+#define LLVM_DEBUGINFO_DICONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/RelocVisitor.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// DILineInfo - a format-neutral container for source line information.
+struct DILineInfo {
+ std::string FileName;
+ std::string FunctionName;
+ uint32_t Line;
+ uint32_t Column;
+
+ DILineInfo()
+ : FileName("<invalid>"), FunctionName("<invalid>"), Line(0), Column(0) {}
+
+ bool operator==(const DILineInfo &RHS) const {
+ return Line == RHS.Line && Column == RHS.Column &&
+ FileName == RHS.FileName && FunctionName == RHS.FunctionName;
+ }
+ bool operator!=(const DILineInfo &RHS) const {
+ return !(*this == RHS);
+ }
+};
+
+typedef SmallVector<std::pair<uint64_t, DILineInfo>, 16> DILineInfoTable;
+
+/// DIInliningInfo - a format-neutral container for inlined code description.
+class DIInliningInfo {
+ SmallVector<DILineInfo, 4> Frames;
+ public:
+ DIInliningInfo() {}
+ DILineInfo getFrame(unsigned Index) const {
+ assert(Index < Frames.size());
+ return Frames[Index];
+ }
+ DILineInfo *getMutableFrame(unsigned Index) {
+ assert(Index < Frames.size());
+ return &Frames[Index];
+ }
+ uint32_t getNumberOfFrames() const {
+ return Frames.size();
+ }
+ void addFrame(const DILineInfo &Frame) {
+ Frames.push_back(Frame);
+ }
+};
+
+/// DIGlobal - container for description of a global variable.
+struct DIGlobal {
+ std::string Name;
+ uint64_t Start;
+ uint64_t Size;
+
+ DIGlobal() : Name("<invalid>"), Start(0), Size(0) {}
+};
+
+/// A DINameKind is passed to name search methods to specify a
+/// preference regarding the type of name resolution the caller wants.
+enum class DINameKind { None, ShortName, LinkageName };
+
+/// DILineInfoSpecifier - controls which fields of DILineInfo container
+/// should be filled with data.
+struct DILineInfoSpecifier {
+ enum class FileLineInfoKind { None, Default, AbsoluteFilePath };
+ typedef DINameKind FunctionNameKind;
+
+ FileLineInfoKind FLIKind;
+ FunctionNameKind FNKind;
+
+ DILineInfoSpecifier(FileLineInfoKind FLIKind = FileLineInfoKind::Default,
+ FunctionNameKind FNKind = FunctionNameKind::None)
+ : FLIKind(FLIKind), FNKind(FNKind) {}
+};
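+
+// A brief usage sketch, assuming Ctx is a concrete DIContext: request
+// absolute file paths and linkage names when symbolizing an address (the
+// address value is illustrative):
+//
+// \code
+//   DILineInfoSpecifier Spec(
+//       DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
+//       DINameKind::LinkageName);
+//   DILineInfo Info = Ctx.getLineInfoForAddress(0x401000, Spec);
+// \endcode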
+
+/// Selects which debug sections get dumped.
+enum DIDumpType {
+ DIDT_Null,
+ DIDT_All,
+ DIDT_Abbrev,
+ DIDT_AbbrevDwo,
+ DIDT_Aranges,
+ DIDT_Frames,
+ DIDT_Info,
+ DIDT_InfoDwo,
+ DIDT_Types,
+ DIDT_TypesDwo,
+ DIDT_Line,
+ DIDT_LineDwo,
+ DIDT_Loc,
+ DIDT_LocDwo,
+ DIDT_Macro,
+ DIDT_Ranges,
+ DIDT_Pubnames,
+ DIDT_Pubtypes,
+ DIDT_GnuPubnames,
+ DIDT_GnuPubtypes,
+ DIDT_Str,
+ DIDT_StrDwo,
+ DIDT_StrOffsetsDwo,
+ DIDT_AppleNames,
+ DIDT_AppleTypes,
+ DIDT_AppleNamespaces,
+ DIDT_AppleObjC,
+ DIDT_CUIndex,
+ DIDT_TUIndex,
+};
+
+class DIContext {
+public:
+ enum DIContextKind {
+ CK_DWARF,
+ CK_PDB
+ };
+ DIContextKind getKind() const { return Kind; }
+
+ DIContext(DIContextKind K) : Kind(K) {}
+ virtual ~DIContext() {}
+
+ virtual void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All) = 0;
+
+ virtual DILineInfo getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+ virtual DILineInfoTable getLineInfoForAddressRange(uint64_t Address,
+ uint64_t Size, DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+ virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
+private:
+ const DIContextKind Kind;
+};
+
+/// An interface for inquiring about the load address of a loaded object file
+/// to be used by the DIContext implementations when applying relocations
+/// on the fly.
+class LoadedObjectInfo {
+protected:
+ LoadedObjectInfo(const LoadedObjectInfo &) = default;
+ LoadedObjectInfo() = default;
+
+public:
+ virtual ~LoadedObjectInfo() = default;
+
+ /// Obtain the Load Address of a section by SectionRef.
+ ///
+ /// Calculate the address of the given section.
+ /// The section need not be present in the local address space. The addresses
+ /// need to be consistent with the addresses used to query the DIContext and
+ /// the output of this function should be deterministic, i.e. repeated calls with
+ /// the same Sec should give the same address.
+ virtual uint64_t getSectionLoadAddress(const object::SectionRef &Sec) const = 0;
+
+ /// If conveniently available, return the content of the given Section.
+ ///
+ /// When the section is available in the local address space, in relocated (loaded)
+ /// form, e.g. because it was relocated by a JIT for execution, this function
+ /// should provide the contents of said section in `Data`. If the loaded section
+ /// is not available, or the cost of retrieving it would be prohibitive, this
+ /// function should return false. In that case, relocations will be read from the
+ /// local (unrelocated) object file and applied on the fly. Note that this method
+/// is used purely for optimization purposes in the common case of JITting in the
+ /// local address space, so returning false should always be correct.
+ virtual bool getLoadedSectionContents(const object::SectionRef &Sec,
+ StringRef &Data) const {
+ return false;
+ }
+
+ /// Obtain a copy of this LoadedObjectInfo.
+ ///
+ /// The caller is responsible for deallocation once the copy is no longer required.
+ virtual std::unique_ptr<LoadedObjectInfo> clone() const = 0;
+};
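+
+// A minimal sketch of a client implementation that applies one uniform load
+// bias to every section (SlideObjectInfo and Slide are assumed names, not
+// part of this interface):
+//
+// \code
+//   class SlideObjectInfo : public LoadedObjectInfo {
+//     uint64_t Slide;
+//   public:
+//     explicit SlideObjectInfo(uint64_t S) : Slide(S) {}
+//     uint64_t
+//     getSectionLoadAddress(const object::SectionRef &Sec) const override {
+//       return Sec.getAddress() + Slide;
+//     }
+//     std::unique_ptr<LoadedObjectInfo> clone() const override {
+//       return std::unique_ptr<LoadedObjectInfo>(new SlideObjectInfo(*this));
+//     }
+//   };
+// \endcode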
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
new file mode 100644
index 0000000..6ab5d5c
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h
@@ -0,0 +1,62 @@
+//===-- DWARFAbbreviationDeclaration.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
+#define LLVM_LIB_DEBUGINFO_DWARFABBREVIATIONDECLARATION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/DataExtractor.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFAbbreviationDeclaration {
+public:
+ struct AttributeSpec {
+ AttributeSpec(uint16_t Attr, uint16_t Form) : Attr(Attr), Form(Form) {}
+ uint16_t Attr;
+ uint16_t Form;
+ };
+ typedef SmallVector<AttributeSpec, 8> AttributeSpecVector;
+
+ DWARFAbbreviationDeclaration();
+
+ uint32_t getCode() const { return Code; }
+ uint32_t getTag() const { return Tag; }
+ bool hasChildren() const { return HasChildren; }
+
+ typedef iterator_range<AttributeSpecVector::const_iterator>
+ attr_iterator_range;
+
+ attr_iterator_range attributes() const {
+ return attr_iterator_range(AttributeSpecs.begin(), AttributeSpecs.end());
+ }
+
+ uint16_t getFormByIndex(uint32_t idx) const {
+ return idx < AttributeSpecs.size() ? AttributeSpecs[idx].Form : 0;
+ }
+
+ uint32_t findAttributeIndex(uint16_t attr) const;
+ bool extract(DataExtractor Data, uint32_t* OffsetPtr);
+ void dump(raw_ostream &OS) const;
+
+private:
+ void clear();
+
+ uint32_t Code;
+ uint32_t Tag;
+ bool HasChildren;
+
+ AttributeSpecVector AttributeSpecs;
+};
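+
+// A brief usage sketch, assuming AbbrevDecl was populated by extract() and
+// handleAttribute is a hypothetical per-attribute callback: walk the
+// attribute specs in declaration order.
+//
+// \code
+//   for (const auto &Spec : AbbrevDecl.attributes())
+//     handleAttribute(Spec.Attr, Spec.Form);
+// \endcode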
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
new file mode 100644
index 0000000..47dbf5f
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
@@ -0,0 +1,54 @@
+//===--- DWARFAcceleratorTable.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFACCELERATORTABLE_H
+#define LLVM_LIB_DEBUGINFO_DWARFACCELERATORTABLE_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include <cstdint>
+
+namespace llvm {
+
+class DWARFAcceleratorTable {
+
+ struct Header {
+ uint32_t Magic;
+ uint16_t Version;
+ uint16_t HashFunction;
+ uint32_t NumBuckets;
+ uint32_t NumHashes;
+ uint32_t HeaderDataLength;
+ };
+
+ struct HeaderData {
+ typedef uint16_t AtomType;
+ typedef uint16_t Form;
+ uint32_t DIEOffsetBase;
+ SmallVector<std::pair<AtomType, Form>, 3> Atoms;
+ };
+
+ struct Header Hdr;
+ struct HeaderData HdrData;
+ DataExtractor AccelSection;
+ DataExtractor StringSection;
+ const RelocAddrMap& Relocs;
+public:
+ DWARFAcceleratorTable(DataExtractor AccelSection, DataExtractor StringSection,
+ const RelocAddrMap &Relocs)
+ : AccelSection(AccelSection), StringSection(StringSection), Relocs(Relocs) {}
+
+ bool extract();
+ void dump(raw_ostream &OS) const;
+};
+
+}
+
+#endif
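
A short sketch, assuming AccelBytes and StrBytes hold the contents of one Apple accelerator section (e.g. .apple_names) and the matching string section; the empty RelocAddrMap means no relocations need resolving.

    #include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpAccelTable(llvm::StringRef AccelBytes, llvm::StringRef StrBytes) {
      llvm::RelocAddrMap NoRelocs;
      llvm::DWARFAcceleratorTable Accel(
          llvm::DataExtractor(AccelBytes, /*IsLittleEndian=*/true, 0),
          llvm::DataExtractor(StrBytes, /*IsLittleEndian=*/true, 0), NoRelocs);
      if (Accel.extract()) // validates the header before dumping
        Accel.dump(llvm::outs());
    }
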
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
new file mode 100644
index 0000000..bae3154
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFCompileUnit.h
@@ -0,0 +1,34 @@
+//===-- DWARFCompileUnit.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFCOMPILEUNIT_H
+#define LLVM_LIB_DEBUGINFO_DWARFCOMPILEUNIT_H
+
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+
+namespace llvm {
+
+class DWARFCompileUnit : public DWARFUnit {
+public:
+ DWARFCompileUnit(DWARFContext &Context, const DWARFSection &Section,
+ const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
+ StringRef SOS, StringRef AOS, StringRef LS, bool LE,
+ const DWARFUnitSectionBase &UnitSection,
+ const DWARFUnitIndex::Entry *Entry)
+ : DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, UnitSection,
+ Entry) {}
+ void dump(raw_ostream &OS);
+ static const DWARFSectionKind Section = DW_SECT_INFO;
+ // VTable anchor.
+ ~DWARFCompileUnit() override;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
new file mode 100644
index 0000000..c91012b
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -0,0 +1,319 @@
+//===-- DWARFContext.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFCONTEXT_H
+#define LLVM_LIB_DEBUGINFO_DWARFCONTEXT_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFCompileUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugAranges.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugFrame.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugMacro.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFTypeUnit.h"
+#include <vector>
+
+namespace llvm {
+
+// Instead of applying the relocations to the data we've read from disk, we
+// keep a separate mapping table on the side and consult it at the locations
+// in the DWARF where we expect relocated values. This adds a bit of
+// complexity to the DWARF parsing/extraction, with the benefit of not
+// allocating memory for the entire size of the debug info sections.
+typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t> > RelocAddrMap;
+
+/// DWARFContext
+/// This data structure is the top level entity that deals with dwarf debug
+/// information parsing. The actual data is supplied through pure virtual
+/// methods that a concrete implementation provides.
+class DWARFContext : public DIContext {
+
+ DWARFUnitSection<DWARFCompileUnit> CUs;
+ std::vector<DWARFUnitSection<DWARFTypeUnit>> TUs;
+ std::unique_ptr<DWARFUnitIndex> CUIndex;
+ std::unique_ptr<DWARFUnitIndex> TUIndex;
+ std::unique_ptr<DWARFDebugAbbrev> Abbrev;
+ std::unique_ptr<DWARFDebugLoc> Loc;
+ std::unique_ptr<DWARFDebugAranges> Aranges;
+ std::unique_ptr<DWARFDebugLine> Line;
+ std::unique_ptr<DWARFDebugFrame> DebugFrame;
+ std::unique_ptr<DWARFDebugMacro> Macro;
+
+ DWARFUnitSection<DWARFCompileUnit> DWOCUs;
+ std::vector<DWARFUnitSection<DWARFTypeUnit>> DWOTUs;
+ std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
+ std::unique_ptr<DWARFDebugLocDWO> LocDWO;
+
+ DWARFContext(DWARFContext &) = delete;
+ DWARFContext &operator=(DWARFContext &) = delete;
+
+ /// Read compile units from the debug_info section (if necessary)
+ /// and store them in CUs.
+ void parseCompileUnits();
+
+ /// Read type units from the debug_types sections (if necessary)
+ /// and store them in TUs.
+ void parseTypeUnits();
+
+ /// Read compile units from the debug_info.dwo section (if necessary)
+ /// and store them in DWOCUs.
+ void parseDWOCompileUnits();
+
+ /// Read type units from the debug_types.dwo section (if necessary)
+ /// and store them in DWOTUs.
+ void parseDWOTypeUnits();
+
+public:
+ DWARFContext() : DIContext(CK_DWARF) {}
+
+ static bool classof(const DIContext *DICtx) {
+ return DICtx->getKind() == CK_DWARF;
+ }
+
+ void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All) override;
+
+ typedef DWARFUnitSection<DWARFCompileUnit>::iterator_range cu_iterator_range;
+ typedef DWARFUnitSection<DWARFTypeUnit>::iterator_range tu_iterator_range;
+ typedef iterator_range<std::vector<DWARFUnitSection<DWARFTypeUnit>>::iterator> tu_section_iterator_range;
+
+ /// Get compile units in this context.
+ cu_iterator_range compile_units() {
+ parseCompileUnits();
+ return cu_iterator_range(CUs.begin(), CUs.end());
+ }
+
+ /// Get type units in this context.
+ tu_section_iterator_range type_unit_sections() {
+ parseTypeUnits();
+ return tu_section_iterator_range(TUs.begin(), TUs.end());
+ }
+
+ /// Get compile units in the DWO context.
+ cu_iterator_range dwo_compile_units() {
+ parseDWOCompileUnits();
+ return cu_iterator_range(DWOCUs.begin(), DWOCUs.end());
+ }
+
+ /// Get type units in the DWO context.
+ tu_section_iterator_range dwo_type_unit_sections() {
+ parseDWOTypeUnits();
+ return tu_section_iterator_range(DWOTUs.begin(), DWOTUs.end());
+ }
+
+ /// Get the number of compile units in this context.
+ unsigned getNumCompileUnits() {
+ parseCompileUnits();
+ return CUs.size();
+ }
+
+ /// Get the number of type units in this context.
+ unsigned getNumTypeUnits() {
+ parseTypeUnits();
+ return TUs.size();
+ }
+
+ /// Get the number of compile units in the DWO context.
+ unsigned getNumDWOCompileUnits() {
+ parseDWOCompileUnits();
+ return DWOCUs.size();
+ }
+
+ /// Get the number of type units in the DWO context.
+ unsigned getNumDWOTypeUnits() {
+ parseDWOTypeUnits();
+ return DWOTUs.size();
+ }
+
+ /// Get the compile unit at the specified index in this context.
+ DWARFCompileUnit *getCompileUnitAtIndex(unsigned index) {
+ parseCompileUnits();
+ return CUs[index].get();
+ }
+
+ /// Get the compile unit at the specified index for the DWO compile units.
+ DWARFCompileUnit *getDWOCompileUnitAtIndex(unsigned index) {
+ parseDWOCompileUnits();
+ return DWOCUs[index].get();
+ }
+
+ const DWARFUnitIndex &getCUIndex();
+ const DWARFUnitIndex &getTUIndex();
+
+ /// Get a pointer to the parsed DebugAbbrev object.
+ const DWARFDebugAbbrev *getDebugAbbrev();
+
+ /// Get a pointer to the parsed DebugLoc object.
+ const DWARFDebugLoc *getDebugLoc();
+
+ /// Get a pointer to the parsed dwo abbreviations object.
+ const DWARFDebugAbbrev *getDebugAbbrevDWO();
+
+ /// Get a pointer to the parsed dwo DebugLoc object.
+ const DWARFDebugLocDWO *getDebugLocDWO();
+
+ /// Get a pointer to the parsed DebugAranges object.
+ const DWARFDebugAranges *getDebugAranges();
+
+ /// Get a pointer to the parsed frame information object.
+ const DWARFDebugFrame *getDebugFrame();
+
+ /// Get a pointer to the parsed DebugMacro object.
+ const DWARFDebugMacro *getDebugMacro();
+
+ /// Get a pointer to a parsed line table corresponding to a compile unit.
+ const DWARFDebugLine::LineTable *getLineTableForUnit(DWARFUnit *cu);
+
+ DILineInfo getLineInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DILineInfoTable getLineInfoForAddressRange(uint64_t Address, uint64_t Size,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DIInliningInfo getInliningInfoForAddress(uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+
+ virtual bool isLittleEndian() const = 0;
+ virtual uint8_t getAddressSize() const = 0;
+ virtual const DWARFSection &getInfoSection() = 0;
+ typedef MapVector<object::SectionRef, DWARFSection,
+ std::map<object::SectionRef, unsigned>> TypeSectionMap;
+ virtual const TypeSectionMap &getTypesSections() = 0;
+ virtual StringRef getAbbrevSection() = 0;
+ virtual const DWARFSection &getLocSection() = 0;
+ virtual StringRef getARangeSection() = 0;
+ virtual StringRef getDebugFrameSection() = 0;
+ virtual const DWARFSection &getLineSection() = 0;
+ virtual StringRef getStringSection() = 0;
+ virtual StringRef getRangeSection() = 0;
+ virtual StringRef getMacinfoSection() = 0;
+ virtual StringRef getPubNamesSection() = 0;
+ virtual StringRef getPubTypesSection() = 0;
+ virtual StringRef getGnuPubNamesSection() = 0;
+ virtual StringRef getGnuPubTypesSection() = 0;
+
+ // Sections for DWARF5 split dwarf proposal.
+ virtual const DWARFSection &getInfoDWOSection() = 0;
+ virtual const TypeSectionMap &getTypesDWOSections() = 0;
+ virtual StringRef getAbbrevDWOSection() = 0;
+ virtual const DWARFSection &getLineDWOSection() = 0;
+ virtual const DWARFSection &getLocDWOSection() = 0;
+ virtual StringRef getStringDWOSection() = 0;
+ virtual StringRef getStringOffsetDWOSection() = 0;
+ virtual StringRef getRangeDWOSection() = 0;
+ virtual StringRef getAddrSection() = 0;
+ virtual const DWARFSection& getAppleNamesSection() = 0;
+ virtual const DWARFSection& getAppleTypesSection() = 0;
+ virtual const DWARFSection& getAppleNamespacesSection() = 0;
+ virtual const DWARFSection& getAppleObjCSection() = 0;
+ virtual StringRef getCUIndexSection() = 0;
+ virtual StringRef getTUIndexSection() = 0;
+
+ static bool isSupportedVersion(unsigned version) {
+ return version == 2 || version == 3 || version == 4 || version == 5;
+ }
+private:
+ /// Return the compile unit that includes an offset (relative to .debug_info).
+ DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
+
+ /// Return the compile unit which contains the instruction with the
+ /// provided address.
+ DWARFCompileUnit *getCompileUnitForAddress(uint64_t Address);
+};
+
+/// DWARFContextInMemory is the simplest possible implementation of a
+/// DWARFContext. It assumes all content is available in memory and stores
+/// pointers to it.
+class DWARFContextInMemory : public DWARFContext {
+ virtual void anchor();
+ bool IsLittleEndian;
+ uint8_t AddressSize;
+ DWARFSection InfoSection;
+ TypeSectionMap TypesSections;
+ StringRef AbbrevSection;
+ DWARFSection LocSection;
+ StringRef ARangeSection;
+ StringRef DebugFrameSection;
+ DWARFSection LineSection;
+ StringRef StringSection;
+ StringRef RangeSection;
+ StringRef MacinfoSection;
+ StringRef PubNamesSection;
+ StringRef PubTypesSection;
+ StringRef GnuPubNamesSection;
+ StringRef GnuPubTypesSection;
+
+ // Sections for DWARF5 split dwarf proposal.
+ DWARFSection InfoDWOSection;
+ TypeSectionMap TypesDWOSections;
+ StringRef AbbrevDWOSection;
+ DWARFSection LineDWOSection;
+ DWARFSection LocDWOSection;
+ StringRef StringDWOSection;
+ StringRef StringOffsetDWOSection;
+ StringRef RangeDWOSection;
+ StringRef AddrSection;
+ DWARFSection AppleNamesSection;
+ DWARFSection AppleTypesSection;
+ DWARFSection AppleNamespacesSection;
+ DWARFSection AppleObjCSection;
+ StringRef CUIndexSection;
+ StringRef TUIndexSection;
+
+ SmallVector<SmallString<32>, 4> UncompressedSections;
+
+public:
+ DWARFContextInMemory(const object::ObjectFile &Obj,
+ const LoadedObjectInfo *L = nullptr);
+ bool isLittleEndian() const override { return IsLittleEndian; }
+ uint8_t getAddressSize() const override { return AddressSize; }
+ const DWARFSection &getInfoSection() override { return InfoSection; }
+ const TypeSectionMap &getTypesSections() override { return TypesSections; }
+ StringRef getAbbrevSection() override { return AbbrevSection; }
+ const DWARFSection &getLocSection() override { return LocSection; }
+ StringRef getARangeSection() override { return ARangeSection; }
+ StringRef getDebugFrameSection() override { return DebugFrameSection; }
+ const DWARFSection &getLineSection() override { return LineSection; }
+ StringRef getStringSection() override { return StringSection; }
+ StringRef getRangeSection() override { return RangeSection; }
+ StringRef getMacinfoSection() override { return MacinfoSection; }
+ StringRef getPubNamesSection() override { return PubNamesSection; }
+ StringRef getPubTypesSection() override { return PubTypesSection; }
+ StringRef getGnuPubNamesSection() override { return GnuPubNamesSection; }
+ StringRef getGnuPubTypesSection() override { return GnuPubTypesSection; }
+ const DWARFSection& getAppleNamesSection() override { return AppleNamesSection; }
+ const DWARFSection& getAppleTypesSection() override { return AppleTypesSection; }
+ const DWARFSection& getAppleNamespacesSection() override { return AppleNamespacesSection; }
+ const DWARFSection& getAppleObjCSection() override { return AppleObjCSection; }
+
+ // Sections for DWARF5 split dwarf proposal.
+ const DWARFSection &getInfoDWOSection() override { return InfoDWOSection; }
+ const TypeSectionMap &getTypesDWOSections() override {
+ return TypesDWOSections;
+ }
+ StringRef getAbbrevDWOSection() override { return AbbrevDWOSection; }
+ const DWARFSection &getLineDWOSection() override { return LineDWOSection; }
+ const DWARFSection &getLocDWOSection() override { return LocDWOSection; }
+ StringRef getStringDWOSection() override { return StringDWOSection; }
+ StringRef getStringOffsetDWOSection() override {
+ return StringOffsetDWOSection;
+ }
+ StringRef getRangeDWOSection() override { return RangeDWOSection; }
+ StringRef getAddrSection() override {
+ return AddrSection;
+ }
+ StringRef getCUIndexSection() override { return CUIndexSection; }
+ StringRef getTUIndexSection() override { return TUIndexSection; }
+};
+
+}
+
+#endif
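
A sketch of the typical entry point, assuming Obj is an object file opened elsewhere (e.g. via object::ObjectFile::createObjectFile) and PC is an assumed instruction address inside it; DILineInfo's FileName/Line members come from DIContext.h.

    #include "llvm/DebugInfo/DWARF/DWARFContext.h"
    #include "llvm/Object/ObjectFile.h"
    #include "llvm/Support/raw_ostream.h"

    // Map one address back to a source position using the in-memory context.
    void printLineInfo(const llvm::object::ObjectFile &Obj, uint64_t PC) {
      llvm::DWARFContextInMemory DICtx(Obj);
      llvm::DILineInfo Info = DICtx.getLineInfoForAddress(PC);
      llvm::outs() << Info.FileName << ":" << Info.Line << "\n";
    }
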
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
new file mode 100644
index 0000000..2114208
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h
@@ -0,0 +1,63 @@
+//===-- DWARFDebugAbbrev.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGABBREV_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGABBREV_H
+
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include <list>
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class DWARFAbbreviationDeclarationSet {
+ uint32_t Offset;
+ /// Code of the first abbreviation, if all abbreviations in the set have
+ /// consecutive codes. UINT32_MAX otherwise.
+ uint32_t FirstAbbrCode;
+ std::vector<DWARFAbbreviationDeclaration> Decls;
+
+public:
+ DWARFAbbreviationDeclarationSet();
+
+ uint32_t getOffset() const { return Offset; }
+ void dump(raw_ostream &OS) const;
+ bool extract(DataExtractor Data, uint32_t *OffsetPtr);
+
+ const DWARFAbbreviationDeclaration *
+ getAbbreviationDeclaration(uint32_t AbbrCode) const;
+
+private:
+ void clear();
+};
+
+class DWARFDebugAbbrev {
+ typedef std::map<uint64_t, DWARFAbbreviationDeclarationSet>
+ DWARFAbbreviationDeclarationSetMap;
+
+ DWARFAbbreviationDeclarationSetMap AbbrDeclSets;
+ mutable DWARFAbbreviationDeclarationSetMap::const_iterator PrevAbbrOffsetPos;
+
+public:
+ DWARFDebugAbbrev();
+
+ const DWARFAbbreviationDeclarationSet *
+ getAbbreviationDeclarationSet(uint64_t CUAbbrOffset) const;
+
+ void dump(raw_ostream &OS) const;
+ void extract(DataExtractor Data);
+
+private:
+ void clear();
+};
+
+}
+
+#endif
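
A sketch, assuming AbbrevBytes holds the raw .debug_abbrev section and CUAbbrOffset is the debug_abbrev_offset field from a compile unit header.

    #include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
    #include "llvm/Support/raw_ostream.h"

    // Parse the whole section once, then resolve the declaration set a unit
    // points at; the per-offset map above makes repeated lookups cheap.
    void dumpAbbrevSetForCU(llvm::StringRef AbbrevBytes,
                            uint64_t CUAbbrOffset) {
      llvm::DWARFDebugAbbrev Abbrev;
      Abbrev.extract(
          llvm::DataExtractor(AbbrevBytes, /*IsLittleEndian=*/true, 0));
      if (const llvm::DWARFAbbreviationDeclarationSet *Set =
              Abbrev.getAbbreviationDeclarationSet(CUAbbrOffset))
        Set->dump(llvm::outs());
    }
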
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
new file mode 100644
index 0000000..837a8e6
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h
@@ -0,0 +1,70 @@
+//===-- DWARFDebugArangeSet.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGARANGESET_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGARANGESET_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/DataExtractor.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugArangeSet {
+public:
+ struct Header {
+ // The total length of the entries for that set, not including the length
+ // field itself.
+ uint32_t Length;
+ // The offset from the beginning of the .debug_info section of the
+ // compilation unit entry referenced by the table.
+ uint32_t CuOffset;
+ // The DWARF version number.
+ uint16_t Version;
+ // The size in bytes of an address on the target architecture. For segmented
+ // addressing, this is the size of the offset portion of the address.
+ uint8_t AddrSize;
+ // The size in bytes of a segment descriptor on the target architecture.
+ // If the target system uses a flat address space, this value is 0.
+ uint8_t SegSize;
+ };
+
+ struct Descriptor {
+ uint64_t Address;
+ uint64_t Length;
+ uint64_t getEndAddress() const { return Address + Length; }
+ };
+
+private:
+ typedef std::vector<Descriptor> DescriptorColl;
+ typedef iterator_range<DescriptorColl::const_iterator> desc_iterator_range;
+
+ uint32_t Offset;
+ Header HeaderData;
+ DescriptorColl ArangeDescriptors;
+
+public:
+ DWARFDebugArangeSet() { clear(); }
+ void clear();
+ bool extract(DataExtractor data, uint32_t *offset_ptr);
+ void dump(raw_ostream &OS) const;
+
+ uint32_t getCompileUnitDIEOffset() const { return HeaderData.CuOffset; }
+
+ desc_iterator_range descriptors() const {
+ return desc_iterator_range(ArangeDescriptors.begin(),
+ ArangeDescriptors.end());
+ }
+};
+
+}
+
+#endif
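
A sketch, assuming ArangesBytes holds a little-endian .debug_aranges section; each set maps address ranges to the compile unit DIE offset printed first.

    #include "llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpAranges(llvm::StringRef ArangesBytes) {
      llvm::DataExtractor Data(ArangesBytes, /*IsLittleEndian=*/true, 0);
      uint32_t Offset = 0;
      llvm::DWARFDebugArangeSet Set;
      while (Set.extract(Data, &Offset)) {
        llvm::outs() << "CU DIE at offset " << Set.getCompileUnitDIEOffset()
                     << "\n";
        for (const auto &Desc : Set.descriptors()) // [Address, EndAddress)
          llvm::outs() << "  " << Desc.Address << " .. "
                       << Desc.getEndAddress() << "\n";
      }
    }
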
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h
new file mode 100644
index 0000000..791f010
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h
@@ -0,0 +1,87 @@
+//===-- DWARFDebugAranges.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGARANGES_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGARANGES_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/DataExtractor.h"
+#include <vector>
+
+namespace llvm {
+
+class DWARFContext;
+
+class DWARFDebugAranges {
+public:
+ void generate(DWARFContext *CTX);
+ uint32_t findAddress(uint64_t Address) const;
+
+private:
+ void clear();
+ void extract(DataExtractor DebugArangesData);
+
+ // Call appendRange multiple times and then call construct.
+ void appendRange(uint32_t CUOffset, uint64_t LowPC, uint64_t HighPC);
+ void construct();
+
+ struct Range {
+ explicit Range(uint64_t LowPC = -1ULL, uint64_t HighPC = -1ULL,
+ uint32_t CUOffset = -1U)
+ : LowPC(LowPC), Length(HighPC - LowPC), CUOffset(CUOffset) {}
+
+ void setHighPC(uint64_t HighPC) {
+ if (HighPC == -1ULL || HighPC <= LowPC)
+ Length = 0;
+ else
+ Length = HighPC - LowPC;
+ }
+ uint64_t HighPC() const {
+ if (Length)
+ return LowPC + Length;
+ return -1ULL;
+ }
+
+ bool containsAddress(uint64_t Address) const {
+ return LowPC <= Address && Address < HighPC();
+ }
+ bool operator<(const Range &other) const {
+ return LowPC < other.LowPC;
+ }
+
+ uint64_t LowPC; // Start of address range.
+ uint32_t Length; // Length of the address range.
+ uint32_t CUOffset; // Offset of the compile unit or die.
+ };
+
+ struct RangeEndpoint {
+ uint64_t Address;
+ uint32_t CUOffset;
+ bool IsRangeStart;
+
+ RangeEndpoint(uint64_t Address, uint32_t CUOffset, bool IsRangeStart)
+ : Address(Address), CUOffset(CUOffset), IsRangeStart(IsRangeStart) {}
+
+ bool operator<(const RangeEndpoint &Other) const {
+ return Address < Other.Address;
+ }
+ };
+
+
+ typedef std::vector<Range> RangeColl;
+ typedef RangeColl::const_iterator RangeCollIterator;
+
+ std::vector<RangeEndpoint> Endpoints;
+ RangeColl Aranges;
+ DenseSet<uint32_t> ParsedCUOffsets;
+};
+
+}
+
+#endif
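
A sketch of the two public calls, assuming CTX is a parsed DWARFContext and PC an instruction address; the -1U sentinel mirrors the default CUOffset of the Range entries above.

    #include "llvm/DebugInfo/DWARF/DWARFContext.h"
    #include "llvm/Support/raw_ostream.h"

    // Build the combined address table and map a PC to the .debug_info
    // offset of the compile unit covering it.
    void findCUForAddress(llvm::DWARFContext &CTX, uint64_t PC) {
      llvm::DWARFDebugAranges Aranges;
      Aranges.generate(&CTX);
      uint32_t CUOffset = Aranges.findAddress(PC);
      if (CUOffset != -1U) // assumed not-found sentinel, per Range above
        llvm::outs() << "compile unit at offset " << CUOffset << "\n";
    }
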
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
new file mode 100644
index 0000000..be925cb
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugFrame.h
@@ -0,0 +1,43 @@
+//===-- DWARFDebugFrame.h - Parsing of .debug_frame -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGFRAME_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGFRAME_H
+
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class FrameEntry;
+
+/// \brief A parsed .debug_frame section
+///
+class DWARFDebugFrame {
+public:
+ DWARFDebugFrame();
+ ~DWARFDebugFrame();
+
+ /// \brief Dump the section data into the given stream.
+ void dump(raw_ostream &OS) const;
+
+ /// \brief Parse the section from raw data.
+ /// Data is assumed to point to the beginning of the section.
+ void parse(DataExtractor Data);
+
+private:
+ std::vector<std::unique_ptr<FrameEntry>> Entries;
+};
+
+
+} // namespace llvm
+
+#endif
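
A sketch, assuming FrameBytes holds the raw .debug_frame of a little-endian, 64-bit object (raw_ostream.h comes in through the header above).

    #include "llvm/DebugInfo/DWARF/DWARFDebugFrame.h"

    void dumpDebugFrame(llvm::StringRef FrameBytes) {
      llvm::DWARFDebugFrame Frame;
      Frame.parse(llvm::DataExtractor(FrameBytes, /*IsLittleEndian=*/true,
                                      /*AddressSize=*/8));
      Frame.dump(llvm::outs()); // prints the parsed frame entries
    }
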
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
new file mode 100644
index 0000000..f29d5fe
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h
@@ -0,0 +1,160 @@
+//===-- DWARFDebugInfoEntry.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGINFOENTRY_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGINFOENTRY_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class DWARFDebugAranges;
+class DWARFCompileUnit;
+class DWARFUnit;
+class DWARFContext;
+class DWARFFormValue;
+struct DWARFDebugInfoEntryInlinedChain;
+
+/// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data.
+class DWARFDebugInfoEntryMinimal {
+ /// Offset within the .debug_info of the start of this entry.
+ uint32_t Offset;
+
+ /// How many to add to "this" to get the sibling.
+ uint32_t SiblingIdx;
+
+ const DWARFAbbreviationDeclaration *AbbrevDecl;
+public:
+ DWARFDebugInfoEntryMinimal()
+ : Offset(0), SiblingIdx(0), AbbrevDecl(nullptr) {}
+
+ void dump(raw_ostream &OS, DWARFUnit *u, unsigned recurseDepth,
+ unsigned indent = 0) const;
+ void dumpAttribute(raw_ostream &OS, DWARFUnit *u, uint32_t *offset_ptr,
+ uint16_t attr, uint16_t form, unsigned indent = 0) const;
+
+ /// Extracts a debug info entry, which is a child of a given unit,
+ /// starting at a given offset. If the DIE can't be extracted, returns false and
+ /// doesn't change OffsetPtr.
+ bool extractFast(const DWARFUnit *U, uint32_t *OffsetPtr);
+
+ uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
+ bool isNULL() const { return AbbrevDecl == nullptr; }
+
+ /// Returns true if DIE represents a subprogram (not inlined).
+ bool isSubprogramDIE() const;
+ /// Returns true if DIE represents a subprogram or an inlined
+ /// subroutine.
+ bool isSubroutineDIE() const;
+
+ uint32_t getOffset() const { return Offset; }
+ bool hasChildren() const { return !isNULL() && AbbrevDecl->hasChildren(); }
+
+ // We know we are kept in a vector of contiguous entries, so we know
+ // our sibling will be some index after "this".
+ const DWARFDebugInfoEntryMinimal *getSibling() const {
+ return SiblingIdx > 0 ? this + SiblingIdx : nullptr;
+ }
+
+ // We know we are kept in a vector of contiguous entries, so we know
+ // we don't need to store our child pointer, if we have a child it will
+ // be the next entry in the list...
+ const DWARFDebugInfoEntryMinimal *getFirstChild() const {
+ return hasChildren() ? this + 1 : nullptr;
+ }
+
+ void setSibling(const DWARFDebugInfoEntryMinimal *Sibling) {
+ if (Sibling) {
+ // We know we are kept in a vector of contiguous entries, so we know
+ // our sibling will be some index after "this".
+ SiblingIdx = Sibling - this;
+ } else
+ SiblingIdx = 0;
+ }
+
+ const DWARFAbbreviationDeclaration *getAbbreviationDeclarationPtr() const {
+ return AbbrevDecl;
+ }
+
+ bool getAttributeValue(const DWARFUnit *U, const uint16_t Attr,
+ DWARFFormValue &FormValue) const;
+
+ const char *getAttributeValueAsString(const DWARFUnit *U, const uint16_t Attr,
+ const char *FailValue) const;
+
+ uint64_t getAttributeValueAsAddress(const DWARFUnit *U, const uint16_t Attr,
+ uint64_t FailValue) const;
+
+ uint64_t getAttributeValueAsUnsignedConstant(const DWARFUnit *U,
+ const uint16_t Attr,
+ uint64_t FailValue) const;
+
+ uint64_t getAttributeValueAsReference(const DWARFUnit *U, const uint16_t Attr,
+ uint64_t FailValue) const;
+
+ uint64_t getAttributeValueAsSectionOffset(const DWARFUnit *U,
+ const uint16_t Attr,
+ uint64_t FailValue) const;
+
+ uint64_t getRangesBaseAttribute(const DWARFUnit *U, uint64_t FailValue) const;
+
+ /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
+ /// Returns true if both attributes are present.
+ bool getLowAndHighPC(const DWARFUnit *U, uint64_t &LowPC,
+ uint64_t &HighPC) const;
+
+ DWARFAddressRangesVector getAddressRanges(const DWARFUnit *U) const;
+
+ void collectChildrenAddressRanges(const DWARFUnit *U,
+ DWARFAddressRangesVector &Ranges) const;
+
+ bool addressRangeContainsAddress(const DWARFUnit *U,
+ const uint64_t Address) const;
+
+ /// If a DIE represents a subprogram (or inlined subroutine),
+ /// returns its mangled name (or short name, if mangled is missing).
+ /// This name may be fetched from specification or abstract origin
+ /// for this subprogram. Returns null if no name is found.
+ const char *getSubroutineName(const DWARFUnit *U, DINameKind Kind) const;
+
+ /// Return the DIE name resolving DW_AT_specification or
+ /// DW_AT_abstract_origin references if necessary.
+ /// Returns null if no name is found.
+ const char *getName(const DWARFUnit *U, DINameKind Kind) const;
+
+ /// Retrieves values of DW_AT_call_file, DW_AT_call_line and
+ /// DW_AT_call_column from DIE (or zeroes if they are missing).
+ void getCallerFrame(const DWARFUnit *U, uint32_t &CallFile,
+ uint32_t &CallLine, uint32_t &CallColumn) const;
+
+ /// Get inlined chain for a given address, rooted at the current DIE.
+ /// Returns empty chain if address is not contained in address range
+ /// of current DIE.
+ DWARFDebugInfoEntryInlinedChain
+ getInlinedChainForAddress(const DWARFUnit *U, const uint64_t Address) const;
+};
+
+/// DWARFDebugInfoEntryInlinedChain - represents a chain of inlined_subroutine
+/// DIEs (possibly ending with a subprogram DIE), all of which are contained
+/// in some concrete inlined instance tree. The address range for each DIE
+/// in this chain (except the last one) is contained in the address
+/// range of the next DIE in the chain.
+struct DWARFDebugInfoEntryInlinedChain {
+ DWARFDebugInfoEntryInlinedChain() : U(nullptr) {}
+ SmallVector<DWARFDebugInfoEntryMinimal, 4> DIEs;
+ const DWARFUnit *U;
+};
+
+}
+
+#endif
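
A sketch of walking the flattened DIE vector through the child/sibling index scheme above; the root would come from elsewhere, e.g. DWARFUnit::getUnitDIE(false) so that the whole tree is extracted.

    #include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
    #include "llvm/Support/raw_ostream.h"

    // Depth-first dump of DW_TAG_* codes; NULL entries terminate each
    // sibling chain and are skipped.
    void visit(const llvm::DWARFDebugInfoEntryMinimal *DIE, unsigned Depth) {
      if (!DIE || DIE->isNULL())
        return;
      llvm::outs().indent(Depth * 2) << "tag " << DIE->getTag() << "\n";
      for (const auto *Child = DIE->getFirstChild();
           Child && !Child->isNULL(); Child = Child->getSibling())
        visit(Child, Depth + 1);
    }
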
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
new file mode 100644
index 0000000..760950b
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
@@ -0,0 +1,252 @@
+//===-- DWARFDebugLine.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGLINE_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGLINE_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/Support/DataExtractor.h"
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugLine {
+public:
+ DWARFDebugLine(const RelocAddrMap* LineInfoRelocMap) : RelocMap(LineInfoRelocMap) {}
+ struct FileNameEntry {
+ FileNameEntry() : Name(nullptr), DirIdx(0), ModTime(0), Length(0) {}
+
+ const char *Name;
+ uint64_t DirIdx;
+ uint64_t ModTime;
+ uint64_t Length;
+ };
+
+ struct Prologue {
+ Prologue();
+
+ // The size in bytes of the statement information for this compilation unit
+ // (not including the total_length field itself).
+ uint64_t TotalLength;
+ // Version identifier for the statement information format.
+ uint16_t Version;
+ // The number of bytes following the prologue_length field to the beginning
+ // of the first byte of the statement program itself.
+ uint64_t PrologueLength;
+ // The size in bytes of the smallest target machine instruction. Statement
+ // program opcodes that alter the address register first multiply their
+ // operands by this value.
+ uint8_t MinInstLength;
+ // The maximum number of individual operations that may be encoded in an
+ // instruction.
+ uint8_t MaxOpsPerInst;
+ // The initial value of the is_stmt register.
+ uint8_t DefaultIsStmt;
+ // This parameter affects the meaning of the special opcodes. See below.
+ int8_t LineBase;
+ // This parameter affects the meaning of the special opcodes. See below.
+ uint8_t LineRange;
+ // The number assigned to the first special opcode.
+ uint8_t OpcodeBase;
+ std::vector<uint8_t> StandardOpcodeLengths;
+ std::vector<const char*> IncludeDirectories;
+ std::vector<FileNameEntry> FileNames;
+
+ bool IsDWARF64;
+ uint32_t sizeofTotalLength() const {
+ return IsDWARF64 ? 12 : 4;
+ }
+ uint32_t sizeofPrologueLength() const {
+ return IsDWARF64 ? 8 : 4;
+ }
+
+ // Length of the prologue in bytes.
+ uint32_t getLength() const {
+ return PrologueLength + sizeofTotalLength() + sizeof(Version) +
+ sizeofPrologueLength();
+ }
+ // Length of the line table data in bytes (not including the prologue).
+ uint32_t getStatementTableLength() const {
+ return TotalLength + sizeofTotalLength() - getLength();
+ }
+ int32_t getMaxLineIncrementForSpecialOpcode() const {
+ return LineBase + (int8_t)LineRange - 1;
+ }
+
+ void clear();
+ void dump(raw_ostream &OS) const;
+ bool parse(DataExtractor debug_line_data, uint32_t *offset_ptr);
+ };
+
+ // Standard .debug_line state machine structure.
+ struct Row {
+ explicit Row(bool default_is_stmt = false);
+
+ /// Called after a row is appended to the matrix.
+ void postAppend();
+ void reset(bool default_is_stmt);
+ void dump(raw_ostream &OS) const;
+
+ static bool orderByAddress(const Row& LHS, const Row& RHS) {
+ return LHS.Address < RHS.Address;
+ }
+
+ // The program-counter value corresponding to a machine instruction
+ // generated by the compiler.
+ uint64_t Address;
+ // An unsigned integer indicating a source line number. Lines are numbered
+ // beginning at 1. The compiler may emit the value 0 in cases where an
+ // instruction cannot be attributed to any source line.
+ uint32_t Line;
+ // An unsigned integer indicating a column number within a source line.
+ // Columns are numbered beginning at 1. The value 0 is reserved to indicate
+ // that a statement begins at the 'left edge' of the line.
+ uint16_t Column;
+ // An unsigned integer indicating the identity of the source file
+ // corresponding to a machine instruction.
+ uint16_t File;
+ // An unsigned integer whose value encodes the applicable instruction set
+ // architecture for the current instruction.
+ uint8_t Isa;
+ // An unsigned integer representing the DWARF path discriminator value
+ // for this location.
+ uint32_t Discriminator;
+ // A boolean indicating that the current instruction is the beginning of a
+ // statement.
+ uint8_t IsStmt:1,
+ // A boolean indicating that the current instruction is the
+ // beginning of a basic block.
+ BasicBlock:1,
+ // A boolean indicating that the current address is that of the
+ // first byte after the end of a sequence of target machine
+ // instructions.
+ EndSequence:1,
+ // A boolean indicating that the current address is one (of possibly
+ // many) where execution should be suspended for an entry breakpoint
+ // of a function.
+ PrologueEnd:1,
+ // A boolean indicating that the current address is one (of possibly
+ // many) where execution should be suspended for an exit breakpoint
+ // of a function.
+ EpilogueBegin:1;
+ };
+
+ // Represents a series of contiguous machine instructions. The line table
+ // for each compilation unit may consist of multiple sequences, which are
+ // not guaranteed to be in ascending instruction address order.
+ struct Sequence {
+ // Sequence describes instructions at address range [LowPC, HighPC)
+ // and is described by line table rows [FirstRowIndex, LastRowIndex).
+ uint64_t LowPC;
+ uint64_t HighPC;
+ unsigned FirstRowIndex;
+ unsigned LastRowIndex;
+ bool Empty;
+
+ Sequence();
+ void reset();
+
+ static bool orderByLowPC(const Sequence& LHS, const Sequence& RHS) {
+ return LHS.LowPC < RHS.LowPC;
+ }
+ bool isValid() const {
+ return !Empty && (LowPC < HighPC) && (FirstRowIndex < LastRowIndex);
+ }
+ bool containsPC(uint64_t pc) const {
+ return (LowPC <= pc && pc < HighPC);
+ }
+ };
+
+ struct LineTable {
+ LineTable();
+
+ // Represents an invalid row
+ const uint32_t UnknownRowIndex = UINT32_MAX;
+
+ void appendRow(const DWARFDebugLine::Row &R) {
+ Rows.push_back(R);
+ }
+ void appendSequence(const DWARFDebugLine::Sequence &S) {
+ Sequences.push_back(S);
+ }
+
+ // Returns the index of the row with file/line info for a given address,
+ // or UnknownRowIndex if there is no such row.
+ uint32_t lookupAddress(uint64_t address) const;
+
+ bool lookupAddressRange(uint64_t address, uint64_t size,
+ std::vector<uint32_t> &result) const;
+
+ // Extracts filename by its index in filename table in prologue.
+ // Returns true on success.
+ bool getFileNameByIndex(uint64_t FileIndex, const char *CompDir,
+ DILineInfoSpecifier::FileLineInfoKind Kind,
+ std::string &Result) const;
+
+ // Fills the Result argument with the file and line information
+ // corresponding to Address. Returns true on success.
+ bool getFileLineInfoForAddress(uint64_t Address, const char *CompDir,
+ DILineInfoSpecifier::FileLineInfoKind Kind,
+ DILineInfo &Result) const;
+
+ void dump(raw_ostream &OS) const;
+ void clear();
+
+ /// Parse prologue and all rows.
+ bool parse(DataExtractor debug_line_data, const RelocAddrMap *RMap,
+ uint32_t *offset_ptr);
+
+ struct Prologue Prologue;
+ typedef std::vector<Row> RowVector;
+ typedef RowVector::const_iterator RowIter;
+ typedef std::vector<Sequence> SequenceVector;
+ typedef SequenceVector::const_iterator SequenceIter;
+ RowVector Rows;
+ SequenceVector Sequences;
+
+ private:
+ uint32_t findRowInSeq(const DWARFDebugLine::Sequence &seq,
+ uint64_t address) const;
+ };
+
+ const LineTable *getLineTable(uint32_t offset) const;
+ const LineTable *getOrParseLineTable(DataExtractor debug_line_data,
+ uint32_t offset);
+
+private:
+ struct ParsingState {
+ ParsingState(struct LineTable *LT);
+
+ void resetRowAndSequence();
+ void appendRowToMatrix(uint32_t offset);
+
+ // Line table we're currently parsing.
+ struct LineTable *LineTable;
+ // The row number that starts at zero for the prologue, and increases for
+ // each row added to the matrix.
+ unsigned RowNumber;
+ struct Row Row;
+ struct Sequence Sequence;
+ };
+
+ typedef std::map<uint32_t, LineTable> LineTableMapTy;
+ typedef LineTableMapTy::iterator LineTableIter;
+ typedef LineTableMapTy::const_iterator LineTableConstIter;
+
+ const RelocAddrMap *RelocMap;
+ LineTableMapTy LineTableMap;
+};
+}
+
+#endif
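
A sketch, assuming LineBytes is the raw .debug_line contents, StmtOffset the unit's DW_AT_stmt_list value and PC an instruction address; the empty relocation map means addresses are used as stored.

    #include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
    #include "llvm/Support/raw_ostream.h"

    // Parse one line table and print the row covering a PC, if any.
    void printRowForPC(llvm::StringRef LineBytes, uint32_t StmtOffset,
                       uint64_t PC) {
      llvm::RelocAddrMap NoRelocs;
      llvm::DWARFDebugLine Line(&NoRelocs);
      llvm::DataExtractor Data(LineBytes, /*IsLittleEndian=*/true,
                               /*AddressSize=*/8);
      if (const llvm::DWARFDebugLine::LineTable *LT =
              Line.getOrParseLineTable(Data, StmtOffset)) {
        uint32_t RowIdx = LT->lookupAddress(PC);
        if (RowIdx != LT->UnknownRowIndex)
          LT->Rows[RowIdx].dump(llvm::outs());
      }
    }
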
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
new file mode 100644
index 0000000..bd44c2e
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugLoc.h
@@ -0,0 +1,81 @@
+//===-- DWARFDebugLoc.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGLOC_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGLOC_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/Support/DataExtractor.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugLoc {
+ /// A single location within a location list.
+ struct Entry {
+ /// The beginning address of the instruction range.
+ uint64_t Begin;
+ /// The ending address of the instruction range.
+ uint64_t End;
+ /// The location of the variable within the specified range.
+ SmallVector<unsigned char, 4> Loc;
+ };
+
+ /// A list of locations that contain one variable.
+ struct LocationList {
+ /// The beginning offset where this location list is stored in the debug_loc
+ /// section.
+ unsigned Offset;
+ /// All the locations in which the variable is stored.
+ SmallVector<Entry, 2> Entries;
+ };
+
+ typedef SmallVector<LocationList, 4> LocationLists;
+
+ /// A list of all the variables in the debug_loc section, each one describing
+ /// the locations in which the variable is stored.
+ LocationLists Locations;
+
+ /// A map used to resolve binary relocations.
+ const RelocAddrMap &RelocMap;
+
+public:
+ DWARFDebugLoc(const RelocAddrMap &LocRelocMap) : RelocMap(LocRelocMap) {}
+ /// Print the location lists found within the debug_loc section.
+ void dump(raw_ostream &OS) const;
+ /// Parse the debug_loc section accessible via the 'data' parameter using the
+ /// specified address size to interpret the address ranges.
+ void parse(DataExtractor data, unsigned AddressSize);
+};
+
+class DWARFDebugLocDWO {
+ struct Entry {
+ uint64_t Start;
+ uint32_t Length;
+ SmallVector<unsigned char, 4> Loc;
+ };
+
+ struct LocationList {
+ unsigned Offset;
+ SmallVector<Entry, 2> Entries;
+ };
+
+ typedef SmallVector<LocationList, 4> LocationLists;
+
+ LocationLists Locations;
+
+public:
+ void parse(DataExtractor data);
+ void dump(raw_ostream &OS) const;
+};
+}
+
+#endif
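
A sketch, assuming LocBytes holds the raw .debug_loc of a little-endian, 64-bit object; the empty RelocAddrMap means addresses are taken as stored.

    #include "llvm/DebugInfo/DWARF/DWARFDebugLoc.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpDebugLoc(llvm::StringRef LocBytes) {
      llvm::RelocAddrMap NoRelocs;
      llvm::DWARFDebugLoc Loc(NoRelocs);
      Loc.parse(llvm::DataExtractor(LocBytes, /*IsLittleEndian=*/true, 8),
                /*AddressSize=*/8);
      Loc.dump(llvm::outs());
    }
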
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
new file mode 100644
index 0000000..f791096
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugMacro.h
@@ -0,0 +1,59 @@
+//===-- DWARFDebugMacro.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
+#define LLVM_DEBUGINFO_DWARF_DWARFDEBUGMACRO_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Dwarf.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class DWARFDebugMacro {
+ /// A single macro entry within a macro list.
+ struct Entry {
+ /// The type of the macro entry.
+ uint32_t Type;
+ union {
+ /// The source line where the macro is defined.
+ uint64_t Line;
+ /// Vendor extension constant value.
+ uint64_t ExtConstant;
+ };
+
+ union {
+ /// The string (name, value) of the macro entry.
+ const char *MacroStr;
+ /// An unsigned integer indicating the identity of the source file.
+ uint64_t File;
+ /// Vendor extension string.
+ const char *ExtStr;
+ };
+ };
+
+ typedef SmallVector<Entry, 4> MacroList;
+
+ /// A list of all the macro entries in the debug_macinfo section.
+ MacroList Macros;
+
+public:
+ DWARFDebugMacro() {}
+ /// Print the macro list found within the debug_macinfo section.
+ void dump(raw_ostream &OS) const;
+ /// Parse the debug_macinfo section accessible via the 'data' parameter.
+ void parse(DataExtractor data);
+};
+
+}
+
+#endif
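
A sketch, assuming MacinfoBytes holds the raw .debug_macinfo section contents.

    #include "llvm/DebugInfo/DWARF/DWARFDebugMacro.h"
    #include "llvm/Support/raw_ostream.h"

    void dumpMacinfo(llvm::StringRef MacinfoBytes) {
      llvm::DWARFDebugMacro Macro;
      Macro.parse(llvm::DataExtractor(MacinfoBytes, /*IsLittleEndian=*/true,
                                      /*AddressSize=*/0));
      Macro.dump(llvm::outs());
    }
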
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
new file mode 100644
index 0000000..c930bd6
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h
@@ -0,0 +1,79 @@
+//===-- DWARFDebugRangeList.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFDEBUGRANGELIST_H
+#define LLVM_LIB_DEBUGINFO_DWARFDEBUGRANGELIST_H
+
+#include "llvm/Support/DataExtractor.h"
+#include <vector>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// DWARFAddressRangesVector - represents a set of absolute address ranges.
+typedef std::vector<std::pair<uint64_t, uint64_t>> DWARFAddressRangesVector;
+
+class DWARFDebugRangeList {
+public:
+ struct RangeListEntry {
+ // A beginning address offset. This address offset has the size of an
+ // address and is relative to the applicable base address of the
+ // compilation unit referencing this range list. It marks the beginning
+ // of an address range.
+ uint64_t StartAddress;
+ // An ending address offset. This address offset again has the size of
+ // an address and is relative to the applicable base address of the
+ // compilation unit referencing this range list. It marks the first
+ // address past the end of the address range. The ending address must
+ // be greater than or equal to the beginning address.
+ uint64_t EndAddress;
+ // The end of any given range list is marked by an end of list entry,
+ // which consists of a 0 for the beginning address offset
+ // and a 0 for the ending address offset.
+ bool isEndOfListEntry() const {
+ return (StartAddress == 0) && (EndAddress == 0);
+ }
+ // A base address selection entry consists of:
+ // 1. The value of the largest representable address offset
+ // (for example, 0xffffffff when the size of an address is 32 bits).
+ // 2. An address, which defines the appropriate base address for
+ // use in interpreting the beginning and ending address offsets of
+ // subsequent entries of the location list.
+ bool isBaseAddressSelectionEntry(uint8_t AddressSize) const {
+ assert(AddressSize == 4 || AddressSize == 8);
+ if (AddressSize == 4)
+ return StartAddress == -1U;
+ else
+ return StartAddress == -1ULL;
+ }
+ };
+
+private:
+ // Offset in .debug_ranges section.
+ uint32_t Offset;
+ uint8_t AddressSize;
+ std::vector<RangeListEntry> Entries;
+
+public:
+ DWARFDebugRangeList() { clear(); }
+ void clear();
+ void dump(raw_ostream &OS) const;
+ bool extract(DataExtractor data, uint32_t *offset_ptr);
+ const std::vector<RangeListEntry> &getEntries() { return Entries; }
+
+ /// getAbsoluteRanges - Returns absolute address ranges defined by this range
+ /// list. Has to be passed base address of the compile unit referencing this
+ /// range list.
+ DWARFAddressRangesVector getAbsoluteRanges(uint64_t BaseAddress) const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H
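
A sketch, assuming RangesBytes holds the raw .debug_ranges section, Offset points at one range list within it, and BaseAddr is the referencing compile unit's base address.

    #include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
    #include "llvm/Support/raw_ostream.h"

    // Extract one range list and resolve it to absolute address pairs.
    void dumpRangeList(llvm::StringRef RangesBytes, uint32_t Offset,
                       uint64_t BaseAddr) {
      llvm::DataExtractor Data(RangesBytes, /*IsLittleEndian=*/true,
                               /*AddressSize=*/8);
      llvm::DWARFDebugRangeList RL;
      if (!RL.extract(Data, &Offset))
        return;
      for (const auto &R : RL.getAbsoluteRanges(BaseAddr))
        llvm::outs() << R.first << " .. " << R.second << "\n";
    }
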
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
new file mode 100644
index 0000000..3c32a3e
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -0,0 +1,99 @@
+//===-- DWARFFormValue.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_DWARFFORMVALUE_H
+#define LLVM_DEBUGINFO_DWARFFORMVALUE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/DataExtractor.h"
+
+namespace llvm {
+
+class DWARFUnit;
+class raw_ostream;
+
+class DWARFFormValue {
+public:
+ enum FormClass {
+ FC_Unknown,
+ FC_Address,
+ FC_Block,
+ FC_Constant,
+ FC_String,
+ FC_Flag,
+ FC_Reference,
+ FC_Indirect,
+ FC_SectionOffset,
+ FC_Exprloc
+ };
+
+private:
+ struct ValueType {
+ ValueType() : data(nullptr) {
+ uval = 0;
+ }
+
+ union {
+ uint64_t uval;
+ int64_t sval;
+ const char* cstr;
+ };
+ const uint8_t* data;
+ };
+
+ uint16_t Form; // Form for this value.
+ ValueType Value; // Contains all data for the form.
+
+public:
+ DWARFFormValue(uint16_t Form = 0) : Form(Form) {}
+ uint16_t getForm() const { return Form; }
+ bool isFormClass(FormClass FC) const;
+
+ void dump(raw_ostream &OS, const DWARFUnit *U) const;
+
+ /// \brief Extracts a value in data at offset *offset_ptr.
+ ///
+ /// The passed DWARFUnit is allowed to be nullptr, in which
+ /// case no relocation processing will be performed and some
+ /// kinds of forms that depend on Unit information are disallowed.
+ /// \returns whether the extraction succeeded.
+ bool extractValue(DataExtractor data, uint32_t *offset_ptr,
+ const DWARFUnit *u);
+ bool isInlinedCStr() const {
+ return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
+ }
+
+ /// The getAsFoo functions below return the extracted value as Foo if and
+ /// only if the DWARFFormValue's form class is suitable for representing Foo.
+ Optional<uint64_t> getAsReference(const DWARFUnit *U) const;
+ Optional<uint64_t> getAsUnsignedConstant() const;
+ Optional<int64_t> getAsSignedConstant() const;
+ Optional<const char *> getAsCString(const DWARFUnit *U) const;
+ Optional<uint64_t> getAsAddress(const DWARFUnit *U) const;
+ Optional<uint64_t> getAsSectionOffset() const;
+ Optional<ArrayRef<uint8_t>> getAsBlock() const;
+
+ bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr,
+ const DWARFUnit *u) const;
+ static bool skipValue(uint16_t form, DataExtractor debug_info_data,
+ uint32_t *offset_ptr, const DWARFUnit *u);
+ static bool skipValue(uint16_t form, DataExtractor debug_info_data,
+ uint32_t *offset_ptr, uint16_t Version,
+ uint8_t AddrSize);
+
+ static ArrayRef<uint8_t> getFixedFormSizes(uint8_t AddrSize,
+ uint16_t Version);
+private:
+ void dumpString(raw_ostream &OS, const DWARFUnit *U) const;
+};
+
+}
+
+#endif
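
A sketch of decoding a unit-independent form: four little-endian bytes holding a DW_FORM_data4 constant; the byte values are made up for illustration, and the form constant comes from llvm/Support/Dwarf.h.

    #include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
    #include "llvm/Support/Dwarf.h"
    #include "llvm/Support/raw_ostream.h"

    void decodeData4() {
      const char Bytes[] = {0x2a, 0x00, 0x00, 0x00}; // the constant 42
      llvm::DWARFFormValue FV(llvm::dwarf::DW_FORM_data4);
      uint32_t Offset = 0;
      llvm::DataExtractor Data(llvm::StringRef(Bytes, sizeof(Bytes)),
                               /*IsLittleEndian=*/true, 0);
      if (FV.extractValue(Data, &Offset, /*u=*/nullptr))
        if (llvm::Optional<uint64_t> V = FV.getAsUnsignedConstant())
          llvm::outs() << *V << "\n"; // prints 42
    }
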
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
new file mode 100644
index 0000000..d7fe303
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h
@@ -0,0 +1,22 @@
+//===-- DWARFRelocMap.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFRELOCMAP_H
+#define LLVM_LIB_DEBUGINFO_DWARFRELOCMAP_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+typedef DenseMap<uint64_t, std::pair<uint8_t, int64_t> > RelocAddrMap;
+
+} // namespace llvm
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h
new file mode 100644
index 0000000..3e27b52
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFSection.h
@@ -0,0 +1,25 @@
+//===-- DWARFSection.h ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFSECTION_H
+#define LLVM_LIB_DEBUGINFO_DWARFSECTION_H
+
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+struct DWARFSection {
+ StringRef Data;
+ RelocAddrMap Relocs;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
new file mode 100644
index 0000000..894a88d
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFTypeUnit.h
@@ -0,0 +1,42 @@
+//===-- DWARFTypeUnit.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFTYPEUNIT_H
+#define LLVM_LIB_DEBUGINFO_DWARFTYPEUNIT_H
+
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+
+namespace llvm {
+
+class DWARFTypeUnit : public DWARFUnit {
+private:
+ uint64_t TypeHash;
+ uint32_t TypeOffset;
+public:
+ DWARFTypeUnit(DWARFContext &Context, const DWARFSection &Section,
+ const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
+ StringRef SOS, StringRef AOS, StringRef LS, bool LE,
+ const DWARFUnitSectionBase &UnitSection,
+ const DWARFUnitIndex::Entry *Entry)
+ : DWARFUnit(Context, Section, DA, RS, SS, SOS, AOS, LS, LE, UnitSection,
+ Entry) {}
+ uint32_t getHeaderSize() const override {
+ return DWARFUnit::getHeaderSize() + 12;
+ }
+ void dump(raw_ostream &OS);
+ static const DWARFSectionKind Section = DW_SECT_TYPES;
+
+protected:
+ bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr) override;
+};
+
+}
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
new file mode 100644
index 0000000..681b2aa
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h
@@ -0,0 +1,299 @@
+//===-- DWARFUnit.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFUNIT_H
+#define LLVM_LIB_DEBUGINFO_DWARFUNIT_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugAbbrev.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugInfoEntry.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFRelocMap.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
+#include <vector>
+
+namespace llvm {
+
+namespace object {
+class ObjectFile;
+}
+
+class DWARFContext;
+class DWARFDebugAbbrev;
+class DWARFUnit;
+class StringRef;
+class raw_ostream;
+
+/// Base class for all DWARFUnitSection classes. This provides the
+/// functionality common to all unit types.
+class DWARFUnitSectionBase {
+public:
+ /// Returns the Unit that contains the given section offset in the
+ /// same section this Unit originated from.
+ virtual DWARFUnit *getUnitForOffset(uint32_t Offset) const = 0;
+
+ void parse(DWARFContext &C, const DWARFSection &Section);
+ void parseDWO(DWARFContext &C, const DWARFSection &DWOSection,
+ DWARFUnitIndex *Index = nullptr);
+
+protected:
+ virtual void parseImpl(DWARFContext &Context, const DWARFSection &Section,
+ const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
+ StringRef SOS, StringRef AOS, StringRef LS,
+ bool isLittleEndian) = 0;
+
+ ~DWARFUnitSectionBase() = default;
+};
+
+const DWARFUnitIndex &getDWARFUnitIndex(DWARFContext &Context,
+ DWARFSectionKind Kind);
+
+/// Concrete instance of DWARFUnitSection, specialized for one Unit type.
+template<typename UnitType>
+class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>,
+ public DWARFUnitSectionBase {
+ bool Parsed;
+
+public:
+ DWARFUnitSection() : Parsed(false) {}
+ DWARFUnitSection(DWARFUnitSection &&DUS) :
+ SmallVector<std::unique_ptr<UnitType>, 1>(std::move(DUS)), Parsed(DUS.Parsed) {}
+
+ typedef llvm::SmallVectorImpl<std::unique_ptr<UnitType>> UnitVector;
+ typedef typename UnitVector::iterator iterator;
+ typedef llvm::iterator_range<typename UnitVector::iterator> iterator_range;
+
+ UnitType *getUnitForOffset(uint32_t Offset) const override {
+ auto *CU = std::upper_bound(
+ this->begin(), this->end(), Offset,
+ [](uint32_t LHS, const std::unique_ptr<UnitType> &RHS) {
+ return LHS < RHS->getNextUnitOffset();
+ });
+ if (CU != this->end())
+ return CU->get();
+ return nullptr;
+ }
+
+private:
+ void parseImpl(DWARFContext &Context, const DWARFSection &Section,
+ const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
+ StringRef SOS, StringRef AOS, StringRef LS, bool LE) override {
+ if (Parsed)
+ return;
+ const auto &Index = getDWARFUnitIndex(Context, UnitType::Section);
+ DataExtractor Data(Section.Data, LE, 0);
+ uint32_t Offset = 0;
+ while (Data.isValidOffset(Offset)) {
+ auto U = llvm::make_unique<UnitType>(Context, Section, DA, RS, SS, SOS,
+ AOS, LS, LE, *this,
+ Index.getFromOffset(Offset));
+ if (!U->extract(Data, &Offset))
+ break;
+ this->push_back(std::move(U));
+ Offset = this->back()->getNextUnitOffset();
+ }
+ Parsed = true;
+ }
+};
+
+class DWARFUnit {
+ DWARFContext &Context;
+ // Section containing this DWARFUnit.
+ const DWARFSection &InfoSection;
+
+ const DWARFDebugAbbrev *Abbrev;
+ StringRef RangeSection;
+ uint32_t RangeSectionBase;
+ StringRef LineSection;
+ StringRef StringSection;
+ StringRef StringOffsetSection;
+ StringRef AddrOffsetSection;
+ uint32_t AddrOffsetSectionBase;
+ bool isLittleEndian;
+ const DWARFUnitSectionBase &UnitSection;
+
+ uint32_t Offset;
+ uint32_t Length;
+ uint16_t Version;
+ const DWARFAbbreviationDeclarationSet *Abbrevs;
+ uint8_t AddrSize;
+ uint64_t BaseAddr;
+ // The compile unit debug information entry items.
+ std::vector<DWARFDebugInfoEntryMinimal> DieArray;
+
+ class DWOHolder {
+ object::OwningBinary<object::ObjectFile> DWOFile;
+ std::unique_ptr<DWARFContext> DWOContext;
+ DWARFUnit *DWOU;
+ public:
+ DWOHolder(StringRef DWOPath);
+ DWARFUnit *getUnit() const { return DWOU; }
+ };
+ std::unique_ptr<DWOHolder> DWO;
+
+ const DWARFUnitIndex::Entry *IndexEntry;
+
+protected:
+ virtual bool extractImpl(DataExtractor debug_info, uint32_t *offset_ptr);
+ /// Size in bytes of the unit header.
+ virtual uint32_t getHeaderSize() const { return 11; }
+
+public:
+ DWARFUnit(DWARFContext &Context, const DWARFSection &Section,
+ const DWARFDebugAbbrev *DA, StringRef RS, StringRef SS,
+ StringRef SOS, StringRef AOS, StringRef LS, bool LE,
+ const DWARFUnitSectionBase &UnitSection,
+ const DWARFUnitIndex::Entry *IndexEntry = nullptr);
+
+ virtual ~DWARFUnit();
+
+ DWARFContext& getContext() const { return Context; }
+
+ StringRef getLineSection() const { return LineSection; }
+ StringRef getStringSection() const { return StringSection; }
+ StringRef getStringOffsetSection() const { return StringOffsetSection; }
+ void setAddrOffsetSection(StringRef AOS, uint32_t Base) {
+ AddrOffsetSection = AOS;
+ AddrOffsetSectionBase = Base;
+ }
+ void setRangesSection(StringRef RS, uint32_t Base) {
+ RangeSection = RS;
+ RangeSectionBase = Base;
+ }
+
+ bool getAddrOffsetSectionItem(uint32_t Index, uint64_t &Result) const;
+ // FIXME: Result should be uint64_t in DWARF64.
+ bool getStringOffsetSectionItem(uint32_t Index, uint32_t &Result) const;
+
+ DataExtractor getDebugInfoExtractor() const {
+ return DataExtractor(InfoSection.Data, isLittleEndian, AddrSize);
+ }
+ DataExtractor getStringExtractor() const {
+ return DataExtractor(StringSection, false, 0);
+ }
+
+ const RelocAddrMap *getRelocMap() const { return &InfoSection.Relocs; }
+
+ bool extract(DataExtractor debug_info, uint32_t* offset_ptr);
+
+ /// extractRangeList - extracts the range list referenced by this compile
+ /// unit from the .debug_ranges section. Returns true on success.
+ /// Requires that the compile unit has already been extracted.
+ bool extractRangeList(uint32_t RangeListOffset,
+ DWARFDebugRangeList &RangeList) const;
+ void clear();
+ uint32_t getOffset() const { return Offset; }
+ uint32_t getNextUnitOffset() const { return Offset + Length + 4; }
+ uint32_t getLength() const { return Length; }
+ uint16_t getVersion() const { return Version; }
+ const DWARFAbbreviationDeclarationSet *getAbbreviations() const {
+ return Abbrevs;
+ }
+ uint8_t getAddressByteSize() const { return AddrSize; }
+ uint64_t getBaseAddress() const { return BaseAddr; }
+
+ void setBaseAddress(uint64_t base_addr) {
+ BaseAddr = base_addr;
+ }
+
+ const DWARFDebugInfoEntryMinimal *getUnitDIE(bool ExtractUnitDIEOnly = true) {
+ extractDIEsIfNeeded(ExtractUnitDIEOnly);
+ return DieArray.empty() ? nullptr : &DieArray[0];
+ }
+
+ const char *getCompilationDir();
+ uint64_t getDWOId();
+
+ void collectAddressRanges(DWARFAddressRangesVector &CURanges);
+
+ /// getInlinedChainForAddress - fetches the inlined chain for a given address.
+ /// Returns an empty chain if there is no subprogram containing the address.
+ /// The chain is valid as long as parsed compile unit DIEs are not cleared.
+ DWARFDebugInfoEntryInlinedChain getInlinedChainForAddress(uint64_t Address);
+
+ /// getUnitSection - Return the DWARFUnitSection containing this unit.
+ const DWARFUnitSectionBase &getUnitSection() const { return UnitSection; }
+
+ /// \brief Returns the number of DIEs in the unit. Parses the unit
+ /// if necessary.
+ unsigned getNumDIEs() {
+ extractDIEsIfNeeded(false);
+ return DieArray.size();
+ }
+
+ /// \brief Return the index of a DIE inside the unit's DIE vector.
+ ///
+ /// It is illegal to call this method with a DIE that hasn't been
+ /// created by this unit. In other words, it's illegal to call this
+ /// method on a DIE that isn't accessible by following
+ /// children/sibling links starting from this unit's getUnitDIE().
+ uint32_t getDIEIndex(const DWARFDebugInfoEntryMinimal *DIE) {
+ assert(!DieArray.empty() && DIE >= &DieArray[0] &&
+ DIE < &DieArray[0] + DieArray.size());
+ return DIE - &DieArray[0];
+ }
+
+ /// \brief Return the DIE object at the given index.
+ const DWARFDebugInfoEntryMinimal *getDIEAtIndex(unsigned Index) const {
+ assert(Index < DieArray.size());
+ return &DieArray[Index];
+ }
+
+ /// \brief Return the DIE object for a given offset inside the
+ /// unit's DIE vector.
+ ///
+ /// The unit needs to have its DIEs extracted for this method to work.
+ const DWARFDebugInfoEntryMinimal *getDIEForOffset(uint32_t Offset) const {
+ assert(!DieArray.empty());
+ auto it = std::lower_bound(
+ DieArray.begin(), DieArray.end(), Offset,
+ [](const DWARFDebugInfoEntryMinimal &LHS, uint32_t Offset) {
+ return LHS.getOffset() < Offset;
+ });
+ return it == DieArray.end() ? nullptr : &*it;
+ }
+
+ uint32_t getLineTableOffset() const {
+ if (IndexEntry)
+ if (const auto *Contrib = IndexEntry->getOffset(DW_SECT_LINE))
+ return Contrib->Offset;
+ return 0;
+ }
+
+private:
+ /// Size in bytes of the .debug_info data associated with this compile unit.
+ size_t getDebugInfoSize() const { return Length + 4 - getHeaderSize(); }
+
+ /// extractDIEsIfNeeded - Parses the compile unit and indexes its DIEs if
+ /// this hasn't already been done. Returns the number of DIEs parsed by this call.
+ size_t extractDIEsIfNeeded(bool CUDieOnly);
+ /// extractDIEsToVector - Appends all parsed DIEs to a vector.
+ void extractDIEsToVector(bool AppendCUDie, bool AppendNonCUDIEs,
+ std::vector<DWARFDebugInfoEntryMinimal> &DIEs) const;
+ /// setDIERelations - We read all of the DIE entries into our flat list of
+ /// DIE entries; now we need to go back through them and set the parent,
+ /// sibling and child pointers for quick DIE navigation.
+ void setDIERelations();
+ /// clearDIEs - Clear parsed DIEs to keep memory usage low.
+ void clearDIEs(bool KeepCUDie);
+
+ /// parseDWO - Parses the .dwo file for the current compile unit. Returns
+ /// true if the DWO unit was actually constructed.
+ bool parseDWO();
+
+ /// getSubprogramForAddress - Returns the subprogram DIE whose address range
+ /// encompasses the provided address. The pointer remains valid as long as
+ /// parsed compile unit DIEs are not cleared.
+ const DWARFDebugInfoEntryMinimal *getSubprogramForAddress(uint64_t Address);
+};
+
+}
+
+#endif
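
The getUnitForOffset lookup above relies on units being stored in ascending offset order, so a binary search against each unit's end offset finds the covering unit. A minimal sketch of that idiom, using a hypothetical stand-in Unit type instead of the real DWARFUnit:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

// Stand-in for DWARFUnit: only the pieces the lookup needs.
struct Unit {
  uint32_t Offset;      // start of the unit header in .debug_info
  uint32_t NextOffset;  // one past the end of the unit
  uint32_t getNextUnitOffset() const { return NextOffset; }
};

// Mirror of DWARFUnitSection::getUnitForOffset: the first unit whose end
// offset is greater than Offset is the one containing it.
const Unit *getUnitForOffset(const std::vector<std::unique_ptr<Unit>> &Units,
                             uint32_t Offset) {
  auto It = std::upper_bound(
      Units.begin(), Units.end(), Offset,
      [](uint32_t LHS, const std::unique_ptr<Unit> &RHS) {
        return LHS < RHS->getNextUnitOffset();
      });
  return It == Units.end() ? nullptr : It->get();
}

int main() {
  std::vector<std::unique_ptr<Unit>> Units;
  Units.push_back(std::make_unique<Unit>(Unit{0, 100}));
  Units.push_back(std::make_unique<Unit>(Unit{100, 240}));
  std::cout << getUnitForOffset(Units, 150)->Offset << "\n"; // prints 100
}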
diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
new file mode 100644
index 0000000..a85c2f9
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnitIndex.h
@@ -0,0 +1,81 @@
+//===-- DWARFUnitIndex.h --------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_DEBUGINFO_DWARFUNITINDEX_H
+#define LLVM_LIB_DEBUGINFO_DWARFUNITINDEX_H
+
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdint>
+
+namespace llvm {
+
+enum DWARFSectionKind {
+ DW_SECT_INFO = 1,
+ DW_SECT_TYPES,
+ DW_SECT_ABBREV,
+ DW_SECT_LINE,
+ DW_SECT_LOC,
+ DW_SECT_STR_OFFSETS,
+ DW_SECT_MACINFO,
+ DW_SECT_MACRO,
+};
+
+class DWARFUnitIndex {
+ struct Header {
+ uint32_t Version;
+ uint32_t NumColumns;
+ uint32_t NumUnits;
+ uint32_t NumBuckets = 0;
+
+ bool parse(DataExtractor IndexData, uint32_t *OffsetPtr);
+ void dump(raw_ostream &OS) const;
+ };
+
+public:
+ class Entry {
+ public:
+ struct SectionContribution {
+ uint32_t Offset;
+ uint32_t Length;
+ };
+
+ private:
+ const DWARFUnitIndex *Index;
+ uint64_t Signature;
+ std::unique_ptr<SectionContribution[]> Contributions;
+ friend class DWARFUnitIndex;
+
+ public:
+ const SectionContribution *getOffset(DWARFSectionKind Sec) const;
+ const SectionContribution *getOffset() const;
+ };
+
+private:
+ struct Header Header;
+
+ DWARFSectionKind InfoColumnKind;
+ int InfoColumn = -1;
+ std::unique_ptr<DWARFSectionKind[]> ColumnKinds;
+ std::unique_ptr<Entry[]> Rows;
+
+ static StringRef getColumnHeader(DWARFSectionKind DS);
+ bool parseImpl(DataExtractor IndexData);
+
+public:
+ bool parse(DataExtractor IndexData);
+ DWARFUnitIndex(DWARFSectionKind InfoColumnKind)
+ : InfoColumnKind(InfoColumnKind) {}
+ void dump(raw_ostream &OS) const;
+ const Entry *getFromOffset(uint32_t Offset) const;
+};
+}
+
+#endif
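
A short sketch of how a caller might drive this index over a raw .debug_cu_index buffer. SectionData is a hypothetical byte buffer pulled from an object file; only methods declared above are used:

#include "llvm/DebugInfo/DWARF/DWARFUnitIndex.h"
#include "llvm/Support/DataExtractor.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void dumpCUIndex(StringRef SectionData, bool IsLittleEndian) {
  // The info column kind tells the index which column identifies units.
  DWARFUnitIndex Index(DW_SECT_INFO);
  DataExtractor Data(SectionData, IsLittleEndian, /*AddressSize=*/0);
  if (!Index.parse(Data)) {
    errs() << "failed to parse unit index\n";
    return;
  }
  Index.dump(outs());
  // Map a unit at .debug_info.dwo offset 0 back to its row, if any.
  if (const DWARFUnitIndex::Entry *E = Index.getFromOffset(0))
    if (const auto *C = E->getOffset(DW_SECT_LINE))
      outs() << "line contribution at " << C->Offset << "\n";
}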
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h b/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
new file mode 100644
index 0000000..b5fa8c3
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h
@@ -0,0 +1,59 @@
+//===- ConcreteSymbolEnumerator.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
+#define LLVM_DEBUGINFO_PDB_CONCRETESYMBOLENUMERATOR_H
+
+#include "IPDBEnumChildren.h"
+#include "llvm/Support/Casting.h"
+#include <memory>
+
+namespace llvm {
+
+template <typename ChildType>
+class ConcreteSymbolEnumerator : public IPDBEnumChildren<ChildType> {
+public:
+ ConcreteSymbolEnumerator(std::unique_ptr<IPDBEnumSymbols> SymbolEnumerator)
+ : Enumerator(std::move(SymbolEnumerator)) {}
+
+ ~ConcreteSymbolEnumerator() override {}
+
+ uint32_t getChildCount() const override {
+ return Enumerator->getChildCount();
+ }
+
+ std::unique_ptr<ChildType> getChildAtIndex(uint32_t Index) const override {
+ std::unique_ptr<PDBSymbol> Child = Enumerator->getChildAtIndex(Index);
+ return make_concrete_child(std::move(Child));
+ }
+
+ std::unique_ptr<ChildType> getNext() override {
+ std::unique_ptr<PDBSymbol> Child = Enumerator->getNext();
+ return make_concrete_child(std::move(Child));
+ }
+
+ void reset() override { Enumerator->reset(); }
+
+ ConcreteSymbolEnumerator<ChildType> *clone() const override {
+ std::unique_ptr<IPDBEnumSymbols> WrappedClone(Enumerator->clone());
+ return new ConcreteSymbolEnumerator<ChildType>(std::move(WrappedClone));
+ }
+
+private:
+ std::unique_ptr<ChildType>
+ make_concrete_child(std::unique_ptr<PDBSymbol> Child) const {
+ ChildType *ConcreteChild = dyn_cast_or_null<ChildType>(Child.get());
+ if (ConcreteChild)
+ Child.release(); // ownership moves into the returned unique_ptr
+ return std::unique_ptr<ChildType>(ConcreteChild);
+ }
+
+ std::unique_ptr<IPDBEnumSymbols> Enumerator;
+};
+}
+
+#endif
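
In practice this wrapper is obtained from PDBSymbol::findAllChildren<T>() (declared later in this diff). A sketch of the typical consumption loop, assuming the PDBSymbolFunc concrete type from elsewhere in the library:

#include "llvm/DebugInfo/PDB/ConcreteSymbolEnumerator.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h" // assumed concrete symbol type

using namespace llvm;

unsigned countFunctions(const PDBSymbol &Scope) {
  unsigned N = 0;
  auto Funcs = Scope.findAllChildren<PDBSymbolFunc>();
  // getNext() yields null both at the end of the stream and when an
  // underlying child fails the dyn_cast in make_concrete_child.
  while (auto F = Funcs->getNext())
    ++N;
  return N;
}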
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
new file mode 100644
index 0000000..7b2bc14
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIADataStream.h
@@ -0,0 +1,33 @@
+//===- DIADataStream.h - DIA implementation of IPDBDataStream ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIADATASTREAM_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
+
+namespace llvm {
+class DIADataStream : public IPDBDataStream {
+public:
+ explicit DIADataStream(CComPtr<IDiaEnumDebugStreamData> DiaStreamData);
+
+ uint32_t getRecordCount() const override;
+ std::string getName() const override;
+ llvm::Optional<RecordType> getItemAtIndex(uint32_t Index) const override;
+ bool getNext(RecordType &Record) override;
+ void reset() override;
+ DIADataStream *clone() const override;
+
+private:
+ CComPtr<IDiaEnumDebugStreamData> StreamData;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
new file mode 100644
index 0000000..375bcdd
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumDebugStreams.h
@@ -0,0 +1,35 @@
+//==- DIAEnumDebugStreams.h - DIA Debug Stream Enumerator impl ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMDEBUGSTREAMS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+
+class IPDBDataStream;
+
+class DIAEnumDebugStreams : public IPDBEnumChildren<IPDBDataStream> {
+public:
+ explicit DIAEnumDebugStreams(CComPtr<IDiaEnumDebugStreams> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+ DIAEnumDebugStreams *clone() const override;
+
+private:
+ CComPtr<IDiaEnumDebugStreams> Enumerator;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
new file mode 100644
index 0000000..4cc85ed
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumLineNumbers.h
@@ -0,0 +1,35 @@
+//==- DIAEnumLineNumbers.h - DIA Line Number Enumerator impl -----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMLINENUMBERS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+
+class IPDBLineNumber;
+
+class DIAEnumLineNumbers : public IPDBEnumChildren<IPDBLineNumber> {
+public:
+ explicit DIAEnumLineNumbers(CComPtr<IDiaEnumLineNumbers> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+ DIAEnumLineNumbers *clone() const override;
+
+private:
+ CComPtr<IDiaEnumLineNumbers> Enumerator;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
new file mode 100644
index 0000000..88625f6
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSourceFiles.h
@@ -0,0 +1,37 @@
+//==- DIAEnumSourceFiles.h - DIA Source File Enumerator impl -----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSOURCEFILES_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+
+class DIASession;
+
+class DIAEnumSourceFiles : public IPDBEnumChildren<IPDBSourceFile> {
+public:
+ explicit DIAEnumSourceFiles(const DIASession &PDBSession,
+ CComPtr<IDiaEnumSourceFiles> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ ChildTypePtr getChildAtIndex(uint32_t Index) const override;
+ ChildTypePtr getNext() override;
+ void reset() override;
+ DIAEnumSourceFiles *clone() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaEnumSourceFiles> Enumerator;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
new file mode 100644
index 0000000..fe343f7
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIAEnumSymbols.h
@@ -0,0 +1,37 @@
+//==- DIAEnumSymbols.h - DIA Symbol Enumerator impl --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIAENUMSYMBOLS_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
+
+namespace llvm {
+
+class DIASession;
+
+class DIAEnumSymbols : public IPDBEnumChildren<PDBSymbol> {
+public:
+ explicit DIAEnumSymbols(const DIASession &Session,
+ CComPtr<IDiaEnumSymbols> DiaEnumerator);
+
+ uint32_t getChildCount() const override;
+ std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
+ std::unique_ptr<PDBSymbol> getNext() override;
+ void reset() override;
+ DIAEnumSymbols *clone() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaEnumSymbols> Enumerator;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
new file mode 100644
index 0000000..5950a0d
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIALineNumber.h
@@ -0,0 +1,39 @@
+//===- DIALineNumber.h - DIA implementation of IPDBLineNumber ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIALINENUMBER_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBLineNumber.h"
+
+namespace llvm {
+class DIALineNumber : public IPDBLineNumber {
+public:
+ explicit DIALineNumber(CComPtr<IDiaLineNumber> DiaLineNumber);
+
+ uint32_t getLineNumber() const override;
+ uint32_t getLineNumberEnd() const override;
+ uint32_t getColumnNumber() const override;
+ uint32_t getColumnNumberEnd() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getAddressOffset() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint64_t getVirtualAddress() const override;
+ uint32_t getLength() const override;
+ uint32_t getSourceFileId() const override;
+ uint32_t getCompilandId() const override;
+ bool isStatement() const override;
+
+private:
+ CComPtr<IDiaLineNumber> LineNumber;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
new file mode 100644
index 0000000..9308b8e
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIARawSymbol.h
@@ -0,0 +1,206 @@
+//===- DIARawSymbol.h - DIA implementation of IPDBRawSymbol ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIARAWSYMBOL_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBRawSymbol.h"
+
+namespace llvm {
+class DIASession;
+class DIARawSymbol : public IPDBRawSymbol {
+public:
+ DIARawSymbol(const DIASession &PDBSession, CComPtr<IDiaSymbol> DiaSymbol);
+
+ void dump(raw_ostream &OS, int Indent) const override;
+
+ CComPtr<IDiaSymbol> getDiaSymbol() const { return Symbol; }
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type, StringRef Name,
+ PDB_NameSearchFlags Flags) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+ uint32_t RVA) const override;
+ std::unique_ptr<IPDBEnumSymbols>
+ findInlineFramesByRVA(uint32_t RVA) const override;
+
+ void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const override;
+ void getFrontEndVersion(VersionInfo &Version) const override;
+ void getBackEndVersion(VersionInfo &Version) const override;
+ PDB_MemberAccess getAccess() const override;
+ uint32_t getAddressOffset() const override;
+ uint32_t getAddressSection() const override;
+ uint32_t getAge() const override;
+ uint32_t getArrayIndexTypeId() const override;
+ uint32_t getBaseDataOffset() const override;
+ uint32_t getBaseDataSlot() const override;
+ uint32_t getBaseSymbolId() const override;
+ PDB_BuiltinType getBuiltinType() const override;
+ uint32_t getBitPosition() const override;
+ PDB_CallingConv getCallingConvention() const override;
+ uint32_t getClassParentId() const override;
+ std::string getCompilerName() const override;
+ uint32_t getCount() const override;
+ uint32_t getCountLiveRanges() const override;
+ PDB_Lang getLanguage() const override;
+ uint32_t getLexicalParentId() const override;
+ std::string getLibraryName() const override;
+ uint32_t getLiveRangeStartAddressOffset() const override;
+ uint32_t getLiveRangeStartAddressSection() const override;
+ uint32_t getLiveRangeStartRelativeVirtualAddress() const override;
+ PDB_RegisterId getLocalBasePointerRegisterId() const override;
+ uint32_t getLowerBoundId() const override;
+ uint32_t getMemorySpaceKind() const override;
+ std::string getName() const override;
+ uint32_t getNumberOfAcceleratorPointerTags() const override;
+ uint32_t getNumberOfColumns() const override;
+ uint32_t getNumberOfModifiers() const override;
+ uint32_t getNumberOfRegisterIndices() const override;
+ uint32_t getNumberOfRows() const override;
+ std::string getObjectFileName() const override;
+ uint32_t getOemId() const override;
+ uint32_t getOemSymbolId() const override;
+ uint32_t getOffsetInUdt() const override;
+ PDB_Cpu getPlatform() const override;
+ uint32_t getRank() const override;
+ PDB_RegisterId getRegisterId() const override;
+ uint32_t getRegisterType() const override;
+ uint32_t getRelativeVirtualAddress() const override;
+ uint32_t getSamplerSlot() const override;
+ uint32_t getSignature() const override;
+ uint32_t getSizeInUdt() const override;
+ uint32_t getSlot() const override;
+ std::string getSourceFileName() const override;
+ uint32_t getStride() const override;
+ uint32_t getSubTypeId() const override;
+ std::string getSymbolsFileName() const override;
+ uint32_t getSymIndexId() const override;
+ uint32_t getTargetOffset() const override;
+ uint32_t getTargetRelativeVirtualAddress() const override;
+ uint64_t getTargetVirtualAddress() const override;
+ uint32_t getTargetSection() const override;
+ uint32_t getTextureSlot() const override;
+ uint32_t getTimeStamp() const override;
+ uint32_t getToken() const override;
+ uint32_t getTypeId() const override;
+ uint32_t getUavSlot() const override;
+ std::string getUndecoratedName() const override;
+ uint32_t getUnmodifiedTypeId() const override;
+ uint32_t getUpperBoundId() const override;
+ Variant getValue() const override;
+ uint32_t getVirtualBaseDispIndex() const override;
+ uint32_t getVirtualBaseOffset() const override;
+ uint32_t getVirtualTableShapeId() const override;
+ PDB_DataKind getDataKind() const override;
+ PDB_SymType getSymTag() const override;
+ PDB_UniqueId getGuid() const override;
+ int32_t getOffset() const override;
+ int32_t getThisAdjust() const override;
+ int32_t getVirtualBasePointerOffset() const override;
+ PDB_LocType getLocationType() const override;
+ PDB_Machine getMachineType() const override;
+ PDB_ThunkOrdinal getThunkOrdinal() const override;
+ uint64_t getLength() const override;
+ uint64_t getLiveRangeLength() const override;
+ uint64_t getVirtualAddress() const override;
+ PDB_UdtType getUdtKind() const override;
+ bool hasConstructor() const override;
+ bool hasCustomCallingConvention() const override;
+ bool hasFarReturn() const override;
+ bool isCode() const override;
+ bool isCompilerGenerated() const override;
+ bool isConstType() const override;
+ bool isEditAndContinueEnabled() const override;
+ bool isFunction() const override;
+ bool getAddressTaken() const override;
+ bool getNoStackOrdering() const override;
+ bool hasAlloca() const override;
+ bool hasAssignmentOperator() const override;
+ bool hasCTypes() const override;
+ bool hasCastOperator() const override;
+ bool hasDebugInfo() const override;
+ bool hasEH() const override;
+ bool hasEHa() const override;
+ bool hasInlAsm() const override;
+ bool hasInlineAttribute() const override;
+ bool hasInterruptReturn() const override;
+ bool hasFramePointer() const override;
+ bool hasLongJump() const override;
+ bool hasManagedCode() const override;
+ bool hasNestedTypes() const override;
+ bool hasNoInlineAttribute() const override;
+ bool hasNoReturnAttribute() const override;
+ bool hasOptimizedCodeDebugInfo() const override;
+ bool hasOverloadedOperator() const override;
+ bool hasSEH() const override;
+ bool hasSecurityChecks() const override;
+ bool hasSetJump() const override;
+ bool hasStrictGSCheck() const override;
+ bool isAcceleratorGroupSharedLocal() const override;
+ bool isAcceleratorPointerTagLiveRange() const override;
+ bool isAcceleratorStubFunction() const override;
+ bool isAggregated() const override;
+ bool isIntroVirtualFunction() const override;
+ bool isCVTCIL() const override;
+ bool isConstructorVirtualBase() const override;
+ bool isCxxReturnUdt() const override;
+ bool isDataAligned() const override;
+ bool isHLSLData() const override;
+ bool isHotpatchable() const override;
+ bool isIndirectVirtualBaseClass() const override;
+ bool isInterfaceUdt() const override;
+ bool isIntrinsic() const override;
+ bool isLTCG() const override;
+ bool isLocationControlFlowDependent() const override;
+ bool isMSILNetmodule() const override;
+ bool isMatrixRowMajor() const override;
+ bool isManagedCode() const override;
+ bool isMSILCode() const override;
+ bool isMultipleInheritance() const override;
+ bool isNaked() const override;
+ bool isNested() const override;
+ bool isOptimizedAway() const override;
+ bool isPacked() const override;
+ bool isPointerBasedOnSymbolValue() const override;
+ bool isPointerToDataMember() const override;
+ bool isPointerToMemberFunction() const override;
+ bool isPureVirtual() const override;
+ bool isRValueReference() const override;
+ bool isRefUdt() const override;
+ bool isReference() const override;
+ bool isRestrictedType() const override;
+ bool isReturnValue() const override;
+ bool isSafeBuffers() const override;
+ bool isScoped() const override;
+ bool isSdl() const override;
+ bool isSingleInheritance() const override;
+ bool isSplitted() const override;
+ bool isStatic() const override;
+ bool hasPrivateSymbols() const override;
+ bool isUnalignedType() const override;
+ bool isUnreached() const override;
+ bool isValueUdt() const override;
+ bool isVirtual() const override;
+ bool isVirtualBaseClass() const override;
+ bool isVirtualInheritance() const override;
+ bool isVolatileType() const override;
+ bool wasInlined() const override;
+ std::string getUnused() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaSymbol> Symbol;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h
new file mode 100644
index 0000000..9a8600f
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASession.h
@@ -0,0 +1,51 @@
+//===- DIASession.h - DIA implementation of IPDBSession ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASESSION_H
+
+#include "DIASupport.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+
+namespace llvm {
+class DIASession : public IPDBSession {
+public:
+ explicit DIASession(CComPtr<IDiaSession> DiaSession);
+
+ static PDB_ErrorCode createFromPdb(StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+ static PDB_ErrorCode createFromExe(StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+
+ uint64_t getLoadAddress() const override;
+ void setLoadAddress(uint64_t Address) override;
+ std::unique_ptr<PDBSymbolExe> getGlobalScope() const override;
+ std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const override;
+
+ std::unique_ptr<PDBSymbol>
+ findSymbolByAddress(uint64_t Address, PDB_SymType Type) const override;
+
+ std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbersByAddress(uint64_t Address, uint32_t Length) const override;
+
+ std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const override;
+ std::unique_ptr<IPDBEnumSourceFiles> getSourceFilesForCompiland(
+ const PDBSymbolCompiland &Compiland) const override;
+ std::unique_ptr<IPDBSourceFile>
+ getSourceFileById(uint32_t FileId) const override;
+
+ std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const override;
+
+private:
+ CComPtr<IDiaSession> Session;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
new file mode 100644
index 0000000..c424e27
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASourceFile.h
@@ -0,0 +1,36 @@
+//===- DIASourceFile.h - DIA implementation of IPDBSourceFile ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASOURCEFILE_H
+
+#include "DIASupport.h"
+#include "llvm/DebugInfo/PDB/IPDBSourceFile.h"
+
+namespace llvm {
+class DIASession;
+
+class DIASourceFile : public IPDBSourceFile {
+public:
+ explicit DIASourceFile(const DIASession &Session,
+ CComPtr<IDiaSourceFile> DiaSourceFile);
+
+ std::string getFileName() const override;
+ uint32_t getUniqueId() const override;
+ std::string getChecksum() const override;
+ PDB_Checksum getChecksumType() const override;
+ std::unique_ptr<IPDBEnumSymbols> getCompilands() const override;
+
+private:
+ const DIASession &Session;
+ CComPtr<IDiaSourceFile> SourceFile;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
new file mode 100644
index 0000000..407a345
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/DIA/DIASupport.h
@@ -0,0 +1,33 @@
+//===- DIASupport.h - Common header includes for DIA ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Common defines and header includes for all of LLVMDebugInfoPDBDIA. The
+// definitions here configure the necessary #defines and include the system
+// headers in the proper order for using DIA.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+#define LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
+
+// Require at least Vista
+#define NTDDI_VERSION NTDDI_VISTA
+#define _WIN32_WINNT _WIN32_WINNT_VISTA
+#define WINVER _WIN32_WINNT_VISTA
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+
+// atlbase.h has to come before windows.h
+#include <atlbase.h>
+#include <windows.h>
+
+// DIA headers must come after windows headers.
+#include <cvconst.h>
+#include <dia2.h>
+
+#endif // LLVM_DEBUGINFO_PDB_DIA_DIASUPPORT_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h
new file mode 100644
index 0000000..808a0f3
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBDataStream.h
@@ -0,0 +1,37 @@
+//===- IPDBDataStream.h - base interface for child enumerator -*- C++ ---*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
+#define LLVM_DEBUGINFO_PDB_IPDBDATASTREAM_H
+
+#include "PDBTypes.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+/// IPDBDataStream defines an interface used to represent a stream consisting
+/// of a name and a series of records whose formats depend on the particular
+/// stream type.
+class IPDBDataStream {
+public:
+ typedef llvm::SmallVector<uint8_t, 32> RecordType;
+
+ virtual ~IPDBDataStream();
+
+ virtual uint32_t getRecordCount() const = 0;
+ virtual std::string getName() const = 0;
+ virtual llvm::Optional<RecordType> getItemAtIndex(uint32_t Index) const = 0;
+ virtual bool getNext(RecordType &Record) = 0;
+ virtual void reset() = 0;
+ virtual IPDBDataStream *clone() const = 0;
+};
+}
+
+#endif
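
A sketch of the intended record-iteration pattern, using only the virtual methods declared above:

#include "llvm/DebugInfo/PDB/IPDBDataStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void dumpStream(IPDBDataStream &Stream) {
  outs() << Stream.getName() << ": " << Stream.getRecordCount()
         << " records\n";
  Stream.reset();                    // rewind to the first record
  IPDBDataStream::RecordType Record; // a SmallVector of raw bytes
  while (Stream.getNext(Record))     // false once the stream is drained
    outs() << "  " << Record.size() << " bytes\n";
}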
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
new file mode 100644
index 0000000..645ac96
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBEnumChildren.h
@@ -0,0 +1,33 @@
+//===- IPDBEnumChildren.h - base interface for child enumerator -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
+#define LLVM_DEBUGINFO_PDB_IPDBENUMCHILDREN_H
+
+#include "PDBTypes.h"
+#include <memory>
+
+namespace llvm {
+
+template <typename ChildType> class IPDBEnumChildren {
+public:
+ typedef std::unique_ptr<ChildType> ChildTypePtr;
+ typedef IPDBEnumChildren<ChildType> MyType;
+
+ virtual ~IPDBEnumChildren() {}
+
+ virtual uint32_t getChildCount() const = 0;
+ virtual ChildTypePtr getChildAtIndex(uint32_t Index) const = 0;
+ virtual ChildTypePtr getNext() = 0;
+ virtual void reset() = 0;
+ virtual MyType *clone() const = 0;
+};
+}
+
+#endif
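
Every enumerator in this diff (debug streams, line numbers, source files, symbols) shares this shape, so a generic helper is a natural fit; a minimal sketch:

#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"

// Apply F to every child; works for any IPDBEnumChildren specialization.
template <typename ChildType, typename Fn>
void forEachChild(llvm::IPDBEnumChildren<ChildType> &Enumerator, Fn F) {
  Enumerator.reset();                       // start from the first child
  while (auto Child = Enumerator.getNext()) // null unique_ptr ends the loop
    F(*Child);
}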
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBLineNumber.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBLineNumber.h
new file mode 100644
index 0000000..92cd58d
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBLineNumber.h
@@ -0,0 +1,36 @@
+//===- IPDBLineNumber.h - base interface for PDB line no. info ---*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H
+#define LLVM_DEBUGINFO_PDB_IPDBLINENUMBER_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class IPDBLineNumber {
+public:
+ virtual ~IPDBLineNumber();
+
+ virtual uint32_t getLineNumber() const = 0;
+ virtual uint32_t getLineNumberEnd() const = 0;
+ virtual uint32_t getColumnNumber() const = 0;
+ virtual uint32_t getColumnNumberEnd() const = 0;
+ virtual uint32_t getAddressSection() const = 0;
+ virtual uint32_t getAddressOffset() const = 0;
+ virtual uint32_t getRelativeVirtualAddress() const = 0;
+ virtual uint64_t getVirtualAddress() const = 0;
+ virtual uint32_t getLength() const = 0;
+ virtual uint32_t getSourceFileId() const = 0;
+ virtual uint32_t getCompilandId() const = 0;
+ virtual bool isStatement() const = 0;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
new file mode 100644
index 0000000..139bff5
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBRawSymbol.h
@@ -0,0 +1,211 @@
+//===- IPDBRawSymbol.h - base interface for PDB symbol types ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_IPDBRAWSYMBOL_H
+
+#include "PDBTypes.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// IPDBRawSymbol defines an interface used to represent an arbitrary symbol.
+/// It exposes a monolithic interface consisting of accessors for the union of
+/// all properties that are valid for any symbol type. This interface is then
+/// wrapped by a concrete class which exposes only the set of methods valid
+/// for that particular symbol type. See PDBSymbol.h for more details.
+class IPDBRawSymbol {
+public:
+ virtual ~IPDBRawSymbol();
+
+ virtual void dump(raw_ostream &OS, int Indent) const = 0;
+
+ virtual std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type) const = 0;
+
+ virtual std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type, StringRef Name,
+ PDB_NameSearchFlags Flags) const = 0;
+ virtual std::unique_ptr<IPDBEnumSymbols>
+ findChildrenByRVA(PDB_SymType Type, StringRef Name, PDB_NameSearchFlags Flags,
+ uint32_t RVA) const = 0;
+ virtual std::unique_ptr<IPDBEnumSymbols>
+ findInlineFramesByRVA(uint32_t RVA) const = 0;
+
+ virtual void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes) const = 0;
+ virtual void getBackEndVersion(VersionInfo &Version) const = 0;
+ virtual PDB_MemberAccess getAccess() const = 0;
+ virtual uint32_t getAddressOffset() const = 0;
+ virtual uint32_t getAddressSection() const = 0;
+ virtual uint32_t getAge() const = 0;
+ virtual uint32_t getArrayIndexTypeId() const = 0;
+ virtual uint32_t getBaseDataOffset() const = 0;
+ virtual uint32_t getBaseDataSlot() const = 0;
+ virtual uint32_t getBaseSymbolId() const = 0;
+ virtual PDB_BuiltinType getBuiltinType() const = 0;
+ virtual uint32_t getBitPosition() const = 0;
+ virtual PDB_CallingConv getCallingConvention() const = 0;
+ virtual uint32_t getClassParentId() const = 0;
+ virtual std::string getCompilerName() const = 0;
+ virtual uint32_t getCount() const = 0;
+ virtual uint32_t getCountLiveRanges() const = 0;
+ virtual void getFrontEndVersion(VersionInfo &Version) const = 0;
+ virtual PDB_Lang getLanguage() const = 0;
+ virtual uint32_t getLexicalParentId() const = 0;
+ virtual std::string getLibraryName() const = 0;
+ virtual uint32_t getLiveRangeStartAddressOffset() const = 0;
+ virtual uint32_t getLiveRangeStartAddressSection() const = 0;
+ virtual uint32_t getLiveRangeStartRelativeVirtualAddress() const = 0;
+ virtual PDB_RegisterId getLocalBasePointerRegisterId() const = 0;
+ virtual uint32_t getLowerBoundId() const = 0;
+ virtual uint32_t getMemorySpaceKind() const = 0;
+ virtual std::string getName() const = 0;
+ virtual uint32_t getNumberOfAcceleratorPointerTags() const = 0;
+ virtual uint32_t getNumberOfColumns() const = 0;
+ virtual uint32_t getNumberOfModifiers() const = 0;
+ virtual uint32_t getNumberOfRegisterIndices() const = 0;
+ virtual uint32_t getNumberOfRows() const = 0;
+ virtual std::string getObjectFileName() const = 0;
+ virtual uint32_t getOemId() const = 0;
+ virtual uint32_t getOemSymbolId() const = 0;
+ virtual uint32_t getOffsetInUdt() const = 0;
+ virtual PDB_Cpu getPlatform() const = 0;
+ virtual uint32_t getRank() const = 0;
+ virtual PDB_RegisterId getRegisterId() const = 0;
+ virtual uint32_t getRegisterType() const = 0;
+ virtual uint32_t getRelativeVirtualAddress() const = 0;
+ virtual uint32_t getSamplerSlot() const = 0;
+ virtual uint32_t getSignature() const = 0;
+ virtual uint32_t getSizeInUdt() const = 0;
+ virtual uint32_t getSlot() const = 0;
+ virtual std::string getSourceFileName() const = 0;
+ virtual uint32_t getStride() const = 0;
+ virtual uint32_t getSubTypeId() const = 0;
+ virtual std::string getSymbolsFileName() const = 0;
+ virtual uint32_t getSymIndexId() const = 0;
+ virtual uint32_t getTargetOffset() const = 0;
+ virtual uint32_t getTargetRelativeVirtualAddress() const = 0;
+ virtual uint64_t getTargetVirtualAddress() const = 0;
+ virtual uint32_t getTargetSection() const = 0;
+ virtual uint32_t getTextureSlot() const = 0;
+ virtual uint32_t getTimeStamp() const = 0;
+ virtual uint32_t getToken() const = 0;
+ virtual uint32_t getTypeId() const = 0;
+ virtual uint32_t getUavSlot() const = 0;
+ virtual std::string getUndecoratedName() const = 0;
+ virtual uint32_t getUnmodifiedTypeId() const = 0;
+ virtual uint32_t getUpperBoundId() const = 0;
+ virtual Variant getValue() const = 0;
+ virtual uint32_t getVirtualBaseDispIndex() const = 0;
+ virtual uint32_t getVirtualBaseOffset() const = 0;
+ virtual uint32_t getVirtualTableShapeId() const = 0;
+ virtual PDB_DataKind getDataKind() const = 0;
+ virtual PDB_SymType getSymTag() const = 0;
+ virtual PDB_UniqueId getGuid() const = 0;
+ virtual int32_t getOffset() const = 0;
+ virtual int32_t getThisAdjust() const = 0;
+ virtual int32_t getVirtualBasePointerOffset() const = 0;
+ virtual PDB_LocType getLocationType() const = 0;
+ virtual PDB_Machine getMachineType() const = 0;
+ virtual PDB_ThunkOrdinal getThunkOrdinal() const = 0;
+ virtual uint64_t getLength() const = 0;
+ virtual uint64_t getLiveRangeLength() const = 0;
+ virtual uint64_t getVirtualAddress() const = 0;
+ virtual PDB_UdtType getUdtKind() const = 0;
+ virtual bool hasConstructor() const = 0;
+ virtual bool hasCustomCallingConvention() const = 0;
+ virtual bool hasFarReturn() const = 0;
+ virtual bool isCode() const = 0;
+ virtual bool isCompilerGenerated() const = 0;
+ virtual bool isConstType() const = 0;
+ virtual bool isEditAndContinueEnabled() const = 0;
+ virtual bool isFunction() const = 0;
+ virtual bool getAddressTaken() const = 0;
+ virtual bool getNoStackOrdering() const = 0;
+ virtual bool hasAlloca() const = 0;
+ virtual bool hasAssignmentOperator() const = 0;
+ virtual bool hasCTypes() const = 0;
+ virtual bool hasCastOperator() const = 0;
+ virtual bool hasDebugInfo() const = 0;
+ virtual bool hasEH() const = 0;
+ virtual bool hasEHa() const = 0;
+ virtual bool hasFramePointer() const = 0;
+ virtual bool hasInlAsm() const = 0;
+ virtual bool hasInlineAttribute() const = 0;
+ virtual bool hasInterruptReturn() const = 0;
+ virtual bool hasLongJump() const = 0;
+ virtual bool hasManagedCode() const = 0;
+ virtual bool hasNestedTypes() const = 0;
+ virtual bool hasNoInlineAttribute() const = 0;
+ virtual bool hasNoReturnAttribute() const = 0;
+ virtual bool hasOptimizedCodeDebugInfo() const = 0;
+ virtual bool hasOverloadedOperator() const = 0;
+ virtual bool hasSEH() const = 0;
+ virtual bool hasSecurityChecks() const = 0;
+ virtual bool hasSetJump() const = 0;
+ virtual bool hasStrictGSCheck() const = 0;
+ virtual bool isAcceleratorGroupSharedLocal() const = 0;
+ virtual bool isAcceleratorPointerTagLiveRange() const = 0;
+ virtual bool isAcceleratorStubFunction() const = 0;
+ virtual bool isAggregated() const = 0;
+ virtual bool isIntroVirtualFunction() const = 0;
+ virtual bool isCVTCIL() const = 0;
+ virtual bool isConstructorVirtualBase() const = 0;
+ virtual bool isCxxReturnUdt() const = 0;
+ virtual bool isDataAligned() const = 0;
+ virtual bool isHLSLData() const = 0;
+ virtual bool isHotpatchable() const = 0;
+ virtual bool isIndirectVirtualBaseClass() const = 0;
+ virtual bool isInterfaceUdt() const = 0;
+ virtual bool isIntrinsic() const = 0;
+ virtual bool isLTCG() const = 0;
+ virtual bool isLocationControlFlowDependent() const = 0;
+ virtual bool isMSILNetmodule() const = 0;
+ virtual bool isMatrixRowMajor() const = 0;
+ virtual bool isManagedCode() const = 0;
+ virtual bool isMSILCode() const = 0;
+ virtual bool isMultipleInheritance() const = 0;
+ virtual bool isNaked() const = 0;
+ virtual bool isNested() const = 0;
+ virtual bool isOptimizedAway() const = 0;
+ virtual bool isPacked() const = 0;
+ virtual bool isPointerBasedOnSymbolValue() const = 0;
+ virtual bool isPointerToDataMember() const = 0;
+ virtual bool isPointerToMemberFunction() const = 0;
+ virtual bool isPureVirtual() const = 0;
+ virtual bool isRValueReference() const = 0;
+ virtual bool isRefUdt() const = 0;
+ virtual bool isReference() const = 0;
+ virtual bool isRestrictedType() const = 0;
+ virtual bool isReturnValue() const = 0;
+ virtual bool isSafeBuffers() const = 0;
+ virtual bool isScoped() const = 0;
+ virtual bool isSdl() const = 0;
+ virtual bool isSingleInheritance() const = 0;
+ virtual bool isSplitted() const = 0;
+ virtual bool isStatic() const = 0;
+ virtual bool hasPrivateSymbols() const = 0;
+ virtual bool isUnalignedType() const = 0;
+ virtual bool isUnreached() const = 0;
+ virtual bool isValueUdt() const = 0;
+ virtual bool isVirtual() const = 0;
+ virtual bool isVirtualBaseClass() const = 0;
+ virtual bool isVirtualInheritance() const = 0;
+ virtual bool isVolatileType() const = 0;
+ virtual bool wasInlined() const = 0;
+ virtual std::string getUnused() const = 0;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h
new file mode 100644
index 0000000..a130a38
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSession.h
@@ -0,0 +1,61 @@
+//===- IPDBSession.h - base interface for a PDB symbol context --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSESSION_H
+#define LLVM_DEBUGINFO_PDB_IPDBSESSION_H
+
+#include "PDBTypes.h"
+#include "llvm/Support/Casting.h"
+#include <memory>
+
+namespace llvm {
+
+class PDBSymbolCompiland;
+class PDBSymbolExe;
+
+/// IPDBSession defines an interface used to provide a context for querying
+/// debug information from a debug data source (for example, a PDB).
+class IPDBSession {
+public:
+ virtual ~IPDBSession();
+
+ virtual uint64_t getLoadAddress() const = 0;
+ virtual void setLoadAddress(uint64_t Address) = 0;
+ virtual std::unique_ptr<PDBSymbolExe> getGlobalScope() const = 0;
+ virtual std::unique_ptr<PDBSymbol> getSymbolById(uint32_t SymbolId) const = 0;
+
+ template <typename T>
+ std::unique_ptr<T> getConcreteSymbolById(uint32_t SymbolId) const {
+ auto Symbol(getSymbolById(SymbolId));
+ if (!Symbol)
+ return nullptr;
+
+ T *ConcreteSymbol = dyn_cast<T>(Symbol.get());
+ if (!ConcreteSymbol)
+ return nullptr;
+ Symbol.release();
+ return std::unique_ptr<T>(ConcreteSymbol);
+ }
+
+ virtual std::unique_ptr<PDBSymbol>
+ findSymbolByAddress(uint64_t Address, PDB_SymType Type) const = 0;
+ virtual std::unique_ptr<IPDBEnumLineNumbers>
+ findLineNumbersByAddress(uint64_t Address, uint32_t Length) const = 0;
+
+ virtual std::unique_ptr<IPDBEnumSourceFiles> getAllSourceFiles() const = 0;
+ virtual std::unique_ptr<IPDBEnumSourceFiles>
+ getSourceFilesForCompiland(const PDBSymbolCompiland &Compiland) const = 0;
+ virtual std::unique_ptr<IPDBSourceFile>
+ getSourceFileById(uint32_t FileId) const = 0;
+
+ virtual std::unique_ptr<IPDBEnumDataStreams> getDebugStreams() const = 0;
+};
+}
+
+#endif
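
getConcreteSymbolById shows the ownership-transfer pattern this hierarchy uses: dyn_cast on the raw pointer, and release from the unique_ptr only once the cast has succeeded. A small usage sketch, with PDBSymbolFunc assumed as the concrete type:

#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h" // assumed concrete symbol type

using namespace llvm;

// Null result covers both an unknown id and an id of the wrong symbol kind.
std::unique_ptr<PDBSymbolFunc> lookupFunc(const IPDBSession &Session,
                                          uint32_t SymbolId) {
  return Session.getConcreteSymbolById<PDBSymbolFunc>(SymbolId);
}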
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSourceFile.h b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSourceFile.h
new file mode 100644
index 0000000..55000ef
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/IPDBSourceFile.h
@@ -0,0 +1,37 @@
+//===- IPDBSourceFile.h - base interface for a PDB source file --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H
+#define LLVM_DEBUGINFO_PDB_IPDBSOURCEFILE_H
+
+#include "PDBTypes.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// IPDBSourceFile defines an interface used to represent source files whose
+/// information is stored in the PDB.
+class IPDBSourceFile {
+public:
+ virtual ~IPDBSourceFile();
+
+ void dump(raw_ostream &OS, int Indent) const;
+
+ virtual std::string getFileName() const = 0;
+ virtual uint32_t getUniqueId() const = 0;
+ virtual std::string getChecksum() const = 0;
+ virtual PDB_Checksum getChecksumType() const = 0;
+ virtual std::unique_ptr<IPDBEnumSymbols> getCompilands() const = 0;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDB.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDB.h
new file mode 100644
index 0000000..5df3be8
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDB.h
@@ -0,0 +1,26 @@
+//===- PDB.h - base header file for creating a PDB reader -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDB_H
+#define LLVM_DEBUGINFO_PDB_PDB_H
+
+#include "PDBTypes.h"
+#include <memory>
+
+namespace llvm {
+class StringRef;
+
+PDB_ErrorCode loadDataForPDB(PDB_ReaderType Type, StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+
+PDB_ErrorCode loadDataForEXE(PDB_ReaderType Type, StringRef Path,
+ std::unique_ptr<IPDBSession> &Session);
+}
+
+#endif
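
Both entry points follow the error-code-plus-out-parameter style. A sketch of opening a session, assuming the PDB_ReaderType::DIA and PDB_ErrorCode::Success enumerators declared in PDBTypes.h:

#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/IPDBSession.h"
#include "llvm/DebugInfo/PDB/PDB.h"

using namespace llvm;

std::unique_ptr<IPDBSession> openPDB(StringRef Path) {
  std::unique_ptr<IPDBSession> Session;
  if (loadDataForPDB(PDB_ReaderType::DIA, Path, Session) !=
      PDB_ErrorCode::Success)
    return nullptr; // Session is left null on any error
  return Session;
}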
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBContext.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBContext.h
new file mode 100644
index 0000000..9404a59
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBContext.h
@@ -0,0 +1,59 @@
+//===-- PDBContext.h --------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
+#define LLVM_DEBUGINFO_PDB_PDBCONTEXT_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/PDB/IPDBSession.h"
+
+namespace llvm {
+
+namespace object {
+class COFFObjectFile;
+}
+
+/// PDBContext
+/// This data structure is the top level entity that deals with PDB debug
+/// information parsing. This data structure exists only when there is a
+/// need for a transparent interface to different debug information formats
+/// (e.g. PDB and DWARF). For more control and power over debug information
+/// access, use the PDB interfaces directly.
+class PDBContext : public DIContext {
+
+ PDBContext(PDBContext &) = delete;
+ PDBContext &operator=(PDBContext &) = delete;
+
+public:
+ PDBContext(const object::COFFObjectFile &Object,
+ std::unique_ptr<IPDBSession> PDBSession);
+
+ static bool classof(const DIContext *DICtx) {
+ return DICtx->getKind() == CK_PDB;
+ }
+
+ void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All) override;
+
+ DILineInfo getLineInfoForAddress(
+ uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DILineInfoTable getLineInfoForAddressRange(
+ uint64_t Address, uint64_t Size,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+ DIInliningInfo getInliningInfoForAddress(
+ uint64_t Address,
+ DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
+
+private:
+ std::string getFunctionName(uint64_t Address, DINameKind NameKind) const;
+ std::unique_ptr<IPDBSession> Session;
+};
+}
+
+#endif
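
Since PDBContext implements the generic DIContext interface, address-to-line queries look the same as with the DWARF context; a minimal sketch:

#include "llvm/DebugInfo/PDB/PDBContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void printLineInfo(PDBContext &Ctx, uint64_t Address) {
  DILineInfo Info = Ctx.getLineInfoForAddress(Address);
  outs() << Info.FileName << ":" << Info.Line << "\n";
}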
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h
new file mode 100644
index 0000000..48ce1c1
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBExtras.h
@@ -0,0 +1,38 @@
+//===- PDBExtras.h - helper functions and classes for PDBs -------*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
+#define LLVM_DEBUGINFO_PDB_PDBEXTRAS_H
+
+#include "PDBTypes.h"
+#include "llvm/Support/raw_ostream.h"
+#include <unordered_map>
+
+namespace llvm {
+typedef std::unordered_map<PDB_SymType, int> TagStats;
+
+raw_ostream &operator<<(raw_ostream &OS, const PDB_VariantType &Value);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_CallingConv &Conv);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_DataKind &Data);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_RegisterId &Reg);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_LocType &Loc);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_ThunkOrdinal &Thunk);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_Checksum &Checksum);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_Lang &Lang);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_SymType &Tag);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_MemberAccess &Access);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_UdtType &Type);
+raw_ostream &operator<<(raw_ostream &OS, const PDB_UniqueId &Id);
+
+raw_ostream &operator<<(raw_ostream &OS, const Variant &Value);
+raw_ostream &operator<<(raw_ostream &OS, const VersionInfo &Version);
+raw_ostream &operator<<(raw_ostream &OS, const TagStats &Stats);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymDumper.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymDumper.h
new file mode 100644
index 0000000..65110f3
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymDumper.h
@@ -0,0 +1,61 @@
+//===- PDBSymDumper.h - base interface for PDB symbol dumper ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMDUMPER_H
+
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymDumper {
+public:
+ PDBSymDumper(bool ShouldRequireImpl);
+ virtual ~PDBSymDumper();
+
+ virtual void dump(const PDBSymbolAnnotation &Symbol);
+ virtual void dump(const PDBSymbolBlock &Symbol);
+ virtual void dump(const PDBSymbolCompiland &Symbol);
+ virtual void dump(const PDBSymbolCompilandDetails &Symbol);
+ virtual void dump(const PDBSymbolCompilandEnv &Symbol);
+ virtual void dump(const PDBSymbolCustom &Symbol);
+ virtual void dump(const PDBSymbolData &Symbol);
+ virtual void dump(const PDBSymbolExe &Symbol);
+ virtual void dump(const PDBSymbolFunc &Symbol);
+ virtual void dump(const PDBSymbolFuncDebugEnd &Symbol);
+ virtual void dump(const PDBSymbolFuncDebugStart &Symbol);
+ virtual void dump(const PDBSymbolLabel &Symbol);
+ virtual void dump(const PDBSymbolPublicSymbol &Symbol);
+ virtual void dump(const PDBSymbolThunk &Symbol);
+ virtual void dump(const PDBSymbolTypeArray &Symbol);
+ virtual void dump(const PDBSymbolTypeBaseClass &Symbol);
+ virtual void dump(const PDBSymbolTypeBuiltin &Symbol);
+ virtual void dump(const PDBSymbolTypeCustom &Symbol);
+ virtual void dump(const PDBSymbolTypeDimension &Symbol);
+ virtual void dump(const PDBSymbolTypeEnum &Symbol);
+ virtual void dump(const PDBSymbolTypeFriend &Symbol);
+ virtual void dump(const PDBSymbolTypeFunctionArg &Symbol);
+ virtual void dump(const PDBSymbolTypeFunctionSig &Symbol);
+ virtual void dump(const PDBSymbolTypeManaged &Symbol);
+ virtual void dump(const PDBSymbolTypePointer &Symbol);
+ virtual void dump(const PDBSymbolTypeTypedef &Symbol);
+ virtual void dump(const PDBSymbolTypeUDT &Symbol);
+ virtual void dump(const PDBSymbolTypeVTable &Symbol);
+ virtual void dump(const PDBSymbolTypeVTableShape &Symbol);
+ virtual void dump(const PDBSymbolUnknown &Symbol);
+ virtual void dump(const PDBSymbolUsingNamespace &Symbol);
+
+private:
+ bool RequireImpl;
+};
+}
+
+#endif
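
The dumper is a visitor over the concrete symbol kinds; subclasses override only what they need. A sketch, assuming the ShouldRequireImpl flag controls whether a call into an un-overridden base method is treated as an error:

#include "llvm/DebugInfo/PDB/PDBSymDumper.h"

using namespace llvm;

// Counts functions and ignores every other symbol kind it is handed.
class FunctionCounter : public PDBSymDumper {
public:
  FunctionCounter() : PDBSymDumper(/*ShouldRequireImpl=*/false) {}
  void dump(const PDBSymbolFunc &) override { ++NumFuncs; }
  unsigned NumFuncs = 0;
};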
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h
new file mode 100644
index 0000000..4360c54
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbol.h
@@ -0,0 +1,97 @@
+//===- PDBSymbol.h - base class for user-facing symbol types -----*- C++-*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_IPDBSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_IPDBSYMBOL_H
+
+#include "ConcreteSymbolEnumerator.h"
+#include "IPDBRawSymbol.h"
+#include "PDBExtras.h"
+#include "PDBTypes.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <unordered_map>
+
+#define FORWARD_SYMBOL_METHOD(MethodName) \
+ auto MethodName() const->decltype(RawSymbol->MethodName()) { \
+ return RawSymbol->MethodName(); \
+ }
+
+namespace llvm {
+
+class IPDBRawSymbol;
+class raw_ostream;
+
+#define DECLARE_PDB_SYMBOL_CONCRETE_TYPE(TagValue) \
+ static const PDB_SymType Tag = TagValue; \
+ static bool classof(const PDBSymbol *S) { return S->getSymTag() == Tag; }
+
+/// PDBSymbol defines the base of the inheritance hierarchy for concrete symbol
+/// types (e.g. functions, executables, vtables, etc). All concrete symbol
+/// types inherit from PDBSymbol and expose the exact set of methods that are
+/// valid for that particular symbol type, as described in the Microsoft
+/// reference "Lexical and Class Hierarchy of Symbol Types":
+/// https://msdn.microsoft.com/en-us/library/370hs6k4.aspx
+class PDBSymbol {
+protected:
+ PDBSymbol(const IPDBSession &PDBSession, std::unique_ptr<IPDBRawSymbol> Symbol);
+
+public:
+ static std::unique_ptr<PDBSymbol>
+ create(const IPDBSession &PDBSession, std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ virtual ~PDBSymbol();
+
+  /// Dumps the contents of a symbol to a raw_ostream. By default this will
+  /// just call dump() on the underlying RawSymbol, which allows us to discover
+ /// unknown properties, but individual implementations of PDBSymbol may
+ /// override the behavior to only dump known fields.
+ virtual void dump(PDBSymDumper &Dumper) const = 0;
+ void defaultDump(raw_ostream &OS, int Indent) const;
+
+ PDB_SymType getSymTag() const;
+
+ template <typename T> std::unique_ptr<T> findOneChild() const {
+ auto Enumerator(findAllChildren<T>());
+ return Enumerator->getNext();
+ }
+
+ template <typename T>
+ std::unique_ptr<ConcreteSymbolEnumerator<T>> findAllChildren() const {
+ auto BaseIter = RawSymbol->findChildren(T::Tag);
+ return llvm::make_unique<ConcreteSymbolEnumerator<T>>(std::move(BaseIter));
+ }
+ std::unique_ptr<IPDBEnumSymbols> findAllChildren(PDB_SymType Type) const;
+ std::unique_ptr<IPDBEnumSymbols> findAllChildren() const;
+
+ std::unique_ptr<IPDBEnumSymbols>
+ findChildren(PDB_SymType Type, StringRef Name,
+ PDB_NameSearchFlags Flags) const;
+ std::unique_ptr<IPDBEnumSymbols> findChildrenByRVA(PDB_SymType Type,
+ StringRef Name,
+ PDB_NameSearchFlags Flags,
+ uint32_t RVA) const;
+ std::unique_ptr<IPDBEnumSymbols> findInlineFramesByRVA(uint32_t RVA) const;
+
+ const IPDBRawSymbol &getRawSymbol() const { return *RawSymbol; }
+ IPDBRawSymbol &getRawSymbol() { return *RawSymbol; }
+
+ const IPDBSession &getSession() const { return Session; }
+
+ std::unique_ptr<IPDBEnumSymbols> getChildStats(TagStats &Stats) const;
+
+protected:
+ const IPDBSession &Session;
+ const std::unique_ptr<IPDBRawSymbol> RawSymbol;
+};
+
+} // namespace llvm
+
+#endif
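
FORWARD_SYMBOL_METHOD generates a forwarding member whose return type is deduced from the corresponding IPDBRawSymbol accessor, and DECLARE_PDB_SYMBOL_CONCRETE_TYPE supplies the classof() hook that LLVM's isa<>/dyn_cast<> machinery keys on. For instance, FORWARD_SYMBOL_METHOD(getName) expands to roughly:

  auto getName() const -> decltype(RawSymbol->getName()) {
    return RawSymbol->getName();
  }

A sketch of how the casting support combines with the child queries declared above, assuming GlobalScope is the executable-scope symbol:

#include "llvm/DebugInfo/PDB/PDBSymbol.h"
#include "llvm/DebugInfo/PDB/PDBSymbolCompiland.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"

void listChildren(const llvm::PDBSymbol &GlobalScope) {
  // Typed enumeration: yields PDBSymbolCompiland objects directly, using
  // T::Tag from DECLARE_PDB_SYMBOL_CONCRETE_TYPE to filter.
  auto Compilands = GlobalScope.findAllChildren<llvm::PDBSymbolCompiland>();
  while (auto Compiland = Compilands->getNext())
    llvm::outs() << Compiland->getName() << "\n";

  // Untyped enumeration plus dyn_cast<>, via the generated classof().
  auto Children = GlobalScope.findAllChildren();
  while (auto Child = Children->getNext())
    if (auto *Func = llvm::dyn_cast<llvm::PDBSymbolFunc>(Child.get()))
      llvm::outs() << "function: " << Func->getName() << "\n";

  // Name-based lookup, using the search flags defined in PDBTypes.h.
  auto Mains = GlobalScope.findChildren(llvm::PDB_SymType::Function, "main",
                                        llvm::NS_CaseSensitive);
  (void)Mains;
}
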
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
new file mode 100644
index 0000000..c055dd7
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolAnnotation.h
@@ -0,0 +1,39 @@
+//===- PDBSymbolAnnotation.h - Accessors for querying PDB annotations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolAnnotation : public PDBSymbol {
+public:
+ PDBSymbolAnnotation(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Annotation)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(getDataKind)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ // FORWARD_SYMBOL_METHOD(getValue)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLANNOTATION_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
new file mode 100644
index 0000000..2ca1250
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolBlock.h
@@ -0,0 +1,41 @@
+//===- PDBSymbolBlock.h - Accessors for querying PDB blocks ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolBlock : public PDBSymbol {
+public:
+ PDBSymbolBlock(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Block)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLBLOCK_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
new file mode 100644
index 0000000..f8c796a
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompiland.h
@@ -0,0 +1,38 @@
+//===- PDBSymbolCompiland.h - Accessors for querying PDB compilands -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolCompiland : public PDBSymbol {
+public:
+ PDBSymbolCompiland(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> CompilandSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Compiland)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLibraryName)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSourceFileName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILAND_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
new file mode 100644
index 0000000..7f29d6b
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandDetails.h
@@ -0,0 +1,55 @@
+//===- PDBSymbolCompilandDetails.h - PDB compiland details ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolCompilandDetails : public PDBSymbol {
+public:
+ PDBSymbolCompilandDetails(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandDetails)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ void getFrontEndVersion(VersionInfo &Version) const {
+ RawSymbol->getFrontEndVersion(Version);
+ }
+
+ void getBackEndVersion(VersionInfo &Version) const {
+ RawSymbol->getBackEndVersion(Version);
+ }
+
+ FORWARD_SYMBOL_METHOD(getCompilerName)
+ FORWARD_SYMBOL_METHOD(isEditAndContinueEnabled)
+ FORWARD_SYMBOL_METHOD(hasDebugInfo)
+ FORWARD_SYMBOL_METHOD(hasManagedCode)
+ FORWARD_SYMBOL_METHOD(hasSecurityChecks)
+ FORWARD_SYMBOL_METHOD(isCVTCIL)
+ FORWARD_SYMBOL_METHOD(isDataAligned)
+ FORWARD_SYMBOL_METHOD(isHotpatchable)
+ FORWARD_SYMBOL_METHOD(isLTCG)
+ FORWARD_SYMBOL_METHOD(isMSILNetmodule)
+ FORWARD_SYMBOL_METHOD(getLanguage)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getPlatform)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDDETAILS_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
new file mode 100644
index 0000000..7e2ea90
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCompilandEnv.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolCompilandEnv.h - compiland environment variables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolCompilandEnv : public PDBSymbol {
+public:
+ PDBSymbolCompilandEnv(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CompilandEnv)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ std::string getValue() const;
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCOMPILANDENV_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
new file mode 100644
index 0000000..86bfd57
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolCustom.h
@@ -0,0 +1,39 @@
+//===- PDBSymbolCustom.h - compiler-specific types --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+/// PDBSymbolCustom represents symbols that are compiler-specific and do not
+/// fit anywhere else in the lexical hierarchy.
+/// https://msdn.microsoft.com/en-us/library/d88sf09h.aspx
+class PDBSymbolCustom : public PDBSymbol {
+public:
+ PDBSymbolCustom(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> CustomSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Custom)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ void getDataBytes(llvm::SmallVector<uint8_t, 32> &bytes);
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLCUSTOM_H
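
The payload of a custom symbol is an opaque byte blob whose layout only the emitting tool understands, which is why getDataBytes() is the lone accessor beyond the symbol index. A minimal sketch that dumps the bytes numerically:

#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/PDB/PDBSymbolCustom.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Print the raw, tool-defined payload of a custom symbol.
void dumpCustomBytes(llvm::PDBSymbolCustom &Custom) {
  llvm::SmallVector<uint8_t, 32> Bytes;
  Custom.getDataBytes(Bytes);
  for (uint8_t Byte : Bytes)
    llvm::outs() << unsigned(Byte) << ' '; // cast so it prints as a number
  llvm::outs() << '\n';
}
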
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h
new file mode 100644
index 0000000..79cbbf0
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolData.h
@@ -0,0 +1,61 @@
+//===- PDBSymbolData.h - PDB data (e.g. variable) accessors -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolData : public PDBSymbol {
+public:
+ PDBSymbolData(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> DataSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Data)
+
+ std::unique_ptr<PDBSymbol> getType() const;
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAccess)
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(getAddressTaken)
+ FORWARD_SYMBOL_METHOD(getBitPosition)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(isCompilerGenerated)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getDataKind)
+ FORWARD_SYMBOL_METHOD(isAggregated)
+ FORWARD_SYMBOL_METHOD(isSplitted)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getOffset)
+ FORWARD_SYMBOL_METHOD(getRegisterId)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSlot)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getToken)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(getValue)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLDATA_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
new file mode 100644
index 0000000..7c5f302a
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolExe.h
@@ -0,0 +1,46 @@
+//===- PDBSymbolExe.h - Accessors for querying executables in a PDB -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolExe : public PDBSymbol {
+public:
+ PDBSymbolExe(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> ExeSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Exe)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAge)
+ FORWARD_SYMBOL_METHOD(getGuid)
+ FORWARD_SYMBOL_METHOD(hasCTypes)
+ FORWARD_SYMBOL_METHOD(hasPrivateSymbols)
+ FORWARD_SYMBOL_METHOD(getMachineType)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSignature)
+ FORWARD_SYMBOL_METHOD(getSymbolsFileName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+
+private:
+ void dumpChildren(raw_ostream &OS, StringRef Label, PDB_SymType ChildType,
+ int Indent) const;
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLEXE_H
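
PDBSymbolExe models the global scope of the debuggee. Obtaining one is an IPDBSession concern (the session interface is only forward-declared in these headers), so this sketch simply takes the symbol as a parameter:

#include "llvm/DebugInfo/PDB/PDBSymbolExe.h"
#include "llvm/Support/raw_ostream.h"

// Print a few of the forwarded executable-level properties.
void printExeInfo(const llvm::PDBSymbolExe &Exe) {
  llvm::outs() << "name: " << Exe.getName() << "\n"
               << "age: " << Exe.getAge() << "\n"
               << "private symbols: "
               << (Exe.hasPrivateSymbols() ? "yes" : "no") << "\n";
}
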
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
new file mode 100644
index 0000000..9db41d5
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFunc.h
@@ -0,0 +1,80 @@
+//===- PDBSymbolFunc.h - class representing a function instance -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolFunc : public PDBSymbol {
+public:
+ PDBSymbolFunc(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> FuncSymbol);
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ std::unique_ptr<PDBSymbolTypeFunctionSig> getSignature() const;
+ std::unique_ptr<PDBSymbolTypeUDT> getClassParent() const;
+ std::unique_ptr<IPDBEnumChildren<PDBSymbolData>> getArguments() const;
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Function)
+
+ FORWARD_SYMBOL_METHOD(getAccess)
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(isCompilerGenerated)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+ FORWARD_SYMBOL_METHOD(hasFarReturn)
+ FORWARD_SYMBOL_METHOD(hasAlloca)
+ FORWARD_SYMBOL_METHOD(hasEH)
+ FORWARD_SYMBOL_METHOD(hasEHa)
+ FORWARD_SYMBOL_METHOD(hasInlAsm)
+ FORWARD_SYMBOL_METHOD(hasLongJump)
+ FORWARD_SYMBOL_METHOD(hasSEH)
+ FORWARD_SYMBOL_METHOD(hasSecurityChecks)
+ FORWARD_SYMBOL_METHOD(hasSetJump)
+ FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+ FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
+ FORWARD_SYMBOL_METHOD(hasInlineAttribute)
+ FORWARD_SYMBOL_METHOD(isNaked)
+ FORWARD_SYMBOL_METHOD(isStatic)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocalBasePointerRegisterId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(hasFramePointer)
+ FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+ FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+ FORWARD_SYMBOL_METHOD(isUnreached)
+ FORWARD_SYMBOL_METHOD(getNoStackOrdering)
+ FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+ FORWARD_SYMBOL_METHOD(isPureVirtual)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getToken)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(getUndecoratedName)
+ FORWARD_SYMBOL_METHOD(isVirtual)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNC_H
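
getArguments() enumerates the PDBSymbolData children that represent the formal parameters, while getSignature() returns the type-level PDBSymbolTypeFunctionSig view of the same function. A sketch that prints a crude prototype from the parameter names:

#include "llvm/DebugInfo/PDB/PDBSymbolData.h"
#include "llvm/DebugInfo/PDB/PDBSymbolFunc.h"
#include "llvm/Support/raw_ostream.h"

// Print "name(arg1, arg2, ...)" for a function symbol.
void printPrototype(const llvm::PDBSymbolFunc &Func) {
  llvm::outs() << Func.getName() << "(";
  auto Args = Func.getArguments();
  bool First = true;
  while (auto Arg = Args->getNext()) {
    llvm::outs() << (First ? "" : ", ") << Arg->getName();
    First = false;
  }
  llvm::outs() << ")\n";
}
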
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
new file mode 100644
index 0000000..34d551c
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugEnd.h
@@ -0,0 +1,49 @@
+//===- PDBSymbolFuncDebugEnd.h - function end bounds info -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolFuncDebugEnd : public PDBSymbol {
+public:
+ PDBSymbolFuncDebugEnd(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> FuncDebugEndSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugEnd)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+ FORWARD_SYMBOL_METHOD(hasFarReturn)
+ FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+ FORWARD_SYMBOL_METHOD(isStatic)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+ FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+ FORWARD_SYMBOL_METHOD(isUnreached)
+ FORWARD_SYMBOL_METHOD(getOffset)
+ FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGEND_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
new file mode 100644
index 0000000..7671be4
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolFuncDebugStart.h
@@ -0,0 +1,49 @@
+//===- PDBSymbolFuncDebugStart.h - function start bounds info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolFuncDebugStart : public PDBSymbol {
+public:
+ PDBSymbolFuncDebugStart(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> FuncDebugStartSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FuncDebugStart)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+ FORWARD_SYMBOL_METHOD(hasFarReturn)
+ FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+ FORWARD_SYMBOL_METHOD(isStatic)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+ FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+ FORWARD_SYMBOL_METHOD(isUnreached)
+ FORWARD_SYMBOL_METHOD(getOffset)
+ FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLFUNCDEBUGSTART_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
new file mode 100644
index 0000000..9d9903a
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolLabel.h
@@ -0,0 +1,49 @@
+//===- PDBSymbolLabel.h - label info ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolLabel : public PDBSymbol {
+public:
+ PDBSymbolLabel(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> LabelSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Label)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(hasCustomCallingConvention)
+ FORWARD_SYMBOL_METHOD(hasFarReturn)
+ FORWARD_SYMBOL_METHOD(hasInterruptReturn)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(hasNoInlineAttribute)
+ FORWARD_SYMBOL_METHOD(hasNoReturnAttribute)
+ FORWARD_SYMBOL_METHOD(isUnreached)
+ FORWARD_SYMBOL_METHOD(getOffset)
+ FORWARD_SYMBOL_METHOD(hasOptimizedCodeDebugInfo)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLLABEL_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
new file mode 100644
index 0000000..70dfcb5
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolPublicSymbol.h
@@ -0,0 +1,47 @@
+//===- PDBSymbolPublicSymbol.h - public symbol info -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolPublicSymbol : public PDBSymbol {
+public:
+ PDBSymbolPublicSymbol(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> PublicSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PublicSymbol)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(isCode)
+ FORWARD_SYMBOL_METHOD(isFunction)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getLocationType)
+ FORWARD_SYMBOL_METHOD(isManagedCode)
+ FORWARD_SYMBOL_METHOD(isMSILCode)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getUndecoratedName)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLPUBLICSYMBOL_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
new file mode 100644
index 0000000..bd5a9b2
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolThunk.h
@@ -0,0 +1,57 @@
+//===- PDBSymbolThunk.h - Support for querying PDB thunks --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+#include <string>
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolThunk : public PDBSymbol {
+public:
+ PDBSymbolThunk(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> ThunkSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Thunk)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAccess)
+ FORWARD_SYMBOL_METHOD(getAddressOffset)
+ FORWARD_SYMBOL_METHOD(getAddressSection)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(isIntroVirtualFunction)
+ FORWARD_SYMBOL_METHOD(isStatic)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(isPureVirtual)
+ FORWARD_SYMBOL_METHOD(getRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTargetOffset)
+ FORWARD_SYMBOL_METHOD(getTargetRelativeVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getTargetVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getTargetSection)
+ FORWARD_SYMBOL_METHOD(getThunkOrdinal)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVirtual)
+ FORWARD_SYMBOL_METHOD(getVirtualAddress)
+ FORWARD_SYMBOL_METHOD(getVirtualBaseOffset)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTHUNK_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
new file mode 100644
index 0000000..513a9ec
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeArray.h
@@ -0,0 +1,45 @@
+//===- PDBSymbolTypeArray.h - array type information ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeArray : public PDBSymbol {
+public:
+ PDBSymbolTypeArray(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> ArrayTypeSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ArrayType)
+
+ std::unique_ptr<PDBSymbol> getElementType() const;
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getArrayIndexTypeId)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getCount)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getRank)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEARRAY_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
new file mode 100644
index 0000000..2a9a8a0
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBaseClass.h
@@ -0,0 +1,60 @@
+//===- PDBSymbolTypeBaseClass.h - base class type information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeBaseClass : public PDBSymbol {
+public:
+ PDBSymbolTypeBaseClass(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BaseClass)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getAccess)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(hasConstructor)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+ FORWARD_SYMBOL_METHOD(hasCastOperator)
+ FORWARD_SYMBOL_METHOD(hasNestedTypes)
+ FORWARD_SYMBOL_METHOD(isIndirectVirtualBaseClass)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(isNested)
+ FORWARD_SYMBOL_METHOD(getOffset)
+ FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+ FORWARD_SYMBOL_METHOD(isPacked)
+ FORWARD_SYMBOL_METHOD(isScoped)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(getUdtKind)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+
+ FORWARD_SYMBOL_METHOD(isVirtualBaseClass)
+ FORWARD_SYMBOL_METHOD(getVirtualBaseDispIndex)
+ FORWARD_SYMBOL_METHOD(getVirtualBasePointerOffset)
+ // FORWARD_SYMBOL_METHOD(getVirtualBaseTableType)
+ FORWARD_SYMBOL_METHOD(getVirtualTableShapeId)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBASECLASS_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
new file mode 100644
index 0000000..69a2028
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeBuiltin.h
@@ -0,0 +1,40 @@
+//===- PDBSymbolTypeBuiltin.h - builtin type information --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeBuiltin : public PDBSymbol {
+public:
+ PDBSymbolTypeBuiltin(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::BuiltinType)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getBuiltinType)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEBUILTIN_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
new file mode 100644
index 0000000..c41c489
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeCustom.h
@@ -0,0 +1,36 @@
+//===- PDBSymbolTypeCustom.h - custom compiler type information -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeCustom : public PDBSymbol {
+public:
+ PDBSymbolTypeCustom(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::CustomType)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getOemId)
+ FORWARD_SYMBOL_METHOD(getOemSymbolId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPECUSTOM_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
new file mode 100644
index 0000000..3f22ed8
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeDimension.h
@@ -0,0 +1,36 @@
+//===- PDBSymbolTypeDimension.h - array dimension type info -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeDimension : public PDBSymbol {
+public:
+ PDBSymbolTypeDimension(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Dimension)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getLowerBoundId)
+ FORWARD_SYMBOL_METHOD(getUpperBoundId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEDIMENSION_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
new file mode 100644
index 0000000..3188c71
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeEnum.h
@@ -0,0 +1,55 @@
+//===- PDBSymbolTypeEnum.h - enum type info ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeEnum : public PDBSymbol {
+public:
+ PDBSymbolTypeEnum(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> EnumTypeSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Enum)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ std::unique_ptr<PDBSymbolTypeUDT> getClassParent() const;
+ std::unique_ptr<PDBSymbolTypeBuiltin> getUnderlyingType() const;
+
+ FORWARD_SYMBOL_METHOD(getBuiltinType)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(getUnmodifiedTypeId)
+ FORWARD_SYMBOL_METHOD(hasConstructor)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+ FORWARD_SYMBOL_METHOD(hasCastOperator)
+ FORWARD_SYMBOL_METHOD(hasNestedTypes)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(isNested)
+ FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+ FORWARD_SYMBOL_METHOD(isPacked)
+ FORWARD_SYMBOL_METHOD(isScoped)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEENUM_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
new file mode 100644
index 0000000..4d393d7
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFriend.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolTypeFriend.h - friend type info -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeFriend : public PDBSymbol {
+public:
+ PDBSymbolTypeFriend(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Friend)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFRIEND_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
new file mode 100644
index 0000000..14f79d9
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionArg.h
@@ -0,0 +1,37 @@
+//===- PDBSymbolTypeFunctionArg.h - function arg type info ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeFunctionArg : public PDBSymbol {
+public:
+ PDBSymbolTypeFunctionArg(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionArg)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONARG_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
new file mode 100644
index 0000000..4bb4265
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeFunctionSig.h
@@ -0,0 +1,50 @@
+//===- PDBSymbolTypeFunctionSig.h - function signature type info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeFunctionSig : public PDBSymbol {
+public:
+ PDBSymbolTypeFunctionSig(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::FunctionSig)
+
+ std::unique_ptr<PDBSymbol> getReturnType() const;
+ std::unique_ptr<IPDBEnumSymbols> getArguments() const;
+ std::unique_ptr<PDBSymbol> getClassParent() const;
+
+ void dump(PDBSymDumper &Dumper) const override;
+ void dumpArgList(raw_ostream &OS) const;
+
+ FORWARD_SYMBOL_METHOD(getCallingConvention)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(getUnmodifiedTypeId)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getCount)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ // FORWARD_SYMBOL_METHOD(getObjectPointerType)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getThisAdjust)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEFUNCTIONSIG_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
new file mode 100644
index 0000000..cbfcec8
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeManaged.h
@@ -0,0 +1,35 @@
+//===- PDBSymbolTypeManaged.h - managed type info ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeManaged : public PDBSymbol {
+public:
+ PDBSymbolTypeManaged(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::ManagedType)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEMANAGED_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
new file mode 100644
index 0000000..33578ba
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypePointer.h
@@ -0,0 +1,43 @@
+//===- PDBSymbolTypePointer.h - pointer type info ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypePointer : public PDBSymbol {
+public:
+ PDBSymbolTypePointer(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::PointerType)
+
+ std::unique_ptr<PDBSymbol> getPointeeType() const;
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(isReference)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEPOINTER_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
new file mode 100644
index 0000000..5ad83bb
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeTypedef.h
@@ -0,0 +1,54 @@
+//===- PDBSymbolTypeTypedef.h - typedef type info ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeTypedef : public PDBSymbol {
+public:
+ PDBSymbolTypeTypedef(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::Typedef)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getBuiltinType)
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(hasConstructor)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+ FORWARD_SYMBOL_METHOD(hasCastOperator)
+ FORWARD_SYMBOL_METHOD(hasNestedTypes)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(isNested)
+ FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+ FORWARD_SYMBOL_METHOD(isPacked)
+ FORWARD_SYMBOL_METHOD(isReference)
+ FORWARD_SYMBOL_METHOD(isScoped)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(getUdtKind)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(getVirtualTableShapeId)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPETYPEDEF_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
new file mode 100644
index 0000000..99cc307
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeUDT.h
@@ -0,0 +1,52 @@
+//===- PDBSymbolTypeUDT.h - UDT type info -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeUDT : public PDBSymbol {
+public:
+ PDBSymbolTypeUDT(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> UDTSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UDT)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(getUnmodifiedTypeId)
+ FORWARD_SYMBOL_METHOD(hasConstructor)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(hasAssignmentOperator)
+ FORWARD_SYMBOL_METHOD(hasCastOperator)
+ FORWARD_SYMBOL_METHOD(hasNestedTypes)
+ FORWARD_SYMBOL_METHOD(getLength)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(isNested)
+ FORWARD_SYMBOL_METHOD(hasOverloadedOperator)
+ FORWARD_SYMBOL_METHOD(isPacked)
+ FORWARD_SYMBOL_METHOD(isScoped)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getUdtKind)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(getVirtualTableShapeId)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEUDT_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
new file mode 100644
index 0000000..6efc549
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTable.h
@@ -0,0 +1,40 @@
+//===- PDBSymbolTypeVTable.h - VTable type info -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeVTable : public PDBSymbol {
+public:
+ PDBSymbolTypeVTable(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> VtblSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTable)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getClassParentId)
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(getTypeId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLE_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
new file mode 100644
index 0000000..f407595
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolTypeVTableShape.h
@@ -0,0 +1,39 @@
+//===- PDBSymbolTypeVTableShape.h - VTable shape info -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolTypeVTableShape : public PDBSymbol {
+public:
+ PDBSymbolTypeVTableShape(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> VtblShapeSymbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::VTableShape)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(isConstType)
+ FORWARD_SYMBOL_METHOD(getCount)
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+ FORWARD_SYMBOL_METHOD(isUnalignedType)
+ FORWARD_SYMBOL_METHOD(isVolatileType)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLTYPEVTABLESHAPE_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
new file mode 100644
index 0000000..94bd2c1
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUnknown.h
@@ -0,0 +1,34 @@
+//===- PDBSymbolUnknown.h - unknown symbol type -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
+
+#include "PDBSymbol.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolUnknown : public PDBSymbol {
+public:
+ PDBSymbolUnknown(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> UnknownSymbol);
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ static bool classof(const PDBSymbol *S) {
+ return (S->getSymTag() == PDB_SymType::None ||
+ S->getSymTag() >= PDB_SymType::Max);
+ }
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUNKNOWN_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
new file mode 100644
index 0000000..7072f34
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBSymbolUsingNamespace.h
@@ -0,0 +1,36 @@
+//===- PDBSymbolUsingNamespace.h - using namespace info ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
+#define LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
+
+#include "PDBSymbol.h"
+#include "PDBTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+class PDBSymbolUsingNamespace : public PDBSymbol {
+public:
+ PDBSymbolUsingNamespace(const IPDBSession &PDBSession,
+ std::unique_ptr<IPDBRawSymbol> Symbol);
+
+ DECLARE_PDB_SYMBOL_CONCRETE_TYPE(PDB_SymType::UsingNamespace)
+
+ void dump(PDBSymDumper &Dumper) const override;
+
+ FORWARD_SYMBOL_METHOD(getLexicalParentId)
+ FORWARD_SYMBOL_METHOD(getName)
+ FORWARD_SYMBOL_METHOD(getSymIndexId)
+};
+
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_PDBSYMBOLUSINGNAMESPACE_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h
new file mode 100644
index 0000000..a932a56
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/PDB/PDBTypes.h
@@ -0,0 +1,547 @@
+//===- PDBTypes.h - Defines enums for various fields contained in PDB -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_PDBTYPES_H
+#define LLVM_DEBUGINFO_PDB_PDBTYPES_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Endian.h"
+#include <functional>
+#include <stdint.h>
+
+namespace llvm {
+
+class PDBSymDumper;
+class PDBSymbol;
+
+class IPDBDataStream;
+template <class T> class IPDBEnumChildren;
+class IPDBLineNumber;
+class IPDBRawSymbol;
+class IPDBSession;
+class IPDBSourceFile;
+
+typedef IPDBEnumChildren<PDBSymbol> IPDBEnumSymbols;
+typedef IPDBEnumChildren<IPDBSourceFile> IPDBEnumSourceFiles;
+typedef IPDBEnumChildren<IPDBDataStream> IPDBEnumDataStreams;
+typedef IPDBEnumChildren<IPDBLineNumber> IPDBEnumLineNumbers;
+
+class PDBSymbolExe;
+class PDBSymbolCompiland;
+class PDBSymbolCompilandDetails;
+class PDBSymbolCompilandEnv;
+class PDBSymbolFunc;
+class PDBSymbolBlock;
+class PDBSymbolData;
+class PDBSymbolAnnotation;
+class PDBSymbolLabel;
+class PDBSymbolPublicSymbol;
+class PDBSymbolTypeUDT;
+class PDBSymbolTypeEnum;
+class PDBSymbolTypeFunctionSig;
+class PDBSymbolTypePointer;
+class PDBSymbolTypeArray;
+class PDBSymbolTypeBuiltin;
+class PDBSymbolTypeTypedef;
+class PDBSymbolTypeBaseClass;
+class PDBSymbolTypeFriend;
+class PDBSymbolTypeFunctionArg;
+class PDBSymbolFuncDebugStart;
+class PDBSymbolFuncDebugEnd;
+class PDBSymbolUsingNamespace;
+class PDBSymbolTypeVTableShape;
+class PDBSymbolTypeVTable;
+class PDBSymbolCustom;
+class PDBSymbolThunk;
+class PDBSymbolTypeCustom;
+class PDBSymbolTypeManaged;
+class PDBSymbolTypeDimension;
+class PDBSymbolUnknown;
+
+/// Specifies which PDB reader implementation is to be used. Only a value
+/// of PDB_ReaderType::DIA is supported.
+enum class PDB_ReaderType {
+ DIA = 0,
+};
+
+/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
+/// is abstracted here so that it can be used on non-Windows platforms, which
+/// do not have the GUID structure defined.
+struct PDB_UniqueId {
+ uint64_t HighPart;
+ uint64_t LowPart;
+};
+
+/// An enumeration indicating the type of data contained in this table.
+enum class PDB_TableType {
+ Symbols,
+ SourceFiles,
+ LineNumbers,
+ SectionContribs,
+ Segments,
+ InjectedSources,
+ FrameData
+};
+
+/// Defines flags used for enumerating child symbols. This corresponds to the
+/// NameSearchOptions enumeration which is documented here:
+/// https://msdn.microsoft.com/en-us/library/yat28ads.aspx
+enum PDB_NameSearchFlags {
+ NS_Default = 0x0,
+ NS_CaseSensitive = 0x1,
+ NS_CaseInsensitive = 0x2,
+ NS_FileNameExtMatch = 0x4,
+ NS_Regex = 0x8,
+ NS_UndecoratedName = 0x10
+};
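+
+// Editorial sketch, not part of the original header: because this is an
+// unscoped enum, individual flags combine with bitwise-or and can be
+// narrowed back to the enum type when an API expects PDB_NameSearchFlags.
+inline PDB_NameSearchFlags combineSearchFlags(PDB_NameSearchFlags A,
+                                              PDB_NameSearchFlags B) {
+  return static_cast<PDB_NameSearchFlags>(A | B);
+}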
+
+/// Specifies the hash algorithm that a source file from a PDB was hashed with.
+/// This corresponds to the CV_SourceChksum_t enumeration, which is documented
+/// here: https://msdn.microsoft.com/en-us/library/e96az21x.aspx
+enum class PDB_Checksum { None = 0, MD5 = 1, SHA1 = 2 };
+
+/// These values correspond to the CV_CPU_TYPE_e enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+enum class PDB_Cpu {
+ Intel8080 = 0x0,
+ Intel8086 = 0x1,
+ Intel80286 = 0x2,
+ Intel80386 = 0x3,
+ Intel80486 = 0x4,
+ Pentium = 0x5,
+ PentiumPro = 0x6,
+ Pentium3 = 0x7,
+ MIPS = 0x10,
+ MIPS16 = 0x11,
+ MIPS32 = 0x12,
+ MIPS64 = 0x13,
+ MIPSI = 0x14,
+ MIPSII = 0x15,
+ MIPSIII = 0x16,
+ MIPSIV = 0x17,
+ MIPSV = 0x18,
+ M68000 = 0x20,
+ M68010 = 0x21,
+ M68020 = 0x22,
+ M68030 = 0x23,
+ M68040 = 0x24,
+ Alpha = 0x30,
+ Alpha21164 = 0x31,
+ Alpha21164A = 0x32,
+ Alpha21264 = 0x33,
+ Alpha21364 = 0x34,
+ PPC601 = 0x40,
+ PPC603 = 0x41,
+ PPC604 = 0x42,
+ PPC620 = 0x43,
+ PPCFP = 0x44,
+ PPCBE = 0x45,
+ SH3 = 0x50,
+ SH3E = 0x51,
+ SH3DSP = 0x52,
+ SH4 = 0x53,
+ SHMedia = 0x54,
+ ARM3 = 0x60,
+ ARM4 = 0x61,
+ ARM4T = 0x62,
+ ARM5 = 0x63,
+ ARM5T = 0x64,
+ ARM6 = 0x65,
+ ARM_XMAC = 0x66,
+ ARM_WMMX = 0x67,
+ ARM7 = 0x68,
+ Omni = 0x70,
+ Ia64 = 0x80,
+ Ia64_2 = 0x81,
+ CEE = 0x90,
+ AM33 = 0xa0,
+ M32R = 0xb0,
+ TriCore = 0xc0,
+ X64 = 0xd0,
+ EBC = 0xe0,
+ Thumb = 0xf0,
+ ARMNT = 0xf4,
+ D3D11_Shader = 0x100,
+};
+
+enum class PDB_Machine {
+ Invalid = 0xffff,
+ Unknown = 0x0,
+ Am33 = 0x13,
+ Amd64 = 0x8664,
+ Arm = 0x1C0,
+ ArmNT = 0x1C4,
+ Ebc = 0xEBC,
+ x86 = 0x14C,
+ Ia64 = 0x200,
+ M32R = 0x9041,
+ Mips16 = 0x266,
+ MipsFpu = 0x366,
+ MipsFpu16 = 0x466,
+ PowerPC = 0x1F0,
+ PowerPCFP = 0x1F1,
+ R4000 = 0x166,
+ SH3 = 0x1A2,
+ SH3DSP = 0x1A3,
+ SH4 = 0x1A6,
+ SH5 = 0x1A8,
+ Thumb = 0x1C2,
+ WceMipsV2 = 0x169
+};
+
+/// These values correspond to the CV_call_e enumeration, and are documented
+/// at the following locations:
+/// https://msdn.microsoft.com/en-us/library/b2fc64ek.aspx
+/// https://msdn.microsoft.com/en-us/library/windows/desktop/ms680207(v=vs.85).aspx
+///
+enum class PDB_CallingConv {
+ NearCdecl = 0x00,
+ FarCdecl = 0x01,
+ NearPascal = 0x02,
+ FarPascal = 0x03,
+ NearFastcall = 0x04,
+ FarFastcall = 0x05,
+ Skipped = 0x06,
+ NearStdcall = 0x07,
+ FarStdcall = 0x08,
+ NearSyscall = 0x09,
+ FarSyscall = 0x0a,
+ Thiscall = 0x0b,
+ MipsCall = 0x0c,
+ Generic = 0x0d,
+ Alphacall = 0x0e,
+ Ppccall = 0x0f,
+ SuperHCall = 0x10,
+ Armcall = 0x11,
+ AM33call = 0x12,
+ Tricall = 0x13,
+ Sh5call = 0x14,
+ M32R = 0x15,
+ Clrcall = 0x16,
+ Inline = 0x17,
+ NearVectorcall = 0x18,
+ Reserved = 0x19,
+};
+
+/// These values correspond to the CV_CFL_LANG enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/bw3aekw6.aspx
+enum class PDB_Lang {
+ C = 0x00,
+ Cpp = 0x01,
+ Fortran = 0x02,
+ Masm = 0x03,
+ Pascal = 0x04,
+ Basic = 0x05,
+ Cobol = 0x06,
+ Link = 0x07,
+ Cvtres = 0x08,
+ Cvtpgd = 0x09,
+ CSharp = 0x0a,
+ VB = 0x0b,
+ ILAsm = 0x0c,
+ Java = 0x0d,
+ JScript = 0x0e,
+ MSIL = 0x0f,
+ HLSL = 0x10
+};
+
+/// These values correspond to the DataKind enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/b2x2t313.aspx
+enum class PDB_DataKind {
+ Unknown,
+ Local,
+ StaticLocal,
+ Param,
+ ObjectPtr,
+ FileStatic,
+ Global,
+ Member,
+ StaticMember,
+ Constant
+};
+
+/// These values correspond to the SymTagEnum enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/bkedss5f.aspx
+enum class PDB_SymType {
+ None,
+ Exe,
+ Compiland,
+ CompilandDetails,
+ CompilandEnv,
+ Function,
+ Block,
+ Data,
+ Annotation,
+ Label,
+ PublicSymbol,
+ UDT,
+ Enum,
+ FunctionSig,
+ PointerType,
+ ArrayType,
+ BuiltinType,
+ Typedef,
+ BaseClass,
+ Friend,
+ FunctionArg,
+ FuncDebugStart,
+ FuncDebugEnd,
+ UsingNamespace,
+ VTableShape,
+ VTable,
+ Custom,
+ Thunk,
+ CustomType,
+ ManagedType,
+ Dimension,
+ Max
+};
+
+/// These values correspond to the LocationType enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/f57kaez3.aspx
+enum class PDB_LocType {
+ Null,
+ Static,
+ TLS,
+ RegRel,
+ ThisRel,
+ Enregistered,
+ BitField,
+ Slot,
+ IlRel,
+ MetaData,
+ Constant,
+ Max
+};
+
+/// These values correspond to the THUNK_ORDINAL enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/dh0k8hft.aspx
+enum class PDB_ThunkOrdinal {
+ Standard,
+ ThisAdjustor,
+ Vcall,
+ Pcode,
+ UnknownLoad,
+ TrampIncremental,
+ BranchIsland
+};
+
+/// These values correspond to the UdtKind enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/wcstk66t.aspx
+enum class PDB_UdtType { Struct, Class, Union, Interface };
+
+/// These values correspond to the StackFrameTypeEnum enumeration, and are
+/// documented here: https://msdn.microsoft.com/en-us/library/bc5207xw.aspx.
+enum class PDB_StackFrameType { FPO, KernelTrap, KernelTSS, EBP, FrameData };
+
+/// These values correspond to the MemoryTypeEnum enumeration in the DIA SDK.
+enum class PDB_MemoryType { Code, Data, Stack, HeapCode };
+
+/// These values correspond to the BasicType enumeration, and are documented
+/// here: https://msdn.microsoft.com/en-us/library/4szdtzc3.aspx
+enum class PDB_BuiltinType {
+ None = 0,
+ Void = 1,
+ Char = 2,
+ WCharT = 3,
+ Int = 6,
+ UInt = 7,
+ Float = 8,
+ BCD = 9,
+ Bool = 10,
+ Long = 13,
+ ULong = 14,
+ Currency = 25,
+ Date = 26,
+ Variant = 27,
+ Complex = 28,
+ Bitfield = 29,
+ BSTR = 30,
+ HResult = 31
+};
+
+enum class PDB_RegisterId {
+ Unknown = 0,
+ VFrame = 30006,
+ AL = 1,
+ CL = 2,
+ DL = 3,
+ BL = 4,
+ AH = 5,
+ CH = 6,
+ DH = 7,
+ BH = 8,
+ AX = 9,
+ CX = 10,
+ DX = 11,
+ BX = 12,
+ SP = 13,
+ BP = 14,
+ SI = 15,
+ DI = 16,
+ EAX = 17,
+ ECX = 18,
+ EDX = 19,
+ EBX = 20,
+ ESP = 21,
+ EBP = 22,
+ ESI = 23,
+ EDI = 24,
+ ES = 25,
+ CS = 26,
+ SS = 27,
+ DS = 28,
+ FS = 29,
+ GS = 30,
+ IP = 31,
+ RAX = 328,
+ RBX = 329,
+ RCX = 330,
+ RDX = 331,
+ RSI = 332,
+ RDI = 333,
+ RBP = 334,
+ RSP = 335,
+ R8 = 336,
+ R9 = 337,
+ R10 = 338,
+ R11 = 339,
+ R12 = 340,
+ R13 = 341,
+ R14 = 342,
+ R15 = 343,
+};
+
+enum class PDB_MemberAccess { Private = 1, Protected = 2, Public = 3 };
+
+enum class PDB_ErrorCode {
+ Success,
+ NoPdbImpl,
+ InvalidPath,
+ InvalidFileFormat,
+ InvalidParameter,
+ AlreadyLoaded,
+ UnknownError,
+ NoMemory,
+ DebugInfoMismatch
+};
+
+struct VersionInfo {
+ uint32_t Major;
+ uint32_t Minor;
+ uint32_t Build;
+ uint32_t QFE;
+};
+
+enum PDB_VariantType {
+ Empty,
+ Unknown,
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Single,
+ Double,
+ UInt8,
+ UInt16,
+ UInt32,
+ UInt64,
+ Bool,
+};
+
+struct Variant {
+  Variant() : Type(PDB_VariantType::Empty) {}
+
+ PDB_VariantType Type;
+ union {
+ bool Bool;
+ int8_t Int8;
+ int16_t Int16;
+ int32_t Int32;
+ int64_t Int64;
+ float Single;
+ double Double;
+ uint8_t UInt8;
+ uint16_t UInt16;
+ uint32_t UInt32;
+ uint64_t UInt64;
+ };
+#define VARIANT_EQUAL_CASE(Enum) \
+ case PDB_VariantType::Enum: \
+ return Enum == Other.Enum;
+ bool operator==(const Variant &Other) const {
+ if (Type != Other.Type)
+ return false;
+ switch (Type) {
+ VARIANT_EQUAL_CASE(Bool)
+ VARIANT_EQUAL_CASE(Int8)
+ VARIANT_EQUAL_CASE(Int16)
+ VARIANT_EQUAL_CASE(Int32)
+ VARIANT_EQUAL_CASE(Int64)
+ VARIANT_EQUAL_CASE(Single)
+ VARIANT_EQUAL_CASE(Double)
+ VARIANT_EQUAL_CASE(UInt8)
+ VARIANT_EQUAL_CASE(UInt16)
+ VARIANT_EQUAL_CASE(UInt32)
+ VARIANT_EQUAL_CASE(UInt64)
+ default:
+ return true;
+ }
+ }
+#undef VARIANT_EQUAL_CASE
+ bool operator!=(const Variant &Other) const { return !(*this == Other); }
+};
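+
+// Editorial sketch, not part of the original header: building a typed
+// Variant by hand; real values normally come out of the PDB reader.
+inline Variant makeInt32Variant(int32_t V) {
+  Variant Result;
+  Result.Type = PDB_VariantType::Int32;
+  Result.Int32 = V;
+  return Result;
+}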
+
+namespace PDB {
+static const char Magic[] = {'M', 'i', 'c', 'r', 'o', 's', 'o', 'f',
+ 't', ' ', 'C', '/', 'C', '+', '+', ' ',
+ 'M', 'S', 'F', ' ', '7', '.', '0', '0',
+ '\r', '\n', '\x1a', 'D', 'S', '\0', '\0', '\0'};
+
+// The superblock is overlaid at the beginning of the file (offset 0).
+// It starts with a magic header and is followed by information which describes
+// the layout of the file system.
+struct SuperBlock {
+ char MagicBytes[sizeof(Magic)];
+ // The file system is split into a variable number of fixed size elements.
+ // These elements are referred to as blocks. The size of a block may vary
+ // from system to system.
+ support::ulittle32_t BlockSize;
+ // This field's purpose is not yet known.
+ support::ulittle32_t Unknown0;
+ // This contains the number of blocks resident in the file system. In
+ // practice, NumBlocks * BlockSize is equivalent to the size of the PDB file.
+ support::ulittle32_t NumBlocks;
+ // This contains the number of bytes which make up the directory.
+ support::ulittle32_t NumDirectoryBytes;
+ // This field's purpose is not yet known.
+ support::ulittle32_t Unknown1;
+ // This contains the block # of the block map.
+ support::ulittle32_t BlockMapAddr;
+};
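+
+// Editorial sketch, not part of the original header: a minimal validity
+// check for a SuperBlock read from offset 0 of a candidate PDB file,
+// assuming FileSize is the total size of that file.
+inline bool isValidSuperBlock(const SuperBlock &SB, uint64_t FileSize) {
+  for (unsigned I = 0; I < sizeof(Magic); ++I)
+    if (SB.MagicBytes[I] != Magic[I])
+      return false;
+  // The blocks tile the whole file, so the product must match its size.
+  return static_cast<uint64_t>(SB.NumBlocks) * SB.BlockSize == FileSize;
+}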
+}
+
+} // namespace llvm
+
+namespace std {
+template <> struct hash<llvm::PDB_SymType> {
+ typedef llvm::PDB_SymType argument_type;
+ typedef std::size_t result_type;
+
+ result_type operator()(const argument_type &Arg) const {
+ return std::hash<int>()(static_cast<int>(Arg));
+ }
+};
+}
+
+
+#endif
diff --git a/contrib/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h b/contrib/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h
new file mode 100644
index 0000000..0703fb1
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/Symbolize/DIPrinter.h
@@ -0,0 +1,47 @@
+//===- llvm/DebugInfo/Symbolize/DIPrinter.h ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the DIPrinter class, which is responsible for printing
+// structures defined in DebugInfo/DIContext.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_DIPRINTER_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+struct DILineInfo;
+class DIInliningInfo;
+struct DIGlobal;
+
+namespace symbolize {
+
+class DIPrinter {
+ raw_ostream &OS;
+ bool PrintFunctionNames;
+ bool PrintPretty;
+ void printName(const DILineInfo &Info, bool Inlined);
+
+public:
+ DIPrinter(raw_ostream &OS, bool PrintFunctionNames = true,
+ bool PrintPretty = false)
+ : OS(OS), PrintFunctionNames(PrintFunctionNames),
+ PrintPretty(PrintPretty) {}
+
+ DIPrinter &operator<<(const DILineInfo &Info);
+ DIPrinter &operator<<(const DIInliningInfo &Info);
+ DIPrinter &operator<<(const DIGlobal &Global);
+};
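+
+// Editorial usage sketch, not part of the original header: printing is via
+// stream-style insertion; `Info` would come from a symbolizer elsewhere.
+inline void printLineInfo(raw_ostream &OS, const DILineInfo &Info) {
+  DIPrinter Printer(OS, /*PrintFunctionNames=*/true, /*PrintPretty=*/false);
+  Printer << Info;
+}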
+}
+}
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h b/contrib/llvm/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h
new file mode 100644
index 0000000..ff9cc80
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/Symbolize/SymbolizableModule.h
@@ -0,0 +1,53 @@
+//===-- SymbolizableModule.h ------------------------------------ C++ -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolizableModule interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
+
+#include "llvm/DebugInfo/DIContext.h"
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace object {
+class ObjectFile;
+}
+}
+
+namespace llvm {
+namespace symbolize {
+
+using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;
+
+class SymbolizableModule {
+public:
+ virtual ~SymbolizableModule() {}
+ virtual DILineInfo symbolizeCode(uint64_t ModuleOffset,
+ FunctionNameKind FNKind,
+ bool UseSymbolTable) const = 0;
+ virtual DIInliningInfo symbolizeInlinedCode(uint64_t ModuleOffset,
+ FunctionNameKind FNKind,
+ bool UseSymbolTable) const = 0;
+ virtual DIGlobal symbolizeData(uint64_t ModuleOffset) const = 0;
+
+ // Return true if this is a 32-bit x86 PE COFF module.
+ virtual bool isWin32Module() const = 0;
+
+ // Returns the preferred base of the module, i.e. where the loader would place
+ // it in memory assuming there were no conflicts.
+ virtual uint64_t getModulePreferredBase() const = 0;
+};
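+
+// Editorial sketch, not part of the original header: how a client might
+// query an implementation for the source position of a module offset.
+inline DILineInfo symbolizeAt(const SymbolizableModule &M, uint64_t Offset) {
+  return M.symbolizeCode(Offset, FunctionNameKind::LinkageName,
+                         /*UseSymbolTable=*/true);
+}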
+
+} // namespace symbolize
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZABLEMODULE_H
diff --git a/contrib/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h b/contrib/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
new file mode 100644
index 0000000..ec3ae00
--- /dev/null
+++ b/contrib/llvm/include/llvm/DebugInfo/Symbolize/Symbolize.h
@@ -0,0 +1,105 @@
+//===-- Symbolize.h --------------------------------------------- C++ -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Header for LLVM symbolization library.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
+#define LLVM_DEBUGINFO_SYMBOLIZE_SYMBOLIZE_H
+
+#include "llvm/DebugInfo/Symbolize/SymbolizableModule.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/ErrorOr.h"
+#include <map>
+#include <memory>
+#include <string>
+
+namespace llvm {
+namespace symbolize {
+
+using namespace object;
+using FunctionNameKind = DILineInfoSpecifier::FunctionNameKind;
+
+class LLVMSymbolizer {
+public:
+ struct Options {
+ FunctionNameKind PrintFunctions;
+ bool UseSymbolTable : 1;
+ bool Demangle : 1;
+ bool RelativeAddresses : 1;
+ std::string DefaultArch;
+ std::vector<std::string> DsymHints;
+ Options(FunctionNameKind PrintFunctions = FunctionNameKind::LinkageName,
+ bool UseSymbolTable = true, bool Demangle = true,
+ bool RelativeAddresses = false, std::string DefaultArch = "")
+ : PrintFunctions(PrintFunctions), UseSymbolTable(UseSymbolTable),
+ Demangle(Demangle), RelativeAddresses(RelativeAddresses),
+ DefaultArch(DefaultArch) {}
+ };
+
+ LLVMSymbolizer(const Options &Opts = Options()) : Opts(Opts) {}
+ ~LLVMSymbolizer() {
+ flush();
+ }
+
+ ErrorOr<DILineInfo> symbolizeCode(const std::string &ModuleName,
+ uint64_t ModuleOffset);
+ ErrorOr<DIInliningInfo> symbolizeInlinedCode(const std::string &ModuleName,
+ uint64_t ModuleOffset);
+ ErrorOr<DIGlobal> symbolizeData(const std::string &ModuleName,
+ uint64_t ModuleOffset);
+ void flush();
+ static std::string DemangleName(const std::string &Name,
+ const SymbolizableModule *ModInfo);
+
+private:
+  // Bundles together an object file holding code/data and the object file
+  // holding the corresponding debug info. These objects can be the same.
+ typedef std::pair<ObjectFile*, ObjectFile*> ObjectPair;
+
+ ErrorOr<SymbolizableModule *>
+ getOrCreateModuleInfo(const std::string &ModuleName);
+ ObjectFile *lookUpDsymFile(const std::string &Path,
+ const MachOObjectFile *ExeObj,
+ const std::string &ArchName);
+ ObjectFile *lookUpDebuglinkObject(const std::string &Path,
+ const ObjectFile *Obj,
+ const std::string &ArchName);
+
+  /// \brief Returns a pair of pointers to the object and the debug object.
+ ErrorOr<ObjectPair> getOrCreateObjectPair(const std::string &Path,
+ const std::string &ArchName);
+
+  /// \brief Return a pointer to the object file at the specified path, for a
+  /// given architecture (e.g. if the path refers to a Mach-O universal
+  /// binary, only one object file from it will be returned).
+ ErrorOr<ObjectFile *> getOrCreateObject(const std::string &Path,
+ const std::string &ArchName);
+
+ std::map<std::string, ErrorOr<std::unique_ptr<SymbolizableModule>>> Modules;
+
+ /// \brief Contains cached results of getOrCreateObjectPair().
+ std::map<std::pair<std::string, std::string>, ErrorOr<ObjectPair>>
+ ObjectPairForPathArch;
+
+ /// \brief Contains parsed binary for each path, or parsing error.
+ std::map<std::string, ErrorOr<OwningBinary<Binary>>> BinaryForPath;
+
+ /// \brief Parsed object file for path/architecture pair, where "path" refers
+  /// to a Mach-O universal binary.
+ std::map<std::pair<std::string, std::string>, ErrorOr<std::unique_ptr<ObjectFile>>>
+ ObjectForUBPathAndArch;
+
+ Options Opts;
+};
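+
+// Editorial usage sketch, not part of the original header: the module path
+// and offset below are placeholders, not real addresses.
+inline std::string functionNameAt() {
+  LLVMSymbolizer::Options Opts(FunctionNameKind::ShortName);
+  LLVMSymbolizer Symbolizer(Opts);
+  if (auto InfoOrErr = Symbolizer.symbolizeCode("a.out", 0x401000))
+    return InfoOrErr->FunctionName;
+  return "<unknown>";
+}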
+
+} // namespace symbolize
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
new file mode 100644
index 0000000..a730260
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ExecutionEngine.h
@@ -0,0 +1,649 @@
+//===- ExecutionEngine.h - Abstract Execution Engine Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the abstract interface that implements execution support
+// for LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+#define LLVM_EXECUTIONENGINE_EXECUTIONENGINE_H
+
+#include "RuntimeDyld.h"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/MC/MCCodeGenInfo.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <map>
+#include <string>
+#include <vector>
+#include <functional>
+
+namespace llvm {
+
+struct GenericValue;
+class Constant;
+class DataLayout;
+class ExecutionEngine;
+class Function;
+class GlobalVariable;
+class GlobalValue;
+class JITEventListener;
+class MachineCodeInfo;
+class MCJITMemoryManager;
+class MutexGuard;
+class ObjectCache;
+class RTDyldMemoryManager;
+class Triple;
+class Type;
+
+namespace object {
+ class Archive;
+ class ObjectFile;
+}
+
+/// \brief Helper class for synchronizing access to the global address map
+/// table. Access to this class should be serialized under a mutex.
+class ExecutionEngineState {
+public:
+ typedef StringMap<uint64_t> GlobalAddressMapTy;
+
+private:
+
+  /// GlobalAddressMap - A mapping between LLVM global symbol names and the
+  /// addresses at which they have been materialized.
+ GlobalAddressMapTy GlobalAddressMap;
+
+ /// GlobalAddressReverseMap - This is the reverse mapping of GlobalAddressMap,
+ /// used to convert raw addresses into the LLVM global value that is emitted
+ /// at the address. This map is not computed unless getGlobalValueAtAddress
+ /// is called at some point.
+ std::map<uint64_t, std::string> GlobalAddressReverseMap;
+
+public:
+
+ GlobalAddressMapTy &getGlobalAddressMap() {
+ return GlobalAddressMap;
+ }
+
+ std::map<uint64_t, std::string> &getGlobalAddressReverseMap() {
+ return GlobalAddressReverseMap;
+ }
+
+ /// \brief Erase an entry from the mapping table.
+ ///
+  /// \returns The address that \p Name was mapped to.
+ uint64_t RemoveMapping(StringRef Name);
+};
+
+using FunctionCreator = std::function<void *(const std::string &)>;
+
+/// \brief Abstract interface for the execution of LLVM modules,
+/// designed to support both interpreter and just-in-time (JIT) compiler
+/// implementations.
+class ExecutionEngine {
+ /// The state object holding the global address mapping, which must be
+ /// accessed synchronously.
+ //
+  // FIXME: There is no particular reason the entire map needs to be
+  // synchronized. Wouldn't a reader-writer design be better here?
+ ExecutionEngineState EEState;
+
+ /// The target data for the platform for which execution is being performed.
+ ///
+  /// Note: the DataLayout is LLVMContext specific because it has an
+  /// internal cache based on type pointers. This makes it unsafe to reuse an
+  /// ExecutionEngine across contexts; we don't enforce this rule, but
+  /// undefined behavior can occur if the user tries to do it.
+ const DataLayout DL;
+
+ /// Whether lazy JIT compilation is enabled.
+ bool CompilingLazily;
+
+ /// Whether JIT compilation of external global variables is allowed.
+ bool GVCompilationDisabled;
+
+ /// Whether the JIT should perform lookups of external symbols (e.g.,
+ /// using dlsym).
+ bool SymbolSearchingDisabled;
+
+ /// Whether the JIT should verify IR modules during compilation.
+ bool VerifyModules;
+
+ friend class EngineBuilder; // To allow access to JITCtor and InterpCtor.
+
+protected:
+ /// The list of Modules that we are JIT'ing from. We use a SmallVector to
+ /// optimize for the case where there is only one module.
+ SmallVector<std::unique_ptr<Module>, 1> Modules;
+
+  /// getMemoryForGV - Allocate memory for a global variable.
+ virtual char *getMemoryForGV(const GlobalVariable *GV);
+
+ static ExecutionEngine *(*MCJITCtor)(
+ std::unique_ptr<Module> M,
+ std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MM,
+ std::shared_ptr<RuntimeDyld::SymbolResolver> SR,
+ std::unique_ptr<TargetMachine> TM);
+
+ static ExecutionEngine *(*OrcMCJITReplacementCtor)(
+ std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MM,
+ std::shared_ptr<RuntimeDyld::SymbolResolver> SR,
+ std::unique_ptr<TargetMachine> TM);
+
+ static ExecutionEngine *(*InterpCtor)(std::unique_ptr<Module> M,
+ std::string *ErrorStr);
+
+ /// LazyFunctionCreator - If an unknown function is needed, this function
+ /// pointer is invoked to create it. If this returns null, the JIT will
+ /// abort.
+ FunctionCreator LazyFunctionCreator;
+
+ /// getMangledName - Get mangled name.
+ std::string getMangledName(const GlobalValue *GV);
+
+public:
+ /// lock - This lock protects the ExecutionEngine and MCJIT classes. It must
+ /// be held while changing the internal state of any of those classes.
+ sys::Mutex lock;
+
+ //===--------------------------------------------------------------------===//
+ // ExecutionEngine Startup
+ //===--------------------------------------------------------------------===//
+
+ virtual ~ExecutionEngine();
+
+ /// Add a Module to the list of modules that we can JIT from.
+ virtual void addModule(std::unique_ptr<Module> M) {
+ Modules.push_back(std::move(M));
+ }
+
+ /// addObjectFile - Add an ObjectFile to the execution engine.
+ ///
+ /// This method is only supported by MCJIT. MCJIT will immediately load the
+  /// object into memory and add its symbols to the list used to resolve
+ /// external symbols while preparing other objects for execution.
+ ///
+ /// Objects added using this function will not be made executable until
+ /// needed by another object.
+ ///
+ /// MCJIT will take ownership of the ObjectFile.
+ virtual void addObjectFile(std::unique_ptr<object::ObjectFile> O);
+ virtual void addObjectFile(object::OwningBinary<object::ObjectFile> O);
+
+ /// addArchive - Add an Archive to the execution engine.
+ ///
+ /// This method is only supported by MCJIT. MCJIT will use the archive to
+ /// resolve external symbols in objects it is loading. If a symbol is found
+ /// in the Archive the contained object file will be extracted (in memory)
+ /// and loaded for possible execution.
+ virtual void addArchive(object::OwningBinary<object::Archive> A);
+
+ //===--------------------------------------------------------------------===//
+
+ const DataLayout &getDataLayout() const { return DL; }
+
+ /// removeModule - Remove a Module from the list of modules. Returns true if
+ /// M is found.
+ virtual bool removeModule(Module *M);
+
+ /// FindFunctionNamed - Search all of the active modules to find the function that
+  /// defines FnName. This is a very slow operation and shouldn't be used for
+ /// general code.
+ virtual Function *FindFunctionNamed(const char *FnName);
+
+ /// FindGlobalVariableNamed - Search all of the active modules to find the global variable
+  /// that defines Name. This is a very slow operation and shouldn't be used
+  /// for
+ /// general code.
+ virtual GlobalVariable *FindGlobalVariableNamed(const char *Name, bool AllowInternal = false);
+
+ /// runFunction - Execute the specified function with the specified arguments,
+ /// and return the result.
+ virtual GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) = 0;
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ /// This function is deprecated for the MCJIT execution engine.
+ virtual void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) = 0;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ virtual void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) {
+ llvm_unreachable("Re-mapping of section addresses not supported with this "
+ "EE!");
+ }
+
+ /// generateCodeForModule - Run code generation for the specified module and
+ /// load it into memory.
+ ///
+ /// When this function has completed, all code and data for the specified
+ /// module, and any module on which this module depends, will be generated
+ /// and loaded into memory, but relocations will not yet have been applied
+ /// and all memory will be readable and writable but not executable.
+ ///
+ /// This function is primarily useful when generating code for an external
+ /// target, allowing the client an opportunity to remap section addresses
+ /// before relocations are applied. Clients that intend to execute code
+ /// locally can use the getFunctionAddress call, which will generate code
+ /// and apply final preparations all in one step.
+ ///
+  /// This method has no effect for the interpreter.
+ virtual void generateCodeForModule(Module *M) {}
+
+ /// finalizeObject - ensure the module is fully processed and is usable.
+ ///
+ /// It is the user-level function for completing the process of making the
+ /// object usable for execution. It should be called after sections within an
+ /// object have been relocated using mapSectionAddress. When this method is
+ /// called the MCJIT execution engine will reapply relocations for a loaded
+  /// object. This method has no effect for the interpreter.
+ virtual void finalizeObject() {}
+
+ /// runStaticConstructorsDestructors - This method is used to execute all of
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ virtual void runStaticConstructorsDestructors(bool isDtors);
+
+ /// This method is used to execute all of the static constructors or
+ /// destructors for a particular module.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ void runStaticConstructorsDestructors(Module &module, bool isDtors);
+
+
+ /// runFunctionAsMain - This is a helper function which wraps runFunction to
+ /// handle the common task of starting up main with the specified argc, argv,
+ /// and envp parameters.
+ int runFunctionAsMain(Function *Fn, const std::vector<std::string> &argv,
+ const char * const * envp);
+
+
+ /// addGlobalMapping - Tell the execution engine that the specified global is
+ /// at the specified location. This is used internally as functions are JIT'd
+ /// and as global variables are laid out in memory. It can and should also be
+ /// used by clients of the EE that want to have an LLVM global overlay
+ /// existing data in memory. Mappings are automatically removed when their
+ /// GlobalValue is destroyed.
+ void addGlobalMapping(const GlobalValue *GV, void *Addr);
+ void addGlobalMapping(StringRef Name, uint64_t Addr);
+
+ /// clearAllGlobalMappings - Clear all global mappings and start over again,
+ /// for use in dynamic compilation scenarios to move globals.
+ void clearAllGlobalMappings();
+
+ /// clearGlobalMappingsFromModule - Clear all global mappings that came from a
+ /// particular module, because it has been removed from the JIT.
+ void clearGlobalMappingsFromModule(Module *M);
+
+ /// updateGlobalMapping - Replace an existing mapping for GV with a new
+ /// address. This updates both maps as required. If "Addr" is null, the
+ /// entry for the global is removed from the mappings. This returns the old
+ /// value of the pointer, or null if it was not in the map.
+ uint64_t updateGlobalMapping(const GlobalValue *GV, void *Addr);
+ uint64_t updateGlobalMapping(StringRef Name, uint64_t Addr);
+
+ /// getAddressToGlobalIfAvailable - This returns the address of the specified
+ /// global symbol.
+ uint64_t getAddressToGlobalIfAvailable(StringRef S);
+
+ /// getPointerToGlobalIfAvailable - This returns the address of the specified
+  /// global value if it has already been codegen'd, otherwise it returns
+ /// null.
+ void *getPointerToGlobalIfAvailable(StringRef S);
+ void *getPointerToGlobalIfAvailable(const GlobalValue *GV);
+
+ /// getPointerToGlobal - This returns the address of the specified global
+ /// value. This may involve code generation if it's a function.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getGlobalValueAddress instead.
+ void *getPointerToGlobal(const GlobalValue *GV);
+
+ /// getPointerToFunction - The different EE's represent function bodies in
+ /// different ways. They should each implement this to say what a function
+ /// pointer should look like. When F is destroyed, the ExecutionEngine will
+ /// remove its global mapping and free any machine code. Be sure no threads
+ /// are running inside F when that happens.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getFunctionAddress instead.
+ virtual void *getPointerToFunction(Function *F) = 0;
+
+ /// getPointerToFunctionOrStub - If the specified function has been
+ /// code-gen'd, return a pointer to the function. If not, compile it, or use
+ /// a stub to implement lazy compilation if available. See
+ /// getPointerToFunction for the requirements on destroying F.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getFunctionAddress instead.
+ virtual void *getPointerToFunctionOrStub(Function *F) {
+ // Default implementation, just codegen the function.
+ return getPointerToFunction(F);
+ }
+
+ /// getGlobalValueAddress - Return the address of the specified global
+ /// value. This may involve code generation.
+ ///
+ /// This function should not be called with the interpreter engine.
+ virtual uint64_t getGlobalValueAddress(const std::string &Name) {
+ // Default implementation for the interpreter. MCJIT will override this.
+ // JIT and interpreter clients should use getPointerToGlobal instead.
+ return 0;
+ }
+
+ /// getFunctionAddress - Return the address of the specified function.
+ /// This may involve code generation.
+ virtual uint64_t getFunctionAddress(const std::string &Name) {
+ // Default implementation for the interpreter. MCJIT will override this.
+ // Interpreter clients should use getPointerToFunction instead.
+ return 0;
+ }
+
+ /// getGlobalValueAtAddress - Return the LLVM global value object that starts
+ /// at the specified address.
+ ///
+ const GlobalValue *getGlobalValueAtAddress(void *Addr);
+
+ /// StoreValueToMemory - Stores the data in Val of type Ty at address Ptr.
+ /// Ptr is the address of the memory at which to store Val, cast to
+ /// GenericValue *. It is not a pointer to a GenericValue containing the
+ /// address at which to store Val.
+ void StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr,
+ Type *Ty);
+
+ void InitializeMemory(const Constant *Init, void *Addr);
+
+ /// getOrEmitGlobalVariable - Return the address of the specified global
+ /// variable, possibly emitting it to memory if needed. This is used by the
+ /// Emitter.
+ ///
+ /// This function is deprecated for the MCJIT execution engine. Use
+ /// getGlobalValueAddress instead.
+ virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
+ return getPointerToGlobal((const GlobalValue *)GV);
+ }
+
+ /// Registers a listener to be called back on various events within
+ /// the JIT. See JITEventListener.h for more details. Does not
+ /// take ownership of the argument. The argument may be NULL, in
+ /// which case these functions do nothing.
+ virtual void RegisterJITEventListener(JITEventListener *) {}
+ virtual void UnregisterJITEventListener(JITEventListener *) {}
+
+ /// Sets the pre-compiled object cache. The ownership of the ObjectCache is
+ /// not changed. Supported by MCJIT but not the interpreter.
+ virtual void setObjectCache(ObjectCache *) {
+ llvm_unreachable("No support for an object cache");
+ }
+
+ /// setProcessAllSections (MCJIT Only): By default, only sections that are
+ /// "required for execution" are passed to the RTDyldMemoryManager, and other
+ /// sections are discarded. Passing 'true' to this method will cause
+ /// RuntimeDyld to pass all sections to its RTDyldMemoryManager regardless
+ /// of whether they are "required to execute" in the usual sense.
+ ///
+ /// Rationale: Some MCJIT clients want to be able to inspect metadata
+ /// sections (e.g. Dwarf, Stack-maps) to enable functionality or analyze
+ /// performance. Passing these sections to the memory manager allows the
+ /// client to make policy about the relevant sections, rather than having
+ /// MCJIT do it.
+ virtual void setProcessAllSections(bool ProcessAllSections) {
+ llvm_unreachable("No support for ProcessAllSections option");
+ }
+
+ /// Return the target machine (if available).
+ virtual TargetMachine *getTargetMachine() { return nullptr; }
+
+ /// DisableLazyCompilation - When lazy compilation is off (the default), the
+ /// JIT will eagerly compile every function reachable from the argument to
+ /// getPointerToFunction. If lazy compilation is turned on, the JIT will only
+ /// compile the one function and emit stubs to compile the rest when they're
+ /// first called. If lazy compilation is turned off again while some lazy
+ /// stubs are still around, and one of those stubs is called, the program will
+ /// abort.
+ ///
+ /// In order to safely compile lazily in a threaded program, the user must
+ /// ensure that 1) only one thread at a time can call any particular lazy
+ /// stub, and 2) any thread modifying LLVM IR must hold the JIT's lock
+ /// (ExecutionEngine::lock) or otherwise ensure that no other thread calls a
+ /// lazy stub. See http://llvm.org/PR5184 for details.
+ void DisableLazyCompilation(bool Disabled = true) {
+ CompilingLazily = !Disabled;
+ }
+ bool isCompilingLazily() const {
+ return CompilingLazily;
+ }
+
+ /// DisableGVCompilation - If called, the JIT will abort if it's asked to
+ /// allocate space and populate a GlobalVariable that is not internal to
+ /// the module.
+ void DisableGVCompilation(bool Disabled = true) {
+ GVCompilationDisabled = Disabled;
+ }
+ bool isGVCompilationDisabled() const {
+ return GVCompilationDisabled;
+ }
+
+ /// DisableSymbolSearching - If called, the JIT will not try to lookup unknown
+ /// symbols with dlsym. A client can still use InstallLazyFunctionCreator to
+ /// resolve symbols in a custom way.
+ void DisableSymbolSearching(bool Disabled = true) {
+ SymbolSearchingDisabled = Disabled;
+ }
+ bool isSymbolSearchingDisabled() const {
+ return SymbolSearchingDisabled;
+ }
+
+ /// Enable/Disable IR module verification.
+ ///
+ /// Note: Module verification is enabled by default in Debug builds, and
+ /// disabled by default in Release. Use this method to override the default.
+ void setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ }
+ bool getVerifyModules() const {
+ return VerifyModules;
+ }
+
+ /// InstallLazyFunctionCreator - If an unknown function is needed, the
+ /// specified function pointer is invoked to create it. If it returns null,
+ /// the JIT will abort.
+ void InstallLazyFunctionCreator(FunctionCreator C) {
+ LazyFunctionCreator = C;
+ }
+
+protected:
+  explicit ExecutionEngine(DataLayout DL) : DL(std::move(DL)) {}
+ explicit ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M);
+ explicit ExecutionEngine(std::unique_ptr<Module> M);
+
+ void emitGlobals();
+
+ void EmitGlobalVariable(const GlobalVariable *GV);
+
+ GenericValue getConstantValue(const Constant *C);
+ void LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr,
+ Type *Ty);
+
+private:
+ void Init(std::unique_ptr<Module> M);
+};
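+
+// Editorial usage sketch, not part of the original header: the common
+// sequence for running a module's `main`, assuming `MainFn` was located
+// via FindFunctionNamed("main").
+inline int runMain(ExecutionEngine &EE, Function *MainFn) {
+  EE.finalizeObject();                      // apply relocations (MCJIT)
+  std::vector<std::string> Argv = {"prog"}; // placeholder argv[0]
+  static const char *Envp[] = {nullptr};    // empty environment
+  return EE.runFunctionAsMain(MainFn, Argv, Envp);
+}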
+
+namespace EngineKind {
+ // These are actually bitmasks that get or-ed together.
+ enum Kind {
+ JIT = 0x1,
+ Interpreter = 0x2
+ };
+ const static Kind Either = (Kind)(JIT | Interpreter);
+}
+
+/// Builder class for ExecutionEngines. Use this by stack-allocating a builder,
+/// chaining the various set* methods, and terminating it with a .create()
+/// call.
+class EngineBuilder {
+private:
+ std::unique_ptr<Module> M;
+ EngineKind::Kind WhichEngine;
+ std::string *ErrorStr;
+ CodeGenOpt::Level OptLevel;
+ std::shared_ptr<MCJITMemoryManager> MemMgr;
+ std::shared_ptr<RuntimeDyld::SymbolResolver> Resolver;
+ TargetOptions Options;
+ Reloc::Model RelocModel;
+ CodeModel::Model CMModel;
+ std::string MArch;
+ std::string MCPU;
+ SmallVector<std::string, 4> MAttrs;
+ bool VerifyModules;
+ bool UseOrcMCJITReplacement;
+
+public:
+ /// Default constructor for EngineBuilder.
+ EngineBuilder();
+
+ /// Constructor for EngineBuilder.
+ EngineBuilder(std::unique_ptr<Module> M);
+
+ // Out-of-line since we don't have the def'n of RTDyldMemoryManager here.
+ ~EngineBuilder();
+
+ /// setEngineKind - Controls whether the user wants the interpreter, the JIT,
+ /// or whichever engine works. This option defaults to EngineKind::Either.
+ EngineBuilder &setEngineKind(EngineKind::Kind w) {
+ WhichEngine = w;
+ return *this;
+ }
+
+ /// setMCJITMemoryManager - Sets the MCJIT memory manager to use. This allows
+ /// clients to customize their memory allocation policies for the MCJIT. This
+ /// is only appropriate for the MCJIT; setting this and configuring the builder
+ /// to create anything other than MCJIT will cause a runtime error. If create()
+ /// is called and is successful, the created engine takes ownership of the
+ /// memory manager. This option defaults to NULL.
+ EngineBuilder &setMCJITMemoryManager(std::unique_ptr<RTDyldMemoryManager> mcjmm);
+
+ EngineBuilder&
+ setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM);
+
+ EngineBuilder&
+ setSymbolResolver(std::unique_ptr<RuntimeDyld::SymbolResolver> SR);
+
+ /// setErrorStr - Set the error string to write to on error. This option
+ /// defaults to NULL.
+ EngineBuilder &setErrorStr(std::string *e) {
+ ErrorStr = e;
+ return *this;
+ }
+
+ /// setOptLevel - Set the optimization level for the JIT. This option
+ /// defaults to CodeGenOpt::Default.
+ EngineBuilder &setOptLevel(CodeGenOpt::Level l) {
+ OptLevel = l;
+ return *this;
+ }
+
+ /// setTargetOptions - Set the target options that the ExecutionEngine
+ /// target is using. Defaults to TargetOptions().
+ EngineBuilder &setTargetOptions(const TargetOptions &Opts) {
+ Options = Opts;
+ return *this;
+ }
+
+ /// setRelocationModel - Set the relocation model that the ExecutionEngine
+ /// target is using. Defaults to target specific default "Reloc::Default".
+ EngineBuilder &setRelocationModel(Reloc::Model RM) {
+ RelocModel = RM;
+ return *this;
+ }
+
+ /// setCodeModel - Set the CodeModel that the ExecutionEngine target
+ /// data is using. Defaults to target specific default
+ /// "CodeModel::JITDefault".
+ EngineBuilder &setCodeModel(CodeModel::Model M) {
+ CMModel = M;
+ return *this;
+ }
+
+ /// setMArch - Override the architecture set by the Module's triple.
+ EngineBuilder &setMArch(StringRef march) {
+ MArch.assign(march.begin(), march.end());
+ return *this;
+ }
+
+ /// setMCPU - Target a specific cpu type.
+ EngineBuilder &setMCPU(StringRef mcpu) {
+ MCPU.assign(mcpu.begin(), mcpu.end());
+ return *this;
+ }
+
+ /// setVerifyModules - Set whether the JIT implementation should verify
+ /// IR modules during compilation.
+ EngineBuilder &setVerifyModules(bool Verify) {
+ VerifyModules = Verify;
+ return *this;
+ }
+
+ /// setMAttrs - Set cpu-specific attributes.
+ template<typename StringSequence>
+ EngineBuilder &setMAttrs(const StringSequence &mattrs) {
+ MAttrs.clear();
+ MAttrs.append(mattrs.begin(), mattrs.end());
+ return *this;
+ }
+
+  /// \brief Use OrcMCJITReplacement instead of MCJIT. Off by default.
+ void setUseOrcMCJITReplacement(bool UseOrcMCJITReplacement) {
+ this->UseOrcMCJITReplacement = UseOrcMCJITReplacement;
+ }
+
+ TargetMachine *selectTarget();
+
+ /// selectTarget - Pick a target either via -march or by guessing the native
+ /// arch. Add any CPU features specified via -mcpu or -mattr.
+ TargetMachine *selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs);
+
+ ExecutionEngine *create() {
+ return create(selectTarget());
+ }
+
+ ExecutionEngine *create(TargetMachine *TM);
+};
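+
+// Editorial usage sketch, not part of the original header: the chained
+// construction the class comment describes, building an MCJIT engine;
+// assumes the native target has already been initialized (e.g. via
+// InitializeNativeTarget()).
+inline ExecutionEngine *buildJIT(std::unique_ptr<Module> M, std::string &Err) {
+  return EngineBuilder(std::move(M))
+      .setEngineKind(EngineKind::JIT)
+      .setErrorStr(&Err)
+      .setOptLevel(CodeGenOpt::Default)
+      .create();
+}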
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionEngine, LLVMExecutionEngineRef)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h b/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h
new file mode 100644
index 0000000..0e92f79
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/GenericValue.h
@@ -0,0 +1,53 @@
+//===-- GenericValue.h - Represent any type of LLVM value -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The GenericValue class is used to represent an LLVM value of arbitrary type.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_EXECUTIONENGINE_GENERICVALUE_H
+#define LLVM_EXECUTIONENGINE_GENERICVALUE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+typedef void* PointerTy;
+class APInt;
+
+struct GenericValue {
+ struct IntPair {
+ unsigned int first;
+ unsigned int second;
+ };
+ union {
+ double DoubleVal;
+ float FloatVal;
+ PointerTy PointerVal;
+ struct IntPair UIntPairVal;
+ unsigned char Untyped[8];
+ };
+ APInt IntVal; // also used for long doubles.
+ // For aggregate data types.
+ std::vector<GenericValue> AggregateVal;
+
+  // Zero-initializing the GenericValue here could be omitted to make the
+  // code faster, but that can potentially cause problems, since the
+  // GenericValue would then store garbage instead of zero.
+ GenericValue() : IntVal(1,0) {UIntPairVal.first = 0; UIntPairVal.second = 0;}
+ explicit GenericValue(void *V) : PointerVal(V), IntVal(1,0) { }
+};
+
+inline GenericValue PTOGV(void *P) { return GenericValue(P); }
+inline void* GVTOP(const GenericValue &GV) { return GV.PointerVal; }
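+
+// Editorial sketch, not part of the original header: packing an argument for
+// ExecutionEngine::runFunction; integer values travel in the IntVal APInt.
+inline GenericValue makeInt32Arg(int32_t V) {
+  GenericValue GV;
+  GV.IntVal = APInt(32, static_cast<uint64_t>(V), /*isSigned=*/true);
+  return GV;
+}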
+
+} // End llvm namespace.
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h b/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h
new file mode 100644
index 0000000..a147078
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Interpreter.h
@@ -0,0 +1,28 @@
+//===-- Interpreter.h - Abstract Execution Engine Interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the interpreter to link in on certain operating systems.
+// (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_INTERPRETER_H
+#define LLVM_EXECUTIONENGINE_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+
+extern "C" void LLVMLinkInInterpreter();
+
+namespace {
+ struct ForceInterpreterLinking {
+ ForceInterpreterLinking() { LLVMLinkInInterpreter(); }
+ } ForceInterpreterLinking;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
new file mode 100644
index 0000000..c3edec8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITEventListener.h
@@ -0,0 +1,122 @@
+//===- JITEventListener.h - Exposes events from JIT compilation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the JITEventListener interface, which lets users get
+// callbacks when significant events happen during the JIT compilation process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+#define LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
+
+#include "RuntimeDyld.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/Support/DataTypes.h"
+#include <vector>
+
+namespace llvm {
+class Function;
+class MachineFunction;
+class OProfileWrapper;
+class IntelJITEventsWrapper;
+
+namespace object {
+ class ObjectFile;
+}
+
+/// JITEvent_EmittedFunctionDetails - Helper struct for containing information
+/// about a generated machine code function.
+struct JITEvent_EmittedFunctionDetails {
+ struct LineStart {
+ /// The address at which the current line changes.
+ uintptr_t Address;
+
+ /// The new location information. These can be translated to DebugLocTuples
+ /// using MF->getDebugLocTuple().
+ DebugLoc Loc;
+ };
+
+ /// The machine function the struct contains information for.
+ const MachineFunction *MF;
+
+ /// The list of line boundary information, sorted by address.
+ std::vector<LineStart> LineStarts;
+};
+
+/// JITEventListener - Abstract interface for use by the JIT to notify clients
+/// about significant events during compilation. For example, to notify
+/// profilers and debuggers that need to know where functions have been emitted.
+///
+/// The default implementation of each method does nothing.
+class JITEventListener {
+public:
+ typedef JITEvent_EmittedFunctionDetails EmittedFunctionDetails;
+
+public:
+ JITEventListener() {}
+ virtual ~JITEventListener() {}
+
+ /// NotifyObjectEmitted - Called after an object has been successfully
+ /// emitted to memory. NotifyFunctionEmitted will not be called for
+ /// individual functions in the object.
+ ///
+ /// ELF-specific information
+ /// The ObjectImage contains the generated object image
+ /// with section headers updated to reflect the address at which sections
+ /// were loaded and with relocations performed in-place on debug sections.
+ virtual void NotifyObjectEmitted(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {}
+
+ /// NotifyFreeingObject - Called just before the memory associated with
+ /// a previously emitted object is released.
+ virtual void NotifyFreeingObject(const object::ObjectFile &Obj) {}
+
+  // Get a pointer to the GDB debugger registration listener.
+ static JITEventListener *createGDBRegistrationListener();
+
+#if LLVM_USE_INTEL_JITEVENTS
+ // Construct an IntelJITEventListener
+ static JITEventListener *createIntelJITEventListener();
+
+ // Construct an IntelJITEventListener with a test Intel JIT API implementation
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl);
+#else
+ static JITEventListener *createIntelJITEventListener() { return nullptr; }
+
+ static JITEventListener *createIntelJITEventListener(
+ IntelJITEventsWrapper* AlternativeImpl) {
+ return nullptr;
+ }
+#endif // LLVM_USE_INTEL_JITEVENTS
+
+#if LLVM_USE_OPROFILE
+ // Construct an OProfileJITEventListener
+ static JITEventListener *createOProfileJITEventListener();
+
+ // Construct an OProfileJITEventListener with a test opagent implementation
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl);
+#else
+
+ static JITEventListener *createOProfileJITEventListener() { return nullptr; }
+
+ static JITEventListener *createOProfileJITEventListener(
+ OProfileWrapper* AlternativeImpl) {
+ return nullptr;
+ }
+#endif // LLVM_USE_OPROFILE
+private:
+ virtual void anchor();
+};
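+
+// Editorial sketch, not part of the original header: a minimal listener
+// that counts emitted objects; register it with
+// ExecutionEngine::RegisterJITEventListener.
+class CountingJITEventListener : public JITEventListener {
+  unsigned EmittedObjects = 0;
+
+public:
+  void NotifyObjectEmitted(const object::ObjectFile &Obj,
+                           const RuntimeDyld::LoadedObjectInfo &L) override {
+    ++EmittedObjects;
+  }
+  unsigned emittedObjects() const { return EmittedObjects; }
+};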
+
+} // end namespace llvm.
+
+#endif // defined LLVM_EXECUTIONENGINE_JITEVENTLISTENER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/JITSymbolFlags.h b/contrib/llvm/include/llvm/ExecutionEngine/JITSymbolFlags.h
new file mode 100644
index 0000000..450e948
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/JITSymbolFlags.h
@@ -0,0 +1,81 @@
+//===------ JITSymbolFlags.h - Flags for symbols in the JIT -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbol flags for symbols in the JIT (e.g. weak, exported).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITSYMBOLFLAGS_H
+#define LLVM_EXECUTIONENGINE_JITSYMBOLFLAGS_H
+
+#include "llvm/IR/GlobalValue.h"
+
+namespace llvm {
+
+/// @brief Flags for symbols in the JIT.
+enum class JITSymbolFlags : char {
+ None = 0,
+ Weak = 1U << 0,
+ Exported = 1U << 1
+};
+
+inline JITSymbolFlags operator|(JITSymbolFlags LHS, JITSymbolFlags RHS) {
+ typedef std::underlying_type<JITSymbolFlags>::type UT;
+ return static_cast<JITSymbolFlags>(
+ static_cast<UT>(LHS) | static_cast<UT>(RHS));
+}
+
+inline JITSymbolFlags& operator |=(JITSymbolFlags &LHS, JITSymbolFlags RHS) {
+ LHS = LHS | RHS;
+ return LHS;
+}
+
+inline JITSymbolFlags operator&(JITSymbolFlags LHS, JITSymbolFlags RHS) {
+ typedef std::underlying_type<JITSymbolFlags>::type UT;
+ return static_cast<JITSymbolFlags>(
+ static_cast<UT>(LHS) & static_cast<UT>(RHS));
+}
+
+inline JITSymbolFlags& operator &=(JITSymbolFlags &LHS, JITSymbolFlags RHS) {
+ LHS = LHS & RHS;
+ return LHS;
+}
+
+/// @brief Base class for symbols in the JIT.
+class JITSymbolBase {
+public:
+ JITSymbolBase(JITSymbolFlags Flags) : Flags(Flags) {}
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+ bool isWeak() const {
+ return (Flags & JITSymbolFlags::Weak) == JITSymbolFlags::Weak;
+ }
+
+ bool isExported() const {
+ return (Flags & JITSymbolFlags::Exported) == JITSymbolFlags::Exported;
+ }
+
+ static JITSymbolFlags flagsFromGlobalValue(const GlobalValue &GV) {
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+ if (GV.hasWeakLinkage())
+ Flags |= JITSymbolFlags::Weak;
+ if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
+ Flags |= JITSymbolFlags::Exported;
+    return Flags;
+  }
+
+private:
+ JITSymbolFlags Flags;
+};
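+
+// Editorial sketch, not part of the original header: deriving flags from a
+// GlobalValue and testing them through the JITSymbolBase helpers.
+inline bool isExportedWeakSymbol(const GlobalValue &GV) {
+  JITSymbolBase Sym(JITSymbolBase::flagsFromGlobalValue(GV));
+  return Sym.isWeak() && Sym.isExported();
+}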
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
new file mode 100644
index 0000000..66ddb7c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/MCJIT.h
@@ -0,0 +1,38 @@
+//===-- MCJIT.h - MC-Based Just-In-Time Execution Engine --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces the MCJIT to link in on certain operating systems.
+// (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_MCJIT_H
+#define LLVM_EXECUTIONENGINE_MCJIT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInMCJIT();
+
+namespace {
+ struct ForceMCJITLinking {
+ ForceMCJITLinking() {
+ // We must reference MCJIT in such a way that compilers will not
+ // delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough
+ // to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ LLVMLinkInMCJIT();
+ }
+ } ForceMCJITLinking;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h b/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h
new file mode 100644
index 0000000..05da594
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/OProfileWrapper.h
@@ -0,0 +1,124 @@
+//===-- OProfileWrapper.h - OProfile JIT API Wrapper ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines an OProfileWrapper object that detects if the oprofile
+// daemon is running, and provides wrappers for opagent functions used to
+// communicate with the oprofile JIT interface. The dynamic library libopagent
+// does not need to be linked directly as this object lazily loads the library
+// when the first op_ function is called.
+//
+// See http://oprofile.sourceforge.net/doc/devel/jit-interface.html for the
+// definition of the interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+#define LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
+
+#include "llvm/Support/DataTypes.h"
+#include <opagent.h>
+
+namespace llvm {
+
+
+class OProfileWrapper {
+ typedef op_agent_t (*op_open_agent_ptr_t)();
+ typedef int (*op_close_agent_ptr_t)(op_agent_t);
+ typedef int (*op_write_native_code_ptr_t)(op_agent_t,
+ const char*,
+ uint64_t,
+ void const*,
+ const unsigned int);
+ typedef int (*op_write_debug_line_info_ptr_t)(op_agent_t,
+ void const*,
+ size_t,
+ struct debug_line_info const*);
+ typedef int (*op_unload_native_code_ptr_t)(op_agent_t, uint64_t);
+
+ // Also used for op_minor_version function which has the same signature
+ typedef int (*op_major_version_ptr_t)();
+
+ // This is not a part of the opagent API, but is useful nonetheless
+ typedef bool (*IsOProfileRunningPtrT)();
+
+
+ op_agent_t Agent;
+ op_open_agent_ptr_t OpenAgentFunc;
+ op_close_agent_ptr_t CloseAgentFunc;
+ op_write_native_code_ptr_t WriteNativeCodeFunc;
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoFunc;
+ op_unload_native_code_ptr_t UnloadNativeCodeFunc;
+ op_major_version_ptr_t MajorVersionFunc;
+ op_major_version_ptr_t MinorVersionFunc;
+ IsOProfileRunningPtrT IsOProfileRunningFunc;
+
+ bool Initialized;
+
+public:
+ OProfileWrapper();
+
+ // For testing with a mock opagent implementation, skips the dynamic load and
+ // the function resolution.
+ OProfileWrapper(op_open_agent_ptr_t OpenAgentImpl,
+ op_close_agent_ptr_t CloseAgentImpl,
+ op_write_native_code_ptr_t WriteNativeCodeImpl,
+ op_write_debug_line_info_ptr_t WriteDebugLineInfoImpl,
+ op_unload_native_code_ptr_t UnloadNativeCodeImpl,
+ op_major_version_ptr_t MajorVersionImpl,
+ op_major_version_ptr_t MinorVersionImpl,
+ IsOProfileRunningPtrT MockIsOProfileRunningImpl = 0)
+ : OpenAgentFunc(OpenAgentImpl),
+ CloseAgentFunc(CloseAgentImpl),
+ WriteNativeCodeFunc(WriteNativeCodeImpl),
+ WriteDebugLineInfoFunc(WriteDebugLineInfoImpl),
+ UnloadNativeCodeFunc(UnloadNativeCodeImpl),
+ MajorVersionFunc(MajorVersionImpl),
+ MinorVersionFunc(MinorVersionImpl),
+ IsOProfileRunningFunc(MockIsOProfileRunningImpl),
+ Initialized(true)
+ {
+ }
+
+ // Calls op_open_agent in the oprofile JIT library and saves the returned
+ // op_agent_t handle internally so it can be used when calling all the other
+ // op_* functions. Callers of this class do not need to keep track of
+ // op_agent_t objects.
+ bool op_open_agent();
+
+ int op_close_agent();
+ int op_write_native_code(const char* name,
+ uint64_t addr,
+ void const* code,
+ const unsigned int size);
+ int op_write_debug_line_info(void const* code,
+ size_t num_entries,
+ struct debug_line_info const* info);
+ int op_unload_native_code(uint64_t addr);
+ int op_major_version();
+ int op_minor_version();
+
+  // Returns true if the oprofile daemon is running, the opagent library is
+  // loaded, and a connection to the agent has been established; returns false
+  // otherwise.
+ bool isAgentAvailable();
+
+private:
+ // Loads the libopagent library and initializes this wrapper if the oprofile
+ // daemon is running
+ bool initialize();
+
+  // Searches /proc for the oprofile daemon and returns true if the process is
+  // found, or false otherwise.
+ bool checkForOProfileProcEntry();
+
+ bool isOProfileRunning();
+};
+
+} // namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_OPROFILEWRAPPER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ObjectCache.h b/contrib/llvm/include/llvm/ExecutionEngine/ObjectCache.h
new file mode 100644
index 0000000..cc01a4e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ObjectCache.h
@@ -0,0 +1,40 @@
+//===-- ObjectCache.h - Class definition for the ObjectCache ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+#define LLVM_EXECUTIONENGINE_OBJECTCACHE_H
+
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+class Module;
+
+/// This is the base ObjectCache type which can be provided to an
+/// ExecutionEngine for the purpose of avoiding compilation for Modules that
+/// have already been compiled and an object file is available.
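+///
+/// A minimal in-memory subclass might look like this (an illustrative sketch;
+/// SimpleCache and its map are not part of this interface):
+/// \code
+///   class SimpleCache : public ObjectCache {
+///     std::map<const Module*, std::unique_ptr<MemoryBuffer>> Cached;
+///   public:
+///     void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) override {
+///       Cached[M] = MemoryBuffer::getMemBufferCopy(Obj.getBuffer());
+///     }
+///     std::unique_ptr<MemoryBuffer> getObject(const Module *M) override {
+///       auto I = Cached.find(M);
+///       return I == Cached.end()
+///                ? nullptr
+///                : MemoryBuffer::getMemBufferCopy(I->second->getBuffer());
+///     }
+///   };
+/// \endcode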
+class ObjectCache {
+ virtual void anchor();
+public:
+ ObjectCache() { }
+
+ virtual ~ObjectCache() { }
+
+ /// notifyObjectCompiled - Provides a pointer to compiled code for Module M.
+ virtual void notifyObjectCompiled(const Module *M, MemoryBufferRef Obj) = 0;
+
+ /// Returns a pointer to a newly allocated MemoryBuffer that contains the
+/// object which corresponds to Module M, or nullptr if an object is not
+ /// available.
+ virtual std::unique_ptr<MemoryBuffer> getObject(const Module* M) = 0;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h b/contrib/llvm/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
new file mode 100644
index 0000000..b075611
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/ObjectMemoryBuffer.h
@@ -0,0 +1,63 @@
+//===- ObjectMemoryBuffer.h - SmallVector-backed MemoryBuffer --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a wrapper class to hold the memory into which an
+// object will be generated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+#define LLVM_EXECUTIONENGINE_OBJECTMEMORYBUFFER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// \brief SmallVector-backed MemoryBuffer instance.
+///
+/// This class enables efficient construction of MemoryBuffers from SmallVector
+/// instances. This is useful for MCJIT and Orc, where object files are streamed
+/// into SmallVectors, then inspected using ObjectFile (which takes a
+/// MemoryBuffer).
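+///
+/// For example (a sketch; assumes object bytes were streamed into ObjSV):
+/// \code
+///   SmallVector<char, 0> ObjSV;
+///   // ... write an object file into ObjSV via a raw_svector_ostream ...
+///   std::unique_ptr<MemoryBuffer> ObjBuffer(
+///       new ObjectMemoryBuffer(std::move(ObjSV)));
+/// \endcode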
+class ObjectMemoryBuffer : public MemoryBuffer {
+public:
+
+ /// \brief Construct an ObjectMemoryBuffer from the given SmallVector r-value.
+ ///
+ /// FIXME: It'd be nice for this to be a non-templated constructor taking a
+ /// SmallVectorImpl here instead of a templated one taking a SmallVector<N>,
+ /// but SmallVector's move-construction/assignment currently only take
+ /// SmallVectors. If/when that is fixed we can simplify this constructor and
+ /// the following one.
+ ObjectMemoryBuffer(SmallVectorImpl<char> &&SV)
+ : SV(std::move(SV)), BufferName("<in-memory object>") {
+ init(this->SV.begin(), this->SV.end(), false);
+ }
+
+ /// \brief Construct a named ObjectMemoryBuffer from the given SmallVector
+ /// r-value and StringRef.
+ ObjectMemoryBuffer(SmallVectorImpl<char> &&SV, StringRef Name)
+ : SV(std::move(SV)), BufferName(Name) {
+ init(this->SV.begin(), this->SV.end(), false);
+ }
+
+ const char* getBufferIdentifier() const override { return BufferName.c_str(); }
+
+ BufferKind getBufferKind() const override { return MemoryBuffer_Malloc; }
+
+private:
+ SmallVector<char, 0> SV;
+ std::string BufferName;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
new file mode 100644
index 0000000..7dab5d1
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
@@ -0,0 +1,480 @@
+//===- CompileOnDemandLayer.h - Compile each function on demand -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// JIT layer for breaking up modules and inserting callbacks to allow
+// individual functions to be compiled on demand.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
+
+#include "IndirectionUtils.h"
+#include "LambdaResolver.h"
+#include "LogicalDylib.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <list>
+#include <memory>
+#include <set>
+
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief Compile-on-demand layer.
+///
+/// When a module is added to this layer a stub is created for each of its
+/// function definitions. The stubs and other global values are immediately
+/// added to the layer below. When a stub is called it triggers the extraction
+/// of the function body from the original module. The extracted body is then
+/// compiled and executed.
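+///
+/// Construction sketch (assumes CompileLayer, CCMgr, and CreateStubsMgr are a
+/// lower layer, a compile callback manager, and a stubs-manager builder for
+/// the host target, all set up by the client):
+/// \code
+///   auto PerFunction =
+///     [](Function &F) { return std::set<Function*>({&F}); };
+///   CompileOnDemandLayer<decltype(CompileLayer)>
+///     CODLayer(CompileLayer, PerFunction, CCMgr, CreateStubsMgr);
+/// \endcode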
+template <typename BaseLayerT,
+ typename CompileCallbackMgrT = JITCompileCallbackManager,
+ typename IndirectStubsMgrT = IndirectStubsManager>
+class CompileOnDemandLayer {
+private:
+
+ template <typename MaterializerFtor>
+ class LambdaMaterializer final : public ValueMaterializer {
+ public:
+ LambdaMaterializer(MaterializerFtor M) : M(std::move(M)) {}
+ Value *materializeDeclFor(Value *V) final { return M(V); }
+
+ private:
+ MaterializerFtor M;
+ };
+
+ template <typename MaterializerFtor>
+ LambdaMaterializer<MaterializerFtor>
+ createLambdaMaterializer(MaterializerFtor M) {
+ return LambdaMaterializer<MaterializerFtor>(std::move(M));
+ }
+
+ typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
+
+ class ModuleOwner {
+ public:
+ ModuleOwner() = default;
+ ModuleOwner(const ModuleOwner&) = delete;
+ ModuleOwner& operator=(const ModuleOwner&) = delete;
+ virtual ~ModuleOwner() { }
+ virtual Module& getModule() const = 0;
+ };
+
+ template <typename ModulePtrT>
+ class ModuleOwnerImpl : public ModuleOwner {
+ public:
+ ModuleOwnerImpl(ModulePtrT ModulePtr) : ModulePtr(std::move(ModulePtr)) {}
+ Module& getModule() const override { return *ModulePtr; }
+ private:
+ ModulePtrT ModulePtr;
+ };
+
+ template <typename ModulePtrT>
+ std::unique_ptr<ModuleOwner> wrapOwnership(ModulePtrT ModulePtr) {
+ return llvm::make_unique<ModuleOwnerImpl<ModulePtrT>>(std::move(ModulePtr));
+ }
+
+ struct LogicalModuleResources {
+ std::unique_ptr<ModuleOwner> SourceModuleOwner;
+ std::set<const Function*> StubsToClone;
+ std::unique_ptr<IndirectStubsMgrT> StubsMgr;
+
+ LogicalModuleResources() = default;
+
+ // Explicit move constructor to make MSVC happy.
+ LogicalModuleResources(LogicalModuleResources &&Other)
+ : SourceModuleOwner(std::move(Other.SourceModuleOwner)),
+ StubsToClone(std::move(Other.StubsToClone)),
+ StubsMgr(std::move(Other.StubsMgr)) {}
+
+ // Explicit move assignment to make MSVC happy.
+    LogicalModuleResources& operator=(LogicalModuleResources &&Other) {
+      SourceModuleOwner = std::move(Other.SourceModuleOwner);
+      StubsToClone = std::move(Other.StubsToClone);
+      StubsMgr = std::move(Other.StubsMgr);
+      return *this;
+    }
+
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ if (Name.endswith("$stub_ptr") && !ExportedSymbolsOnly) {
+ assert(!ExportedSymbolsOnly && "Stubs are never exported");
+ return StubsMgr->findPointer(Name.drop_back(9));
+ }
+ return StubsMgr->findStub(Name, ExportedSymbolsOnly);
+ }
+
+ };
+
+
+
+ struct LogicalDylibResources {
+ typedef std::function<RuntimeDyld::SymbolInfo(const std::string&)>
+ SymbolResolverFtor;
+ SymbolResolverFtor ExternalSymbolResolver;
+ };
+
+ typedef LogicalDylib<BaseLayerT, LogicalModuleResources,
+ LogicalDylibResources> CODLogicalDylib;
+
+ typedef typename CODLogicalDylib::LogicalModuleHandle LogicalModuleHandle;
+ typedef std::list<CODLogicalDylib> LogicalDylibList;
+
+public:
+
+ /// @brief Handle to a set of loaded modules.
+ typedef typename LogicalDylibList::iterator ModuleSetHandleT;
+
+ /// @brief Module partitioning functor.
+ typedef std::function<std::set<Function*>(Function&)> PartitioningFtor;
+
+ /// @brief Builder for IndirectStubsManagers.
+ typedef std::function<std::unique_ptr<IndirectStubsMgrT>()>
+ IndirectStubsManagerBuilderT;
+
+ /// @brief Construct a compile-on-demand layer instance.
+ CompileOnDemandLayer(BaseLayerT &BaseLayer, PartitioningFtor Partition,
+ CompileCallbackMgrT &CallbackMgr,
+ IndirectStubsManagerBuilderT CreateIndirectStubsManager,
+ bool CloneStubsIntoPartitions = true)
+ : BaseLayer(BaseLayer), Partition(Partition),
+ CompileCallbackMgr(CallbackMgr),
+ CreateIndirectStubsManager(std::move(CreateIndirectStubsManager)),
+ CloneStubsIntoPartitions(CloneStubsIntoPartitions) {}
+
+ /// @brief Add a module to the compile-on-demand layer.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+
+ assert(MemMgr == nullptr &&
+ "User supplied memory managers not supported with COD yet.");
+
+ LogicalDylibs.push_back(CODLogicalDylib(BaseLayer));
+ auto &LDResources = LogicalDylibs.back().getDylibResources();
+
+ LDResources.ExternalSymbolResolver =
+ [Resolver](const std::string &Name) {
+ return Resolver->findSymbol(Name);
+ };
+
+ // Process each of the modules in this module set.
+ for (auto &M : Ms)
+ addLogicalModule(LogicalDylibs.back(), std::move(M));
+
+ return std::prev(LogicalDylibs.end());
+ }
+
+  /// @brief Remove the module set represented by the given handle.
+  ///
+  /// This will remove all modules in the layers below that were derived from
+  /// the module set represented by H.
+ void removeModuleSet(ModuleSetHandleT H) {
+ LogicalDylibs.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ for (auto LDI = LogicalDylibs.begin(), LDE = LogicalDylibs.end();
+ LDI != LDE; ++LDI)
+ if (auto Symbol = findSymbolIn(LDI, Name, ExportedSymbolsOnly))
+ return Symbol;
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of a symbol provided by this layer, or some layer
+ /// below this one.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return H->findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+private:
+
+ template <typename ModulePtrT>
+ void addLogicalModule(CODLogicalDylib &LD, ModulePtrT SrcMPtr) {
+
+    // Bump the linkage and rename any anonymous/private members in SrcM to
+ // ensure that everything will resolve properly after we partition SrcM.
+ makeAllSymbolsExternallyAccessible(*SrcMPtr);
+
+ // Create a logical module handle for SrcM within the logical dylib.
+ auto LMH = LD.createLogicalModule();
+ auto &LMResources = LD.getLogicalModuleResources(LMH);
+
+ LMResources.SourceModuleOwner = wrapOwnership(std::move(SrcMPtr));
+
+ Module &SrcM = LMResources.SourceModuleOwner->getModule();
+
+ // Create the GlobalValues module.
+ const DataLayout &DL = SrcM.getDataLayout();
+ auto GVsM = llvm::make_unique<Module>((SrcM.getName() + ".globals").str(),
+ SrcM.getContext());
+ GVsM->setDataLayout(DL);
+
+ // Create function stubs.
+ ValueToValueMapTy VMap;
+ {
+ typename IndirectStubsMgrT::StubInitsMap StubInits;
+ for (auto &F : SrcM) {
+ // Skip declarations.
+ if (F.isDeclaration())
+ continue;
+
+ // Record all functions defined by this module.
+ if (CloneStubsIntoPartitions)
+ LMResources.StubsToClone.insert(&F);
+
+ // Create a callback, associate it with the stub for the function,
+ // and set the compile action to compile the partition containing the
+ // function.
+ auto CCInfo = CompileCallbackMgr.getCompileCallback();
+ StubInits[mangle(F.getName(), DL)] =
+ std::make_pair(CCInfo.getAddress(),
+ JITSymbolBase::flagsFromGlobalValue(F));
+ CCInfo.setCompileAction([this, &LD, LMH, &F]() {
+ return this->extractAndCompile(LD, LMH, F);
+ });
+ }
+
+ LMResources.StubsMgr = CreateIndirectStubsManager();
+ auto EC = LMResources.StubsMgr->createStubs(StubInits);
+ (void)EC;
+ // FIXME: This should be propagated back to the user. Stub creation may
+ // fail for remote JITs.
+ assert(!EC && "Error generating stubs");
+ }
+
+ // Clone global variable decls.
+ for (auto &GV : SrcM.globals())
+ if (!GV.isDeclaration() && !VMap.count(&GV))
+ cloneGlobalVariableDecl(*GVsM, GV, &VMap);
+
+ // And the aliases.
+ for (auto &A : SrcM.aliases())
+ if (!VMap.count(&A))
+ cloneGlobalAliasDecl(*GVsM, A, VMap);
+
+ // Now we need to clone the GV and alias initializers.
+
+ // Initializers may refer to functions declared (but not defined) in this
+ // module. Build a materializer to clone decls on demand.
+ auto Materializer = createLambdaMaterializer(
+ [this, &GVsM, &LMResources](Value *V) -> Value* {
+ if (auto *F = dyn_cast<Function>(V)) {
+ // Decls in the original module just get cloned.
+ if (F->isDeclaration())
+ return cloneFunctionDecl(*GVsM, *F);
+
+ // Definitions in the original module (which we have emitted stubs
+ // for at this point) get turned into a constant alias to the stub
+ // instead.
+ const DataLayout &DL = GVsM->getDataLayout();
+ std::string FName = mangle(F->getName(), DL);
+ auto StubSym = LMResources.StubsMgr->findStub(FName, false);
+ unsigned PtrBitWidth = DL.getPointerTypeSizeInBits(F->getType());
+ ConstantInt *StubAddr =
+ ConstantInt::get(GVsM->getContext(),
+ APInt(PtrBitWidth, StubSym.getAddress()));
+ Constant *Init = ConstantExpr::getCast(Instruction::IntToPtr,
+ StubAddr, F->getType());
+ return GlobalAlias::create(F->getFunctionType(),
+ F->getType()->getAddressSpace(),
+ F->getLinkage(), F->getName(),
+ Init, GVsM.get());
+ }
+ // else....
+ return nullptr;
+ });
+
+ // Clone the global variable initializers.
+ for (auto &GV : SrcM.globals())
+ if (!GV.isDeclaration())
+ moveGlobalVariableInitializer(GV, VMap, &Materializer);
+
+ // Clone the global alias initializers.
+ for (auto &A : SrcM.aliases()) {
+ auto *NewA = cast<GlobalAlias>(VMap[&A]);
+ assert(NewA && "Alias not cloned?");
+ Value *Init = MapValue(A.getAliasee(), VMap, RF_None, nullptr,
+ &Materializer);
+ NewA->setAliasee(cast<Constant>(Init));
+ }
+
+ // Build a resolver for the globals module and add it to the base layer.
+ auto GVsResolver = createLambdaResolver(
+ [&LD, LMH](const std::string &Name) {
+ auto &LMResources = LD.getLogicalModuleResources(LMH);
+ if (auto Sym = LMResources.StubsMgr->findStub(Name, false))
+ return RuntimeDyld::SymbolInfo(Sym.getAddress(), Sym.getFlags());
+ return LD.getDylibResources().ExternalSymbolResolver(Name);
+ },
+ [](const std::string &Name) {
+ return RuntimeDyld::SymbolInfo(nullptr);
+ });
+
+ std::vector<std::unique_ptr<Module>> GVsMSet;
+ GVsMSet.push_back(std::move(GVsM));
+ auto GVsH =
+ BaseLayer.addModuleSet(std::move(GVsMSet),
+ llvm::make_unique<SectionMemoryManager>(),
+ std::move(GVsResolver));
+ LD.addToLogicalModule(LMH, GVsH);
+ }
+
+ static std::string mangle(StringRef Name, const DataLayout &DL) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return MangledName;
+ }
+
+ TargetAddress extractAndCompile(CODLogicalDylib &LD,
+ LogicalModuleHandle LMH,
+ Function &F) {
+ auto &LMResources = LD.getLogicalModuleResources(LMH);
+ Module &SrcM = LMResources.SourceModuleOwner->getModule();
+
+ // If F is a declaration we must already have compiled it.
+ if (F.isDeclaration())
+ return 0;
+
+ // Grab the name of the function being called here.
+ std::string CalledFnName = mangle(F.getName(), SrcM.getDataLayout());
+
+ auto Part = Partition(F);
+ auto PartH = emitPartition(LD, LMH, Part);
+
+ TargetAddress CalledAddr = 0;
+ for (auto *SubF : Part) {
+ std::string FnName = mangle(SubF->getName(), SrcM.getDataLayout());
+ auto FnBodySym = BaseLayer.findSymbolIn(PartH, FnName, false);
+ assert(FnBodySym && "Couldn't find function body.");
+
+ TargetAddress FnBodyAddr = FnBodySym.getAddress();
+
+ // If this is the function we're calling record the address so we can
+ // return it from this function.
+ if (SubF == &F)
+ CalledAddr = FnBodyAddr;
+
+ // Update the function body pointer for the stub.
+ if (auto EC = LMResources.StubsMgr->updatePointer(FnName, FnBodyAddr))
+ return 0;
+ }
+
+ return CalledAddr;
+ }
+
+ template <typename PartitionT>
+ BaseLayerModuleSetHandleT emitPartition(CODLogicalDylib &LD,
+ LogicalModuleHandle LMH,
+ const PartitionT &Part) {
+ auto &LMResources = LD.getLogicalModuleResources(LMH);
+ Module &SrcM = LMResources.SourceModuleOwner->getModule();
+
+ // Create the module.
+ std::string NewName = SrcM.getName();
+ for (auto *F : Part) {
+ NewName += ".";
+ NewName += F->getName();
+ }
+
+ auto M = llvm::make_unique<Module>(NewName, SrcM.getContext());
+ M->setDataLayout(SrcM.getDataLayout());
+ ValueToValueMapTy VMap;
+
+ auto Materializer = createLambdaMaterializer([this, &LMResources, &M,
+ &VMap](Value *V) -> Value * {
+ if (auto *GV = dyn_cast<GlobalVariable>(V))
+ return cloneGlobalVariableDecl(*M, *GV);
+
+ if (auto *F = dyn_cast<Function>(V)) {
+ // Check whether we want to clone an available_externally definition.
+ if (!LMResources.StubsToClone.count(F))
+ return cloneFunctionDecl(*M, *F);
+
+ // Ok - we want an inlinable stub. For that to work we need a decl
+ // for the stub pointer.
+ auto *StubPtr = createImplPointer(*F->getType(), *M,
+ F->getName() + "$stub_ptr", nullptr);
+ auto *ClonedF = cloneFunctionDecl(*M, *F);
+ makeStub(*ClonedF, *StubPtr);
+ ClonedF->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ ClonedF->addFnAttr(Attribute::AlwaysInline);
+ return ClonedF;
+ }
+
+ if (auto *A = dyn_cast<GlobalAlias>(V)) {
+ auto *Ty = A->getValueType();
+ if (Ty->isFunctionTy())
+ return Function::Create(cast<FunctionType>(Ty),
+ GlobalValue::ExternalLinkage, A->getName(),
+ M.get());
+
+ return new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
+ nullptr, A->getName(), nullptr,
+ GlobalValue::NotThreadLocal,
+ A->getType()->getAddressSpace());
+ }
+
+ return nullptr;
+ });
+
+ // Create decls in the new module.
+ for (auto *F : Part)
+ cloneFunctionDecl(*M, *F, &VMap);
+
+ // Move the function bodies.
+ for (auto *F : Part)
+ moveFunctionBody(*F, VMap, &Materializer);
+
+ // Create memory manager and symbol resolver.
+ auto MemMgr = llvm::make_unique<SectionMemoryManager>();
+ auto Resolver = createLambdaResolver(
+ [this, &LD, LMH](const std::string &Name) {
+ if (auto Symbol = LD.findSymbolInternally(LMH, Name))
+ return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
+ Symbol.getFlags());
+ return LD.getDylibResources().ExternalSymbolResolver(Name);
+ },
+ [this, &LD, LMH](const std::string &Name) {
+ if (auto Symbol = LD.findSymbolInternally(LMH, Name))
+ return RuntimeDyld::SymbolInfo(Symbol.getAddress(),
+ Symbol.getFlags());
+ return RuntimeDyld::SymbolInfo(nullptr);
+ });
+ std::vector<std::unique_ptr<Module>> PartMSet;
+ PartMSet.push_back(std::move(M));
+ return BaseLayer.addModuleSet(std::move(PartMSet), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ BaseLayerT &BaseLayer;
+ PartitioningFtor Partition;
+ CompileCallbackMgrT &CompileCallbackMgr;
+ IndirectStubsManagerBuilderT CreateIndirectStubsManager;
+
+ LogicalDylibList LogicalDylibs;
+ bool CloneStubsIntoPartitions;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEONDEMANDLAYER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
new file mode 100644
index 0000000..1e7d211
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/CompileUtils.h
@@ -0,0 +1,61 @@
+//===-- CompileUtils.h - Utilities for compiling IR in the JIT --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for compiling IR to object files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
+
+#include "llvm/ExecutionEngine/ObjectMemoryBuffer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief Simple compile functor: Takes a single IR module and returns an
+/// ObjectFile.
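+///
+/// Example (a sketch; TM is a TargetMachine configured by the client, M an
+/// llvm::Module):
+/// \code
+///   SimpleCompiler Compile(TM);
+///   object::OwningBinary<object::ObjectFile> Obj = Compile(M);
+/// \endcode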
+class SimpleCompiler {
+public:
+ /// @brief Construct a simple compile functor with the given target.
+ SimpleCompiler(TargetMachine &TM) : TM(TM) {}
+
+ /// @brief Compile a Module to an ObjectFile.
+ object::OwningBinary<object::ObjectFile> operator()(Module &M) const {
+ SmallVector<char, 0> ObjBufferSV;
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ llvm_unreachable("Target does not support MC emission.");
+ PM.run(M);
+ std::unique_ptr<MemoryBuffer> ObjBuffer(
+ new ObjectMemoryBuffer(std::move(ObjBufferSV)));
+ ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+ // TODO: Actually report errors helpfully.
+ typedef object::OwningBinary<object::ObjectFile> OwningObj;
+ if (Obj)
+ return OwningObj(std::move(*Obj), std::move(ObjBuffer));
+ return OwningObj(nullptr, nullptr);
+ }
+
+private:
+ TargetMachine &TM;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_COMPILEUTILS_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
new file mode 100644
index 0000000..c10508c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ExecutionUtils.h
@@ -0,0 +1,182 @@
+//===-- ExecutionUtils.h - Utilities for executing code in Orc --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for executing code in Orc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
+
+#include "JITSymbol.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <vector>
+
+namespace llvm {
+
+class ConstantArray;
+class GlobalVariable;
+class Function;
+class Module;
+class Value;
+
+namespace orc {
+
+/// @brief This iterator provides a convenient way to iterate over the elements
+/// of an llvm.global_ctors/llvm.global_dtors instance.
+///
+/// The easiest way to get hold of instances of this class is to use the
+/// getConstructors/getDestructors functions.
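+///
+/// For example (a sketch), printing each constructor of a module M:
+/// \code
+///   for (const auto &E : getConstructors(M))
+///     errs() << E.Func->getName() << " (priority " << E.Priority << ")\n";
+/// \endcode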
+class CtorDtorIterator {
+public:
+
+ /// @brief Accessor for an element of the global_ctors/global_dtors array.
+ ///
+ /// This class provides a read-only view of the element with any casts on
+ /// the function stripped away.
+ struct Element {
+ Element(unsigned Priority, const Function *Func, const Value *Data)
+ : Priority(Priority), Func(Func), Data(Data) {}
+
+ unsigned Priority;
+ const Function *Func;
+ const Value *Data;
+ };
+
+ /// @brief Construct an iterator instance. If End is true then this iterator
+ /// acts as the end of the range, otherwise it is the beginning.
+ CtorDtorIterator(const GlobalVariable *GV, bool End);
+
+ /// @brief Test iterators for equality.
+ bool operator==(const CtorDtorIterator &Other) const;
+
+ /// @brief Test iterators for inequality.
+ bool operator!=(const CtorDtorIterator &Other) const;
+
+ /// @brief Pre-increment iterator.
+ CtorDtorIterator& operator++();
+
+ /// @brief Post-increment iterator.
+ CtorDtorIterator operator++(int);
+
+ /// @brief Dereference iterator. The resulting value provides a read-only view
+ /// of this element of the global_ctors/global_dtors list.
+ Element operator*() const;
+
+private:
+ const ConstantArray *InitList;
+ unsigned I;
+};
+
+/// @brief Create an iterator range over the entries of the llvm.global_ctors
+/// array.
+iterator_range<CtorDtorIterator> getConstructors(const Module &M);
+
+/// @brief Create an iterator range over the entries of the llvm.global_dtors
+/// array.
+iterator_range<CtorDtorIterator> getDestructors(const Module &M);
+
+/// @brief Convenience class for recording constructor/destructor names for
+/// later execution.
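+///
+/// Typical use (a sketch; Mangle is the client's name mangler, H the handle
+/// returned by addModuleSet for module M, and LayerT the JIT layer's type):
+/// \code
+///   std::vector<std::string> CtorNames;
+///   for (const auto &Ctor : getConstructors(M))
+///     CtorNames.push_back(Mangle(Ctor.Func->getName()));
+///   CtorDtorRunner<LayerT> CtorRunner(std::move(CtorNames), H);
+///   CtorRunner.runViaLayer(JITLayer);
+/// \endcode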
+template <typename JITLayerT>
+class CtorDtorRunner {
+public:
+
+  /// @brief Construct a CtorDtorRunner for the given set of ctor/dtor names
+  ///        and the module set handle they were loaded into.
+ CtorDtorRunner(std::vector<std::string> CtorDtorNames,
+ typename JITLayerT::ModuleSetHandleT H)
+ : CtorDtorNames(std::move(CtorDtorNames)), H(H) {}
+
+ /// @brief Run the recorded constructors/destructors through the given JIT
+ /// layer.
+ bool runViaLayer(JITLayerT &JITLayer) const {
+ typedef void (*CtorDtorTy)();
+
+ bool Error = false;
+ for (const auto &CtorDtorName : CtorDtorNames)
+ if (auto CtorDtorSym = JITLayer.findSymbolIn(H, CtorDtorName, false)) {
+ CtorDtorTy CtorDtor =
+ reinterpret_cast<CtorDtorTy>(
+ static_cast<uintptr_t>(CtorDtorSym.getAddress()));
+ CtorDtor();
+ } else
+ Error = true;
+ return !Error;
+ }
+
+private:
+ std::vector<std::string> CtorDtorNames;
+ typename JITLayerT::ModuleSetHandleT H;
+};
+
+/// @brief Support class for static dtor execution. For hosted (in-process) JITs
+/// only!
+///
+/// If a __cxa_atexit function isn't found C++ programs that use static
+/// destructors will fail to link. However, we don't want to use the host
+/// process's __cxa_atexit, because it will schedule JIT'd destructors to run
+/// after the JIT has been torn down, which is no good. This class makes it easy
+/// to override __cxa_atexit (and the related __dso_handle).
+///
+/// To use, clients should manually call searchOverrides from their symbol
+/// resolver. This should generally be done after attempting symbol resolution
+/// inside the JIT, but before searching the host process's symbol table. When
+/// the client determines that destructors should be run (generally at JIT
+/// teardown or after a return from main), the runDestructors method should be
+/// called.
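+///
+/// Sketch (Mangle is the client's name mangler; Name a symbol currently being
+/// resolved):
+/// \code
+///   LocalCXXRuntimeOverrides CXXRuntimeOverrides(Mangle);
+///   // In the symbol resolver, after JIT lookup, before the host process:
+///   auto Sym = CXXRuntimeOverrides.searchOverrides(Name);
+///   if (Sym.getAddress())
+///     return Sym;
+///   // ... at JIT teardown:
+///   CXXRuntimeOverrides.runDestructors();
+/// \endcode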
+class LocalCXXRuntimeOverrides {
+public:
+
+ /// Create a runtime-overrides class.
+ template <typename MangleFtorT>
+ LocalCXXRuntimeOverrides(const MangleFtorT &Mangle) {
+ addOverride(Mangle("__dso_handle"), toTargetAddress(&DSOHandleOverride));
+ addOverride(Mangle("__cxa_atexit"), toTargetAddress(&CXAAtExitOverride));
+ }
+
+  /// Search the overridden symbols.
+ RuntimeDyld::SymbolInfo searchOverrides(const std::string &Name) {
+ auto I = CXXRuntimeOverrides.find(Name);
+ if (I != CXXRuntimeOverrides.end())
+ return RuntimeDyld::SymbolInfo(I->second, JITSymbolFlags::Exported);
+ return nullptr;
+ }
+
+  /// Run any destructors recorded by the overridden __cxa_atexit function
+ /// (CXAAtExitOverride).
+ void runDestructors();
+
+private:
+
+ template <typename PtrTy>
+ TargetAddress toTargetAddress(PtrTy* P) {
+ return static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(P));
+ }
+
+ void addOverride(const std::string &Name, TargetAddress Addr) {
+ CXXRuntimeOverrides.insert(std::make_pair(Name, Addr));
+ }
+
+ StringMap<TargetAddress> CXXRuntimeOverrides;
+
+ typedef void (*DestructorPtr)(void*);
+ typedef std::pair<DestructorPtr, void*> CXXDestructorDataPair;
+ typedef std::vector<CXXDestructorDataPair> CXXDestructorDataPairList;
+ CXXDestructorDataPairList DSOHandleOverride;
+ static int CXAAtExitOverride(DestructorPtr Destructor, void *Arg,
+ void *DSOHandle);
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_EXECUTIONUTILS_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
new file mode 100644
index 0000000..9fa222c
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/GlobalMappingLayer.h
@@ -0,0 +1,108 @@
+//===---- GlobalMappingLayer.h - Run all IR through a functor ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Convenience layer for injecting symbols that will appear in calls to
+// findSymbol.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
+
+#include "JITSymbol.h"
+#include <map>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Global mapping layer.
+///
+/// This layer overrides the findSymbol method to first search a local symbol
+/// table that the client can define. It can be used to inject new symbol
+/// mappings into the JIT. Beware, however: symbols within a single IR module or
+/// object file will still resolve locally (via RuntimeDyld's symbol table) -
+/// such internal references cannot be overridden via this layer.
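+///
+/// For example (a sketch; MyAllocHook is a host-process function the client
+/// wants to expose to JIT'd code):
+/// \code
+///   GlobalMappingLayer<decltype(CompileLayer)> MapLayer(CompileLayer);
+///   MapLayer.setGlobalMapping(
+///     "malloc_hook",
+///     static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(&MyAllocHook)));
+/// \endcode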
+template <typename BaseLayerT>
+class GlobalMappingLayer {
+public:
+ /// @brief Handle to a set of added modules.
+ typedef typename BaseLayerT::ModuleSetHandleT ModuleSetHandleT;
+
+  /// @brief Construct a GlobalMappingLayer with the given BaseLayer.
+ GlobalMappingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ /// @brief Add the given module set to the JIT.
+ /// @return A handle for the added modules.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
+
+ /// @brief Manually set the address to return for the given symbol.
+ void setGlobalMapping(const std::string &Name, TargetAddress Addr) {
+ SymbolTable[Name] = Addr;
+ }
+
+ /// @brief Remove the given symbol from the global mapping.
+ void eraseGlobalMapping(const std::string &Name) {
+ SymbolTable.erase(Name);
+ }
+
+ /// @brief Search for the given named symbol.
+ ///
+ /// This method will first search the local symbol table, returning
+ /// any symbol found there. If the symbol is not found in the local
+ /// table then this call will be passed through to the base layer.
+ ///
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ auto I = SymbolTable.find(Name);
+ if (I != SymbolTable.end())
+ return JITSymbol(I->second, JITSymbolFlags::Exported);
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// modules represented by the handle H. This call is forwarded to the
+ /// base layer's implementation.
+ /// @param H The handle for the module set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module set.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ BaseLayer.emitAndFinalize(H);
+ }
+
+private:
+ BaseLayerT &BaseLayer;
+ std::map<std::string, TargetAddress> SymbolTable;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_GLOBALMAPPINGLAYER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
new file mode 100644
index 0000000..e4bed95
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRCompileLayer.h
@@ -0,0 +1,146 @@
+//===------ IRCompileLayer.h -- Eagerly compile IR for JIT ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a basic, eagerly compiling layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Object/ObjectFile.h"
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Eager IR compiling layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
+/// immediately compiles each IR module to an object file (each IR Module is
+/// compiled separately). The resulting set of object files is then added to
+/// the layer below, which must implement the object layer concept.
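+///
+/// Construction sketch (ObjLayer models the object layer concept, TM is a
+/// client-owned TargetMachine; SimpleCompiler is from CompileUtils.h):
+/// \code
+///   IRCompileLayer<decltype(ObjLayer)> CompileLayer(ObjLayer,
+///                                                   SimpleCompiler(TM));
+/// \endcode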
+template <typename BaseLayerT> class IRCompileLayer {
+public:
+ typedef std::function<object::OwningBinary<object::ObjectFile>(Module &)>
+ CompileFtor;
+
+private:
+ typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
+
+ typedef std::vector<std::unique_ptr<object::ObjectFile>> OwningObjectVec;
+ typedef std::vector<std::unique_ptr<MemoryBuffer>> OwningBufferVec;
+
+public:
+ /// @brief Handle to a set of compiled modules.
+ typedef ObjSetHandleT ModuleSetHandleT;
+
+ /// @brief Construct an IRCompileLayer with the given BaseLayer, which must
+ /// implement the ObjectLayer concept.
+ IRCompileLayer(BaseLayerT &BaseLayer, CompileFtor Compile)
+ : BaseLayer(BaseLayer), Compile(std::move(Compile)), ObjCache(nullptr) {}
+
+ /// @brief Set an ObjectCache to query before compiling.
+ void setObjectCache(ObjectCache *NewCache) { ObjCache = NewCache; }
+
+ /// @brief Compile each module in the given module set, then add the resulting
+ /// set of objects to the base layer along with the memory manager and
+ /// symbol resolver.
+ ///
+ /// @return A handle for the added modules.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ OwningObjectVec Objects;
+ OwningBufferVec Buffers;
+
+ for (const auto &M : Ms) {
+ std::unique_ptr<object::ObjectFile> Object;
+ std::unique_ptr<MemoryBuffer> Buffer;
+
+ if (ObjCache)
+ std::tie(Object, Buffer) = tryToLoadFromObjectCache(*M).takeBinary();
+
+ if (!Object) {
+ std::tie(Object, Buffer) = Compile(*M).takeBinary();
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&*M, Buffer->getMemBufferRef());
+ }
+
+ Objects.push_back(std::move(Object));
+ Buffers.push_back(std::move(Buffer));
+ }
+
+ ModuleSetHandleT H =
+ BaseLayer.addObjectSet(Objects, std::move(MemMgr), std::move(Resolver));
+
+ return H;
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeObjectSet(H); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// compiled modules represented by the handle H. This call is
+ /// forwarded to the base layer's implementation.
+ /// @param H The handle for the module set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module set.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+  /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ BaseLayer.emitAndFinalize(H);
+ }
+
+private:
+ object::OwningBinary<object::ObjectFile>
+ tryToLoadFromObjectCache(const Module &M) {
+ std::unique_ptr<MemoryBuffer> ObjBuffer = ObjCache->getObject(&M);
+ if (!ObjBuffer)
+ return object::OwningBinary<object::ObjectFile>();
+
+ ErrorOr<std::unique_ptr<object::ObjectFile>> Obj =
+ object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+ if (!Obj)
+ return object::OwningBinary<object::ObjectFile>();
+
+ return object::OwningBinary<object::ObjectFile>(std::move(*Obj),
+ std::move(ObjBuffer));
+ }
+
+ BaseLayerT &BaseLayer;
+ CompileFtor Compile;
+ ObjectCache *ObjCache;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRCOMPILELAYER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
new file mode 100644
index 0000000..4dabb9a
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IRTransformLayer.h
@@ -0,0 +1,101 @@
+//===----- IRTransformLayer.h - Run all IR through a functor ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all IR passed in through a user supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
+
+#include "JITSymbol.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief IR mutating layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet). It
+/// immediately applies the user supplied functor to each module, then adds
+/// the set of transformed modules to the layer below.
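+///
+/// For example (a sketch), an identity transform that could later be replaced
+/// with an optimization pipeline:
+/// \code
+///   auto Identity = [](std::unique_ptr<Module> M) { return M; };
+///   IRTransformLayer<decltype(CompileLayer), decltype(Identity)>
+///     TransformLayer(CompileLayer, Identity);
+/// \endcode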
+template <typename BaseLayerT, typename TransformFtor>
+class IRTransformLayer {
+public:
+ /// @brief Handle to a set of added modules.
+ typedef typename BaseLayerT::ModuleSetHandleT ModuleSetHandleT;
+
+  /// @brief Construct an IRTransformLayer with the given BaseLayer.
+ IRTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
+ : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+ /// @brief Apply the transform functor to each module in the module set, then
+ /// add the resulting set of modules to the base layer, along with the
+ /// memory manager and symbol resolver.
+ ///
+ /// @return A handle for the added modules.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+
+ for (auto I = Ms.begin(), E = Ms.end(); I != E; ++I)
+ *I = Transform(std::move(*I));
+
+ return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ /// @brief Remove the module set associated with the handle H.
+ void removeModuleSet(ModuleSetHandleT H) { BaseLayer.removeModuleSet(H); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// modules represented by the handle H. This call is forwarded to the
+ /// base layer's implementation.
+ /// @param H The handle for the module set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given module set.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ BaseLayer.emitAndFinalize(H);
+ }
+
+ /// @brief Access the transform functor directly.
+ TransformFtor& getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const overload).
+ const TransformFtor& getTransform() const { return Transform; }
+
+private:
+ BaseLayerT &BaseLayer;
+ TransformFtor Transform;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_IRTRANSFORMLAYER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
new file mode 100644
index 0000000..d6ee3a8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/IndirectionUtils.h
@@ -0,0 +1,413 @@
+//===-- IndirectionUtils.h - Utilities for adding indirections --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains utilities for adding indirections and breaking up modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+#define LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
+
+#include "JITSymbol.h"
+#include "LambdaResolver.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <sstream>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Target-independent base class for compile callback management.
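+///
+/// Typical client flow (a sketch; the compile action body is illustrative and
+/// CompiledBodyAddr is assumed to be produced by the client's compiler):
+/// \code
+///   auto CCInfo = CCMgr.getCompileCallback();
+///   CCInfo.setCompileAction([&]() -> TargetAddress {
+///     // Compile the function body here and return its address; returning 0
+///     // routes execution to the manager's error handler address.
+///     return CompiledBodyAddr;
+///   });
+///   // Point a stub at CCInfo.getAddress(); calling it triggers the action.
+/// \endcode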
+class JITCompileCallbackManager {
+public:
+
+ typedef std::function<TargetAddress()> CompileFtor;
+
+ /// @brief Handle to a newly created compile callback. Can be used to get an
+ /// IR constant representing the address of the trampoline, and to set
+ /// the compile action for the callback.
+ class CompileCallbackInfo {
+ public:
+ CompileCallbackInfo(TargetAddress Addr, CompileFtor &Compile)
+ : Addr(Addr), Compile(Compile) {}
+
+ TargetAddress getAddress() const { return Addr; }
+ void setCompileAction(CompileFtor Compile) {
+ this->Compile = std::move(Compile);
+ }
+ private:
+ TargetAddress Addr;
+ CompileFtor &Compile;
+ };
+
+ /// @brief Construct a JITCompileCallbackManager.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ JITCompileCallbackManager(TargetAddress ErrorHandlerAddress)
+ : ErrorHandlerAddress(ErrorHandlerAddress) {}
+
+ virtual ~JITCompileCallbackManager() {}
+
+ /// @brief Execute the callback for the given trampoline id. Called by the JIT
+ /// to compile functions on demand.
+ TargetAddress executeCompileCallback(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ // FIXME: Also raise an error in the Orc error-handler when we finally have
+ // one.
+ if (I == ActiveTrampolines.end())
+ return ErrorHandlerAddress;
+
+    // Found a callback handler. Yank this trampoline out of the active list
+    // and put it back in the available trampolines list, then try to run the
+    // handler's compile and update actions.
+    // Moving the trampoline ID back to the available list first means there's
+    // at least one available trampoline if the compile action triggers a
+    // request for a new one.
+ auto Compile = std::move(I->second);
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+
+ if (auto Addr = Compile())
+ return Addr;
+
+ return ErrorHandlerAddress;
+ }
+
+ /// @brief Reserve a compile callback.
+ CompileCallbackInfo getCompileCallback() {
+ TargetAddress TrampolineAddr = getAvailableTrampolineAddr();
+ auto &Compile = this->ActiveTrampolines[TrampolineAddr];
+ return CompileCallbackInfo(TrampolineAddr, Compile);
+ }
+
+ /// @brief Get a CompileCallbackInfo for an existing callback.
+ CompileCallbackInfo getCompileCallbackInfo(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ return CompileCallbackInfo(I->first, I->second);
+ }
+
+ /// @brief Release a compile callback.
+ ///
+ /// Note: Callbacks are auto-released after they execute. This method should
+ /// only be called to manually release a callback that is not going to
+ /// execute.
+ void releaseCompileCallback(TargetAddress TrampolineAddr) {
+ auto I = ActiveTrampolines.find(TrampolineAddr);
+ assert(I != ActiveTrampolines.end() && "Not an active trampoline.");
+ ActiveTrampolines.erase(I);
+ AvailableTrampolines.push_back(TrampolineAddr);
+ }
+
+protected:
+ TargetAddress ErrorHandlerAddress;
+
+ typedef std::map<TargetAddress, CompileFtor> TrampolineMapT;
+ TrampolineMapT ActiveTrampolines;
+ std::vector<TargetAddress> AvailableTrampolines;
+
+private:
+
+ TargetAddress getAvailableTrampolineAddr() {
+ if (this->AvailableTrampolines.empty())
+ grow();
+ assert(!this->AvailableTrampolines.empty() &&
+ "Failed to grow available trampolines.");
+ TargetAddress TrampolineAddr = this->AvailableTrampolines.back();
+ this->AvailableTrampolines.pop_back();
+ return TrampolineAddr;
+ }
+
+ // Create new trampolines - to be implemented in subclasses.
+ virtual void grow() = 0;
+
+ virtual void anchor();
+};
+
+/// @brief Manage compile callbacks for in-process JITs.
+template <typename TargetT>
+class LocalJITCompileCallbackManager : public JITCompileCallbackManager {
+public:
+
+  /// @brief Construct a LocalJITCompileCallbackManager.
+ /// @param ErrorHandlerAddress The address of an error handler in the target
+ /// process to be used if a compile callback fails.
+ LocalJITCompileCallbackManager(TargetAddress ErrorHandlerAddress)
+ : JITCompileCallbackManager(ErrorHandlerAddress) {
+
+ /// Set up the resolver block.
+ std::error_code EC;
+ ResolverBlock =
+ sys::OwningMemoryBlock(
+ sys::Memory::allocateMappedMemory(TargetT::ResolverCodeSize, nullptr,
+ sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE, EC));
+ assert(!EC && "Failed to allocate resolver block");
+
+ TargetT::writeResolverCode(static_cast<uint8_t *>(ResolverBlock.base()),
+ &reenter, this);
+
+ EC = sys::Memory::protectMappedMemory(ResolverBlock.getMemoryBlock(),
+ sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ assert(!EC && "Failed to mprotect resolver block");
+ }
+
+private:
+
+ static TargetAddress reenter(void *CCMgr, void *TrampolineId) {
+ JITCompileCallbackManager *Mgr =
+ static_cast<JITCompileCallbackManager*>(CCMgr);
+ return Mgr->executeCompileCallback(
+ static_cast<TargetAddress>(
+ reinterpret_cast<uintptr_t>(TrampolineId)));
+ }
+
+ void grow() override {
+ assert(this->AvailableTrampolines.empty() && "Growing prematurely?");
+
+ std::error_code EC;
+ auto TrampolineBlock =
+ sys::OwningMemoryBlock(
+ sys::Memory::allocateMappedMemory(TargetT::PageSize, nullptr,
+ sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE, EC));
+ assert(!EC && "Failed to allocate trampoline block");
+
+
+ unsigned NumTrampolines =
+ (TargetT::PageSize - TargetT::PointerSize) / TargetT::TrampolineSize;
+
+ uint8_t *TrampolineMem = static_cast<uint8_t*>(TrampolineBlock.base());
+ TargetT::writeTrampolines(TrampolineMem, ResolverBlock.base(),
+ NumTrampolines);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I)
+ this->AvailableTrampolines.push_back(
+ static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(
+ TrampolineMem + (I * TargetT::TrampolineSize))));
+
+ EC = sys::Memory::protectMappedMemory(TrampolineBlock.getMemoryBlock(),
+ sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ assert(!EC && "Failed to mprotect trampoline block");
+
+ TrampolineBlocks.push_back(std::move(TrampolineBlock));
+ }
+
+ sys::OwningMemoryBlock ResolverBlock;
+ std::vector<sys::OwningMemoryBlock> TrampolineBlocks;
+};
+
+/// @brief Base class for managing collections of named indirect stubs.
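+///
+/// Illustrative flow (a sketch; CCAddr is a compile callback address and
+/// FnBodyAddr the address of the later-compiled body):
+/// \code
+///   StubsMgr.createStub("foo", CCAddr, JITSymbolFlags::Exported);
+///   // ... once the real body exists, retarget the stub:
+///   StubsMgr.updatePointer("foo", FnBodyAddr);
+/// \endcode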
+class IndirectStubsManager {
+public:
+
+ /// @brief Map type for initializing the manager. See init.
+ typedef StringMap<std::pair<TargetAddress, JITSymbolFlags>> StubInitsMap;
+
+ virtual ~IndirectStubsManager() {}
+
+ /// @brief Create a single stub with the given name, target address and flags.
+ virtual std::error_code createStub(StringRef StubName, TargetAddress StubAddr,
+ JITSymbolFlags StubFlags) = 0;
+
+ /// @brief Create StubInits.size() stubs with the given names, target
+ /// addresses, and flags.
+ virtual std::error_code createStubs(const StubInitsMap &StubInits) = 0;
+
+ /// @brief Find the stub with the given name. If ExportedStubsOnly is true,
+ /// this will only return a result if the stub's flags indicate that it
+ /// is exported.
+ virtual JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) = 0;
+
+ /// @brief Find the implementation-pointer for the stub.
+ virtual JITSymbol findPointer(StringRef Name) = 0;
+
+ /// @brief Change the value of the implementation pointer for the stub.
+ virtual std::error_code updatePointer(StringRef Name, TargetAddress NewAddr) = 0;
+private:
+ virtual void anchor();
+};
+
+/// @brief IndirectStubsManager implementation for a concrete target, e.g.
+/// OrcX86_64. (See OrcTargetSupport.h).
+template <typename TargetT>
+class LocalIndirectStubsManager : public IndirectStubsManager {
+public:
+
+ std::error_code createStub(StringRef StubName, TargetAddress StubAddr,
+ JITSymbolFlags StubFlags) override {
+ if (auto EC = reserveStubs(1))
+ return EC;
+
+ createStubInternal(StubName, StubAddr, StubFlags);
+
+ return std::error_code();
+ }
+
+ std::error_code createStubs(const StubInitsMap &StubInits) override {
+ if (auto EC = reserveStubs(StubInits.size()))
+ return EC;
+
+ for (auto &Entry : StubInits)
+ createStubInternal(Entry.first(), Entry.second.first,
+ Entry.second.second);
+
+ return std::error_code();
+ }
+
+ JITSymbol findStub(StringRef Name, bool ExportedStubsOnly) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ void *StubAddr = IndirectStubsInfos[Key.first].getStub(Key.second);
+ assert(StubAddr && "Missing stub address");
+ auto StubTargetAddr =
+ static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(StubAddr));
+ auto StubSymbol = JITSymbol(StubTargetAddr, I->second.second);
+ if (ExportedStubsOnly && !StubSymbol.isExported())
+ return nullptr;
+ return StubSymbol;
+ }
+
+ JITSymbol findPointer(StringRef Name) override {
+ auto I = StubIndexes.find(Name);
+ if (I == StubIndexes.end())
+ return nullptr;
+ auto Key = I->second.first;
+ void *PtrAddr = IndirectStubsInfos[Key.first].getPtr(Key.second);
+ assert(PtrAddr && "Missing pointer address");
+ auto PtrTargetAddr =
+ static_cast<TargetAddress>(reinterpret_cast<uintptr_t>(PtrAddr));
+ return JITSymbol(PtrTargetAddr, I->second.second);
+ }
+
+ std::error_code updatePointer(StringRef Name, TargetAddress NewAddr) override {
+ auto I = StubIndexes.find(Name);
+ assert(I != StubIndexes.end() && "No stub pointer for symbol");
+ auto Key = I->second.first;
+ *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+ reinterpret_cast<void*>(static_cast<uintptr_t>(NewAddr));
+ return std::error_code();
+ }
+
+private:
+
+ std::error_code reserveStubs(unsigned NumStubs) {
+ if (NumStubs <= FreeStubs.size())
+ return std::error_code();
+
+ unsigned NewStubsRequired = NumStubs - FreeStubs.size();
+ unsigned NewBlockId = IndirectStubsInfos.size();
+ typename TargetT::IndirectStubsInfo ISI;
+ if (auto EC = TargetT::emitIndirectStubsBlock(ISI, NewStubsRequired,
+ nullptr))
+ return EC;
+ for (unsigned I = 0; I < ISI.getNumStubs(); ++I)
+ FreeStubs.push_back(std::make_pair(NewBlockId, I));
+ IndirectStubsInfos.push_back(std::move(ISI));
+ return std::error_code();
+ }
+
+ void createStubInternal(StringRef StubName, TargetAddress InitAddr,
+ JITSymbolFlags StubFlags) {
+ auto Key = FreeStubs.back();
+ FreeStubs.pop_back();
+ *IndirectStubsInfos[Key.first].getPtr(Key.second) =
+ reinterpret_cast<void*>(static_cast<uintptr_t>(InitAddr));
+ StubIndexes[StubName] = std::make_pair(Key, StubFlags);
+ }
+
+ std::vector<typename TargetT::IndirectStubsInfo> IndirectStubsInfos;
+ typedef std::pair<uint16_t, uint16_t> StubKey;
+ std::vector<StubKey> FreeStubs;
+ StringMap<std::pair<StubKey, JITSymbolFlags>> StubIndexes;
+};
+
+/// @brief Build a function pointer of FunctionType with the given constant
+/// address.
+///
+/// Usage example: Turn a trampoline address into a function pointer constant
+/// for use in a stub.
+Constant* createIRTypedAddress(FunctionType &FT, TargetAddress Addr);
+
+/// @brief Create a function pointer with the given type, name, and initializer
+/// in the given Module.
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer);
+
+/// @brief Turn a function declaration into a stub function that makes an
+/// indirect call using the given function pointer.
+void makeStub(Function &F, Value &ImplPointer);
+
+/// @brief Raise linkage types and rename as necessary to ensure that all
+/// symbols are accessible for other modules.
+///
+/// This should be called before partitioning a module to ensure that the
+/// partitions retain access to each other's symbols.
+void makeAllSymbolsExternallyAccessible(Module &M);
+
+/// @brief Clone a function declaration into a new module.
+///
+/// This function can be used as the first step towards creating a callback
+/// stub (see makeStub), or moving a function body (see moveFunctionBody).
+///
+/// If the VMap argument is non-null, a mapping will be added between F and
+/// the new declaration, and between each of F's arguments and the new
+/// declaration's arguments. This map can then be passed to moveFunctionBody
+/// to move the function body if required. Note: When moving functions between
+/// modules with these utilities, all decls should be cloned (and added to a
+/// single VMap) before any bodies are moved. This will ensure that references
+/// between functions all refer to the versions in the new module.
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move the body of function 'F' to a cloned function declaration in a
+/// different module (See related cloneFunctionDecl).
+///
+/// If the target function declaration is not supplied via the NewF parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the body of function 'F' from its original parent module,
+/// but leave its declaration.
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ Function *NewF = nullptr);
+
+/// @brief Clone a global variable declaration into a new module.
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap = nullptr);
+
+/// @brief Move global variable GV from its parent module to a cloned global
+/// variable declaration in a different module.
+///
+/// If the target global declaration is not supplied via the NewGV parameter
+/// then it will be looked up via the VMap.
+///
+/// This will delete the initializer of GV from its original parent module,
+/// but leave its declaration.
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer = nullptr,
+ GlobalVariable *NewGV = nullptr);
+
+/// @brief Clone a global alias declaration into a new module.
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap);
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_INDIRECTIONUTILS_H
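+
+// Example (a minimal usage sketch, not part of the header): wiring the
+// utilities above together for one function F, given a destination module
+// StubsM and a hypothetical trampoline address TrampolineAddr.
+//
+//   ValueToValueMapTy VMap;
+//   Function *StubF = cloneFunctionDecl(StubsM, F, &VMap);
+//   GlobalVariable *ImplPtr = createImplPointer(
+//       *StubF->getType(), StubsM, StubF->getName() + "$addr",
+//       createIRTypedAddress(*StubF->getFunctionType(), TrampolineAddr));
+//   makeStub(*StubF, *ImplPtr);
+//   // At run time, rewriting the pointer that ImplPtr holds redirects every
+//   // call that goes through the stub.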
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITSymbol.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITSymbol.h
new file mode 100644
index 0000000..422a376
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/JITSymbol.h
@@ -0,0 +1,77 @@
+//===----------- JITSymbol.h - JIT symbol abstraction -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstraction for target process addresses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
+#define LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
+
+#include "llvm/ExecutionEngine/JITSymbolFlags.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <functional>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Represents an address in the target process's address space.
+typedef uint64_t TargetAddress;
+
+/// @brief Represents a symbol in the JIT.
+class JITSymbol : public JITSymbolBase {
+public:
+
+ typedef std::function<TargetAddress()> GetAddressFtor;
+
+ /// @brief Create a 'null' symbol that represents failure to find a symbol
+ /// definition.
+ JITSymbol(std::nullptr_t)
+ : JITSymbolBase(JITSymbolFlags::None), CachedAddr(0) {}
+
+ /// @brief Create a symbol for a definition with a known address.
+ JITSymbol(TargetAddress Addr, JITSymbolFlags Flags)
+ : JITSymbolBase(Flags), CachedAddr(Addr) {}
+
+ /// @brief Create a symbol for a definition that doesn't have a known address
+ /// yet.
+ /// @param GetAddress A functor to materialize a definition (fixing the
+ /// address) on demand.
+ ///
+ /// This constructor allows a JIT layer to provide a reference to a symbol
+ /// definition without actually materializing the definition up front. The
+ /// user can materialize the definition at any time by calling the getAddress
+ /// method.
+ JITSymbol(GetAddressFtor GetAddress, JITSymbolFlags Flags)
+ : JITSymbolBase(Flags), GetAddress(std::move(GetAddress)), CachedAddr(0) {}
+
+ /// @brief Returns true if the symbol exists, false otherwise.
+ explicit operator bool() const { return CachedAddr || GetAddress; }
+
+ /// @brief Get the address of the symbol in the target address space. Returns
+ /// '0' if the symbol does not exist.
+ TargetAddress getAddress() {
+ if (GetAddress) {
+ CachedAddr = GetAddress();
+ assert(CachedAddr && "Symbol could not be materialized.");
+ GetAddress = nullptr;
+ }
+ return CachedAddr;
+ }
+
+private:
+ GetAddressFtor GetAddress;
+ TargetAddress CachedAddr;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_JITSYMBOL_H
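+
+// Example (sketch): the deferred-materialization contract, using a
+// hypothetical address value. The GetAddress functor runs at most once; the
+// result is cached for all later getAddress() calls.
+//
+//   JITSymbol Lazy([]() -> TargetAddress { return 0x1000; },
+//                  JITSymbolFlags::Exported);
+//   assert(Lazy && "A symbol with a materializer tests true pre-emission");
+//   TargetAddress Addr = Lazy.getAddress(); // Invokes the functor once.
+//   assert(Addr == Lazy.getAddress());      // Cached thereafter.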
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LambdaResolver.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
new file mode 100644
index 0000000..faa2365
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LambdaResolver.h
@@ -0,0 +1,62 @@
+//===-- LambdaResolverMM - Redirect symbol lookup via a functor -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a RuntimeDyld::SymbolResolver subclass that uses a user-supplied
+// functor for symbol resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+template <typename ExternalLookupFtorT, typename DylibLookupFtorT>
+class LambdaResolver : public RuntimeDyld::SymbolResolver {
+public:
+
+ LambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
+ DylibLookupFtorT DylibLookupFtor)
+ : ExternalLookupFtor(ExternalLookupFtor),
+ DylibLookupFtor(DylibLookupFtor) {}
+
+ RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final {
+ return ExternalLookupFtor(Name);
+ }
+
+ RuntimeDyld::SymbolInfo
+ findSymbolInLogicalDylib(const std::string &Name) final {
+ return DylibLookupFtor(Name);
+ }
+
+private:
+ ExternalLookupFtorT ExternalLookupFtor;
+ DylibLookupFtorT DylibLookupFtor;
+};
+
+template <typename ExternalLookupFtorT,
+ typename DylibLookupFtorT>
+std::unique_ptr<LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT>>
+createLambdaResolver(ExternalLookupFtorT ExternalLookupFtor,
+ DylibLookupFtorT DylibLookupFtor) {
+ typedef LambdaResolver<ExternalLookupFtorT, DylibLookupFtorT> LR;
+ return make_unique<LR>(std::move(ExternalLookupFtor),
+ std::move(DylibLookupFtor));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAMBDARESOLVER_H
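+
+// Example (sketch): typical instantiation via createLambdaResolver, which
+// deduces the functor types. These placeholder lambdas fail every lookup by
+// returning a null SymbolInfo; real clients substitute their lookup logic.
+//
+//   auto Resolver = createLambdaResolver(
+//       [](const std::string &Name) {            // External symbols.
+//         return RuntimeDyld::SymbolInfo(nullptr);
+//       },
+//       [](const std::string &Name) {            // Logical-dylib symbols.
+//         return RuntimeDyld::SymbolInfo(nullptr);
+//       });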
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
new file mode 100644
index 0000000..a5286ff
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LazyEmittingLayer.h
@@ -0,0 +1,304 @@
+//===- LazyEmittingLayer.h - Lazily emit IR to lower JIT layers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for a lazy-emitting layer for the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include <list>
+
+namespace llvm {
+namespace orc {
+
+/// @brief Lazy-emitting IR layer.
+///
+/// This layer accepts sets of LLVM IR Modules (via addModuleSet), but does
+/// not immediately emit them to the layer below. Instead, emission to the
+/// base layer is deferred until the first time the client requests the
+/// address (via JITSymbol::getAddress) for a symbol contained in this layer.
+template <typename BaseLayerT> class LazyEmittingLayer {
+public:
+ typedef typename BaseLayerT::ModuleSetHandleT BaseLayerHandleT;
+
+private:
+ class EmissionDeferredSet {
+ public:
+ EmissionDeferredSet() : EmitState(NotEmitted) {}
+ virtual ~EmissionDeferredSet() {}
+
+ JITSymbol find(StringRef Name, bool ExportedSymbolsOnly, BaseLayerT &B) {
+ switch (EmitState) {
+ case NotEmitted:
+ if (auto GV = searchGVs(Name, ExportedSymbolsOnly)) {
+ // Create a std::string version of Name to capture here - the argument
+ // (a StringRef) may go away before the lambda is executed.
+ // FIXME: Use capture-init when we move to C++14.
+ std::string PName = Name;
+ JITSymbolFlags Flags = JITSymbolBase::flagsFromGlobalValue(*GV);
+ auto GetAddress =
+ [this, ExportedSymbolsOnly, PName, &B]() -> TargetAddress {
+ if (this->EmitState == Emitting)
+ return 0;
+ else if (this->EmitState == NotEmitted) {
+ this->EmitState = Emitting;
+ Handle = this->emitToBaseLayer(B);
+ this->EmitState = Emitted;
+ }
+ auto Sym = B.findSymbolIn(Handle, PName, ExportedSymbolsOnly);
+ return Sym.getAddress();
+ };
+ return JITSymbol(std::move(GetAddress), Flags);
+ } else
+ return nullptr;
+ case Emitting:
+ // Calling "emit" can trigger a recursive call to 'find' (e.g. to check
+      // for pre-existing definitions of common symbols), but any symbol in
+ // this module would already have been found internally (in the
+ // RuntimeDyld that did the lookup), so just return a nullptr here.
+ return nullptr;
+ case Emitted:
+ return B.findSymbolIn(Handle, Name, ExportedSymbolsOnly);
+ }
+ llvm_unreachable("Invalid emit-state.");
+ }
+
+ void removeModulesFromBaseLayer(BaseLayerT &BaseLayer) {
+ if (EmitState != NotEmitted)
+ BaseLayer.removeModuleSet(Handle);
+ }
+
+ void emitAndFinalize(BaseLayerT &BaseLayer) {
+ assert(EmitState != Emitting &&
+ "Cannot emitAndFinalize while already emitting");
+ if (EmitState == NotEmitted) {
+ EmitState = Emitting;
+ Handle = emitToBaseLayer(BaseLayer);
+ EmitState = Emitted;
+ }
+ BaseLayer.emitAndFinalize(Handle);
+ }
+
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ static std::unique_ptr<EmissionDeferredSet>
+ create(BaseLayerT &B, ModuleSetT Ms, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver);
+
+ protected:
+ virtual const GlobalValue* searchGVs(StringRef Name,
+ bool ExportedSymbolsOnly) const = 0;
+ virtual BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) = 0;
+
+ private:
+ enum { NotEmitted, Emitting, Emitted } EmitState;
+ BaseLayerHandleT Handle;
+ };
+
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ class EmissionDeferredSetImpl : public EmissionDeferredSet {
+ public:
+ EmissionDeferredSetImpl(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver)
+ : Ms(std::move(Ms)), MemMgr(std::move(MemMgr)),
+ Resolver(std::move(Resolver)) {}
+
+ protected:
+
+ const GlobalValue* searchGVs(StringRef Name,
+ bool ExportedSymbolsOnly) const override {
+ // FIXME: We could clean all this up if we had a way to reliably demangle
+ // names: We could just demangle name and search, rather than
+ // mangling everything else.
+
+ // If we have already built the mangled name set then just search it.
+ if (MangledSymbols) {
+ auto VI = MangledSymbols->find(Name);
+ if (VI == MangledSymbols->end())
+ return nullptr;
+ auto GV = VI->second;
+ if (!ExportedSymbolsOnly || GV->hasDefaultVisibility())
+ return GV;
+ return nullptr;
+ }
+
+      // If we haven't built the mangled name set yet, try to build it. As an
+      // optimization this will leave MangledSymbols set to nullptr if we find
+      // Name in the process of building the set.
+ return buildMangledSymbols(Name, ExportedSymbolsOnly);
+ }
+
+ BaseLayerHandleT emitToBaseLayer(BaseLayerT &BaseLayer) override {
+ // We don't need the mangled names set any more: Once we've emitted this
+ // to the base layer we'll just look for symbols there.
+ MangledSymbols.reset();
+ return BaseLayer.addModuleSet(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ private:
+ // If the mangled name of the given GlobalValue matches the given search
+ // name (and its visibility conforms to the ExportedSymbolsOnly flag) then
+ // return the symbol. Otherwise, add the mangled name to the Names map and
+ // return nullptr.
+ const GlobalValue* addGlobalValue(StringMap<const GlobalValue*> &Names,
+ const GlobalValue &GV,
+ const Mangler &Mang, StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ // Modules don't "provide" decls or common symbols.
+ if (GV.isDeclaration() || GV.hasCommonLinkage())
+ return nullptr;
+
+ // Mangle the GV name.
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mang.getNameWithPrefix(MangledNameStream, &GV, false);
+ }
+
+ // Check whether this is the name we were searching for, and if it is then
+ // bail out early.
+ if (MangledName == SearchName)
+ if (!ExportedSymbolsOnly || GV.hasDefaultVisibility())
+ return &GV;
+
+ // Otherwise add this to the map for later.
+ Names[MangledName] = &GV;
+ return nullptr;
+ }
+
+ // Build the MangledSymbols map. Bails out early (with MangledSymbols left set
+ // to nullptr) if the given SearchName is found while building the map.
+ const GlobalValue* buildMangledSymbols(StringRef SearchName,
+ bool ExportedSymbolsOnly) const {
+ assert(!MangledSymbols && "Mangled symbols map already exists?");
+
+ auto Symbols = llvm::make_unique<StringMap<const GlobalValue*>>();
+
+ for (const auto &M : Ms) {
+ Mangler Mang;
+
+ for (const auto &V : M->globals())
+ if (auto GV = addGlobalValue(*Symbols, V, Mang, SearchName,
+ ExportedSymbolsOnly))
+ return GV;
+
+ for (const auto &F : *M)
+ if (auto GV = addGlobalValue(*Symbols, F, Mang, SearchName,
+ ExportedSymbolsOnly))
+ return GV;
+ }
+
+ MangledSymbols = std::move(Symbols);
+ return nullptr;
+ }
+
+ ModuleSetT Ms;
+ MemoryManagerPtrT MemMgr;
+ SymbolResolverPtrT Resolver;
+ mutable std::unique_ptr<StringMap<const GlobalValue*>> MangledSymbols;
+ };
+
+ typedef std::list<std::unique_ptr<EmissionDeferredSet>> ModuleSetListT;
+
+ BaseLayerT &BaseLayer;
+ ModuleSetListT ModuleSetList;
+
+public:
+ /// @brief Handle to a set of loaded modules.
+ typedef typename ModuleSetListT::iterator ModuleSetHandleT;
+
+ /// @brief Construct a lazy emitting layer.
+ LazyEmittingLayer(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ /// @brief Add the given set of modules to the lazy emitting layer.
+ template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ModuleSetHandleT addModuleSet(ModuleSetT Ms,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ return ModuleSetList.insert(
+ ModuleSetList.end(),
+ EmissionDeferredSet::create(BaseLayer, std::move(Ms), std::move(MemMgr),
+ std::move(Resolver)));
+ }
+
+ /// @brief Remove the module set represented by the given handle.
+ ///
+ /// This method will free the memory associated with the given module set,
+ /// both in this layer, and the base layer.
+ void removeModuleSet(ModuleSetHandleT H) {
+ (*H)->removeModulesFromBaseLayer(BaseLayer);
+ ModuleSetList.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ // Look for the symbol among existing definitions.
+ if (auto Symbol = BaseLayer.findSymbol(Name, ExportedSymbolsOnly))
+ return Symbol;
+
+ // If not found then search the deferred sets. If any of these contain a
+ // definition of 'Name' then they will return a JITSymbol that will emit
+ // the corresponding module when the symbol address is requested.
+ for (auto &DeferredSet : ModuleSetList)
+ if (auto Symbol = DeferredSet->find(Name, ExportedSymbolsOnly, BaseLayer))
+ return Symbol;
+
+ // If no definition found anywhere return a null symbol.
+ return nullptr;
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// compiled modules represented by the handle H.
+ JITSymbol findSymbolIn(ModuleSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return (*H)->find(Name, ExportedSymbolsOnly, BaseLayer);
+ }
+
+  /// @brief Immediately emit and finalize the module set represented by the
+ /// given handle.
+ /// @param H Handle for module set to emit/finalize.
+ void emitAndFinalize(ModuleSetHandleT H) {
+ (*H)->emitAndFinalize(BaseLayer);
+ }
+
+};
+
+template <typename BaseLayerT>
+template <typename ModuleSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+std::unique_ptr<typename LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet>
+LazyEmittingLayer<BaseLayerT>::EmissionDeferredSet::create(
+ BaseLayerT &B, ModuleSetT Ms, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ typedef EmissionDeferredSetImpl<ModuleSetT, MemoryManagerPtrT, SymbolResolverPtrT>
+ EDS;
+ return llvm::make_unique<EDS>(std::move(Ms), std::move(MemMgr),
+ std::move(Resolver));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LAZYEMITTINGLAYER_H
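+
+// Example (sketch): stacking this layer, assuming Compile is any lower layer
+// exposing addModuleSet/findSymbol/removeModuleSet (e.g. an IRCompileLayer
+// over an ObjectLinkingLayer), with Ms, MemMgr and Resolver as described for
+// addModuleSet above.
+//
+//   LazyEmittingLayer<decltype(Compile)> Lazy(Compile);
+//   auto H = Lazy.addModuleSet(std::move(Ms), std::move(MemMgr),
+//                              std::move(Resolver));
+//   // Nothing is emitted yet; the first address query triggers emission of
+//   // the whole set to the base layer:
+//   auto Main = Lazy.findSymbol("main", /*ExportedSymbolsOnly=*/true);
+//   TargetAddress Addr = Main.getAddress();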
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/LogicalDylib.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LogicalDylib.h
new file mode 100644
index 0000000..883fa9e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/LogicalDylib.h
@@ -0,0 +1,135 @@
+//===--- LogicalDylib.h - Simulates dylib-style symbol lookup ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Simulates symbol resolution inside a dylib.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
+#define LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
+
+#include "llvm/ExecutionEngine/Orc/JITSymbol.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+template <typename BaseLayerT,
+ typename LogicalModuleResources,
+ typename LogicalDylibResources>
+class LogicalDylib {
+public:
+ typedef typename BaseLayerT::ModuleSetHandleT BaseLayerModuleSetHandleT;
+private:
+
+ typedef std::vector<BaseLayerModuleSetHandleT> BaseLayerHandleList;
+
+ struct LogicalModule {
+ // Make this move-only to ensure they don't get duplicated across moves of
+ // LogicalDylib or anything like that.
+ LogicalModule(LogicalModule &&RHS)
+ : Resources(std::move(RHS.Resources)),
+ BaseLayerHandles(std::move(RHS.BaseLayerHandles)) {}
+ LogicalModule() = default;
+ LogicalModuleResources Resources;
+ BaseLayerHandleList BaseLayerHandles;
+ };
+ typedef std::vector<LogicalModule> LogicalModuleList;
+
+public:
+
+ typedef typename BaseLayerHandleList::iterator BaseLayerHandleIterator;
+ typedef typename LogicalModuleList::iterator LogicalModuleHandle;
+
+ LogicalDylib(BaseLayerT &BaseLayer) : BaseLayer(BaseLayer) {}
+
+ ~LogicalDylib() {
+ for (auto &LM : LogicalModules)
+ for (auto BLH : LM.BaseLayerHandles)
+ BaseLayer.removeModuleSet(BLH);
+ }
+
+ // If possible, remove this and ~LogicalDylib once the work in the dtor is
+  // moved to members (e.g. self-unregistering base layer handles).
+ LogicalDylib(LogicalDylib &&RHS)
+ : BaseLayer(std::move(RHS.BaseLayer)),
+ LogicalModules(std::move(RHS.LogicalModules)),
+ DylibResources(std::move(RHS.DylibResources)) {}
+
+ LogicalModuleHandle createLogicalModule() {
+ LogicalModules.push_back(LogicalModule());
+ return std::prev(LogicalModules.end());
+ }
+
+ void addToLogicalModule(LogicalModuleHandle LMH,
+ BaseLayerModuleSetHandleT BaseLayerHandle) {
+ LMH->BaseLayerHandles.push_back(BaseLayerHandle);
+ }
+
+ LogicalModuleResources& getLogicalModuleResources(LogicalModuleHandle LMH) {
+ return LMH->Resources;
+ }
+
+ BaseLayerHandleIterator moduleHandlesBegin(LogicalModuleHandle LMH) {
+ return LMH->BaseLayerHandles.begin();
+ }
+
+ BaseLayerHandleIterator moduleHandlesEnd(LogicalModuleHandle LMH) {
+ return LMH->BaseLayerHandles.end();
+ }
+
+ JITSymbol findSymbolInLogicalModule(LogicalModuleHandle LMH,
+ const std::string &Name,
+ bool ExportedSymbolsOnly) {
+
+ if (auto StubSym = LMH->Resources.findSymbol(Name, ExportedSymbolsOnly))
+ return StubSym;
+
+ for (auto BLH : LMH->BaseLayerHandles)
+ if (auto Symbol = BaseLayer.findSymbolIn(BLH, Name, ExportedSymbolsOnly))
+ return Symbol;
+ return nullptr;
+ }
+
+ JITSymbol findSymbolInternally(LogicalModuleHandle LMH,
+ const std::string &Name) {
+ if (auto Symbol = findSymbolInLogicalModule(LMH, Name, false))
+ return Symbol;
+
+ for (auto LMI = LogicalModules.begin(), LME = LogicalModules.end();
+ LMI != LME; ++LMI) {
+ if (LMI != LMH)
+ if (auto Symbol = findSymbolInLogicalModule(LMI, Name, false))
+ return Symbol;
+ }
+
+ return nullptr;
+ }
+
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ for (auto LMI = LogicalModules.begin(), LME = LogicalModules.end();
+ LMI != LME; ++LMI)
+ if (auto Sym = findSymbolInLogicalModule(LMI, Name, ExportedSymbolsOnly))
+ return Sym;
+ return nullptr;
+ }
+
+ LogicalDylibResources& getDylibResources() { return DylibResources; }
+
+protected:
+ BaseLayerT BaseLayer;
+ LogicalModuleList LogicalModules;
+ LogicalDylibResources DylibResources;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_LOGICALDYLIB_H
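+
+// Example (sketch): instantiating LogicalDylib with trivial, client-defined
+// resource types. LogicalModuleResources must provide the findSymbol method
+// used by findSymbolInLogicalModule above; the placeholder here always
+// misses. BaseLayerT stands for any layer with a ModuleSetHandleT.
+//
+//   struct ModuleResources {
+//     JITSymbol findSymbol(const std::string &, bool) { return nullptr; }
+//   };
+//   struct DylibResources {};
+//   typedef LogicalDylib<BaseLayerT, ModuleResources, DylibResources> LD;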
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h
new file mode 100644
index 0000000..1560c6d
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/NullResolver.h
@@ -0,0 +1,36 @@
+//===------ NullResolver.h - Reject symbol lookup requests ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a RuntimeDyld::SymbolResolver subclass that rejects all symbol
+// resolution requests, for clients that have no cross-object fixups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+#define LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
+
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+
+namespace llvm {
+namespace orc {
+
+/// SymbolResolver implementation that rejects all resolution requests.
+/// Useful for clients that have no cross-object fixups.
+class NullResolver : public RuntimeDyld::SymbolResolver {
+public:
+ RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) final;
+
+ RuntimeDyld::SymbolInfo
+ findSymbolInLogicalDylib(const std::string &Name) final;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_NULLRESOLVER_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
new file mode 100644
index 0000000..2acfecf
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h
@@ -0,0 +1,283 @@
+//===- ObjectLinkingLayer.h - Add object files to a JIT process -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the definition for the object layer of the JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
+
+#include "JITSymbol.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include <list>
+#include <memory>
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerBase {
+protected:
+
+ /// @brief Holds a set of objects to be allocated/linked as a unit in the JIT.
+ ///
+ /// An instance of this class will be created for each set of objects added
+  /// via ObjectLinkingLayer::addObjectSet. Deleting the instance (via
+ /// removeObjectSet) frees its memory, removing all symbol definitions that
+ /// had been provided by this instance. Higher level layers are responsible
+ /// for taking any action required to handle the missing symbols.
+ class LinkedObjectSet {
+ LinkedObjectSet(const LinkedObjectSet&) = delete;
+ void operator=(const LinkedObjectSet&) = delete;
+ public:
+ LinkedObjectSet(RuntimeDyld::MemoryManager &MemMgr,
+ RuntimeDyld::SymbolResolver &Resolver,
+ bool ProcessAllSections)
+ : RTDyld(llvm::make_unique<RuntimeDyld>(MemMgr, Resolver)),
+ State(Raw) {
+ RTDyld->setProcessAllSections(ProcessAllSections);
+ }
+
+ virtual ~LinkedObjectSet() {}
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ addObject(const object::ObjectFile &Obj) {
+ return RTDyld->loadObject(Obj);
+ }
+
+ RuntimeDyld::SymbolInfo getSymbol(StringRef Name) const {
+ return RTDyld->getSymbol(Name);
+ }
+
+ bool NeedsFinalization() const { return (State == Raw); }
+
+ virtual void Finalize() = 0;
+
+ void mapSectionAddress(const void *LocalAddress, TargetAddress TargetAddr) {
+ assert((State != Finalized) &&
+ "Attempting to remap sections for finalized objects.");
+ RTDyld->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ protected:
+ std::unique_ptr<RuntimeDyld> RTDyld;
+ enum { Raw, Finalizing, Finalized } State;
+ };
+
+ typedef std::list<std::unique_ptr<LinkedObjectSet>> LinkedObjectSetListT;
+
+public:
+ /// @brief Handle to a set of loaded objects.
+ typedef LinkedObjectSetListT::iterator ObjSetHandleT;
+};
+
+/// @brief Default (no-op) action to perform when loading objects.
+class DoNothingOnNotifyLoaded {
+public:
+ template <typename ObjSetT, typename LoadResult>
+ void operator()(ObjectLinkingLayerBase::ObjSetHandleT, const ObjSetT &,
+ const LoadResult &) {}
+};
+
+/// @brief Bare bones object linking layer.
+///
+/// This class is intended to be used as the base layer for a JIT. It allows
+/// object files to be loaded into memory, linked, and the addresses of their
+/// symbols queried. All objects added to this layer can see each other's
+/// symbols.
+template <typename NotifyLoadedFtor = DoNothingOnNotifyLoaded>
+class ObjectLinkingLayer : public ObjectLinkingLayerBase {
+private:
+
+ template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
+ class ConcreteLinkedObjectSet : public LinkedObjectSet {
+ public:
+ ConcreteLinkedObjectSet(MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver,
+ bool ProcessAllSections)
+ : LinkedObjectSet(*MemMgr, *Resolver, ProcessAllSections),
+ MemMgr(std::move(MemMgr)), Resolver(std::move(Resolver)) { }
+
+ void Finalize() override {
+ State = Finalizing;
+ RTDyld->resolveRelocations();
+ RTDyld->registerEHFrames();
+ MemMgr->finalizeMemory();
+ State = Finalized;
+ }
+
+ private:
+ MemoryManagerPtrT MemMgr;
+ SymbolResolverPtrT Resolver;
+ };
+
+ template <typename MemoryManagerPtrT, typename SymbolResolverPtrT>
+ std::unique_ptr<LinkedObjectSet>
+ createLinkedObjectSet(MemoryManagerPtrT MemMgr, SymbolResolverPtrT Resolver,
+ bool ProcessAllSections) {
+ typedef ConcreteLinkedObjectSet<MemoryManagerPtrT, SymbolResolverPtrT> LOS;
+ return llvm::make_unique<LOS>(std::move(MemMgr), std::move(Resolver),
+ ProcessAllSections);
+ }
+
+public:
+
+ /// @brief LoadedObjectInfo list. Contains a list of owning pointers to
+ /// RuntimeDyld::LoadedObjectInfo instances.
+ typedef std::vector<std::unique_ptr<RuntimeDyld::LoadedObjectInfo>>
+ LoadedObjInfoList;
+
+ /// @brief Functor for receiving finalization notifications.
+ typedef std::function<void(ObjSetHandleT)> NotifyFinalizedFtor;
+
+ /// @brief Construct an ObjectLinkingLayer with the given NotifyLoaded,
+ /// and NotifyFinalized functors.
+ ObjectLinkingLayer(
+ NotifyLoadedFtor NotifyLoaded = NotifyLoadedFtor(),
+ NotifyFinalizedFtor NotifyFinalized = NotifyFinalizedFtor())
+ : NotifyLoaded(std::move(NotifyLoaded)),
+ NotifyFinalized(std::move(NotifyFinalized)),
+ ProcessAllSections(false) {}
+
+ /// @brief Set the 'ProcessAllSections' flag.
+ ///
+ /// If set to true, all sections in each object file will be allocated using
+ /// the memory manager, rather than just the sections required for execution.
+ ///
+ /// This is kludgy, and may be removed in the future.
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ /// @brief Add a set of objects (or archives) that will be treated as a unit
+ /// for the purposes of symbol lookup and memory management.
+ ///
+ /// @return A pair containing (1) A handle that can be used to free the memory
+ /// allocated for the objects, and (2) a LoadedObjInfoList containing
+ /// one LoadedObjInfo instance for each object at the corresponding
+ /// index in the Objects list.
+ ///
+ /// This version of this method allows the client to pass in an
+ /// RTDyldMemoryManager instance that will be used to allocate memory and look
+ /// up external symbol addresses for the given objects.
+ template <typename ObjSetT,
+ typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ObjSetHandleT addObjectSet(const ObjSetT &Objects,
+ MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+ ObjSetHandleT Handle =
+ LinkedObjSetList.insert(
+ LinkedObjSetList.end(),
+ createLinkedObjectSet(std::move(MemMgr), std::move(Resolver),
+ ProcessAllSections));
+
+ LinkedObjectSet &LOS = **Handle;
+ LoadedObjInfoList LoadedObjInfos;
+
+ for (auto &Obj : Objects)
+ LoadedObjInfos.push_back(LOS.addObject(*Obj));
+
+ NotifyLoaded(Handle, Objects, LoadedObjInfos);
+
+ return Handle;
+ }
+
+ /// @brief Remove the set of objects associated with handle H.
+ ///
+ /// All memory allocated for the objects will be freed, and the sections and
+ /// symbols they provided will no longer be available. No attempt is made to
+ /// re-emit the missing symbols, and any use of these symbols (directly or
+ /// indirectly) will result in undefined behavior. If dependence tracking is
+ /// required to detect or resolve such issues it should be added at a higher
+ /// layer.
+ void removeObjectSet(ObjSetHandleT H) {
+ // How do we invalidate the symbols in H?
+ LinkedObjSetList.erase(H);
+ }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) {
+ for (auto I = LinkedObjSetList.begin(), E = LinkedObjSetList.end(); I != E;
+ ++I)
+ if (auto Symbol = findSymbolIn(I, Name, ExportedSymbolsOnly))
+ return Symbol;
+
+ return nullptr;
+ }
+
+ /// @brief Search for the given named symbol in the context of the set of
+ /// loaded objects represented by the handle H.
+ /// @param H The handle for the object set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given object set.
+ JITSymbol findSymbolIn(ObjSetHandleT H, StringRef Name,
+ bool ExportedSymbolsOnly) {
+ if (auto Sym = (*H)->getSymbol(Name)) {
+ if (Sym.isExported() || !ExportedSymbolsOnly) {
+ auto Addr = Sym.getAddress();
+ auto Flags = Sym.getFlags();
+ if (!(*H)->NeedsFinalization()) {
+ // If this instance has already been finalized then we can just return
+ // the address.
+ return JITSymbol(Addr, Flags);
+ } else {
+ // If this instance needs finalization return a functor that will do
+ // it. The functor still needs to double-check whether finalization is
+ // required, in case someone else finalizes this set before the
+ // functor is called.
+ auto GetAddress =
+ [this, Addr, H]() {
+ if ((*H)->NeedsFinalization()) {
+ (*H)->Finalize();
+ if (NotifyFinalized)
+ NotifyFinalized(H);
+ }
+ return Addr;
+ };
+ return JITSymbol(std::move(GetAddress), Flags);
+ }
+ }
+ }
+ return nullptr;
+ }
+
+ /// @brief Map section addresses for the objects associated with the handle H.
+ void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
+ TargetAddress TargetAddr) {
+ (*H)->mapSectionAddress(LocalAddress, TargetAddr);
+ }
+
+ /// @brief Immediately emit and finalize the object set represented by the
+ /// given handle.
+ /// @param H Handle for object set to emit/finalize.
+ void emitAndFinalize(ObjSetHandleT H) {
+ (*H)->Finalize();
+ if (NotifyFinalized)
+ NotifyFinalized(H);
+ }
+
+private:
+ LinkedObjectSetListT LinkedObjSetList;
+ NotifyLoadedFtor NotifyLoaded;
+ NotifyFinalizedFtor NotifyFinalized;
+ bool ProcessAllSections;
+};
+
+} // End namespace orc.
+} // End namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTLINKINGLAYER_H
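+
+// Example (sketch): basic use of the layer, assuming Objs is a container of
+// pointers to object::ObjectFile and MemMgr/Resolver are (smart) pointers to
+// a RuntimeDyld::MemoryManager and SymbolResolver respectively.
+//
+//   ObjectLinkingLayer<> ObjLayer;
+//   auto H = ObjLayer.addObjectSet(Objs, std::move(MemMgr),
+//                                  std::move(Resolver));
+//   if (auto Sym = ObjLayer.findSymbol("foo", true)) {
+//     // getAddress() finalizes the set (relocations, memory permissions)
+//     // if that has not already happened.
+//     TargetAddress Addr = Sym.getAddress();
+//   }
+//   ObjLayer.removeObjectSet(H); // Frees the objects' memory.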
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
new file mode 100644
index 0000000..f96e83e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/ObjectTransformLayer.h
@@ -0,0 +1,104 @@
+//===- ObjectTransformLayer.h - Run all objects through functor -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Run all objects passed in through a user supplied functor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+#define LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
+
+#include "JITSymbol.h"
+
+namespace llvm {
+namespace orc {
+
+/// @brief Object mutating layer.
+///
+/// This layer accepts sets of ObjectFiles (via addObjectSet). It
+/// immediately applies the user supplied functor to each object, then adds
+/// the set of transformed objects to the layer below.
+template <typename BaseLayerT, typename TransformFtor>
+class ObjectTransformLayer {
+public:
+ /// @brief Handle to a set of added objects.
+ typedef typename BaseLayerT::ObjSetHandleT ObjSetHandleT;
+
+ /// @brief Construct an ObjectTransformLayer with the given BaseLayer
+ ObjectTransformLayer(BaseLayerT &BaseLayer,
+ TransformFtor Transform = TransformFtor())
+ : BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+ /// @brief Apply the transform functor to each object in the object set, then
+ /// add the resulting set of objects to the base layer, along with the
+ /// memory manager and symbol resolver.
+ ///
+ /// @return A handle for the added objects.
+ template <typename ObjSetT, typename MemoryManagerPtrT,
+ typename SymbolResolverPtrT>
+ ObjSetHandleT addObjectSet(ObjSetT &Objects, MemoryManagerPtrT MemMgr,
+ SymbolResolverPtrT Resolver) {
+
+ for (auto I = Objects.begin(), E = Objects.end(); I != E; ++I)
+ *I = Transform(std::move(*I));
+
+ return BaseLayer.addObjectSet(Objects, std::move(MemMgr),
+ std::move(Resolver));
+ }
+
+ /// @brief Remove the object set associated with the handle H.
+ void removeObjectSet(ObjSetHandleT H) { BaseLayer.removeObjectSet(H); }
+
+ /// @brief Search for the given named symbol.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it exists.
+ JITSymbol findSymbol(const std::string &Name, bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbol(Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Get the address of the given symbol in the context of the set of
+ /// objects represented by the handle H. This call is forwarded to the
+ /// base layer's implementation.
+ /// @param H The handle for the object set to search in.
+ /// @param Name The name of the symbol to search for.
+ /// @param ExportedSymbolsOnly If true, search only for exported symbols.
+ /// @return A handle for the given named symbol, if it is found in the
+ /// given object set.
+ JITSymbol findSymbolIn(ObjSetHandleT H, const std::string &Name,
+ bool ExportedSymbolsOnly) {
+ return BaseLayer.findSymbolIn(H, Name, ExportedSymbolsOnly);
+ }
+
+ /// @brief Immediately emit and finalize the object set represented by the
+ /// given handle.
+ /// @param H Handle for object set to emit/finalize.
+ void emitAndFinalize(ObjSetHandleT H) { BaseLayer.emitAndFinalize(H); }
+
+ /// @brief Map section addresses for the objects associated with the handle H.
+ void mapSectionAddress(ObjSetHandleT H, const void *LocalAddress,
+ TargetAddress TargetAddr) {
+ BaseLayer.mapSectionAddress(H, LocalAddress, TargetAddr);
+ }
+
+ /// @brief Access the transform functor directly.
+ TransformFtor &getTransform() { return Transform; }
+
+  /// @brief Access the transform functor directly (const overload).
+ const TransformFtor &getTransform() const { return Transform; }
+
+private:
+ BaseLayerT &BaseLayer;
+ TransformFtor Transform;
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_OBJECTTRANSFORMLAYER_H
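+
+// Example (sketch): the smallest useful transform is the identity, written
+// as a generic functor so the object-pointer type is deduced from the base
+// layer. Real clients might log, verify, or rewrite each object instead.
+//
+//   struct IdentityTransform {
+//     template <typename ObjPtrT> ObjPtrT operator()(ObjPtrT Obj) {
+//       return Obj;
+//     }
+//   };
+//   ObjectTransformLayer<BaseLayerT, IdentityTransform> TL(BaseLayer);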
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h
new file mode 100644
index 0000000..246d3e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/Orc/OrcTargetSupport.h
@@ -0,0 +1,103 @@
+//===-- OrcTargetSupport.h - Code to support specific targets --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Target specific code for Orc, e.g. callback assembly.
+//
+// Target classes should be part of the JIT *target* process, not the host
+// process (except where you're doing hosted JITing and the two are one and the
+// same).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
+#define LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
+
+#include "IndirectionUtils.h"
+#include "llvm/Support/Memory.h"
+
+namespace llvm {
+namespace orc {
+
+class OrcX86_64 {
+public:
+ static const unsigned PageSize = 4096;
+ static const unsigned PointerSize = 8;
+ static const unsigned TrampolineSize = 8;
+ static const unsigned ResolverCodeSize = 0x78;
+
+ typedef TargetAddress (*JITReentryFn)(void *CallbackMgr,
+ void *TrampolineId);
+
+  /// @brief Write the resolver code into the given memory. The user is
+  /// responsible for allocating the memory and setting permissions.
+ static void writeResolverCode(uint8_t *ResolveMem, JITReentryFn Reentry,
+ void *CallbackMgr);
+
+  /// @brief Write the requested number of trampolines into the given memory,
+ /// which must be big enough to hold 1 pointer, plus NumTrampolines
+ /// trampolines.
+ static void writeTrampolines(uint8_t *TrampolineMem, void *ResolverAddr,
+ unsigned NumTrampolines);
+
+ /// @brief Provide information about stub blocks generated by the
+ /// makeIndirectStubsBlock function.
+ class IndirectStubsInfo {
+ friend class OrcX86_64;
+ public:
+ const static unsigned StubSize = 8;
+ const static unsigned PtrSize = 8;
+
+ IndirectStubsInfo() : NumStubs(0) {}
+ IndirectStubsInfo(IndirectStubsInfo &&Other)
+ : NumStubs(Other.NumStubs), StubsMem(std::move(Other.StubsMem)) {
+ Other.NumStubs = 0;
+ }
+ IndirectStubsInfo& operator=(IndirectStubsInfo &&Other) {
+ NumStubs = Other.NumStubs;
+ Other.NumStubs = 0;
+ StubsMem = std::move(Other.StubsMem);
+ return *this;
+ }
+
+ /// @brief Number of stubs in this block.
+ unsigned getNumStubs() const { return NumStubs; }
+
+ /// @brief Get a pointer to the stub at the given index, which must be in
+ /// the range 0 .. getNumStubs() - 1.
+ void* getStub(unsigned Idx) const {
+ return static_cast<uint64_t*>(StubsMem.base()) + Idx;
+ }
+
+ /// @brief Get a pointer to the implementation-pointer at the given index,
+ /// which must be in the range 0 .. getNumStubs() - 1.
+ void** getPtr(unsigned Idx) const {
+ char *PtrsBase =
+ static_cast<char*>(StubsMem.base()) + NumStubs * StubSize;
+ return reinterpret_cast<void**>(PtrsBase) + Idx;
+ }
+ private:
+ unsigned NumStubs;
+ sys::OwningMemoryBlock StubsMem;
+ };
+
+ /// @brief Emit at least MinStubs worth of indirect call stubs, rounded out to
+ /// the nearest page size.
+ ///
+  /// E.g. Asking for 4 stubs on x86-64, where stubs are 8 bytes, with 4K
+  /// pages will return a block of 512 stubs (4096 / 8 = 512). Asking for 513
+  /// will return a block of 1024 (two pages' worth).
+ static std::error_code emitIndirectStubsBlock(IndirectStubsInfo &StubsInfo,
+ unsigned MinStubs,
+ void *InitialPtrVal);
+};
+
+} // End namespace orc.
+} // End namespace llvm.
+
+#endif // LLVM_EXECUTIONENGINE_ORC_ORCTARGETSUPPORT_H
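+
+// Example (sketch of the arithmetic only, not the implementation): the
+// rounding described for emitIndirectStubsBlock works out as follows.
+//
+//   unsigned stubsPerBlock(unsigned MinStubs) {
+//     const unsigned StubsPerPage =
+//         OrcX86_64::PageSize / OrcX86_64::IndirectStubsInfo::StubSize;
+//     unsigned Pages = (MinStubs + StubsPerPage - 1) / StubsPerPage;
+//     return Pages * StubsPerPage; // 4 -> 512, 513 -> 1024.
+//   }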
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/OrcMCJITReplacement.h b/contrib/llvm/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
new file mode 100644
index 0000000..4cd5648
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/OrcMCJITReplacement.h
@@ -0,0 +1,38 @@
+//===---- OrcMCJITReplacement.h - Orc-based MCJIT replacement ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forces OrcMCJITReplacement to link in on certain operating systems.
+// (Windows).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+#define LLVM_EXECUTIONENGINE_ORCMCJITREPLACEMENT_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include <cstdlib>
+
+extern "C" void LLVMLinkInOrcMCJITReplacement();
+
+namespace {
+ struct ForceOrcMCJITReplacementLinking {
+ ForceOrcMCJITReplacementLinking() {
+ // We must reference OrcMCJITReplacement in such a way that compilers will
+ // not delete it all as dead code, even with whole program optimization,
+ // yet is effectively a NO-OP. As the compiler isn't smart enough to know
+ // that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ LLVMLinkInOrcMCJITReplacement();
+ }
+ } ForceOrcMCJITReplacementLinking;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RTDyldMemoryManager.h b/contrib/llvm/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
new file mode 100644
index 0000000..207bad0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RTDyldMemoryManager.h
@@ -0,0 +1,136 @@
+//===-- RTDyldMemoryManager.h - Memory manager for MC-JIT -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_RTDYLDMEMORYMANAGER_H
+
+#include "RuntimeDyld.h"
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Memory.h"
+
+namespace llvm {
+
+class ExecutionEngine;
+
+namespace object {
+  class ObjectFile;
+}
+
+class MCJITMemoryManager : public RuntimeDyld::MemoryManager {
+public:
+ /// This method is called after an object has been loaded into memory but
+ /// before relocations are applied to the loaded sections. The object load
+ /// may have been initiated by MCJIT to resolve an external symbol for another
+ /// object that is being finalized. In that case, the object about which
+ /// the memory manager is being notified will be finalized immediately after
+ /// the memory manager returns from this call.
+ ///
+ /// Memory managers which are preparing code for execution in an external
+ /// address space can use this call to remap the section addresses for the
+ /// newly loaded object.
+ virtual void notifyObjectLoaded(ExecutionEngine *EE,
+ const object::ObjectFile &) {}
+};
+
+// RuntimeDyld clients often want to handle the memory management of
+// what gets placed where. For JIT clients, this is the subset of
+// JITMemoryManager required for dynamic loading of binaries.
+//
+// FIXME: As the RuntimeDyld fills out, additional routines will be needed
+// for the varying types of objects to be allocated.
+class RTDyldMemoryManager : public MCJITMemoryManager,
+ public RuntimeDyld::SymbolResolver {
+ RTDyldMemoryManager(const RTDyldMemoryManager&) = delete;
+ void operator=(const RTDyldMemoryManager&) = delete;
+public:
+ RTDyldMemoryManager() {}
+ ~RTDyldMemoryManager() override;
+
+ void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;
+ void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, size_t Size) override;
+
+ /// This method returns the address of the specified function or variable in
+ /// the current process.
+ static uint64_t getSymbolAddressInProcess(const std::string &Name);
+
+ /// Legacy symbol lookup - DEPRECATED! Please override findSymbol instead.
+ ///
+ /// This method returns the address of the specified function or variable.
+ /// It is used to resolve symbols during module linking.
+ virtual uint64_t getSymbolAddress(const std::string &Name) {
+ return getSymbolAddressInProcess(Name);
+ }
+
+ /// This method returns a RuntimeDyld::SymbolInfo for the specified function
+ /// or variable. It is used to resolve symbols during module linking.
+ ///
+ /// By default this falls back on the legacy lookup method:
+ /// 'getSymbolAddress'. The address returned by getSymbolAddress is treated as
+ /// a strong, exported symbol, consistent with historical treatment by
+ /// RuntimeDyld.
+ ///
+ /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+ /// this method and return a SymbolInfo with the flags set correctly. This is
+ /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+ RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override {
+ return RuntimeDyld::SymbolInfo(getSymbolAddress(Name),
+ JITSymbolFlags::Exported);
+ }
+
+ /// Legacy symbol lookup -- DEPRECATED! Please override
+ /// findSymbolInLogicalDylib instead.
+ ///
+ /// Default to treating all modules as separate.
+ virtual uint64_t getSymbolAddressInLogicalDylib(const std::string &Name) {
+ return 0;
+ }
+
+ /// Default to treating all modules as separate.
+ ///
+ /// By default this falls back on the legacy lookup method:
+ /// 'getSymbolAddressInLogicalDylib'. The address returned by
+ /// getSymbolAddressInLogicalDylib is treated as a strong, exported symbol,
+ /// consistent with historical treatment by RuntimeDyld.
+ ///
+ /// Clients writing custom RTDyldMemoryManagers are encouraged to override
+ /// this method and return a SymbolInfo with the flags set correctly. This is
+ /// necessary for RuntimeDyld to correctly handle weak and non-exported symbols.
+ RuntimeDyld::SymbolInfo
+ findSymbolInLogicalDylib(const std::string &Name) override {
+ return RuntimeDyld::SymbolInfo(getSymbolAddressInLogicalDylib(Name),
+ JITSymbolFlags::Exported);
+ }
+
+ /// This method returns the address of the specified function. As such it is
+ /// only useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If \p AbortOnFailure is false and no function with the given name is
+ /// found, this function returns a null pointer. Otherwise, it prints a
+ /// message to stderr and aborts.
+ ///
+ /// This function is deprecated for memory managers to be used with
+ /// MCJIT or RuntimeDyld. Use getSymbolAddress instead.
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true);
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(
+ RTDyldMemoryManager, LLVMMCJITMemoryManagerRef)
+
+} // namespace llvm
+
+
+#endif
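+
+// Example (sketch): the override recommended above, resolving against the
+// host process and reporting flags explicitly instead of relying on the
+// legacy default. The subclass remains abstract: the allocate/finalize
+// methods still need definitions (see SectionMemoryManager for a concrete
+// implementation).
+//
+//   class HostProcessMemoryManager : public RTDyldMemoryManager {
+//     RuntimeDyld::SymbolInfo findSymbol(const std::string &Name) override {
+//       if (uint64_t Addr = getSymbolAddressInProcess(Name))
+//         return RuntimeDyld::SymbolInfo(Addr, JITSymbolFlags::Exported);
+//       return RuntimeDyld::SymbolInfo(nullptr);
+//     }
+//   };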
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
new file mode 100644
index 0000000..385b8d0
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -0,0 +1,256 @@
+//===-- RuntimeDyld.h - Run-time dynamic linker for MC-JIT ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the runtime dynamic linker facilities of the MC-JIT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
+
+#include "JITSymbolFlags.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include <map>
+#include <memory>
+
+namespace llvm {
+
+namespace object {
+ class ObjectFile;
+ template <typename T> class OwningBinary;
+}
+
+class RuntimeDyldImpl;
+class RuntimeDyldCheckerImpl;
+
+class RuntimeDyld {
+ friend class RuntimeDyldCheckerImpl;
+
+ RuntimeDyld(const RuntimeDyld &) = delete;
+ void operator=(const RuntimeDyld &) = delete;
+
+protected:
+ // Change the address associated with a section when resolving relocations.
+ // Any relocations already associated with the symbol will be re-resolved.
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+public:
+
+ /// \brief Information about a named symbol.
+ class SymbolInfo : public JITSymbolBase {
+ public:
+ SymbolInfo(std::nullptr_t) : JITSymbolBase(JITSymbolFlags::None), Address(0) {}
+ SymbolInfo(uint64_t Address, JITSymbolFlags Flags)
+ : JITSymbolBase(Flags), Address(Address) {}
+ explicit operator bool() const { return Address != 0; }
+ uint64_t getAddress() const { return Address; }
+ private:
+ uint64_t Address;
+ };
+
+ /// \brief Information about the loaded object.
+ class LoadedObjectInfo : public llvm::LoadedObjectInfo {
+ friend class RuntimeDyldImpl;
+ public:
+ typedef std::map<object::SectionRef, unsigned> ObjSectionToIDMap;
+
+ LoadedObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+ : RTDyld(RTDyld), ObjSecToIDMap(ObjSecToIDMap) { }
+
+ virtual object::OwningBinary<object::ObjectFile>
+ getObjectForDebug(const object::ObjectFile &Obj) const = 0;
+
+ uint64_t
+ getSectionLoadAddress(const object::SectionRef &Sec) const override;
+
+ protected:
+ virtual void anchor();
+
+ RuntimeDyldImpl &RTDyld;
+ ObjSectionToIDMap ObjSecToIDMap;
+ };
+
+ template <typename Derived> struct LoadedObjectInfoHelper : LoadedObjectInfo {
+ protected:
+ LoadedObjectInfoHelper(const LoadedObjectInfoHelper &) = default;
+ LoadedObjectInfoHelper() = default;
+
+ public:
+ LoadedObjectInfoHelper(RuntimeDyldImpl &RTDyld,
+ LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfo(RTDyld, std::move(ObjSecToIDMap)) {}
+ std::unique_ptr<llvm::LoadedObjectInfo> clone() const override {
+ return llvm::make_unique<Derived>(static_cast<const Derived &>(*this));
+ }
+ };
+
+ /// \brief Memory Management.
+ class MemoryManager {
+ public:
+ virtual ~MemoryManager() {}
+
+ /// Allocate a memory block of (at least) the given size suitable for
+ /// executable code. The SectionID is a unique identifier assigned by the
+ /// RuntimeDyld instance, and optionally recorded by the memory manager to
+ /// access a loaded section.
+ virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) = 0;
+
+ /// Allocate a memory block of (at least) the given size suitable for data.
+ /// The SectionID is a unique identifier assigned by the JIT engine, and
+ /// optionally recorded by the memory manager to access a loaded section.
+ virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName,
+ bool IsReadOnly) = 0;
+
+ /// Inform the memory manager about the total amount of memory required to
+ /// allocate all sections to be loaded:
+ /// \p CodeSize - the total size of all code sections
+ /// \p DataSizeRO - the total size of all read-only data sections
+ /// \p DataSizeRW - the total size of all read-write data sections
+ ///
+ /// Note that by default the callback is disabled. To enable it
+ /// redefine the method needsToReserveAllocationSpace to return true.
+ virtual void reserveAllocationSpace(uintptr_t CodeSize,
+ uintptr_t DataSizeRO,
+ uintptr_t DataSizeRW) {}
+
+ /// Override to return true to enable the reserveAllocationSpace callback.
+ virtual bool needsToReserveAllocationSpace() { return false; }
+
+ /// Register the EH frames with the runtime so that c++ exceptions work.
+ ///
+ /// \p Addr parameter provides the local address of the EH frame section
+ /// data, while \p LoadAddr provides the address of the data in the target
+ /// address space. If the section has not been remapped (which will usually
+ /// be the case for local execution) these two values will be the same.
+ virtual void registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+ size_t Size) = 0;
+ virtual void deregisterEHFrames(uint8_t *addr, uint64_t LoadAddr,
+ size_t Size) = 0;
+
+ /// This method is called when object loading is complete and section page
+ /// permissions can be applied. It is up to the memory manager implementation
+ /// to decide whether or not to act on this method. The memory manager will
+ /// typically allocate all sections as read-write and then apply specific
+ /// permissions when this method is called. Code sections cannot be executed
+ /// until this function has been called. In addition, any cache coherency
+ /// operations needed to reliably use the memory are also performed.
+ ///
+ /// Returns true if an error occurred, false otherwise.
+ virtual bool finalizeMemory(std::string *ErrMsg = nullptr) = 0;
+
+ private:
+ virtual void anchor();
+ };
+
+ /// \brief Symbol resolution.
+ class SymbolResolver {
+ public:
+ virtual ~SymbolResolver() {}
+
+ /// This method returns the address of the specified function or variable.
+ /// It is used to resolve symbols during module linking.
+ ///
+ /// If the returned symbol's address is equal to ~0ULL then RuntimeDyld will
+ /// skip all relocations for that symbol, and the client will be responsible
+ /// for handling them manually.
+ virtual SymbolInfo findSymbol(const std::string &Name) = 0;
+
+ /// This method returns the address of the specified symbol if it exists
+ /// within the logical dynamic library represented by this
+ /// RTDyldMemoryManager. Unlike getSymbolAddress, queries through this
+ /// interface should return addresses for hidden symbols.
+ ///
+ /// This is of particular importance for the Orc JIT APIs, which support lazy
+ /// compilation by breaking up modules: Each of those broken out modules
+ /// must be able to resolve hidden symbols provided by the others. Clients
+ /// writing memory managers for MCJIT can usually ignore this method.
+ ///
+ /// This method will be queried by RuntimeDyld when checking for previous
+ /// definitions of common symbols. It will *not* be queried by default when
+ /// resolving external symbols (this minimises the link-time overhead for
+ /// MCJIT clients who don't care about Orc features). If you are writing a
+ /// RTDyldMemoryManager for Orc and want "external" symbol resolution to
+    /// search the logical dylib, you should override your getSymbolAddress
+    /// method to call this method directly.
+ virtual SymbolInfo findSymbolInLogicalDylib(const std::string &Name) = 0;
+ private:
+ virtual void anchor();
+ };
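+
+  // Illustrative sketch only: a resolver backed by a plain map of known
+  // addresses. Unknown symbols resolve to address zero here, and the flag
+  // value shown is just an assumption; real clients often search the host
+  // process instead.
+  //
+  //   class MapResolver : public RuntimeDyld::SymbolResolver {
+  //     std::map<std::string, uint64_t> Symbols;
+  //   public:
+  //     SymbolInfo findSymbol(const std::string &Name) override {
+  //       auto I = Symbols.find(Name);
+  //       uint64_t Addr = (I == Symbols.end()) ? 0 : I->second;
+  //       return SymbolInfo(Addr, JITSymbolFlags::None);
+  //     }
+  //     SymbolInfo findSymbolInLogicalDylib(const std::string &Name) override {
+  //       return findSymbol(Name);
+  //     }
+  //   };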
+
+ /// \brief Construct a RuntimeDyld instance.
+ RuntimeDyld(MemoryManager &MemMgr, SymbolResolver &Resolver);
+ ~RuntimeDyld();
+
+ /// Add the referenced object file to the list of objects to be loaded and
+ /// relocated.
+ std::unique_ptr<LoadedObjectInfo> loadObject(const object::ObjectFile &O);
+
+ /// Get the address of our local copy of the symbol. This may or may not
+ /// be the address used for relocation (clients can copy the data around
+  /// and resolve relocations based on where they put it).
+ void *getSymbolLocalAddress(StringRef Name) const;
+
+ /// Get the target address and flags for the named symbol.
+ /// This address is the one used for relocation.
+ SymbolInfo getSymbol(StringRef Name) const;
+
+ /// Resolve the relocations for all symbols we currently know about.
+ void resolveRelocations();
+
+ /// Map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+ /// Register any EH frame sections that have been loaded but not previously
+ /// registered with the memory manager. Note, RuntimeDyld is responsible
+ /// for identifying the EH frame and calling the memory manager with the
+ /// EH frame section data. However, the memory manager itself will handle
+ /// the actual target-specific EH frame registration.
+ void registerEHFrames();
+
+ void deregisterEHFrames();
+
+ bool hasError();
+ StringRef getErrorString();
+
+ /// By default, only sections that are "required for execution" are passed to
+ /// the RTDyldMemoryManager, and other sections are discarded. Passing 'true'
+ /// to this method will cause RuntimeDyld to pass all sections to its
+ /// memory manager regardless of whether they are "required to execute" in the
+ /// usual sense. This is useful for inspecting metadata sections that may not
+  /// contain relocations, e.g. debug info or stackmaps.
+ ///
+ /// Must be called before the first object file is loaded.
+ void setProcessAllSections(bool ProcessAllSections) {
+ assert(!Dyld && "setProcessAllSections must be called before loadObject.");
+ this->ProcessAllSections = ProcessAllSections;
+ }
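+
+  // A typical driving sequence, sketched under the assumption that concrete
+  // "MemMgr" and "Resolver" implementations and an object::ObjectFile "Obj"
+  // already exist:
+  //
+  //   RuntimeDyld RTDyld(MemMgr, Resolver);
+  //   auto Info = RTDyld.loadObject(Obj);
+  //   RTDyld.resolveRelocations();
+  //   RTDyld.registerEHFrames();
+  //   if (RTDyld.hasError())
+  //     report_fatal_error(RTDyld.getErrorString());
+  //   void *Main = RTDyld.getSymbolLocalAddress("main");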
+
+private:
+ // RuntimeDyldImpl is the actual class. RuntimeDyld is just the public
+ // interface.
+ std::unique_ptr<RuntimeDyldImpl> Dyld;
+ MemoryManager &MemMgr;
+ SymbolResolver &Resolver;
+ bool ProcessAllSections;
+ RuntimeDyldCheckerImpl *Checker;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_RUNTIMEDYLD_H
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
new file mode 100644
index 0000000..31ce151
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/RuntimeDyldChecker.h
@@ -0,0 +1,102 @@
+//===---- RuntimeDyldChecker.h - RuntimeDyld tester framework -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class MCDisassembler;
+class MemoryBuffer;
+class MCInstPrinter;
+class RuntimeDyld;
+class RuntimeDyldCheckerImpl;
+class raw_ostream;
+
+/// \brief RuntimeDyld invariant checker for verifying that RuntimeDyld has
+/// correctly applied relocations.
+///
+/// The RuntimeDyldChecker class evaluates expressions against an attached
+/// RuntimeDyld instance to verify that relocations have been applied
+/// correctly.
+///
+/// The expression language supports basic pointer arithmetic and bit-masking,
+/// and has limited disassembler integration for accessing instruction
+/// operands and the next PC (program counter) address for each instruction.
+///
+/// The language syntax is:
+///
+/// check = expr '=' expr
+///
+/// expr = binary_expr
+/// | sliceable_expr
+///
+/// sliceable_expr = '*{' number '}' load_addr_expr [slice]
+/// | '(' expr ')' [slice]
+/// | ident_expr [slice]
+/// | number [slice]
+///
+/// slice = '[' high-bit-index ':' low-bit-index ']'
+///
+/// load_addr_expr = symbol
+/// | '(' symbol '+' number ')'
+/// | '(' symbol '-' number ')'
+///
+/// ident_expr = 'decode_operand' '(' symbol ',' operand-index ')'
+/// | 'next_pc' '(' symbol ')'
+/// | 'stub_addr' '(' file-name ',' section-name ',' symbol ')'
+/// | symbol
+///
+/// binary_expr = expr '+' expr
+/// | expr '-' expr
+/// | expr '&' expr
+/// | expr '|' expr
+/// | expr '<<' expr
+/// | expr '>>' expr
+///
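+/// For example, lines like the following (symbol names are illustrative)
+/// could be fed to checkAllRulesInBuffer with a rule prefix such as
+/// "# rtdyld-check:", or to check one expression at a time:
+///
+///   # rtdyld-check: decode_operand(foo, 2) = x
+///   # rtdyld-check: *{8}(x + 8) = 0xdeadbeef
+///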
+class RuntimeDyldChecker {
+public:
+ RuntimeDyldChecker(RuntimeDyld &RTDyld, MCDisassembler *Disassembler,
+ MCInstPrinter *InstPrinter, raw_ostream &ErrStream);
+ ~RuntimeDyldChecker();
+
+  /// \brief Get the associated RTDyld instance.
+  RuntimeDyld& getRTDyld();
+
+  /// \brief Get the associated RTDyld instance (const overload).
+  const RuntimeDyld& getRTDyld() const;
+
+ /// \brief Check a single expression against the attached RuntimeDyld
+ /// instance.
+ bool check(StringRef CheckExpr) const;
+
+ /// \brief Scan the given memory buffer for lines beginning with the string
+ /// in RulePrefix. The remainder of the line is passed to the check
+ /// method to be evaluated as an expression.
+ bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+ /// \brief Returns the address of the requested section (or an error message
+ /// in the second element of the pair if the address cannot be found).
+ ///
+  /// If 'LocalAddress' is true, this returns the address of the section
+ /// within the linker's memory. If 'LocalAddress' is false it returns the
+ /// address within the target process (i.e. the load address).
+ std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+ StringRef SectionName,
+ bool LocalAddress);
+
+private:
+ std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h b/contrib/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
new file mode 100644
index 0000000..7bb96eb
--- /dev/null
+++ b/contrib/llvm/include/llvm/ExecutionEngine/SectionMemoryManager.h
@@ -0,0 +1,123 @@
+//===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of a section-based memory manager used by
+// the MCJIT execution engine and RuntimeDyld.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+#define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Memory.h"
+
+namespace llvm {
+/// This is a simple memory manager which implements the methods called by
+/// the RuntimeDyld class to allocate memory for section-based loading of
+/// objects, usually those generated by the MCJIT execution engine.
+///
+/// This memory manager allocates all section memory as read-write. The
+/// RuntimeDyld will copy JITed section memory into these allocated blocks
+/// and perform any necessary linking and relocations.
+///
+/// Any client using this memory manager MUST ensure that section-specific
+/// page permissions have been applied before attempting to execute functions
+/// in the JITed object. Permissions can be applied either by calling
+/// MCJIT::finalizeObject or by calling SectionMemoryManager::finalizeMemory
+/// directly. Clients of MCJIT should call MCJIT::finalizeObject.
+class SectionMemoryManager : public RTDyldMemoryManager {
+ SectionMemoryManager(const SectionMemoryManager&) = delete;
+ void operator=(const SectionMemoryManager&) = delete;
+
+public:
+ SectionMemoryManager() { }
+ ~SectionMemoryManager() override;
+
+ /// \brief Allocates a memory block of (at least) the given size suitable for
+ /// executable code.
+ ///
+ /// The value of \p Alignment must be a power of two. If \p Alignment is zero
+ /// a default alignment of 16 will be used.
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+  /// \brief Allocates a memory block of (at least) the given size suitable for
+  /// data.
+ ///
+ /// The value of \p Alignment must be a power of two. If \p Alignment is zero
+ /// a default alignment of 16 will be used.
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ /// \brief Update section-specific memory permissions and other attributes.
+ ///
+ /// This method is called when object loading is complete and section page
+ /// permissions can be applied. It is up to the memory manager implementation
+ /// to decide whether or not to act on this method. The memory manager will
+ /// typically allocate all sections as read-write and then apply specific
+ /// permissions when this method is called. Code sections cannot be executed
+ /// until this function has been called. In addition, any cache coherency
+ /// operations needed to reliably use the memory are also performed.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool finalizeMemory(std::string *ErrMsg = nullptr) override;
+
+ /// \brief Invalidate instruction cache for code sections.
+ ///
+ /// Some platforms with separate data cache and instruction cache require
+ /// explicit cache flush, otherwise JIT code manipulations (like resolved
+ /// relocations) will get to the data cache but not to the instruction cache.
+ ///
+ /// This method is called from finalizeMemory.
+ virtual void invalidateInstructionCache();
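+
+  // A sketch of typical use with MCJIT ("M" is an assumed
+  // std::unique_ptr<Module>): the engine takes ownership of the manager,
+  // and MCJIT::finalizeObject eventually calls finalizeMemory to apply
+  // section permissions.
+  //
+  //   auto MM = llvm::make_unique<SectionMemoryManager>();
+  //   ExecutionEngine *EE = EngineBuilder(std::move(M))
+  //                             .setMCJITMemoryManager(std::move(MM))
+  //                             .create();
+  //   EE->finalizeObject();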
+
+private:
+ struct FreeMemBlock {
+ // The actual block of free memory
+ sys::MemoryBlock Free;
+    // If there is a pending allocation from the same reservation right before
+    // this block, store that allocation's index into PendingMem, so that the
+    // pending region can be updated if part of this block is allocated, rather
+    // than having to create a new one.
+ unsigned PendingPrefixIndex;
+ };
+
+ struct MemoryGroup {
+ // PendingMem contains all blocks of memory (subblocks of AllocatedMem)
+ // which have not yet had their permissions applied, but have been given
+    // out to the user. FreeMem contains all blocks of memory which have
+    // neither had their permissions applied nor been given out to the user.
+ SmallVector<sys::MemoryBlock, 16> PendingMem;
+ SmallVector<FreeMemBlock, 16> FreeMem;
+
+ // All memory blocks that have been requested from the system
+ SmallVector<sys::MemoryBlock, 16> AllocatedMem;
+
+ sys::MemoryBlock Near;
+ };
+
+ uint8_t *allocateSection(MemoryGroup &MemGroup, uintptr_t Size,
+ unsigned Alignment);
+
+ std::error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions);
+
+ MemoryGroup CodeMem;
+ MemoryGroup RWDataMem;
+ MemoryGroup RODataMem;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H
diff --git a/contrib/llvm/include/llvm/IR/Argument.h b/contrib/llvm/include/llvm/IR/Argument.h
new file mode 100644
index 0000000..0092f49
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Argument.h
@@ -0,0 +1,135 @@
+//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Argument class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ARGUMENT_H
+#define LLVM_IR_ARGUMENT_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+
+template <typename NodeTy> class SymbolTableListTraits;
+
+/// \brief LLVM Argument representation
+///
+/// This class represents an incoming formal argument to a Function. A formal
+/// argument, since it is ``formal'', does not contain an actual value but
+/// instead represents the type, argument number, and attributes of an argument
+/// for a specific function. When used in the body of said function, the
+/// argument of course represents the value of the actual argument that the
+/// function was called with.
+class Argument : public Value, public ilist_node<Argument> {
+ virtual void anchor();
+ Function *Parent;
+
+ friend class SymbolTableListTraits<Argument>;
+ void setParent(Function *parent);
+
+public:
+ /// \brief Constructor.
+ ///
+ /// If \p F is specified, the argument is inserted at the end of the argument
+ /// list for \p F.
+ explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr);
+
+ inline const Function *getParent() const { return Parent; }
+ inline Function *getParent() { return Parent; }
+
+ /// \brief Return the index of this formal argument in its containing
+ /// function.
+ ///
+  /// For example, in "void foo(int a, float b)", a is 0 and b is 1.
+ unsigned getArgNo() const;
+
+ /// \brief Return true if this argument has the nonnull attribute on it in
+ /// its containing function. Also returns true if at least one byte is known
+ /// to be dereferenceable and the pointer is in addrspace(0).
+ bool hasNonNullAttr() const;
+
+ /// \brief If this argument has the dereferenceable attribute on it in its
+ /// containing function, return the number of bytes known to be
+ /// dereferenceable. Otherwise, zero is returned.
+ uint64_t getDereferenceableBytes() const;
+
+ /// \brief If this argument has the dereferenceable_or_null attribute on
+ /// it in its containing function, return the number of bytes known to be
+ /// dereferenceable. Otherwise, zero is returned.
+ uint64_t getDereferenceableOrNullBytes() const;
+
+ /// \brief Return true if this argument has the byval attribute on it in its
+ /// containing function.
+ bool hasByValAttr() const;
+
+ /// \brief Return true if this argument has the byval attribute or inalloca
+ /// attribute on it in its containing function. These attributes both
+ /// represent arguments being passed by value.
+ bool hasByValOrInAllocaAttr() const;
+
+ /// \brief If this is a byval or inalloca argument, return its alignment.
+ unsigned getParamAlignment() const;
+
+ /// \brief Return true if this argument has the nest attribute on it in its
+ /// containing function.
+ bool hasNestAttr() const;
+
+ /// \brief Return true if this argument has the noalias attribute on it in its
+ /// containing function.
+ bool hasNoAliasAttr() const;
+
+ /// \brief Return true if this argument has the nocapture attribute on it in
+ /// its containing function.
+ bool hasNoCaptureAttr() const;
+
+ /// \brief Return true if this argument has the sret attribute on it in its
+ /// containing function.
+ bool hasStructRetAttr() const;
+
+ /// \brief Return true if this argument has the returned attribute on it in
+ /// its containing function.
+ bool hasReturnedAttr() const;
+
+ /// \brief Return true if this argument has the readonly or readnone attribute
+ /// on it in its containing function.
+ bool onlyReadsMemory() const;
+
+ /// \brief Return true if this argument has the inalloca attribute on it in
+ /// its containing function.
+ bool hasInAllocaAttr() const;
+
+ /// \brief Return true if this argument has the zext attribute on it in its
+ /// containing function.
+ bool hasZExtAttr() const;
+
+ /// \brief Return true if this argument has the sext attribute on it in its
+ /// containing function.
+ bool hasSExtAttr() const;
+
+  /// \brief Add an Attribute to an argument.
+  void addAttr(AttributeSet AS);
+
+  /// \brief Remove an Attribute from an argument.
+  void removeAttr(AttributeSet AS);
+
+ /// \brief Method for support type inquiry through isa, cast, and
+ /// dyn_cast.
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == ArgumentVal;
+ }
+};
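+
+// A traversal sketch for an existing Function "F": formal arguments are
+// visited in declaration order, so getArgNo() matches the position.
+//
+//   for (const Argument &A : F->args())
+//     if (A.hasByValAttr())
+//       errs() << "arg #" << A.getArgNo() << " is passed by value\n";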
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/AssemblyAnnotationWriter.h b/contrib/llvm/include/llvm/IR/AssemblyAnnotationWriter.h
new file mode 100644
index 0000000..6e1f5c4
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/AssemblyAnnotationWriter.h
@@ -0,0 +1,62 @@
+//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Clients of the assembly writer can use this interface to add their own
+// special-purpose annotations to LLVM assembly language printouts. Note that
+// the assembly parser won't be able to parse these, in general, so
+// implementations are advised to print stuff as LLVM comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
+#define LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
+
+namespace llvm {
+
+class Function;
+class BasicBlock;
+class Instruction;
+class Value;
+class formatted_raw_ostream;
+
+class AssemblyAnnotationWriter {
+public:
+ virtual ~AssemblyAnnotationWriter();
+
+ /// emitFunctionAnnot - This may be implemented to emit a string right before
+ /// the start of a function.
+ virtual void emitFunctionAnnot(const Function *,
+ formatted_raw_ostream &) {}
+
+ /// emitBasicBlockStartAnnot - This may be implemented to emit a string right
+ /// after the basic block label, but before the first instruction in the
+ /// block.
+ virtual void emitBasicBlockStartAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
+ }
+
+ /// emitBasicBlockEndAnnot - This may be implemented to emit a string right
+ /// after the basic block.
+ virtual void emitBasicBlockEndAnnot(const BasicBlock *,
+ formatted_raw_ostream &) {
+ }
+
+ /// emitInstructionAnnot - This may be implemented to emit a string right
+ /// before an instruction is emitted.
+ virtual void emitInstructionAnnot(const Instruction *,
+ formatted_raw_ostream &) {}
+
+ /// printInfoComment - This may be implemented to emit a comment to the
+ /// right of an instruction or global value.
+ virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
+};
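+
+// A minimal sketch of a concrete writer: it numbers each instruction, and
+// the annotation is emitted as an LLVM comment so the printed module stays
+// parseable. Pass an instance to Module::print to use it.
+//
+//   struct CountAnnotator : AssemblyAnnotationWriter {
+//     unsigned N = 0;
+//     void emitInstructionAnnot(const Instruction *,
+//                               formatted_raw_ostream &OS) override {
+//       OS << "; instruction #" << N++ << "\n";
+//     }
+//   };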
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Attributes.h b/contrib/llvm/include/llvm/IR/Attributes.h
new file mode 100644
index 0000000..0e33731
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Attributes.h
@@ -0,0 +1,547 @@
+//===-- llvm/Attributes.h - Container for Attributes ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file contains the simple types necessary to represent the
+/// attributes associated with functions and their calls.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_ATTRIBUTES_H
+#define LLVM_IR_ATTRIBUTES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <bitset>
+#include <cassert>
+#include <map>
+#include <string>
+
+namespace llvm {
+
+class AttrBuilder;
+class AttributeImpl;
+class AttributeSetImpl;
+class AttributeSetNode;
+class Constant;
+template<typename T> struct DenseMapInfo;
+class Function;
+class LLVMContext;
+class Type;
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief Functions, function parameters, and return types can have attributes
+/// to indicate how they should be treated by optimizations and code
+/// generation. This class represents one of those attributes. It's light-weight
+/// and should be passed around by-value.
+class Attribute {
+public:
+ /// This enumeration lists the attributes that can be associated with
+ /// parameters, function results, or the function itself.
+ ///
+ /// Note: The `uwtable' attribute is about the ABI or the user mandating an
+ /// entry in the unwind table. The `nounwind' attribute is about an exception
+ /// passing by the function.
+ ///
+ /// In a theoretical system that uses tables for profiling and SjLj for
+ /// exceptions, they would be fully independent. In a normal system that uses
+ /// tables for both, the semantics are:
+ ///
+ /// nil = Needs an entry because an exception might pass by.
+ /// nounwind = No need for an entry
+ /// uwtable = Needs an entry because the ABI says so and because
+ /// an exception might pass by.
+ /// uwtable + nounwind = Needs an entry because the ABI says so.
+
+ enum AttrKind {
+ // IR-Level Attributes
+ None, ///< No attributes have been set
+ #define GET_ATTR_ENUM
+ #include "llvm/IR/Attributes.inc"
+    EndAttrKinds ///< Sentinel value useful for loops
+ };
+
+private:
+ AttributeImpl *pImpl;
+ Attribute(AttributeImpl *A) : pImpl(A) {}
+
+public:
+ Attribute() : pImpl(nullptr) {}
+
+ //===--------------------------------------------------------------------===//
+ // Attribute Construction
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return a uniquified Attribute object.
+ static Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val = 0);
+ static Attribute get(LLVMContext &Context, StringRef Kind,
+ StringRef Val = StringRef());
+
+ /// \brief Return a uniquified Attribute object that has the specific
+ /// alignment set.
+ static Attribute getWithAlignment(LLVMContext &Context, uint64_t Align);
+ static Attribute getWithStackAlignment(LLVMContext &Context, uint64_t Align);
+ static Attribute getWithDereferenceableBytes(LLVMContext &Context,
+ uint64_t Bytes);
+ static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context,
+ uint64_t Bytes);
+
+ //===--------------------------------------------------------------------===//
+ // Attribute Accessors
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return true if the attribute is an Attribute::AttrKind type.
+ bool isEnumAttribute() const;
+
+ /// \brief Return true if the attribute is an integer attribute.
+ bool isIntAttribute() const;
+
+ /// \brief Return true if the attribute is a string (target-dependent)
+ /// attribute.
+ bool isStringAttribute() const;
+
+ /// \brief Return true if the attribute is present.
+ bool hasAttribute(AttrKind Val) const;
+
+ /// \brief Return true if the target-dependent attribute is present.
+ bool hasAttribute(StringRef Val) const;
+
+ /// \brief Return the attribute's kind as an enum (Attribute::AttrKind). This
+ /// requires the attribute to be an enum or alignment attribute.
+ Attribute::AttrKind getKindAsEnum() const;
+
+ /// \brief Return the attribute's value as an integer. This requires that the
+ /// attribute be an alignment attribute.
+ uint64_t getValueAsInt() const;
+
+ /// \brief Return the attribute's kind as a string. This requires the
+ /// attribute to be a string attribute.
+ StringRef getKindAsString() const;
+
+ /// \brief Return the attribute's value as a string. This requires the
+ /// attribute to be a string attribute.
+ StringRef getValueAsString() const;
+
+ /// \brief Returns the alignment field of an attribute as a byte alignment
+ /// value.
+ unsigned getAlignment() const;
+
+ /// \brief Returns the stack alignment field of an attribute as a byte
+ /// alignment value.
+ unsigned getStackAlignment() const;
+
+ /// \brief Returns the number of dereferenceable bytes from the
+ /// dereferenceable attribute.
+ uint64_t getDereferenceableBytes() const;
+
+ /// \brief Returns the number of dereferenceable_or_null bytes from the
+ /// dereferenceable_or_null attribute.
+ uint64_t getDereferenceableOrNullBytes() const;
+
+  /// \brief Convert the Attribute to a string of its equivalent mnemonic. This
+  /// is, presumably, for writing out the mnemonics for the assembly writer.
+ std::string getAsString(bool InAttrGrp = false) const;
+
+ /// \brief Equality and non-equality operators.
+ bool operator==(Attribute A) const { return pImpl == A.pImpl; }
+ bool operator!=(Attribute A) const { return pImpl != A.pImpl; }
+
+ /// \brief Less-than operator. Useful for sorting the attributes list.
+ bool operator<(Attribute A) const;
+
+ void Profile(FoldingSetNodeID &ID) const {
+ ID.AddPointer(pImpl);
+ }
+};
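+
+// Construction and inspection, sketched against an assumed LLVMContext
+// "Ctx":
+//
+//   Attribute A = Attribute::get(Ctx, Attribute::NoUnwind);
+//   assert(A.isEnumAttribute() && A.hasAttribute(Attribute::NoUnwind));
+//   Attribute AlignAttr = Attribute::getWithAlignment(Ctx, 16);
+//   assert(AlignAttr.getAlignment() == 16);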
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class holds the attributes for a function, its return value, and
+/// its parameters. You access the attributes for each of them via an index into
+/// the AttributeSet object. The function attributes are at index
+/// `AttributeSet::FunctionIndex', the return value is at index
+/// `AttributeSet::ReturnIndex', and the attributes for the parameters start at
+/// index `1'.
+class AttributeSet {
+public:
+ enum AttrIndex : unsigned {
+ ReturnIndex = 0U,
+ FunctionIndex = ~0U
+ };
+
+private:
+ friend class AttrBuilder;
+ friend class AttributeSetImpl;
+ template <typename Ty> friend struct DenseMapInfo;
+
+ /// \brief The attributes that we are managing. This can be null to represent
+ /// the empty attributes list.
+ AttributeSetImpl *pImpl;
+
+ /// \brief The attributes for the specified index are returned.
+ AttributeSetNode *getAttributes(unsigned Index) const;
+
+ /// \brief Create an AttributeSet with the specified parameters in it.
+ static AttributeSet get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned, Attribute> > Attrs);
+ static AttributeSet get(LLVMContext &C,
+ ArrayRef<std::pair<unsigned,
+ AttributeSetNode*> > Attrs);
+
+ static AttributeSet getImpl(LLVMContext &C,
+ ArrayRef<std::pair<unsigned,
+ AttributeSetNode*> > Attrs);
+
+ explicit AttributeSet(AttributeSetImpl *LI) : pImpl(LI) {}
+
+public:
+ AttributeSet() : pImpl(nullptr) {}
+
+ //===--------------------------------------------------------------------===//
+ // AttributeSet Construction and Mutation
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return an AttributeSet with the specified parameters in it.
+ static AttributeSet get(LLVMContext &C, ArrayRef<AttributeSet> Attrs);
+ static AttributeSet get(LLVMContext &C, unsigned Index,
+ ArrayRef<Attribute::AttrKind> Kind);
+ static AttributeSet get(LLVMContext &C, unsigned Index, const AttrBuilder &B);
+
+ /// \brief Add an attribute to the attribute set at the given index. Because
+ /// attribute sets are immutable, this returns a new set.
+ AttributeSet addAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Attr) const;
+
+ /// \brief Add an attribute to the attribute set at the given index. Because
+ /// attribute sets are immutable, this returns a new set.
+ AttributeSet addAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind) const;
+ AttributeSet addAttribute(LLVMContext &C, unsigned Index,
+ StringRef Kind, StringRef Value) const;
+
+ /// Add an attribute to the attribute set at the given indices. Because
+ /// attribute sets are immutable, this returns a new set.
+ AttributeSet addAttribute(LLVMContext &C, ArrayRef<unsigned> Indices,
+ Attribute A) const;
+
+ /// \brief Add attributes to the attribute set at the given index. Because
+ /// attribute sets are immutable, this returns a new set.
+ AttributeSet addAttributes(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs) const;
+
+ /// \brief Remove the specified attribute at the specified index from this
+ /// attribute list. Because attribute lists are immutable, this returns the
+ /// new list.
+ AttributeSet removeAttribute(LLVMContext &C, unsigned Index,
+ Attribute::AttrKind Attr) const;
+
+ /// \brief Remove the specified attributes at the specified index from this
+ /// attribute list. Because attribute lists are immutable, this returns the
+ /// new list.
+ AttributeSet removeAttributes(LLVMContext &C, unsigned Index,
+ AttributeSet Attrs) const;
+
+ /// \brief Remove the specified attributes at the specified index from this
+ /// attribute list. Because attribute lists are immutable, this returns the
+ /// new list.
+ AttributeSet removeAttributes(LLVMContext &C, unsigned Index,
+ const AttrBuilder &Attrs) const;
+
+ /// \brief Add the dereferenceable attribute to the attribute set at the given
+ /// index. Because attribute sets are immutable, this returns a new set.
+ AttributeSet addDereferenceableAttr(LLVMContext &C, unsigned Index,
+ uint64_t Bytes) const;
+
+ /// \brief Add the dereferenceable_or_null attribute to the attribute set at
+ /// the given index. Because attribute sets are immutable, this returns a new
+ /// set.
+ AttributeSet addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index,
+ uint64_t Bytes) const;
+
+ //===--------------------------------------------------------------------===//
+ // AttributeSet Accessors
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Retrieve the LLVM context.
+ LLVMContext &getContext() const;
+
+ /// \brief The attributes for the specified index are returned.
+ AttributeSet getParamAttributes(unsigned Index) const;
+
+ /// \brief The attributes for the ret value are returned.
+ AttributeSet getRetAttributes() const;
+
+ /// \brief The function attributes are returned.
+ AttributeSet getFnAttributes() const;
+
+ /// \brief Return true if the attribute exists at the given index.
+ bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const;
+
+ /// \brief Return true if the attribute exists at the given index.
+ bool hasAttribute(unsigned Index, StringRef Kind) const;
+
+  /// \brief Return true if any attribute exists at the given index.
+ bool hasAttributes(unsigned Index) const;
+
+ /// \brief Return true if the specified attribute is set for at least one
+ /// parameter or for the return value.
+ bool hasAttrSomewhere(Attribute::AttrKind Attr) const;
+
+ /// \brief Return the attribute object that exists at the given index.
+ Attribute getAttribute(unsigned Index, Attribute::AttrKind Kind) const;
+
+ /// \brief Return the attribute object that exists at the given index.
+ Attribute getAttribute(unsigned Index, StringRef Kind) const;
+
+ /// \brief Return the alignment for the specified function parameter.
+ unsigned getParamAlignment(unsigned Index) const;
+
+ /// \brief Get the stack alignment.
+ unsigned getStackAlignment(unsigned Index) const;
+
+ /// \brief Get the number of dereferenceable bytes (or zero if unknown).
+ uint64_t getDereferenceableBytes(unsigned Index) const;
+
+ /// \brief Get the number of dereferenceable_or_null bytes (or zero if
+ /// unknown).
+ uint64_t getDereferenceableOrNullBytes(unsigned Index) const;
+
+ /// \brief Return the attributes at the index as a string.
+ std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
+
+ typedef ArrayRef<Attribute>::iterator iterator;
+
+ iterator begin(unsigned Slot) const;
+ iterator end(unsigned Slot) const;
+
+ /// operator==/!= - Provide equality predicates.
+ bool operator==(const AttributeSet &RHS) const {
+ return pImpl == RHS.pImpl;
+ }
+ bool operator!=(const AttributeSet &RHS) const {
+ return pImpl != RHS.pImpl;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // AttributeSet Introspection
+ //===--------------------------------------------------------------------===//
+
+ // FIXME: Remove this.
+ uint64_t Raw(unsigned Index) const;
+
+ /// \brief Return a raw pointer that uniquely identifies this attribute list.
+ void *getRawPointer() const {
+ return pImpl;
+ }
+
+ /// \brief Return true if there are no attributes.
+ bool isEmpty() const {
+ return getNumSlots() == 0;
+ }
+
+ /// \brief Return the number of slots used in this attribute list. This is
+ /// the number of arguments that have an attribute set on them (including the
+ /// function itself).
+ unsigned getNumSlots() const;
+
+ /// \brief Return the index for the given slot.
+ unsigned getSlotIndex(unsigned Slot) const;
+
+ /// \brief Return the attributes at the given slot.
+ AttributeSet getSlotAttributes(unsigned Slot) const;
+
+ void dump() const;
+};
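+
+// The index convention in practice (a sketch; "Ctx" is an assumed
+// LLVMContext): function attributes live at FunctionIndex, return-value
+// attributes at ReturnIndex, and the first parameter at index 1.
+//
+//   AttributeSet AS;
+//   AS = AS.addAttribute(Ctx, AttributeSet::FunctionIndex,
+//                        Attribute::NoUnwind);
+//   AS = AS.addAttribute(Ctx, 1, Attribute::NonNull);
+//   assert(AS.hasAttribute(1, Attribute::NonNull));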
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief Provide DenseMapInfo for AttributeSet.
+template<> struct DenseMapInfo<AttributeSet> {
+ static inline AttributeSet getEmptyKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-1);
+ Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
+ return AttributeSet(reinterpret_cast<AttributeSetImpl*>(Val));
+ }
+ static inline AttributeSet getTombstoneKey() {
+ uintptr_t Val = static_cast<uintptr_t>(-2);
+ Val <<= PointerLikeTypeTraits<void*>::NumLowBitsAvailable;
+ return AttributeSet(reinterpret_cast<AttributeSetImpl*>(Val));
+ }
+ static unsigned getHashValue(AttributeSet AS) {
+ return (unsigned((uintptr_t)AS.pImpl) >> 4) ^
+ (unsigned((uintptr_t)AS.pImpl) >> 9);
+ }
+ static bool isEqual(AttributeSet LHS, AttributeSet RHS) { return LHS == RHS; }
+};
+
+//===----------------------------------------------------------------------===//
+/// \class
+/// \brief This class is used in conjunction with the Attribute::get method to
+/// create an Attribute object. The object itself is uniquified. The Builder's
+/// value, however, is not. So this can be used as a quick way to test for
+/// equality, presence of attributes, etc.
+class AttrBuilder {
+ std::bitset<Attribute::EndAttrKinds> Attrs;
+ std::map<std::string, std::string> TargetDepAttrs;
+ uint64_t Alignment;
+ uint64_t StackAlignment;
+ uint64_t DerefBytes;
+ uint64_t DerefOrNullBytes;
+
+public:
+ AttrBuilder()
+ : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
+ DerefOrNullBytes(0) {}
+ explicit AttrBuilder(uint64_t Val)
+ : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
+ DerefOrNullBytes(0) {
+ addRawValue(Val);
+ }
+ AttrBuilder(const Attribute &A)
+ : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
+ DerefOrNullBytes(0) {
+ addAttribute(A);
+ }
+ AttrBuilder(AttributeSet AS, unsigned Idx);
+
+ void clear();
+
+ /// \brief Add an attribute to the builder.
+ AttrBuilder &addAttribute(Attribute::AttrKind Val);
+
+ /// \brief Add the Attribute object to the builder.
+ AttrBuilder &addAttribute(Attribute A);
+
+ /// \brief Add the target-dependent attribute to the builder.
+ AttrBuilder &addAttribute(StringRef A, StringRef V = StringRef());
+
+ /// \brief Remove an attribute from the builder.
+ AttrBuilder &removeAttribute(Attribute::AttrKind Val);
+
+ /// \brief Remove the attributes from the builder.
+ AttrBuilder &removeAttributes(AttributeSet A, uint64_t Index);
+
+  /// \brief Remove the target-dependent attribute from the builder.
+  AttrBuilder &removeAttribute(StringRef A);
+
+  /// \brief Add the attributes from \p B into this builder.
+  AttrBuilder &merge(const AttrBuilder &B);
+
+  /// \brief Remove the attributes in \p B from this builder.
+  AttrBuilder &remove(const AttrBuilder &B);
+
+ /// \brief Return true if the builder has any attribute that's in the
+ /// specified builder.
+ bool overlaps(const AttrBuilder &B) const;
+
+ /// \brief Return true if the builder has the specified attribute.
+ bool contains(Attribute::AttrKind A) const {
+ assert((unsigned)A < Attribute::EndAttrKinds && "Attribute out of range!");
+ return Attrs[A];
+ }
+
+ /// \brief Return true if the builder has the specified target-dependent
+ /// attribute.
+ bool contains(StringRef A) const;
+
+ /// \brief Return true if the builder has IR-level attributes.
+ bool hasAttributes() const;
+
+ /// \brief Return true if the builder has any attribute that's in the
+ /// specified attribute.
+ bool hasAttributes(AttributeSet A, uint64_t Index) const;
+
+ /// \brief Return true if the builder has an alignment attribute.
+ bool hasAlignmentAttr() const;
+
+ /// \brief Retrieve the alignment attribute, if it exists.
+ uint64_t getAlignment() const { return Alignment; }
+
+ /// \brief Retrieve the stack alignment attribute, if it exists.
+ uint64_t getStackAlignment() const { return StackAlignment; }
+
+ /// \brief Retrieve the number of dereferenceable bytes, if the
+ /// dereferenceable attribute exists (zero is returned otherwise).
+ uint64_t getDereferenceableBytes() const { return DerefBytes; }
+
+ /// \brief Retrieve the number of dereferenceable_or_null bytes, if the
+ /// dereferenceable_or_null attribute exists (zero is returned otherwise).
+ uint64_t getDereferenceableOrNullBytes() const { return DerefOrNullBytes; }
+
+ /// \brief This turns an int alignment (which must be a power of 2) into the
+ /// form used internally in Attribute.
+ AttrBuilder &addAlignmentAttr(unsigned Align);
+
+ /// \brief This turns an int stack alignment (which must be a power of 2) into
+ /// the form used internally in Attribute.
+ AttrBuilder &addStackAlignmentAttr(unsigned Align);
+
+ /// \brief This turns the number of dereferenceable bytes into the form used
+ /// internally in Attribute.
+ AttrBuilder &addDereferenceableAttr(uint64_t Bytes);
+
+ /// \brief This turns the number of dereferenceable_or_null bytes into the
+ /// form used internally in Attribute.
+ AttrBuilder &addDereferenceableOrNullAttr(uint64_t Bytes);
+
+ /// \brief Return true if the builder contains no target-independent
+ /// attributes.
+ bool empty() const { return Attrs.none(); }
+
+ // Iterators for target-dependent attributes.
+ typedef std::pair<std::string, std::string> td_type;
+ typedef std::map<std::string, std::string>::iterator td_iterator;
+ typedef std::map<std::string, std::string>::const_iterator td_const_iterator;
+ typedef llvm::iterator_range<td_iterator> td_range;
+ typedef llvm::iterator_range<td_const_iterator> td_const_range;
+
+ td_iterator td_begin() { return TargetDepAttrs.begin(); }
+ td_iterator td_end() { return TargetDepAttrs.end(); }
+
+ td_const_iterator td_begin() const { return TargetDepAttrs.begin(); }
+ td_const_iterator td_end() const { return TargetDepAttrs.end(); }
+
+ td_range td_attrs() { return td_range(td_begin(), td_end()); }
+ td_const_range td_attrs() const {
+ return td_const_range(td_begin(), td_end());
+ }
+
+ bool td_empty() const { return TargetDepAttrs.empty(); }
+
+ bool operator==(const AttrBuilder &B);
+ bool operator!=(const AttrBuilder &B) {
+ return !(*this == B);
+ }
+
+ // FIXME: Remove this in 4.0.
+
+ /// \brief Add the raw value to the internal representation.
+ AttrBuilder &addRawValue(uint64_t Val);
+};
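+
+// Building attributes incrementally and freezing them into an AttributeSet
+// (a sketch; "Ctx" is an assumed LLVMContext):
+//
+//   AttrBuilder B;
+//   B.addAttribute(Attribute::NoUnwind);
+//   B.addAttribute("no-frame-pointer-elim", "true");
+//   AttributeSet AS =
+//       AttributeSet::get(Ctx, AttributeSet::FunctionIndex, B);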
+
+namespace AttributeFuncs {
+
+/// \brief Which attributes cannot be applied to a type.
+AttrBuilder typeIncompatible(Type *Ty);
+
+/// \returns True if the two functions have compatible target-independent
+/// attributes for inlining purposes.
+bool areInlineCompatible(const Function &Caller, const Function &Callee);
+
+/// \brief Merge caller's and callee's attributes.
+void mergeAttributesForInlining(Function &Caller, const Function &Callee);
+
+} // end AttributeFuncs namespace
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Attributes.td b/contrib/llvm/include/llvm/IR/Attributes.td
new file mode 100644
index 0000000..797cd55
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Attributes.td
@@ -0,0 +1,192 @@
+/// Attribute base class.
+class Attr<string S> {
+ // String representation of this attribute in the IR.
+ string AttrString = S;
+}
+
+/// Enum attribute.
+class EnumAttr<string S> : Attr<S>;
+
+/// StringBool attribute.
+class StrBoolAttr<string S> : Attr<S>;
+
+/// Target-independent enum attributes.
+
+/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
+/// 0 means unaligned (different from align(1)).
+def Alignment : EnumAttr<"align">;
+
+/// inline=always.
+def AlwaysInline : EnumAttr<"alwaysinline">;
+
+/// Function can access memory only using pointers based on its arguments.
+def ArgMemOnly : EnumAttr<"argmemonly">;
+
+/// Callee is recognized as a builtin, despite nobuiltin attribute on its
+/// declaration.
+def Builtin : EnumAttr<"builtin">;
+
+/// Pass structure by value.
+def ByVal : EnumAttr<"byval">;
+
+/// Marks function as being in a cold path.
+def Cold : EnumAttr<"cold">;
+
+/// Can only be moved to control-equivalent blocks.
+def Convergent : EnumAttr<"convergent">;
+
+/// Pointer is known to be dereferenceable.
+def Dereferenceable : EnumAttr<"dereferenceable">;
+
+/// Pointer is either null or dereferenceable.
+def DereferenceableOrNull : EnumAttr<"dereferenceable_or_null">;
+
+/// Function may only access memory that is inaccessible from IR.
+def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly">;
+
+/// Function may only access memory that is either inaccessible from the IR,
+/// or pointed to by its pointer arguments.
+def InaccessibleMemOrArgMemOnly : EnumAttr<"inaccessiblemem_or_argmemonly">;
+
+/// Pass structure in an alloca.
+def InAlloca : EnumAttr<"inalloca">;
+
+/// Source said inlining was desirable.
+def InlineHint : EnumAttr<"inlinehint">;
+
+/// Force argument to be passed in register.
+def InReg : EnumAttr<"inreg">;
+
+/// Build jump-instruction tables and replace refs.
+def JumpTable : EnumAttr<"jumptable">;
+
+/// Function must be optimized for size first.
+def MinSize : EnumAttr<"minsize">;
+
+/// Naked function.
+def Naked : EnumAttr<"naked">;
+
+/// Nested function static chain.
+def Nest : EnumAttr<"nest">;
+
+/// Considered to not alias after call.
+def NoAlias : EnumAttr<"noalias">;
+
+/// Callee isn't recognized as a builtin.
+def NoBuiltin : EnumAttr<"nobuiltin">;
+
+/// Function creates no aliases of pointer.
+def NoCapture : EnumAttr<"nocapture">;
+
+/// Call cannot be duplicated.
+def NoDuplicate : EnumAttr<"noduplicate">;
+
+/// Disable implicit floating point insts.
+def NoImplicitFloat : EnumAttr<"noimplicitfloat">;
+
+/// inline=never.
+def NoInline : EnumAttr<"noinline">;
+
+/// Function is called early and/or often, so lazy binding isn't worthwhile.
+def NonLazyBind : EnumAttr<"nonlazybind">;
+
+/// Pointer is known to be not null.
+def NonNull : EnumAttr<"nonnull">;
+
+/// The function does not recurse.
+def NoRecurse : EnumAttr<"norecurse">;
+
+/// Disable redzone.
+def NoRedZone : EnumAttr<"noredzone">;
+
+/// Mark the function as not returning.
+def NoReturn : EnumAttr<"noreturn">;
+
+/// Function doesn't unwind stack.
+def NoUnwind : EnumAttr<"nounwind">;
+
+/// opt_size.
+def OptimizeForSize : EnumAttr<"optsize">;
+
+/// Function must not be optimized.
+def OptimizeNone : EnumAttr<"optnone">;
+
+/// Function does not access memory.
+def ReadNone : EnumAttr<"readnone">;
+
+/// Function only reads from memory.
+def ReadOnly : EnumAttr<"readonly">;
+
+/// Return value is always equal to this argument.
+def Returned : EnumAttr<"returned">;
+
+/// Function can return twice.
+def ReturnsTwice : EnumAttr<"returns_twice">;
+
+/// Safe Stack protection.
+def SafeStack : EnumAttr<"safestack">;
+
+/// Sign extended before/after call.
+def SExt : EnumAttr<"signext">;
+
+/// Alignment of stack for function (3 bits) stored as log2 of alignment with
+/// +1 bias; 0 means unaligned (different from alignstack=(1)).
+def StackAlignment : EnumAttr<"alignstack">;
+
+/// Stack protection.
+def StackProtect : EnumAttr<"ssp">;
+
+/// Stack protection required.
+def StackProtectReq : EnumAttr<"sspreq">;
+
+/// Strong Stack protection.
+def StackProtectStrong : EnumAttr<"sspstrong">;
+
+/// Hidden pointer to structure to return.
+def StructRet : EnumAttr<"sret">;
+
+/// AddressSanitizer is on.
+def SanitizeAddress : EnumAttr<"sanitize_address">;
+
+/// ThreadSanitizer is on.
+def SanitizeThread : EnumAttr<"sanitize_thread">;
+
+/// MemorySanitizer is on.
+def SanitizeMemory : EnumAttr<"sanitize_memory">;
+
+/// Function must be in an unwind table.
+def UWTable : EnumAttr<"uwtable">;
+
+/// Zero extended before/after call.
+def ZExt : EnumAttr<"zeroext">;
+
+/// Target-independent string attributes.
+def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
+def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
+def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
+def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
+
+class CompatRule<string F> {
+ // The name of the function called to check the attribute of the caller and
+ // callee and decide whether inlining should be allowed. The function's
+ // signature must match "bool(const Function&, const Function &)", where the
+ // first parameter is the reference to the caller and the second parameter is
+ // the reference to the callee. It must return false if the attributes of the
+ // caller and callee are incompatible, and true otherwise.
+ string CompatFunc = F;
+}
+
+def : CompatRule<"isEqual<SanitizeAddressAttr>">;
+def : CompatRule<"isEqual<SanitizeThreadAttr>">;
+def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
+
+class MergeRule<string F> {
+ // The name of the function called to merge the attributes of the caller and
+ // callee. The function's signature must match
+ // "void(Function&, const Function &)", where the first parameter is the
+ // reference to the caller and the second parameter is the reference to the
+ // callee.
+ string MergeFunc = F;
+}
+
+def : MergeRule<"adjustCallerSSPLevel">;
diff --git a/contrib/llvm/include/llvm/IR/AutoUpgrade.h b/contrib/llvm/include/llvm/IR/AutoUpgrade.h
new file mode 100644
index 0000000..a4b3c41
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/AutoUpgrade.h
@@ -0,0 +1,71 @@
+//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These functions are implemented by lib/IR/AutoUpgrade.cpp.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_AUTOUPGRADE_H
+#define LLVM_IR_AUTOUPGRADE_H
+
+#include <string>
+
+namespace llvm {
+ class CallInst;
+ class Constant;
+ class Function;
+ class Instruction;
+ class Module;
+ class GlobalVariable;
+ class Type;
+ class Value;
+
+  /// This is a more granular function that simply checks an intrinsic
+  /// function for upgrading, and returns true if it requires upgrading. It
+  /// may return null in NewFn if all calls to the original intrinsic
+  /// function should be transformed to non-function-call instructions.
+ bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);
+
+ /// This is the complement to the above, replacing a specific call to an
+ /// intrinsic function with a call to the specified new function.
+ void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);
+
+ /// This is an auto-upgrade hook for any old intrinsic function syntaxes
+ /// which need to have both the function updated as well as all calls updated
+ /// to the new function. This should only be run in a post-processing fashion
+ /// so that it can update all calls to the old function.
+ void UpgradeCallsToIntrinsic(Function* F);
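+
+  // The usual driving pattern, sketched for an assumed Module "M". The
+  // iterator is advanced before upgrading because the upgrade may erase F.
+  //
+  //   for (auto FI = M.begin(), FE = M.end(); FI != FE; ) {
+  //     Function *F = &*FI++;
+  //     Function *NewFn;
+  //     if (UpgradeIntrinsicFunction(F, NewFn))
+  //       UpgradeCallsToIntrinsic(F);
+  //   }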
+
+ /// This checks for global variables which should be upgraded. It returns true
+ /// if it requires upgrading.
+ bool UpgradeGlobalVariable(GlobalVariable *GV);
+
+ /// If the TBAA tag for the given instruction uses the scalar TBAA format,
+ /// we upgrade it to the struct-path aware TBAA format.
+ void UpgradeInstWithTBAATag(Instruction *I);
+
+ /// This is an auto-upgrade for bitcast between pointers with different
+ /// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
+ Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
+ Instruction *&Temp);
+
+ /// This is an auto-upgrade for bitcast constant expression between pointers
+ /// with different address spaces: the instruction is replaced by a pair
+ /// ptrtoint+inttoptr.
+ Value *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);
+
+  /// Check the debug info version number; if it is outdated, drop the debug
+  /// info. Return true if the module is modified.
+ bool UpgradeDebugInfo(Module &M);
+
+ /// Upgrade a metadata string constant in place.
+ void UpgradeMDStringConstant(std::string &String);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/BasicBlock.h b/contrib/llvm/include/llvm/IR/BasicBlock.h
new file mode 100644
index 0000000..c6b54d3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/BasicBlock.h
@@ -0,0 +1,341 @@
+//===-- llvm/BasicBlock.h - Represent a basic block in the VM ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the BasicBlock class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_BASICBLOCK_H
+#define LLVM_IR_BASICBLOCK_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class CallInst;
+class LandingPadInst;
+class TerminatorInst;
+class LLVMContext;
+class BlockAddress;
+class Function;
+
+template <>
+struct SymbolTableListSentinelTraits<BasicBlock>
+ : public ilist_half_embedded_sentinel_traits<BasicBlock> {};
+
+/// \brief LLVM Basic Block Representation
+///
+/// This represents a single basic block in LLVM. A basic block is simply a
+/// container of instructions that execute sequentially. Basic blocks are Values
+/// because they are referenced by instructions such as branches and switch
+/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block
+/// represents a label to which a branch can jump.
+///
+/// A well formed basic block is formed of a list of non-terminating
+/// instructions followed by a single TerminatorInst instruction.
+/// TerminatorInst's may not occur in the middle of basic blocks, and must
+/// terminate the blocks. The BasicBlock class allows malformed basic blocks to
+/// occur because it may be useful in the intermediate stage of constructing or
+/// modifying a program. However, the verifier will ensure that basic blocks
+/// are "well formed".
+class BasicBlock : public Value, // Basic blocks are data objects also
+ public ilist_node_with_parent<BasicBlock, Function> {
+ friend class BlockAddress;
+public:
+ typedef SymbolTableList<Instruction> InstListType;
+
+private:
+ InstListType InstList;
+ Function *Parent;
+
+ void setParent(Function *parent);
+ friend class SymbolTableListTraits<BasicBlock>;
+
+ BasicBlock(const BasicBlock &) = delete;
+ void operator=(const BasicBlock &) = delete;
+
+ /// \brief Constructor.
+ ///
+ /// If the function parameter is specified, the basic block is automatically
+ /// inserted at either the end of the function (if InsertBefore is null), or
+ /// before the specified basic block.
+ explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
+ Function *Parent = nullptr,
+ BasicBlock *InsertBefore = nullptr);
+public:
+ /// \brief Get the context in which this basic block lives.
+ LLVMContext &getContext() const;
+
+ /// Instruction iterators...
+ typedef InstListType::iterator iterator;
+ typedef InstListType::const_iterator const_iterator;
+ typedef InstListType::reverse_iterator reverse_iterator;
+ typedef InstListType::const_reverse_iterator const_reverse_iterator;
+
+ /// \brief Creates a new BasicBlock.
+ ///
+ /// If the Parent parameter is specified, the basic block is automatically
+ /// inserted at either the end of the function (if InsertBefore is 0), or
+ /// before the specified basic block.
+ static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
+ Function *Parent = nullptr,
+ BasicBlock *InsertBefore = nullptr) {
+ return new BasicBlock(Context, Name, Parent, InsertBefore);
+ }
+ ~BasicBlock() override;
+
+ /// \brief Return the enclosing method, or null if none.
+ const Function *getParent() const { return Parent; }
+ Function *getParent() { return Parent; }
+
+  /// \brief Return the module owning the function this basic block belongs to,
+  /// or nullptr if the function does not have a module.
+ ///
+ /// Note: this is undefined behavior if the block does not have a parent.
+ const Module *getModule() const;
+ Module *getModule();
+
+ /// \brief Returns the terminator instruction if the block is well formed or
+ /// null if the block is not well formed.
+ TerminatorInst *getTerminator();
+ const TerminatorInst *getTerminator() const;
+
+ /// \brief Returns the call instruction marked 'musttail' prior to the
+ /// terminating return instruction of this basic block, if such a call is
+ /// present. Otherwise, returns null.
+ CallInst *getTerminatingMustTailCall();
+ const CallInst *getTerminatingMustTailCall() const {
+ return const_cast<BasicBlock *>(this)->getTerminatingMustTailCall();
+ }
+
+ /// \brief Returns a pointer to the first instruction in this block that is
+ /// not a PHINode instruction.
+ ///
+  /// When adding instructions to the beginning of the basic block, they should
+  /// be added before the returned value, not before the first instruction,
+  /// which might be a PHI. Returns null if there's no non-PHI instruction.
+ Instruction* getFirstNonPHI();
+ const Instruction* getFirstNonPHI() const {
+ return const_cast<BasicBlock*>(this)->getFirstNonPHI();
+ }
+
+ /// \brief Returns a pointer to the first instruction in this block that is not
+ /// a PHINode or a debug intrinsic.
+ Instruction* getFirstNonPHIOrDbg();
+ const Instruction* getFirstNonPHIOrDbg() const {
+ return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbg();
+ }
+
+ /// \brief Returns a pointer to the first instruction in this block that is not
+ /// a PHINode, a debug intrinsic, or a lifetime intrinsic.
+ Instruction* getFirstNonPHIOrDbgOrLifetime();
+ const Instruction* getFirstNonPHIOrDbgOrLifetime() const {
+ return const_cast<BasicBlock*>(this)->getFirstNonPHIOrDbgOrLifetime();
+ }
+
+ /// \brief Returns an iterator to the first instruction in this block that is
+ /// suitable for inserting a non-PHI instruction.
+ ///
+ /// In particular, it skips all PHIs and LandingPad instructions.
+ iterator getFirstInsertionPt();
+ const_iterator getFirstInsertionPt() const {
+ return const_cast<BasicBlock*>(this)->getFirstInsertionPt();
+ }
+
+ /// \brief Unlink 'this' from the containing function, but do not delete it.
+ void removeFromParent();
+
+ /// \brief Unlink 'this' from the containing function and delete it.
+ ///
+  /// \returns an iterator pointing to the element after the erased one.
+ SymbolTableList<BasicBlock>::iterator eraseFromParent();
+
+ /// \brief Unlink this basic block from its current function and insert it
+ /// into the function that \p MovePos lives in, right before \p MovePos.
+ void moveBefore(BasicBlock *MovePos);
+
+ /// \brief Unlink this basic block from its current function and insert it
+ /// right after \p MovePos in the function \p MovePos lives in.
+ void moveAfter(BasicBlock *MovePos);
+
+ /// \brief Insert unlinked basic block into a function.
+ ///
+ /// Inserts an unlinked basic block into \c Parent. If \c InsertBefore is
+ /// provided, inserts before that basic block, otherwise inserts at the end.
+ ///
+ /// \pre \a getParent() is \c nullptr.
+ void insertInto(Function *Parent, BasicBlock *InsertBefore = nullptr);
+
+ /// \brief Return the predecessor of this block if it has a single predecessor
+ /// block. Otherwise return a null pointer.
+ BasicBlock *getSinglePredecessor();
+ const BasicBlock *getSinglePredecessor() const {
+ return const_cast<BasicBlock*>(this)->getSinglePredecessor();
+ }
+
+ /// \brief Return the predecessor of this block if it has a unique predecessor
+ /// block. Otherwise return a null pointer.
+ ///
+ /// Note that a unique predecessor doesn't imply a single edge; there can be
+ /// multiple edges from the unique predecessor to this block (for example, a
+ /// switch statement with multiple cases having the same destination).
+ BasicBlock *getUniquePredecessor();
+ const BasicBlock *getUniquePredecessor() const {
+ return const_cast<BasicBlock*>(this)->getUniquePredecessor();
+ }
+
+ /// \brief Return the successor of this block if it has a single successor.
+ /// Otherwise return a null pointer.
+ ///
+ /// This method is analogous to getSinglePredecessor above.
+ BasicBlock *getSingleSuccessor();
+ const BasicBlock *getSingleSuccessor() const {
+ return const_cast<BasicBlock*>(this)->getSingleSuccessor();
+ }
+
+ /// \brief Return the successor of this block if it has a unique successor.
+ /// Otherwise return a null pointer.
+ ///
+ /// This method is analogous to getUniquePredecessor above.
+ BasicBlock *getUniqueSuccessor();
+ const BasicBlock *getUniqueSuccessor() const {
+ return const_cast<BasicBlock*>(this)->getUniqueSuccessor();
+ }
+
+ //===--------------------------------------------------------------------===//
+ /// Instruction iterator methods
+ ///
+ inline iterator begin() { return InstList.begin(); }
+ inline const_iterator begin() const { return InstList.begin(); }
+ inline iterator end () { return InstList.end(); }
+ inline const_iterator end () const { return InstList.end(); }
+
+ inline reverse_iterator rbegin() { return InstList.rbegin(); }
+ inline const_reverse_iterator rbegin() const { return InstList.rbegin(); }
+ inline reverse_iterator rend () { return InstList.rend(); }
+ inline const_reverse_iterator rend () const { return InstList.rend(); }
+
+ inline size_t size() const { return InstList.size(); }
+ inline bool empty() const { return InstList.empty(); }
+ inline const Instruction &front() const { return InstList.front(); }
+ inline Instruction &front() { return InstList.front(); }
+ inline const Instruction &back() const { return InstList.back(); }
+ inline Instruction &back() { return InstList.back(); }
+
+ /// \brief Return the underlying instruction list container.
+ ///
+ /// Currently you need to access the underlying instruction list container
+ /// directly if you want to modify it.
+ const InstListType &getInstList() const { return InstList; }
+ InstListType &getInstList() { return InstList; }
+
+ /// \brief Returns a pointer to a member of the instruction list.
+ static InstListType BasicBlock::*getSublistAccess(Instruction*) {
+ return &BasicBlock::InstList;
+ }
+
+ /// \brief Returns a pointer to the symbol table if one exists.
+ ValueSymbolTable *getValueSymbolTable();
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::BasicBlockVal;
+ }
+
+ /// \brief Cause all subinstructions to "let go" of all the references that
+ /// said subinstructions are maintaining.
+ ///
+ /// This allows one to 'delete' a whole class at a time, even though there may
+ /// be circular references... first all references are dropped, and all use
+ /// counts go to zero. Then everything is delete'd for real. Note that no
+ /// operations are valid on an object that has "dropped all references",
+ /// except operator delete.
+ void dropAllReferences();
+
+ /// \brief Notify the BasicBlock that the predecessor \p Pred is no longer
+ /// able to reach it.
+ ///
+ /// This is not used to update the predecessor list itself; it updates the
+ /// PHI nodes that reside in the block. Note that this should be called
+ /// while the predecessor still refers to this block.
+ void removePredecessor(BasicBlock *Pred, bool DontDeleteUselessPHIs = false);
+
+ bool canSplitPredecessors() const;
+
+ /// \brief Split the basic block into two basic blocks at the specified
+ /// instruction.
+ ///
+ /// Note that all instructions BEFORE the specified iterator stay as part of
+ /// the original basic block, an unconditional branch is added to the original
+ /// BB, and the rest of the instructions in the BB are moved to the new BB,
+ /// including the old terminator. The newly formed BasicBlock is returned.
+ /// This function invalidates the specified iterator.
+ ///
+ /// Note that this only works on well formed basic blocks (must have a
+ /// terminator), and 'I' must not be the end of instruction list (which would
+ /// cause a degenerate basic block to be formed, having a terminator inside of
+ /// the basic block).
+ ///
+ /// Also note that this doesn't preserve any passes. To split blocks while
+ /// keeping loop information consistent, use the SplitBlock utility function.
+ BasicBlock *splitBasicBlock(iterator I, const Twine &BBName = "");
+ BasicBlock *splitBasicBlock(Instruction *I, const Twine &BBName = "") {
+ return splitBasicBlock(I->getIterator(), BBName);
+ }
+
+ /// \brief Returns true if there are any uses of this basic block other than
+ /// direct branches, switches, etc. to it.
+ bool hasAddressTaken() const { return getSubclassDataFromValue() != 0; }
+
+ /// \brief Update all phi nodes in this basic block's successors to refer to
+ /// basic block \p New instead of to it.
+ void replaceSuccessorsPhiUsesWith(BasicBlock *New);
+
+ /// \brief Return true if this basic block is an exception handling block.
+ bool isEHPad() const { return getFirstNonPHI()->isEHPad(); }
+
+ /// \brief Return true if this basic block is a landing pad.
+ ///
+ /// Being a ``landing pad'' means that the basic block is the destination of
+ /// the 'unwind' edge of an invoke instruction.
+ bool isLandingPad() const;
+
+ /// \brief Return the landingpad instruction associated with the landing pad.
+ LandingPadInst *getLandingPadInst();
+ const LandingPadInst *getLandingPadInst() const;
+
+private:
+ /// \brief Increment the internal refcount of the number of BlockAddresses
+ /// referencing this BasicBlock by \p Amt.
+ ///
+ /// This refcount is almost always 0, occasionally 1, almost never 2, and
+ /// inconceivably 3 or more.
+ void AdjustBlockAddressRefCount(int Amt) {
+ setValueSubclassData(getSubclassDataFromValue()+Amt);
+ assert((int)(signed char)getSubclassDataFromValue() >= 0 &&
+ "Refcount wrap-around");
+ }
+ /// \brief Shadow Value::setValueSubclassData with a private forwarding method
+ /// so that any future subclasses cannot accidentally use it.
+ void setValueSubclassData(unsigned short D) {
+ Value::setValueSubclassData(D);
+ }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)
+
+} // End llvm namespace
+
+#endif
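For reference, a minimal usage sketch of the BasicBlock API declared above
(Create, getTerminator, splitBasicBlock), assuming the LLVM 3.8-era C++ API;
the function name and block names are illustrative:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void demoBasicBlock(LLVMContext &Ctx) {
      Module M("demo", Ctx);
      FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
      Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
      // InsertBefore is null, so the block is appended to the end of F.
      BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
      IRBuilder<> B(BB);
      B.CreateRetVoid();                 // well formed: block has a terminator
      // Split before the terminator: "entry" now branches to the new "tail".
      BasicBlock *Tail = BB->splitBasicBlock(BB->getTerminator(), "tail");
      (void)Tail;
    }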
diff --git a/contrib/llvm/include/llvm/IR/CFG.h b/contrib/llvm/include/llvm/IR/CFG.h
new file mode 100644
index 0000000..e9bf093
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/CFG.h
@@ -0,0 +1,265 @@
+//===- CFG.h - Process LLVM structures as graphs ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines specializations of GraphTraits that allow Function and
+// BasicBlock graphs to be treated as proper graphs for generic algorithms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CFG_H
+#define LLVM_IR_CFG_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// BasicBlock pred_iterator definition
+//===----------------------------------------------------------------------===//
+
+template <class Ptr, class USE_iterator> // Predecessor Iterator
+class PredIterator : public std::iterator<std::forward_iterator_tag,
+ Ptr, ptrdiff_t, Ptr*, Ptr*> {
+ typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
+ Ptr*> super;
+ typedef PredIterator<Ptr, USE_iterator> Self;
+ USE_iterator It;
+
+ inline void advancePastNonTerminators() {
+ // Loop to ignore non-terminator uses (for example BlockAddresses).
+ while (!It.atEnd() && !isa<TerminatorInst>(*It))
+ ++It;
+ }
+
+public:
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+ PredIterator() {}
+ explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
+ advancePastNonTerminators();
+ }
+ inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}
+
+ inline bool operator==(const Self& x) const { return It == x.It; }
+ inline bool operator!=(const Self& x) const { return !operator==(x); }
+
+ inline reference operator*() const {
+ assert(!It.atEnd() && "pred_iterator out of range!");
+ return cast<TerminatorInst>(*It)->getParent();
+ }
+ inline pointer *operator->() const { return &operator*(); }
+
+ inline Self& operator++() { // Preincrement
+ assert(!It.atEnd() && "pred_iterator out of range!");
+ ++It; advancePastNonTerminators();
+ return *this;
+ }
+
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this; ++*this; return tmp;
+ }
+
+ /// getOperandNo - Return the operand number in the predecessor's
+ /// terminator of the successor.
+ unsigned getOperandNo() const {
+ return It.getOperandNo();
+ }
+
+ /// getUse - Return the operand Use in the predecessor's terminator
+ /// of the successor.
+ Use &getUse() const {
+ return It.getUse();
+ }
+};
+
+typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
+typedef PredIterator<const BasicBlock,
+ Value::const_user_iterator> const_pred_iterator;
+typedef llvm::iterator_range<pred_iterator> pred_range;
+typedef llvm::iterator_range<const_pred_iterator> pred_const_range;
+
+inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
+inline const_pred_iterator pred_begin(const BasicBlock *BB) {
+ return const_pred_iterator(BB);
+}
+inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
+inline const_pred_iterator pred_end(const BasicBlock *BB) {
+ return const_pred_iterator(BB, true);
+}
+inline bool pred_empty(const BasicBlock *BB) {
+ return pred_begin(BB) == pred_end(BB);
+}
+inline pred_range predecessors(BasicBlock *BB) {
+ return pred_range(pred_begin(BB), pred_end(BB));
+}
+inline pred_const_range predecessors(const BasicBlock *BB) {
+ return pred_const_range(pred_begin(BB), pred_end(BB));
+}
+
+//===----------------------------------------------------------------------===//
+// BasicBlock succ_iterator helpers
+//===----------------------------------------------------------------------===//
+
+typedef TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>
+ succ_iterator;
+typedef TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>
+ succ_const_iterator;
+typedef llvm::iterator_range<succ_iterator> succ_range;
+typedef llvm::iterator_range<succ_const_iterator> succ_const_range;
+
+inline succ_iterator succ_begin(BasicBlock *BB) {
+ return succ_iterator(BB->getTerminator());
+}
+inline succ_const_iterator succ_begin(const BasicBlock *BB) {
+ return succ_const_iterator(BB->getTerminator());
+}
+inline succ_iterator succ_end(BasicBlock *BB) {
+ return succ_iterator(BB->getTerminator(), true);
+}
+inline succ_const_iterator succ_end(const BasicBlock *BB) {
+ return succ_const_iterator(BB->getTerminator(), true);
+}
+inline bool succ_empty(const BasicBlock *BB) {
+ return succ_begin(BB) == succ_end(BB);
+}
+inline succ_range successors(BasicBlock *BB) {
+ return succ_range(succ_begin(BB), succ_end(BB));
+}
+inline succ_const_range successors(const BasicBlock *BB) {
+ return succ_const_range(succ_begin(BB), succ_end(BB));
+}
+
+template <typename T, typename U>
+struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
+ static const bool value = isPodLike<T>::value;
+};
+
+
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks...
+
+template <> struct GraphTraits<BasicBlock*> {
+ typedef BasicBlock NodeType;
+ typedef succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(BasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return succ_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return succ_end(N);
+ }
+};
+
+template <> struct GraphTraits<const BasicBlock*> {
+ typedef const BasicBlock NodeType;
+ typedef succ_const_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const BasicBlock *BB) { return BB; }
+
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return succ_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return succ_end(N);
+ }
+};
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+ // graph of basic blocks... and to walk it in inverse order. Inverse order
+ // for a function means traversing the predecessor edges of a BB instead of
+ // the successor edges.
+//
+template <> struct GraphTraits<Inverse<BasicBlock*> > {
+ typedef BasicBlock NodeType;
+ typedef pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return pred_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return pred_end(N);
+ }
+};
+
+template <> struct GraphTraits<Inverse<const BasicBlock*> > {
+ typedef const BasicBlock NodeType;
+ typedef const_pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<const BasicBlock*> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return pred_begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return pred_end(N);
+ }
+};
+
+
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... these are the same as the basic block iterators,
+// except that the root node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
+ static NodeType *getEntryNode(Function *F) { return &F->getEntryBlock(); }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef Function::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(Function *F) { return F->begin(); }
+ static nodes_iterator nodes_end (Function *F) { return F->end(); }
+ static size_t size (Function *F) { return F->size(); }
+};
+template <> struct GraphTraits<const Function*> :
+ public GraphTraits<const BasicBlock*> {
+ static NodeType *getEntryNode(const Function *F) {return &F->getEntryBlock();}
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef Function::const_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(const Function *F) { return F->begin(); }
+ static nodes_iterator nodes_end (const Function *F) { return F->end(); }
+ static size_t size (const Function *F) { return F->size(); }
+};
+
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+ // graph of basic blocks... and to walk it in inverse order. Inverse order
+ // for a function means traversing the predecessor edges of a BB instead of
+ // the successor edges.
+//
+template <> struct GraphTraits<Inverse<Function*> > :
+ public GraphTraits<Inverse<BasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<Function*> G) {
+ return &G.Graph->getEntryBlock();
+ }
+};
+template <> struct GraphTraits<Inverse<const Function*> > :
+ public GraphTraits<Inverse<const BasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<const Function *> G) {
+ return &G.Graph->getEntryBlock();
+ }
+};
+
+} // End llvm namespace
+
+#endif
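A short sketch of how these helpers compose: the predecessors/successors
ranges for local CFG walks, and the GraphTraits specializations driving a
generic depth-first traversal (countReachable and visitNeighbors are
illustrative names; assumes the LLVM 3.8-era ADT headers):

    #include "llvm/ADT/DepthFirstIterator.h"
    #include "llvm/IR/CFG.h"
    using namespace llvm;

    // GraphTraits<Function*> makes the entry block the root of the walk.
    unsigned countReachable(Function &F) {
      unsigned N = 0;
      for (BasicBlock *BB : depth_first(&F)) {
        (void)BB;
        ++N;
      }
      return N;
    }

    void visitNeighbors(BasicBlock *BB) {
      for (BasicBlock *Pred : predecessors(BB)) (void)Pred; // CFG in-edges
      for (BasicBlock *Succ : successors(BB))  (void)Succ;  // CFG out-edges
    }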
diff --git a/contrib/llvm/include/llvm/IR/CallSite.h b/contrib/llvm/include/llvm/IR/CallSite.h
new file mode 100644
index 0000000..f4b8a8a
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/CallSite.h
@@ -0,0 +1,563 @@
+//===- CallSite.h - Abstract Call & Invoke instrs ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallSite class, which is a handy wrapper for code that
+// wants to treat Call and Invoke instructions in a generic way. When in non-
+// mutation context (e.g. an analysis) ImmutableCallSite should be used.
+// Finally, when some degree of customization is necessary between these two
+// extremes, CallSiteBase<> can be supplied with fine-tuned parameters.
+//
+// NOTE: These classes are supposed to have "value semantics". So they should be
+// passed by value, not by reference; they should not be "new"ed or "delete"d.
+// They are efficiently copyable, assignable and constructable, with cost
+// equivalent to copying a pointer (notice that they have only a single data
+// member). The internal representation carries a flag which indicates which of
+// the two variants is enclosed. This allows for cheaper checks when various
+// accessors of CallSite are employed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CALLSITE_H
+#define LLVM_IR_CALLSITE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+
+class CallInst;
+class InvokeInst;
+
+template <typename FunTy = const Function,
+ typename BBTy = const BasicBlock,
+ typename ValTy = const Value,
+ typename UserTy = const User,
+ typename UseTy = const Use,
+ typename InstrTy = const Instruction,
+ typename CallTy = const CallInst,
+ typename InvokeTy = const InvokeInst,
+ typename IterTy = User::const_op_iterator>
+class CallSiteBase {
+protected:
+ PointerIntPair<InstrTy*, 1, bool> I;
+
+ CallSiteBase() : I(nullptr, false) {}
+ CallSiteBase(CallTy *CI) : I(CI, true) { assert(CI); }
+ CallSiteBase(InvokeTy *II) : I(II, false) { assert(II); }
+ explicit CallSiteBase(ValTy *II) { *this = get(II); }
+
+private:
+ /// CallSiteBase::get - This static method is sort of like a constructor. It
+ /// will create an appropriate call site for a Call or Invoke instruction, but
+ /// it can also create a null-initialized CallSiteBase object for something
+ /// which is NOT a call site.
+ ///
+ static CallSiteBase get(ValTy *V) {
+ if (InstrTy *II = dyn_cast<InstrTy>(V)) {
+ if (II->getOpcode() == Instruction::Call)
+ return CallSiteBase(static_cast<CallTy*>(II));
+ else if (II->getOpcode() == Instruction::Invoke)
+ return CallSiteBase(static_cast<InvokeTy*>(II));
+ }
+ return CallSiteBase();
+ }
+
+public:
+ /// isCall - true if a CallInst is enclosed.
+ /// Note that !isCall() does not mean an InvokeInst is enclosed; it could
+ /// also signify a null Instruction pointer.
+ bool isCall() const { return I.getInt(); }
+
+ /// isInvoke - true if an InvokeInst is enclosed.
+ ///
+ bool isInvoke() const { return getInstruction() && !I.getInt(); }
+
+ InstrTy *getInstruction() const { return I.getPointer(); }
+ InstrTy *operator->() const { return I.getPointer(); }
+ explicit operator bool() const { return I.getPointer(); }
+
+ /// Get the basic block containing the call site
+ BBTy* getParent() const { return getInstruction()->getParent(); }
+
+ /// getCalledValue - Return the pointer to function that is being called.
+ ///
+ ValTy *getCalledValue() const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ return *getCallee();
+ }
+
+ /// getCalledFunction - Return the function being called if this is a direct
+ /// call, otherwise return null (if it's an indirect call).
+ ///
+ FunTy *getCalledFunction() const {
+ return dyn_cast<FunTy>(getCalledValue());
+ }
+
+ /// setCalledFunction - Set the callee to the specified value.
+ ///
+ void setCalledFunction(Value *V) {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ *getCallee() = V;
+ }
+
+ /// isCallee - Determine whether the passed iterator points to the
+ /// callee operand's Use.
+ bool isCallee(Value::const_user_iterator UI) const {
+ return isCallee(&UI.getUse());
+ }
+
+ /// Determine whether this Use is the callee operand's Use.
+ bool isCallee(const Use *U) const { return getCallee() == U; }
+
+ /// \brief Determine whether the passed iterator points to an argument
+ /// operand.
+ bool isArgOperand(Value::const_user_iterator UI) const {
+ return isArgOperand(&UI.getUse());
+ }
+
+ /// \brief Determine whether the passed use points to an argument operand.
+ bool isArgOperand(const Use *U) const {
+ assert(getInstruction() == U->getUser());
+ return arg_begin() <= U && U < arg_end();
+ }
+
+ /// \brief Determine whether the passed iterator points to a bundle operand.
+ bool isBundleOperand(Value::const_user_iterator UI) const {
+ return isBundleOperand(&UI.getUse());
+ }
+
+ /// \brief Determine whether the passed use points to a bundle operand.
+ bool isBundleOperand(const Use *U) const {
+ assert(getInstruction() == U->getUser());
+ if (!hasOperandBundles())
+ return false;
+ unsigned OperandNo = U - (*this)->op_begin();
+ return getBundleOperandsStartIndex() <= OperandNo &&
+ OperandNo < getBundleOperandsEndIndex();
+ }
+
+ /// \brief Determine whether the passed iterator points to a data operand.
+ bool isDataOperand(Value::const_user_iterator UI) const {
+ return isDataOperand(&UI.getUse());
+ }
+
+ /// \brief Determine whether the passed use points to a data operand.
+ bool isDataOperand(const Use *U) const {
+ return data_operands_begin() <= U && U < data_operands_end();
+ }
+
+ ValTy *getArgument(unsigned ArgNo) const {
+ assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+ return *(arg_begin() + ArgNo);
+ }
+
+ void setArgument(unsigned ArgNo, Value* newVal) {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
+ getInstruction()->setOperand(ArgNo, newVal);
+ }
+
+ /// Given a value use iterator, returns the argument that corresponds to it.
+ /// Iterator must actually correspond to an argument.
+ unsigned getArgumentNo(Value::const_user_iterator I) const {
+ return getArgumentNo(&I.getUse());
+ }
+
+ /// Given a use for an argument, get the argument number that corresponds to
+ /// it.
+ unsigned getArgumentNo(const Use *U) const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(isArgOperand(U) && "Argument # out of range!");
+ return U - arg_begin();
+ }
+
+ /// arg_iterator - The type of iterator to use when looping over actual
+ /// arguments at this call site.
+ typedef IterTy arg_iterator;
+
+ iterator_range<IterTy> args() const {
+ return make_range(arg_begin(), arg_end());
+ }
+ bool arg_empty() const { return arg_end() == arg_begin(); }
+ unsigned arg_size() const { return unsigned(arg_end() - arg_begin()); }
+
+ /// Given a value use iterator, returns the data operand that corresponds to
+ /// it.
+ /// Iterator must actually correspond to a data operand.
+ unsigned getDataOperandNo(Value::const_user_iterator UI) const {
+ return getDataOperandNo(&UI.getUse());
+ }
+
+ /// Given a use for a data operand, get the data operand number that
+ /// corresponds to it.
+ unsigned getDataOperandNo(const Use *U) const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ assert(isDataOperand(U) && "Data operand # out of range!");
+ return U - data_operands_begin();
+ }
+
+ /// Type of iterator to use when looping over data operands at this call site
+ /// (see below).
+ typedef IterTy data_operand_iterator;
+
+ /// data_operands_begin/data_operands_end - Return iterators iterating over
+ /// the call / invoke argument list and bundle operands. For invokes, this is
+ /// the set of instruction operands except the invoke target and the two
+ /// successor blocks; and for calls this is the set of instruction operands
+ /// except the call target.
+
+ IterTy data_operands_begin() const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ return (*this)->op_begin();
+ }
+ IterTy data_operands_end() const {
+ assert(getInstruction() && "Not a call or invoke instruction!");
+ return (*this)->op_end() - (isCall() ? 1 : 3);
+ }
+ iterator_range<IterTy> data_ops() const {
+ return make_range(data_operands_begin(), data_operands_end());
+ }
+ bool data_operands_empty() const {
+ return data_operands_end() == data_operands_begin();
+ }
+ unsigned data_operands_size() const {
+ return std::distance(data_operands_begin(), data_operands_end());
+ }
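+
+ // Operand layout, for reference: a call is [args..., bundles..., callee];
+ // an invoke is [args..., bundles..., callee, normal dest, unwind dest],
+ // which is why data_operands_end() drops 1 or 3 trailing operands.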
+
+ /// getType - Return the type of the instruction that generated this call site
+ ///
+ Type *getType() const { return (*this)->getType(); }
+
+ /// getCaller - Return the caller function for this call site
+ ///
+ FunTy *getCaller() const { return (*this)->getParent()->getParent(); }
+
+ /// \brief Tests if this call site must be tail call optimized. Only a
+ /// CallInst can be tail call optimized.
+ bool isMustTailCall() const {
+ return isCall() && cast<CallInst>(getInstruction())->isMustTailCall();
+ }
+
+ /// \brief Tests if this call site is marked as a tail call.
+ bool isTailCall() const {
+ return isCall() && cast<CallInst>(getInstruction())->isTailCall();
+ }
+
+#define CALLSITE_DELEGATE_GETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ return isCall() \
+ ? cast<CallInst>(II)->METHOD \
+ : cast<InvokeInst>(II)->METHOD
+
+#define CALLSITE_DELEGATE_SETTER(METHOD) \
+ InstrTy *II = getInstruction(); \
+ if (isCall()) \
+ cast<CallInst>(II)->METHOD; \
+ else \
+ cast<InvokeInst>(II)->METHOD
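+
+ // For example, CALLSITE_DELEGATE_GETTER(getCallingConv()) expands to a
+ // dispatch on isCall(): cast<CallInst>(II)->getCallingConv() for calls,
+ // cast<InvokeInst>(II)->getCallingConv() for invokes.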
+
+ unsigned getNumArgOperands() const {
+ CALLSITE_DELEGATE_GETTER(getNumArgOperands());
+ }
+
+ ValTy *getArgOperand(unsigned i) const {
+ CALLSITE_DELEGATE_GETTER(getArgOperand(i));
+ }
+
+ bool isInlineAsm() const {
+ if (isCall())
+ return cast<CallInst>(getInstruction())->isInlineAsm();
+ return false;
+ }
+
+ /// getCallingConv/setCallingConv - get or set the calling convention of the
+ /// call.
+ CallingConv::ID getCallingConv() const {
+ CALLSITE_DELEGATE_GETTER(getCallingConv());
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ CALLSITE_DELEGATE_SETTER(setCallingConv(CC));
+ }
+
+ FunctionType *getFunctionType() const {
+ CALLSITE_DELEGATE_GETTER(getFunctionType());
+ }
+
+ void mutateFunctionType(FunctionType *Ty) const {
+ CALLSITE_DELEGATE_SETTER(mutateFunctionType(Ty));
+ }
+
+ /// getAttributes/setAttributes - get or set the parameter attributes of
+ /// the call.
+ const AttributeSet &getAttributes() const {
+ CALLSITE_DELEGATE_GETTER(getAttributes());
+ }
+ void setAttributes(const AttributeSet &PAL) {
+ CALLSITE_DELEGATE_SETTER(setAttributes(PAL));
+ }
+
+ /// \brief Return true if this function has the given attribute.
+ bool hasFnAttr(Attribute::AttrKind A) const {
+ CALLSITE_DELEGATE_GETTER(hasFnAttr(A));
+ }
+
+ /// \brief Return true if the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+ CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A));
+ }
+
+ /// \brief Return true if the data operand at index \p i directly or
+ /// indirectly has the attribute \p A.
+ ///
+ /// Normal call or invoke arguments have per operand attributes, as specified
+ /// in the attribute set attached to this instruction, while operand bundle
+ /// operands may have some attributes implied by the type of its containing
+ /// operand bundle.
+ bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind A) const {
+ CALLSITE_DELEGATE_GETTER(dataOperandHasImpliedAttr(i, A));
+ }
+
+ /// @brief Extract the alignment for a call or parameter (0=unknown).
+ uint16_t getParamAlignment(uint16_t i) const {
+ CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
+ }
+
+ /// @brief Extract the number of dereferenceable bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableBytes(uint16_t i) const {
+ CALLSITE_DELEGATE_GETTER(getDereferenceableBytes(i));
+ }
+
+ /// @brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableOrNullBytes(uint16_t i) const {
+ CALLSITE_DELEGATE_GETTER(getDereferenceableOrNullBytes(i));
+ }
+
+ /// @brief Determine if the parameter or return value is marked with NoAlias
+ /// attribute.
+ /// @param n The parameter to check. 1 is the first parameter, 0 is the return
+ bool doesNotAlias(unsigned n) const {
+ CALLSITE_DELEGATE_GETTER(doesNotAlias(n));
+ }
+
+ /// \brief Return true if the call should not be treated as a call to a
+ /// builtin.
+ bool isNoBuiltin() const {
+ CALLSITE_DELEGATE_GETTER(isNoBuiltin());
+ }
+
+ /// @brief Return true if the call should not be inlined.
+ bool isNoInline() const {
+ CALLSITE_DELEGATE_GETTER(isNoInline());
+ }
+ void setIsNoInline(bool Value = true) {
+ CALLSITE_DELEGATE_SETTER(setIsNoInline(Value));
+ }
+
+ /// @brief Determine if the call does not access memory.
+ bool doesNotAccessMemory() const {
+ CALLSITE_DELEGATE_GETTER(doesNotAccessMemory());
+ }
+ void setDoesNotAccessMemory() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory());
+ }
+
+ /// @brief Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ CALLSITE_DELEGATE_GETTER(onlyReadsMemory());
+ }
+ void setOnlyReadsMemory() {
+ CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory());
+ }
+
+ /// @brief Determine if the call can access memory only using pointers based
+ /// on its arguments.
+ bool onlyAccessesArgMemory() const {
+ CALLSITE_DELEGATE_GETTER(onlyAccessesArgMemory());
+ }
+ void setOnlyAccessesArgMemory() {
+ CALLSITE_DELEGATE_SETTER(setOnlyAccessesArgMemory());
+ }
+
+ /// @brief Determine if the call cannot return.
+ bool doesNotReturn() const {
+ CALLSITE_DELEGATE_GETTER(doesNotReturn());
+ }
+ void setDoesNotReturn() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotReturn());
+ }
+
+ /// @brief Determine if the call cannot unwind.
+ bool doesNotThrow() const {
+ CALLSITE_DELEGATE_GETTER(doesNotThrow());
+ }
+ void setDoesNotThrow() {
+ CALLSITE_DELEGATE_SETTER(setDoesNotThrow());
+ }
+
+ unsigned getNumOperandBundles() const {
+ CALLSITE_DELEGATE_GETTER(getNumOperandBundles());
+ }
+
+ bool hasOperandBundles() const {
+ CALLSITE_DELEGATE_GETTER(hasOperandBundles());
+ }
+
+ unsigned getBundleOperandsStartIndex() const {
+ CALLSITE_DELEGATE_GETTER(getBundleOperandsStartIndex());
+ }
+
+ unsigned getBundleOperandsEndIndex() const {
+ CALLSITE_DELEGATE_GETTER(getBundleOperandsEndIndex());
+ }
+
+ unsigned getNumTotalBundleOperands() const {
+ CALLSITE_DELEGATE_GETTER(getNumTotalBundleOperands());
+ }
+
+ OperandBundleUse getOperandBundleAt(unsigned Index) const {
+ CALLSITE_DELEGATE_GETTER(getOperandBundleAt(Index));
+ }
+
+ Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
+ CALLSITE_DELEGATE_GETTER(getOperandBundle(Name));
+ }
+
+ Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
+ CALLSITE_DELEGATE_GETTER(getOperandBundle(ID));
+ }
+
+ IterTy arg_begin() const {
+ CALLSITE_DELEGATE_GETTER(arg_begin());
+ }
+
+ IterTy arg_end() const {
+ CALLSITE_DELEGATE_GETTER(arg_end());
+ }
+
+#undef CALLSITE_DELEGATE_GETTER
+#undef CALLSITE_DELEGATE_SETTER
+
+ void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
+ const Instruction *II = getInstruction();
+ // Since this is actually a getter that "looks like" a setter, don't use the
+ // above macros to avoid confusion.
+ if (isCall())
+ cast<CallInst>(II)->getOperandBundlesAsDefs(Defs);
+ else
+ cast<InvokeInst>(II)->getOperandBundlesAsDefs(Defs);
+ }
+
+ /// @brief Determine whether this data operand is not captured.
+ bool doesNotCapture(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::NoCapture);
+ }
+
+ /// @brief Determine whether this argument is passed by value.
+ bool isByValArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ByVal);
+ }
+
+ /// @brief Determine whether this argument is passed in an alloca.
+ bool isInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::InAlloca);
+ }
+
+ /// @brief Determine whether this argument is passed by value or in an alloca.
+ bool isByValOrInAllocaArgument(unsigned ArgNo) const {
+ return paramHasAttr(ArgNo + 1, Attribute::ByVal) ||
+ paramHasAttr(ArgNo + 1, Attribute::InAlloca);
+ }
+
+ /// @brief Determine if there is an inalloca argument. Only the last
+ /// argument can have the inalloca attribute.
+ bool hasInAllocaArgument() const {
+ return paramHasAttr(arg_size(), Attribute::InAlloca);
+ }
+
+ bool doesNotAccessMemory(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+ }
+
+ bool onlyReadsMemory(unsigned OpNo) const {
+ return dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadOnly) ||
+ dataOperandHasImpliedAttr(OpNo + 1, Attribute::ReadNone);
+ }
+
+ /// @brief Return true if the return value is known to be not null.
+ /// This may be because it has the nonnull attribute, or because at least
+ /// one byte is dereferenceable and the pointer is in addrspace(0).
+ bool isReturnNonNull() const {
+ if (paramHasAttr(0, Attribute::NonNull))
+ return true;
+ else if (getDereferenceableBytes(0) > 0 &&
+ getType()->getPointerAddressSpace() == 0)
+ return true;
+
+ return false;
+ }
+
+ /// hasArgument - Returns true if this CallSite passes the given Value* as an
+ /// argument to the called function.
+ bool hasArgument(const Value *Arg) const {
+ for (arg_iterator AI = this->arg_begin(), E = this->arg_end(); AI != E;
+ ++AI)
+ if (AI->get() == Arg)
+ return true;
+ return false;
+ }
+
+private:
+ IterTy getCallee() const {
+ if (isCall()) // Skip Callee
+ return cast<CallInst>(getInstruction())->op_end() - 1;
+ else // Skip BB, BB, Callee
+ return cast<InvokeInst>(getInstruction())->op_end() - 3;
+ }
+};
+
+class CallSite : public CallSiteBase<Function, BasicBlock, Value, User, Use,
+ Instruction, CallInst, InvokeInst,
+ User::op_iterator> {
+public:
+ CallSite() {}
+ CallSite(CallSiteBase B) : CallSiteBase(B) {}
+ CallSite(CallInst *CI) : CallSiteBase(CI) {}
+ CallSite(InvokeInst *II) : CallSiteBase(II) {}
+ explicit CallSite(Instruction *II) : CallSiteBase(II) {}
+ explicit CallSite(Value *V) : CallSiteBase(V) {}
+
+ bool operator==(const CallSite &CS) const { return I == CS.I; }
+ bool operator!=(const CallSite &CS) const { return I != CS.I; }
+ bool operator<(const CallSite &CS) const {
+ return getInstruction() < CS.getInstruction();
+ }
+
+private:
+ User::op_iterator getCallee() const;
+};
+
+/// ImmutableCallSite - establish a view to a call site for examination
+class ImmutableCallSite : public CallSiteBase<> {
+public:
+ ImmutableCallSite() {}
+ ImmutableCallSite(const CallInst *CI) : CallSiteBase(CI) {}
+ ImmutableCallSite(const InvokeInst *II) : CallSiteBase(II) {}
+ explicit ImmutableCallSite(const Instruction *II) : CallSiteBase(II) {}
+ explicit ImmutableCallSite(const Value *V) : CallSiteBase(V) {}
+ ImmutableCallSite(CallSite CS) : CallSiteBase(CS.getInstruction()) {}
+};
+
+} // End llvm namespace
+
+#endif
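A hedged usage sketch of CallSite (illustrative function name; assumes the
LLVM 3.8-era API): the same loop handles CallInst and InvokeInst users
uniformly, which is the point of the wrapper.

    #include "llvm/IR/CallSite.h"
    using namespace llvm;

    // Count direct calls/invokes of F among its users.
    unsigned countDirectCallSites(Function &F) {
      unsigned N = 0;
      for (User *U : F.users()) {
        CallSite CS(U);          // null CallSite if U is not a call or invoke
        if (CS && CS.getCalledFunction() == &F)
          ++N;
      }
      return N;
    }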
diff --git a/contrib/llvm/include/llvm/IR/CallingConv.h b/contrib/llvm/include/llvm/IR/CallingConv.h
new file mode 100644
index 0000000..bc05092
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/CallingConv.h
@@ -0,0 +1,178 @@
+//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines LLVM's set of calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CALLINGCONV_H
+#define LLVM_IR_CALLINGCONV_H
+
+namespace llvm {
+
+/// CallingConv Namespace - This namespace contains an enum with a value for
+/// the well-known calling conventions.
+///
+namespace CallingConv {
+ /// LLVM IR allows arbitrary numbers to be used as calling convention
+ /// identifiers.
+ typedef unsigned ID;
+
+ /// A set of enums which specify the assigned numeric values for known llvm
+ /// calling conventions.
+ /// @brief LLVM Calling Convention Representation
+ enum {
+ /// C - The default llvm calling convention, compatible with C. This
+ /// convention is the only calling convention that supports varargs calls.
+ /// As with typical C calling conventions, the callee/caller have to
+ /// tolerate certain amounts of prototype mismatch.
+ C = 0,
+
+ // Generic LLVM calling conventions. None of these calling conventions
+ // support varargs calls, and all assume that the caller and callee
+ // prototype exactly match.
+
+ /// Fast - This calling convention attempts to make calls as fast as
+ /// possible (e.g. by passing things in registers).
+ Fast = 8,
+
+ // Cold - This calling convention attempts to make code in the caller as
+ // efficient as possible under the assumption that the call is not commonly
+ // executed. As such, these calls often preserve all registers so that the
+ // call does not break any live ranges in the caller.
+ Cold = 9,
+
+ // GHC - Calling convention used by the Glasgow Haskell Compiler (GHC).
+ GHC = 10,
+
+ // HiPE - Calling convention used by the High-Performance Erlang Compiler
+ // (HiPE).
+ HiPE = 11,
+
+ // WebKit JS - Calling convention for stack based JavaScript calls
+ WebKit_JS = 12,
+
+ // AnyReg - Calling convention for dynamic register based calls (e.g.
+ // stackmap and patchpoint intrinsics).
+ AnyReg = 13,
+
+ // PreserveMost - Calling convention for runtime calls that preserves most
+ // registers.
+ PreserveMost = 14,
+
+ // PreserveAll - Calling convention for runtime calls that preserves
+ // (almost) all registers.
+ PreserveAll = 15,
+
+ // Swift - Calling convention for Swift.
+ Swift = 16,
+
+ // CXX_FAST_TLS - Calling convention for access functions.
+ CXX_FAST_TLS = 17,
+
+ // Target - This is the start of the target-specific calling conventions,
+ // e.g. fastcall and thiscall on X86.
+ FirstTargetCC = 64,
+
+ /// X86_StdCall - stdcall is the calling convention mostly used by the
+ /// Win32 API. It is basically the same as the C convention, except that
+ /// the callee is responsible for popping the arguments from the stack.
+ X86_StdCall = 64,
+
+ /// X86_FastCall - 'fast' analog of X86_StdCall. Passes the first two
+ /// arguments in the ECX and EDX registers, the rest via the stack. The
+ /// callee is responsible for stack cleanup.
+ X86_FastCall = 65,
+
+ /// ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete,
+ /// but still used on some targets).
+ ARM_APCS = 66,
+
+ /// ARM_AAPCS - ARM Architecture Procedure Calling Standard calling
+ /// convention (aka EABI). Soft float variant.
+ ARM_AAPCS = 67,
+
+ /// ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
+ ARM_AAPCS_VFP = 68,
+
+ /// MSP430_INTR - Calling convention used for MSP430 interrupt routines.
+ MSP430_INTR = 69,
+
+ /// X86_ThisCall - Similar to X86_StdCall. Passes first argument in ECX,
+ /// others via the stack. The callee is responsible for stack cleanup. MSVC
+ /// uses this by default for methods in its ABI.
+ X86_ThisCall = 70,
+
+ /// PTX_Kernel - Call to a PTX kernel.
+ /// Passes all arguments in parameter space.
+ PTX_Kernel = 71,
+
+ /// PTX_Device - Call to a PTX device function.
+ /// Passes all arguments in register or parameter space.
+ PTX_Device = 72,
+
+ /// SPIR_FUNC - Calling convention for SPIR non-kernel device functions.
+ /// No lowering or expansion of arguments.
+ /// Structures are passed as a pointer to a struct with the byval attribute.
+ /// Functions can only call SPIR_FUNC and SPIR_KERNEL functions.
+ /// Functions can only have zero or one return values.
+ /// Variable arguments are not allowed, except for printf.
+ /// How arguments/return values are lowered is not specified.
+ /// Functions are only visible to the devices.
+ SPIR_FUNC = 75,
+
+ /// SPIR_KERNEL - Calling convention for SPIR kernel functions.
+ /// Inherits the restrictions of SPIR_FUNC, except
+ /// Cannot have non-void return values.
+ /// Cannot have variable arguments.
+ /// Can also be called by the host.
+ /// Is externally visible.
+ SPIR_KERNEL = 76,
+
+ /// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins
+ Intel_OCL_BI = 77,
+
+ /// \brief The C convention as specified in the x86-64 supplement to the
+ /// System V ABI, used on most non-Windows systems.
+ X86_64_SysV = 78,
+
+ /// \brief The C convention as implemented on Windows/x86-64. This
+ /// convention differs from the more common \c X86_64_SysV convention
+ /// in a number of ways, most notably in that XMM registers used to pass
+ /// arguments are shadowed by GPRs, and vice versa.
+ X86_64_Win64 = 79,
+
+ /// \brief MSVC calling convention that passes vectors and vector aggregates
+ /// in SSE registers.
+ X86_VectorCall = 80,
+
+ /// \brief Calling convention used by HipHop Virtual Machine (HHVM) to
+ /// perform calls to and from translation cache, and for calling PHP
+ /// functions.
+ /// HHVM calling convention supports tail/sibling call elimination.
+ HHVM = 81,
+
+ /// \brief HHVM calling convention for invoking C/C++ helpers.
+ HHVM_C = 82,
+
+ /// X86_INTR - x86 hardware interrupt context. Callee may take one or two
+ /// parameters, where the first is a pointer to the hardware context frame
+ /// and the second is the hardware error code; the presence of the latter
+ /// depends on the interrupt vector taken. Valid for both 32- and 64-bit
+ /// subtargets.
+ X86_INTR = 83,
+
+ /// The highest possible calling convention ID. Must be of the form 2^k - 1.
+ MaxID = 1023
+ };
+} // End CallingConv namespace
+
+} // End llvm namespace
+
+#endif
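A small sketch of attaching one of these conventions (names illustrative;
assumes the LLVM 3.8-era IRBuilder API). The convention must be set
consistently on both the callee and each call site:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void emitFastCall(Module &M, LLVMContext &Ctx) {
      FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
      Function *Callee = Function::Create(FTy, Function::InternalLinkage,
                                          "callee", &M);
      Callee->setCallingConv(CallingConv::Fast);
      Function *Caller = Function::Create(FTy, Function::ExternalLinkage,
                                          "caller", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", Caller));
      CallInst *CI = B.CreateCall(Callee);
      CI->setCallingConv(CallingConv::Fast); // must match the callee
      B.CreateRetVoid();
    }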
diff --git a/contrib/llvm/include/llvm/IR/Comdat.h b/contrib/llvm/include/llvm/IR/Comdat.h
new file mode 100644
index 0000000..fb79e13
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Comdat.h
@@ -0,0 +1,66 @@
+//===-- llvm/IR/Comdat.h - Comdat definitions -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declaration of the Comdat class, which represents a
+/// single COMDAT in LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_COMDAT_H
+#define LLVM_IR_COMDAT_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+class raw_ostream;
+template <typename ValueTy> class StringMapEntry;
+
+// This is a Name X SelectionKind pair. The reason for having this be an
+// independent object instead of just adding the name and the SelectionKind
+// to a GlobalObject is that it is invalid to have two Comdats with the same
+// name but different SelectionKind. This structure makes that unrepresentable.
+class Comdat {
+public:
+ enum SelectionKind {
+ Any, ///< The linker may choose any COMDAT.
+ ExactMatch, ///< The data referenced by the COMDAT must be the same.
+ Largest, ///< The linker will choose the largest COMDAT.
+ NoDuplicates, ///< No other Module may specify this COMDAT.
+ SameSize, ///< The data referenced by the COMDAT must be the same size.
+ };
+
+ Comdat(Comdat &&C);
+ SelectionKind getSelectionKind() const { return SK; }
+ void setSelectionKind(SelectionKind Val) { SK = Val; }
+ StringRef getName() const;
+ void print(raw_ostream &OS, bool IsForDebug = false) const;
+ void dump() const;
+
+private:
+ friend class Module;
+ Comdat();
+ Comdat(SelectionKind SK, StringMapEntry<Comdat> *Name);
+ Comdat(const Comdat &) = delete;
+
+ // Points to the map in Module.
+ StringMapEntry<Comdat> *Name;
+ SelectionKind SK;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) {
+ C.print(OS);
+ return OS;
+}
+
+} // end llvm namespace
+
+#endif
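Comdats are created through the owning Module (hence the private constructors
and the Module friendship above). A minimal sketch, with illustrative names:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    void addComdatGlobal(Module &M, LLVMContext &Ctx) {
      Comdat *C = M.getOrInsertComdat("shared_key");
      C->setSelectionKind(Comdat::Any);   // linker may keep any one copy
      auto *GV = new GlobalVariable(M, Type::getInt32Ty(Ctx),
                                    /*isConstant=*/false,
                                    GlobalValue::LinkOnceODRLinkage,
                                    ConstantInt::get(Type::getInt32Ty(Ctx), 0),
                                    "shared_key");
      GV->setComdat(C);                   // GV lives or dies with the COMDAT
    }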
diff --git a/contrib/llvm/include/llvm/IR/Constant.h b/contrib/llvm/include/llvm/IR/Constant.h
new file mode 100644
index 0000000..bb88905
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Constant.h
@@ -0,0 +1,166 @@
+//===-- llvm/Constant.h - Constant class definition -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Constant class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANT_H
+#define LLVM_IR_CONSTANT_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+ class APInt;
+
+ template<typename T> class SmallVectorImpl;
+
+/// This is an important base class in LLVM. It provides the common facilities
+/// of all constant values in an LLVM program. A constant is a value that is
+/// immutable at runtime. Functions are constants because their address is
+/// immutable. Same with global variables.
+///
+/// All constants share the capabilities provided in this class. All constants
+/// can have a null value. They can have an operand list. Constants can be
+/// simple (integer and floating point values), complex (arrays and structures),
+/// or expression based (computations yielding a constant value composed of
+/// only certain operators and other constant values).
+///
+/// Note that Constants are immutable (once created they never change)
+/// and are fully shared by structural equivalence. This means that two
+/// structurally equivalent constants will always have the same address.
+/// Constants are created on demand as needed and never deleted: thus clients
+/// don't have to worry about the lifetime of the objects.
+/// @brief LLVM Constant Representation
+class Constant : public User {
+ void operator=(const Constant &) = delete;
+ Constant(const Constant &) = delete;
+ void anchor() override;
+
+protected:
+ Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
+ : User(ty, vty, Ops, NumOps) {}
+
+public:
+ /// isNullValue - Return true if this is the value that would be returned by
+ /// getNullValue.
+ bool isNullValue() const;
+
+ /// \brief Returns true if the value is one.
+ bool isOneValue() const;
+
+ /// isAllOnesValue - Return true if this is the value that would be returned by
+ /// getAllOnesValue.
+ bool isAllOnesValue() const;
+
+ /// isNegativeZeroValue - Return true if the value is what would be returned
+ /// by getZeroValueForNegation.
+ bool isNegativeZeroValue() const;
+
+ /// Return true if the value is negative zero or null value.
+ bool isZeroValue() const;
+
+ /// \brief Return true if the value is not the smallest signed value.
+ bool isNotMinSignedValue() const;
+
+ /// \brief Return true if the value is the smallest signed value.
+ bool isMinSignedValue() const;
+
+ /// canTrap - Return true if evaluation of this constant could trap. This is
+ /// true for things like constant expressions that could divide by zero.
+ bool canTrap() const;
+
+ /// isThreadDependent - Return true if the value can vary between threads.
+ bool isThreadDependent() const;
+
+ /// Return true if the value is dependent on a dllimport variable.
+ bool isDLLImportDependent() const;
+
+ /// isConstantUsed - Return true if the constant has users other than constant
+ /// exprs and other dangling things.
+ bool isConstantUsed() const;
+
+ /// This method classifies the entry according to whether or not it may
+ /// generate a relocation entry. This must be conservative, so if it might
+ /// codegen to a relocatable entry, it should say so.
+ ///
+ /// FIXME: This really should not be in IR.
+ bool needsRelocation() const;
+
+ /// getAggregateElement - For aggregates (struct/array/vector) return the
+ /// constant that corresponds to the specified element if possible, or null if
+ /// not. This can return null if the element index is a ConstantExpr, or if
+ /// 'this' is a constant expr.
+ Constant *getAggregateElement(unsigned Elt) const;
+ Constant *getAggregateElement(Constant *Elt) const;
+
+ /// getSplatValue - If this is a splat vector constant, meaning that all of
+ /// the elements have the same value, return that value. Otherwise return null.
+ Constant *getSplatValue() const;
+
+ /// If C is a constant integer then return its value, otherwise C must be a
+ /// vector of constant integers, all equal, and the common value is returned.
+ const APInt &getUniqueInteger() const;
+
+ /// Called if some element of this constant is no longer valid.
+ /// At this point only other constants may be on the use_list for this
+ /// constant. Any constants on our Use list must also be destroy'd. The
+ /// implementation must be sure to remove the constant from the list of
+ /// available cached constants. Implementations should implement
+ /// destroyConstantImpl to remove constants from any pools/maps they are
+ /// contained in.
+ void destroyConstant();
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() >= ConstantFirstVal &&
+ V->getValueID() <= ConstantLastVal;
+ }
+
+ /// This method is a special form of User::replaceUsesOfWith
+ /// (which does not work on constants) that does work
+ /// on constants. Basically this method goes through the trouble of building
+ /// a new constant that is equivalent to the current one, with all uses of
+ /// From replaced with uses of To. After this construction is completed, all
+ /// of the users of 'this' are replaced to use the new constant, and then
+ /// 'this' is deleted. In general, you should not call this method; instead,
+ /// use Value::replaceAllUsesWith, which automatically dispatches to this
+ /// method as needed.
+ ///
+ void handleOperandChange(Value *, Value *, Use *);
+
+ static Constant *getNullValue(Type* Ty);
+
+ /// @returns the value for an integer or vector of integer constant of the
+ /// given type that has all its bits set to true.
+ /// @brief Get the all ones value
+ static Constant *getAllOnesValue(Type* Ty);
+
+ /// getIntegerValue - Return the value for an integer or pointer constant,
+ /// or a vector thereof, with the given scalar value.
+ static Constant *getIntegerValue(Type *Ty, const APInt &V);
+
+ /// removeDeadConstantUsers - If there are any dead constant users dangling
+ /// off of this constant, remove them. This method is useful for clients
+ /// that want to check to see if a global is unused, but don't want to deal
+ /// with potentially dead constants hanging off of the globals.
+ void removeDeadConstantUsers() const;
+
+ Constant *stripPointerCasts() {
+ return cast<Constant>(Value::stripPointerCasts());
+ }
+
+ const Constant *stripPointerCasts() const {
+ return const_cast<Constant*>(this)->stripPointerCasts();
+ }
+};
+
+} // End llvm namespace
+
+#endif
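Two properties called out above — the null/all-ones factories and structural
uniquing — in a minimal sketch (illustrative function name):

    #include <cassert>
    #include "llvm/IR/Constants.h"
    using namespace llvm;

    void demoConstants(LLVMContext &Ctx) {
      Type *I32 = Type::getInt32Ty(Ctx);
      Constant *Zero = Constant::getNullValue(I32);    // i32 0
      Constant *Ones = Constant::getAllOnesValue(I32); // i32 -1
      assert(Zero->isNullValue() && Ones->isAllOnesValue());
      // Constants are uniqued: structurally equal constants share an address.
      assert(Zero == ConstantInt::get(I32, 0));
    }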
diff --git a/contrib/llvm/include/llvm/IR/ConstantFolder.h b/contrib/llvm/include/llvm/IR/ConstantFolder.h
new file mode 100644
index 0000000..fb6ca3b
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ConstantFolder.h
@@ -0,0 +1,245 @@
+//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstantFolder class, a helper for IRBuilder.
+// It provides IRBuilder with a set of methods for creating constants
+// with minimal folding. For general constant creation and folding,
+// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTFOLDER_H
+#define LLVM_IR_CONSTANTFOLDER_H
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+
+ /// ConstantFolder - Create constants with minimal, target-independent folding.
+class ConstantFolder {
+public:
+ explicit ConstantFolder() {}
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getAdd(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFAdd(LHS, RHS);
+ }
+ Constant *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFSub(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFSub(LHS, RHS);
+ }
+ Constant *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateFMul(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFMul(LHS, RHS);
+ }
+ Constant *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getUDiv(LHS, RHS, isExact);
+ }
+ Constant *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getSDiv(LHS, RHS, isExact);
+ }
+ Constant *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFDiv(LHS, RHS);
+ }
+ Constant *CreateURem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getURem(LHS, RHS);
+ }
+ Constant *CreateSRem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getSRem(LHS, RHS);
+ }
+ Constant *CreateFRem(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getFRem(LHS, RHS);
+ }
+ Constant *CreateShl(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
+ }
+ Constant *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getLShr(LHS, RHS, isExact);
+ }
+ Constant *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ return ConstantExpr::getAShr(LHS, RHS, isExact);
+ }
+ Constant *CreateAnd(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getAnd(LHS, RHS);
+ }
+ Constant *CreateOr(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getOr(LHS, RHS);
+ }
+ Constant *CreateXor(Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::getXor(LHS, RHS);
+ }
+
+ Constant *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
+ return ConstantExpr::get(Opc, LHS, RHS);
+ }
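+
+ // Note (editorial sketch): IRBuilder's default Folder parameter is this
+ // class, so e.g. CreateAdd(ConstantInt 2, ConstantInt 2) returns the
+ // folded constant i32 4 instead of emitting an add instruction.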
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ return ConstantExpr::getNeg(C, HasNUW, HasNSW);
+ }
+ Constant *CreateFNeg(Constant *C) const {
+ return ConstantExpr::getFNeg(C);
+ }
+ Constant *CreateNot(Constant *C) const {
+ return ConstantExpr::getNot(C);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+ }
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(Ty, C, Idx);
+ }
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+ }
+
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+ }
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
+ }
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const {
+ return ConstantExpr::getCast(Op, C, DestTy);
+ }
+ Constant *CreatePointerCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getPointerCast(C, DestTy);
+ }
+
+ Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
+ Type *DestTy) const {
+ return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
+ }
+
+ Constant *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const {
+ return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
+ }
+ Constant *CreateFPCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getFPCast(C, DestTy);
+ }
+
+ Constant *CreateBitCast(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Constant *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Constant *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getZExtOrBitCast(C, DestTy);
+ }
+ Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getSExtOrBitCast(C, DestTy);
+ }
+
+ Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ return ConstantExpr::getTruncOrBitCast(C, DestTy);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateICmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return ConstantExpr::getCompare(P, LHS, RHS);
+ }
+ Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
+ Constant *RHS) const {
+ return ConstantExpr::getCompare(P, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateSelect(Constant *C, Constant *True, Constant *False) const {
+ return ConstantExpr::getSelect(C, True, False);
+ }
+
+ Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ return ConstantExpr::getExtractElement(Vec, Idx);
+ }
+
+ Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
+ return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
+ }
+
+ Constant *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
+ return ConstantExpr::getShuffleVector(V1, V2, Mask);
+ }
+
+ Constant *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const {
+ return ConstantExpr::getExtractValue(Agg, IdxList);
+ }
+
+ Constant *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const {
+ return ConstantExpr::getInsertValue(Agg, Val, IdxList);
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/ConstantRange.h b/contrib/llvm/include/llvm/IR/ConstantRange.h
new file mode 100644
index 0000000..fb596a3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ConstantRange.h
@@ -0,0 +1,289 @@
+//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Represent a range of possible values that an integral value may take when
+// the program is run. This keeps track of a lower and upper bound for the
+// constant, which MAY wrap around the end of the numeric range. To do this, it
+// keeps track of a [lower, upper) bound, which specifies an interval just like
+// STL iterators. When used with boolean values, the following are important
+// ranges:
+//
+// [F, F) = {} = Empty set
+// [T, F) = {T}
+// [F, T) = {F}
+// [T, T) = {F, T} = Full set
+//
+// The other integral ranges use min/max values for special range values. For
+// example, for 8-bit types, it uses:
+// [0, 0) = {} = Empty set
+// [255, 255) = {0..255} = Full Set
+//
+// Note that ConstantRange can be used to represent either signed or
+// unsigned ranges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTRANGE_H
+#define LLVM_IR_CONSTANTRANGE_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// This class represents a range of values.
+///
+class ConstantRange {
+ APInt Lower, Upper;
+
+ // If we have move semantics, pass APInts by value and move them into place.
+ typedef APInt APIntMoveTy;
+
+public:
+ /// Initialize a full (the default) or empty set for the specified bit width.
+ ///
+ explicit ConstantRange(uint32_t BitWidth, bool isFullSet = true);
+
+ /// Initialize a range to hold the single specified value.
+ ///
+ ConstantRange(APIntMoveTy Value);
+
+  /// @brief Initialize a range of values explicitly. This will assert if
+  /// Lower==Upper and Lower is not the minimum or maximum value for its type
+  /// (i.e. the pair denotes neither the empty nor the full set). It will also
+  /// assert if the two APInts are not the same bit width.
+ ConstantRange(APIntMoveTy Lower, APIntMoveTy Upper);
+
+ /// Produce the smallest range such that all values that may satisfy the given
+  /// predicate with any value contained within Other are contained in the
+ /// returned range. Formally, this returns a superset of
+ /// 'union over all y in Other . { x : icmp op x y is true }'. If the exact
+ /// answer is not representable as a ConstantRange, the return value will be a
+ /// proper superset of the above.
+ ///
+ /// Example: Pred = ult and Other = i8 [2, 5) returns Result = [0, 4)
+ static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &Other);
+
+ /// Produce the largest range such that all values in the returned range
+ /// satisfy the given predicate with all values contained within Other.
+ /// Formally, this returns a subset of
+ /// 'intersection over all y in Other . { x : icmp op x y is true }'. If the
+ /// exact answer is not representable as a ConstantRange, the return value
+ /// will be a proper subset of the above.
+ ///
+ /// Example: Pred = ult and Other = i8 [2, 5) returns [0, 2)
+ static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
+ const ConstantRange &Other);
+
+  /// Return the largest range containing all X such that "X BinOp C" does not
+ /// wrap (overflow).
+ ///
+ /// Example:
+ /// typedef OverflowingBinaryOperator OBO;
+ /// makeNoWrapRegion(Add, i8 1, OBO::NoSignedWrap) == [-128, 127)
+ /// makeNoWrapRegion(Add, i8 1, OBO::NoUnsignedWrap) == [0, -1)
+ /// makeNoWrapRegion(Add, i8 0, OBO::NoUnsignedWrap) == Full Set
+ static ConstantRange makeNoWrapRegion(Instruction::BinaryOps BinOp,
+ const APInt &C, unsigned NoWrapKind);
+
+ /// Return the lower value for this range.
+ ///
+ const APInt &getLower() const { return Lower; }
+
+ /// Return the upper value for this range.
+ ///
+ const APInt &getUpper() const { return Upper; }
+
+ /// Get the bit width of this ConstantRange.
+ ///
+ uint32_t getBitWidth() const { return Lower.getBitWidth(); }
+
+ /// Return true if this set contains all of the elements possible
+ /// for this data-type.
+ ///
+ bool isFullSet() const;
+
+ /// Return true if this set contains no members.
+ ///
+ bool isEmptySet() const;
+
+ /// Return true if this set wraps around the top of the range.
+ /// For example: [100, 8).
+ ///
+ bool isWrappedSet() const;
+
+ /// Return true if this set wraps around the INT_MIN of
+ /// its bitwidth. For example: i8 [120, 140).
+ ///
+ bool isSignWrappedSet() const;
+
+ /// Return true if the specified value is in the set.
+ ///
+ bool contains(const APInt &Val) const;
+
+ /// Return true if the other range is a subset of this one.
+ ///
+ bool contains(const ConstantRange &CR) const;
+
+ /// If this set contains a single element, return it, otherwise return null.
+ ///
+ const APInt *getSingleElement() const {
+ if (Upper == Lower + 1)
+ return &Lower;
+ return nullptr;
+ }
+
+ /// Return true if this set contains exactly one member.
+ ///
+ bool isSingleElement() const { return getSingleElement() != nullptr; }
+
+ /// Return the number of elements in this set.
+ ///
+ APInt getSetSize() const;
+
+ /// Return the largest unsigned value contained in the ConstantRange.
+ ///
+ APInt getUnsignedMax() const;
+
+ /// Return the smallest unsigned value contained in the ConstantRange.
+ ///
+ APInt getUnsignedMin() const;
+
+ /// Return the largest signed value contained in the ConstantRange.
+ ///
+ APInt getSignedMax() const;
+
+ /// Return the smallest signed value contained in the ConstantRange.
+ ///
+ APInt getSignedMin() const;
+
+ /// Return true if this range is equal to another range.
+ ///
+ bool operator==(const ConstantRange &CR) const {
+ return Lower == CR.Lower && Upper == CR.Upper;
+ }
+ bool operator!=(const ConstantRange &CR) const {
+ return !operator==(CR);
+ }
+
+ /// Subtract the specified constant from the endpoints of this constant range.
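+  /// As an illustrative example, i8 [4, 8) with 2 subtracted from its
+  /// endpoints becomes i8 [2, 6).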
+ ConstantRange subtract(const APInt &CI) const;
+
+ /// \brief Subtract the specified range from this range (aka relative
+ /// complement of the sets).
+ ConstantRange difference(const ConstantRange &CR) const;
+
+ /// Return the range that results from the intersection of
+ /// this range with another range. The resultant range is guaranteed to
+ /// include all elements contained in both input ranges, and to have the
+ /// smallest possible set size that does so. Because there may be two
+ /// intersections with the same set size, A.intersectWith(B) might not
+ /// be equal to B.intersectWith(A).
+ ///
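+  /// As an illustrative example, i8 [0, 4) intersected with i8 [2, 8)
+  /// yields i8 [2, 4).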
+ ConstantRange intersectWith(const ConstantRange &CR) const;
+
+ /// Return the range that results from the union of this range
+ /// with another range. The resultant range is guaranteed to include the
+ /// elements of both sets, but may contain more. For example, [3, 9) union
+ /// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
+ /// in either set before.
+ ///
+ ConstantRange unionWith(const ConstantRange &CR) const;
+
+ /// Return a new range in the specified integer type, which must
+ /// be strictly larger than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// zero extended to BitWidth.
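+  /// As an illustrative example, i8 [5, 10) zero extended to 16 bits is
+  /// i16 [5, 10).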
+ ConstantRange zeroExtend(uint32_t BitWidth) const;
+
+ /// Return a new range in the specified integer type, which must
+ /// be strictly larger than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// sign extended to BitWidth.
+ ConstantRange signExtend(uint32_t BitWidth) const;
+
+ /// Return a new range in the specified integer type, which must be
+ /// strictly smaller than the current type. The returned range will
+ /// correspond to the possible range of values if the source range had been
+ /// truncated to the specified type.
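+  /// As an illustrative example, i16 [100, 200) truncated to 8 bits is
+  /// i8 [100, 200), since every element already fits in 8 bits.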
+ ConstantRange truncate(uint32_t BitWidth) const;
+
+ /// Make this range have the bit width given by \p BitWidth. The
+ /// value is zero extended, truncated, or left alone to make it that width.
+ ConstantRange zextOrTrunc(uint32_t BitWidth) const;
+
+ /// Make this range have the bit width given by \p BitWidth. The
+ /// value is sign extended, truncated, or left alone to make it that width.
+ ConstantRange sextOrTrunc(uint32_t BitWidth) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from an addition of a value in this range and a value in \p Other.
+ ConstantRange add(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a subtraction of a value in this range and a value in \p Other.
+ ConstantRange sub(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a multiplication of a value in this range and a value in \p Other,
+ /// treating both this and \p Other as unsigned ranges.
+ ConstantRange multiply(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a signed maximum of a value in this range and a value in \p Other.
+ ConstantRange smax(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from an unsigned maximum of a value in this range and a value in \p Other.
+ ConstantRange umax(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from an unsigned division of a value in this range and a value in
+ /// \p Other.
+ ConstantRange udiv(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a binary-and of a value in this range by a value in \p Other.
+ ConstantRange binaryAnd(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a binary-or of a value in this range by a value in \p Other.
+ ConstantRange binaryOr(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting
+ /// from a left shift of a value in this range by a value in \p Other.
+ /// TODO: This isn't fully implemented yet.
+ ConstantRange shl(const ConstantRange &Other) const;
+
+ /// Return a new range representing the possible values resulting from a
+ /// logical right shift of a value in this range and a value in \p Other.
+ ConstantRange lshr(const ConstantRange &Other) const;
+
+ /// Return a new range that is the logical not of the current set.
+ ///
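+  /// As an illustrative example, the inverse of i8 [1, 3) is the wrapped
+  /// set i8 [3, 1).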
+ ConstantRange inverse() const;
+
+ /// Print out the bounds to a stream.
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// Allow printing from a debugger easily.
+ ///
+ void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
+ CR.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Constants.h b/contrib/llvm/include/llvm/IR/Constants.h
new file mode 100644
index 0000000..a5a20c9
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Constants.h
@@ -0,0 +1,1294 @@
+//===-- llvm/Constants.h - Constant class subclass definitions --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for the subclasses of Constant,
+/// which represent the different flavors of constant values that live in LLVM.
+/// Note that Constants are immutable (once created they never change) and are
+/// fully shared by structural equivalence. This means that two structurally
+/// equivalent constants will always have the same address. Constants are
+/// created on demand as needed and never deleted: thus clients don't have to
+/// worry about the lifetime of the objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_CONSTANTS_H
+#define LLVM_IR_CONSTANTS_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/OperandTraits.h"
+
+namespace llvm {
+
+class ArrayType;
+class IntegerType;
+class StructType;
+class PointerType;
+class VectorType;
+class SequentialType;
+
+struct ConstantExprKeyType;
+template <class ConstantClass> struct ConstantAggrKeyType;
+
+//===----------------------------------------------------------------------===//
+/// This is the shared class of boolean and integer constants. This class
+/// represents both boolean and integral constants.
+/// @brief Class for constant integers.
+class ConstantInt : public Constant {
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+ ConstantInt(const ConstantInt &) = delete;
+ ConstantInt(IntegerType *Ty, const APInt& V);
+ APInt Val;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+ static ConstantInt *getTrue(LLVMContext &Context);
+ static ConstantInt *getFalse(LLVMContext &Context);
+ static Constant *getTrue(Type *Ty);
+ static Constant *getFalse(Type *Ty);
+
+ /// If Ty is a vector type, return a Constant with a splat of the given
+ /// value. Otherwise return a ConstantInt for the given value.
+ static Constant *get(Type *Ty, uint64_t V, bool isSigned = false);
+
+ /// Return a ConstantInt with the specified integer value for the specified
+ /// type. If the type is wider than 64 bits, the value will be zero-extended
+ /// to fit the type, unless isSigned is true, in which case the value will
+ /// be interpreted as a 64-bit signed integer and sign-extended to fit
+ /// the type.
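+  /// As an illustrative example, for an 8-bit integer type, get(Ty, 255) and
+  /// getSigned(Ty, -1) yield the same all-ones constant, because the value is
+  /// stored as an unsigned APInt either way.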
+ /// @brief Get a ConstantInt for a specific value.
+ static ConstantInt *get(IntegerType *Ty, uint64_t V,
+ bool isSigned = false);
+
+ /// Return a ConstantInt with the specified value for the specified type. The
+  /// value V will be canonicalized to an unsigned APInt. Accessing it with
+ /// either getSExtValue() or getZExtValue() will yield a correctly sized and
+ /// signed value for the type Ty.
+ /// @brief Get a ConstantInt for a specific signed value.
+ static ConstantInt *getSigned(IntegerType *Ty, int64_t V);
+ static Constant *getSigned(Type *Ty, int64_t V);
+
+ /// Return a ConstantInt with the specified value and an implied Type. The
+ /// type is the integer type that corresponds to the bit width of the value.
+ static ConstantInt *get(LLVMContext &Context, const APInt &V);
+
+  /// Return a ConstantInt constructed from the string Str with the given
+ /// radix.
+ static ConstantInt *get(IntegerType *Ty, StringRef Str,
+ uint8_t radix);
+
+ /// If Ty is a vector type, return a Constant with a splat of the given
+ /// value. Otherwise return a ConstantInt for the given value.
+ static Constant *get(Type* Ty, const APInt& V);
+
+ /// Return the constant as an APInt value reference. This allows clients to
+  /// obtain a copy of the value, with all its precision intact.
+ /// @brief Return the constant's value.
+ inline const APInt &getValue() const {
+ return Val;
+ }
+
+ /// getBitWidth - Return the bitwidth of this constant.
+ unsigned getBitWidth() const { return Val.getBitWidth(); }
+
+ /// Return the constant as a 64-bit unsigned integer value after it
+ /// has been zero extended as appropriate for the type of this constant. Note
+ /// that this method can assert if the value does not fit in 64 bits.
+ /// @brief Return the zero extended value.
+ inline uint64_t getZExtValue() const {
+ return Val.getZExtValue();
+ }
+
+ /// Return the constant as a 64-bit integer value after it has been sign
+ /// extended as appropriate for the type of this constant. Note that
+ /// this method can assert if the value does not fit in 64 bits.
+ /// @brief Return the sign extended value.
+ inline int64_t getSExtValue() const {
+ return Val.getSExtValue();
+ }
+
+ /// A helper method that can be used to determine if the constant contained
+  /// within is equal to the given value. This only works for very small
+  /// values, because only such values can be represented by all types.
+  /// @brief Determine if this constant's value is the same as an unsigned char.
+ bool equalsInt(uint64_t V) const {
+ return Val == V;
+ }
+
+ /// getType - Specialize the getType() method to always return an IntegerType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline IntegerType *getType() const {
+ return cast<IntegerType>(Value::getType());
+ }
+
+ /// This static method returns true if the type Ty is big enough to
+ /// represent the value V. This can be used to avoid having the get method
+ /// assert when V is larger than Ty can represent. Note that there are two
+ /// versions of this method, one for unsigned and one for signed integers.
+ /// Although ConstantInt canonicalizes everything to an unsigned integer,
+ /// the signed version avoids callers having to convert a signed quantity
+ /// to the appropriate unsigned type before calling the method.
+ /// @returns true if V is a valid value for type Ty
+ /// @brief Determine if the value is in range for the given type.
+ static bool isValueValidForType(Type *Ty, uint64_t V);
+ static bool isValueValidForType(Type *Ty, int64_t V);
+
+ bool isNegative() const { return Val.isNegative(); }
+
+ /// This is just a convenience method to make client code smaller for a
+  /// common case. It also correctly performs the comparison without the
+ /// potential for an assertion from getZExtValue().
+ bool isZero() const {
+ return Val == 0;
+ }
+
+ /// This is just a convenience method to make client code smaller for a
+ /// common case. It also correctly performs the comparison without the
+ /// potential for an assertion from getZExtValue().
+ /// @brief Determine if the value is one.
+ bool isOne() const {
+ return Val == 1;
+ }
+
+ /// This function will return true iff every bit in this constant is set
+ /// to true.
+ /// @returns true iff this constant's bits are all set to true.
+ /// @brief Determine if the value is all ones.
+ bool isMinusOne() const {
+ return Val.isAllOnesValue();
+ }
+
+ /// This function will return true iff this constant represents the largest
+ /// value that may be represented by the constant's type.
+ /// @returns true iff this is the largest value that may be represented
+ /// by this type.
+ /// @brief Determine if the value is maximal.
+ bool isMaxValue(bool isSigned) const {
+ if (isSigned)
+ return Val.isMaxSignedValue();
+ else
+ return Val.isMaxValue();
+ }
+
+ /// This function will return true iff this constant represents the smallest
+ /// value that may be represented by this constant's type.
+ /// @returns true if this is the smallest value that may be represented by
+ /// this type.
+ /// @brief Determine if the value is minimal.
+ bool isMinValue(bool isSigned) const {
+ if (isSigned)
+ return Val.isMinSignedValue();
+ else
+ return Val.isMinValue();
+ }
+
+ /// This function will return true iff this constant represents a value with
+  /// active bits bigger than 64 bits or a value greater than or equal to the
+  /// given uint64_t value.
+  /// @returns true iff this constant is greater than or equal to the given
+  /// number.
+  /// @brief Determine if the value is greater than or equal to the given
+  /// number.
+ bool uge(uint64_t Num) const {
+ return Val.getActiveBits() > 64 || Val.getZExtValue() >= Num;
+ }
+
+ /// getLimitedValue - If the value is smaller than the specified limit,
+ /// return it, otherwise return the limit value. This causes the value
+ /// to saturate to the limit.
+ /// @returns the min of the value of the constant and the specified value
+ /// @brief Get the constant's value with a saturation limit
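+  /// As an illustrative example, a constant holding 42 yields 10 from
+  /// getLimitedValue(10) and 42 from getLimitedValue(100).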
+ uint64_t getLimitedValue(uint64_t Limit = ~0ULL) const {
+ return Val.getLimitedValue(Limit);
+ }
+
+ /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantIntVal;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// ConstantFP - Floating Point Values [float, double]
+///
+class ConstantFP : public Constant {
+ APFloat Val;
+ void anchor() override;
+ void *operator new(size_t, unsigned) = delete;
+ ConstantFP(const ConstantFP &) = delete;
+ friend class LLVMContextImpl;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ ConstantFP(Type *Ty, const APFloat& V);
+protected:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+ /// Floating point negation must be implemented with f(x) = -0.0 - x. This
+ /// method returns the negative zero constant for floating point or vector
+ /// floating point types; for all other types, it returns the null value.
+ static Constant *getZeroValueForNegation(Type *Ty);
+
+ /// get() - This returns a ConstantFP, or a vector containing a splat of a
+ /// ConstantFP, for the specified value in the specified type. This should
+ /// only be used for simple constant values like 2.0/1.0 etc, that are
+ /// known-valid both as host double and as the target format.
+ static Constant *get(Type* Ty, double V);
+ static Constant *get(Type* Ty, StringRef Str);
+ static ConstantFP *get(LLVMContext &Context, const APFloat &V);
+ static Constant *getNaN(Type *Ty, bool Negative = false, unsigned type = 0);
+ static Constant *getNegativeZero(Type *Ty);
+ static Constant *getInfinity(Type *Ty, bool Negative = false);
+
+ /// isValueValidForType - return true if Ty is big enough to represent V.
+ static bool isValueValidForType(Type *Ty, const APFloat &V);
+ inline const APFloat &getValueAPF() const { return Val; }
+
+ /// isZero - Return true if the value is positive or negative zero.
+ bool isZero() const { return Val.isZero(); }
+
+ /// isNegative - Return true if the sign bit is set.
+ bool isNegative() const { return Val.isNegative(); }
+
+ /// isInfinity - Return true if the value is infinity
+ bool isInfinity() const { return Val.isInfinity(); }
+
+ /// isNaN - Return true if the value is a NaN.
+ bool isNaN() const { return Val.isNaN(); }
+
+ /// isExactlyValue - We don't rely on operator== working on double values, as
+ /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+ /// As such, this method can be used to do an exact bit-for-bit comparison of
+ /// two floating point values. The version with a double operand is retained
+ /// because it's so convenient to write isExactlyValue(2.0), but please use
+ /// it only for simple constants.
+ bool isExactlyValue(const APFloat &V) const;
+
+ bool isExactlyValue(double V) const {
+ bool ignored;
+ APFloat FV(V);
+ FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored);
+ return isExactlyValue(FV);
+ }
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantFPVal;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantAggregateZero - All zero aggregate value
+///
+class ConstantAggregateZero : public Constant {
+ void *operator new(size_t, unsigned) = delete;
+ ConstantAggregateZero(const ConstantAggregateZero &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ explicit ConstantAggregateZero(Type *ty)
+ : Constant(ty, ConstantAggregateZeroVal, nullptr, 0) {}
+protected:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+ static ConstantAggregateZero *get(Type *Ty);
+
+ /// getSequentialElement - If this CAZ has array or vector type, return a zero
+ /// with the right element type.
+ Constant *getSequentialElement() const;
+
+ /// getStructElement - If this CAZ has struct type, return a zero with the
+ /// right element type for the specified element.
+ Constant *getStructElement(unsigned Elt) const;
+
+ /// getElementValue - Return a zero of the right value for the specified GEP
+ /// index.
+ Constant *getElementValue(Constant *C) const;
+
+ /// getElementValue - Return a zero of the right value for the specified GEP
+ /// index.
+ Constant *getElementValue(unsigned Idx) const;
+
+ /// \brief Return the number of elements in the array, vector, or struct.
+ unsigned getNumElements() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantAggregateZeroVal;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// ConstantArray - Constant Array Declarations
+///
+class ConstantArray : public Constant {
+ friend struct ConstantAggrKeyType<ConstantArray>;
+ ConstantArray(const ConstantArray &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ ConstantArray(ArrayType *T, ArrayRef<Constant *> Val);
+public:
+ // ConstantArray accessors
+ static Constant *get(ArrayType *T, ArrayRef<Constant*> V);
+
+private:
+ static Constant *getImpl(ArrayType *T, ArrayRef<Constant *> V);
+
+public:
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+ /// getType - Specialize the getType() method to always return an ArrayType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline ArrayType *getType() const {
+ return cast<ArrayType>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantArrayVal;
+ }
+};
+
+template <>
+struct OperandTraits<ConstantArray> :
+ public VariadicOperandTraits<ConstantArray> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantArray, Constant)
+
+//===----------------------------------------------------------------------===//
+// ConstantStruct - Constant Struct Declarations
+//
+class ConstantStruct : public Constant {
+ friend struct ConstantAggrKeyType<ConstantStruct>;
+ ConstantStruct(const ConstantStruct &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ ConstantStruct(StructType *T, ArrayRef<Constant *> Val);
+public:
+ // ConstantStruct accessors
+ static Constant *get(StructType *T, ArrayRef<Constant*> V);
+ static Constant *get(StructType *T, ...) LLVM_END_WITH_NULL;
+
+ /// getAnon - Return an anonymous struct that has the specified
+ /// elements. If the struct is possibly empty, then you must specify a
+ /// context.
+ static Constant *getAnon(ArrayRef<Constant*> V, bool Packed = false) {
+ return get(getTypeForElements(V, Packed), V);
+ }
+ static Constant *getAnon(LLVMContext &Ctx,
+ ArrayRef<Constant*> V, bool Packed = false) {
+ return get(getTypeForElements(Ctx, V, Packed), V);
+ }
+
+ /// getTypeForElements - Return an anonymous struct type to use for a constant
+ /// with the specified set of elements. The list must not be empty.
+ static StructType *getTypeForElements(ArrayRef<Constant*> V,
+ bool Packed = false);
+ /// getTypeForElements - This version of the method allows an empty list.
+ static StructType *getTypeForElements(LLVMContext &Ctx,
+ ArrayRef<Constant*> V,
+ bool Packed = false);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+ /// getType() specialization - Reduce amount of casting...
+ ///
+ inline StructType *getType() const {
+ return cast<StructType>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantStructVal;
+ }
+};
+
+template <>
+struct OperandTraits<ConstantStruct> :
+ public VariadicOperandTraits<ConstantStruct> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantStruct, Constant)
+
+
+//===----------------------------------------------------------------------===//
+/// ConstantVector - Constant Vector Declarations
+///
+class ConstantVector : public Constant {
+ friend struct ConstantAggrKeyType<ConstantVector>;
+ ConstantVector(const ConstantVector &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ ConstantVector(VectorType *T, ArrayRef<Constant *> Val);
+public:
+ // ConstantVector accessors
+ static Constant *get(ArrayRef<Constant*> V);
+
+private:
+ static Constant *getImpl(ArrayRef<Constant *> V);
+
+public:
+ /// getSplat - Return a ConstantVector with the specified constant in each
+ /// element.
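+  /// As an illustrative example, getSplat(4, ConstantInt::get(Int32Ty, 7)),
+  /// with Int32Ty an assumed i32 type, yields the <4 x i32> splat whose
+  /// elements are all i32 7.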
+ static Constant *getSplat(unsigned NumElts, Constant *Elt);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+ /// getType - Specialize the getType() method to always return a VectorType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline VectorType *getType() const {
+ return cast<VectorType>(Value::getType());
+ }
+
+ /// getSplatValue - If this is a splat constant, meaning that all of the
+ /// elements have the same value, return that value. Otherwise return NULL.
+ Constant *getSplatValue() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantVectorVal;
+ }
+};
+
+template <>
+struct OperandTraits<ConstantVector> :
+ public VariadicOperandTraits<ConstantVector> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantVector, Constant)
+
+//===----------------------------------------------------------------------===//
+/// ConstantPointerNull - a constant pointer value that points to null
+///
+class ConstantPointerNull : public Constant {
+ void *operator new(size_t, unsigned) = delete;
+ ConstantPointerNull(const ConstantPointerNull &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ explicit ConstantPointerNull(PointerType *T)
+ : Constant(T,
+ Value::ConstantPointerNullVal, nullptr, 0) {}
+
+protected:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+ /// get() - Static factory methods - Return objects of the specified value
+ static ConstantPointerNull *get(PointerType *T);
+
+  /// getType - Specialize the getType() method to always return a PointerType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline PointerType *getType() const {
+ return cast<PointerType>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantPointerNullVal;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataSequential - A vector or array constant whose element type is a
+/// simple 1/2/4/8-byte integer or float/double, and whose elements are just
+/// simple data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+///
+/// This is the common base class of ConstantDataArray and ConstantDataVector.
+///
+class ConstantDataSequential : public Constant {
+ friend class LLVMContextImpl;
+ /// DataElements - A pointer to the bytes underlying this constant (which is
+ /// owned by the uniquing StringMap).
+ const char *DataElements;
+
+  /// Next - This forms a linked list of ConstantDataSequential nodes that have
+  /// the same value but different type. For example, 0,0,0,1 could be a
+  /// 4-element array of i8, or a 1-element array of i32. They'll both end up in
+ /// the same StringMap bucket, linked up.
+ ConstantDataSequential *Next;
+ void *operator new(size_t, unsigned) = delete;
+ ConstantDataSequential(const ConstantDataSequential &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data)
+ : Constant(ty, VT, nullptr, 0), DataElements(Data), Next(nullptr) {}
+ ~ConstantDataSequential() override { delete Next; }
+
+ static Constant *getImpl(StringRef Bytes, Type *Ty);
+
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// isElementTypeCompatible - Return true if a ConstantDataSequential can be
+ /// formed with a vector or array of the specified element type.
+ /// ConstantDataArray only works with normal float and int types that are
+ /// stored densely in memory, not with things like i42 or x86_f80.
+ static bool isElementTypeCompatible(Type *Ty);
+
+ /// getElementAsInteger - If this is a sequential container of integers (of
+ /// any size), return the specified element in the low bits of a uint64_t.
+ uint64_t getElementAsInteger(unsigned i) const;
+
+ /// getElementAsAPFloat - If this is a sequential container of floating point
+ /// type, return the specified element as an APFloat.
+ APFloat getElementAsAPFloat(unsigned i) const;
+
+  /// getElementAsFloat - If this is a sequential container of floats, return
+ /// the specified element as a float.
+ float getElementAsFloat(unsigned i) const;
+
+  /// getElementAsDouble - If this is a sequential container of doubles, return
+ /// the specified element as a double.
+ double getElementAsDouble(unsigned i) const;
+
+ /// getElementAsConstant - Return a Constant for a specified index's element.
+ /// Note that this has to compute a new constant to return, so it isn't as
+ /// efficient as getElementAsInteger/Float/Double.
+ Constant *getElementAsConstant(unsigned i) const;
+
+ /// getType - Specialize the getType() method to always return a
+ /// SequentialType, which reduces the amount of casting needed in parts of the
+ /// compiler.
+ inline SequentialType *getType() const {
+ return cast<SequentialType>(Value::getType());
+ }
+
+ /// getElementType - Return the element type of the array/vector.
+ Type *getElementType() const;
+
+ /// getNumElements - Return the number of elements in the array or vector.
+ unsigned getNumElements() const;
+
+ /// getElementByteSize - Return the size (in bytes) of each element in the
+ /// array/vector. The size of the elements is known to be a multiple of one
+ /// byte.
+ uint64_t getElementByteSize() const;
+
+
+ /// isString - This method returns true if this is an array of i8.
+ bool isString() const;
+
+ /// isCString - This method returns true if the array "isString", ends with a
+  /// nul byte, and does not contain any other nul bytes.
+ bool isCString() const;
+
+ /// getAsString - If this array is isString(), then this method returns the
+ /// array as a StringRef. Otherwise, it asserts out.
+ ///
+ StringRef getAsString() const {
+ assert(isString() && "Not a string");
+ return getRawDataValues();
+ }
+
+ /// getAsCString - If this array is isCString(), then this method returns the
+ /// array (without the trailing null byte) as a StringRef. Otherwise, it
+ /// asserts out.
+ ///
+ StringRef getAsCString() const {
+ assert(isCString() && "Isn't a C string");
+ StringRef Str = getAsString();
+ return Str.substr(0, Str.size()-1);
+ }
+
+ /// getRawDataValues - Return the raw, underlying, bytes of this data. Note
+ /// that this is an extremely tricky thing to work with, as it exposes the
+ /// host endianness of the data elements.
+ StringRef getRawDataValues() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataArrayVal ||
+ V->getValueID() == ConstantDataVectorVal;
+ }
+private:
+ const char *getElementPointer(unsigned Elt) const;
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataArray - An array constant whose element type is a simple
+/// 1/2/4/8-byte integer or float/double, and whose elements are just simple
+/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+class ConstantDataArray : public ConstantDataSequential {
+ void *operator new(size_t, unsigned) = delete;
+ ConstantDataArray(const ConstantDataArray &) = delete;
+ void anchor() override;
+ friend class ConstantDataSequential;
+ explicit ConstantDataArray(Type *ty, const char *Data)
+ : ConstantDataSequential(ty, ConstantDataArrayVal, Data) {}
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// get() constructors - Return a constant with array type with an element
+ /// count and element type matching the ArrayRef passed in. Note that this
+ /// can return a ConstantAggregateZero object.
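+  /// As an illustrative example, an ArrayRef of two uint32_t values yields a
+  /// [2 x i32] array constant (or a ConstantAggregateZero if both values are
+  /// zero).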
+ static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
+
+ /// getFP() constructors - Return a constant with array type with an element
+ /// count and element type of float with precision matching the number of
+  /// bits in the ArrayRef passed in (i.e. half for 16 bits, float for 32 bits,
+  /// double for 64 bits). Note that this can return a ConstantAggregateZero
+ /// object.
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+
+ /// getString - This method constructs a CDS and initializes it with a text
+ /// string. The default behavior (AddNull==true) causes a null terminator to
+ /// be placed at the end of the array (increasing the length of the string by
+  /// one more than the StringRef would normally indicate). Pass AddNull=false
+ /// to disable this behavior.
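+  /// As an illustrative example, getString(Ctx, "hi"), with Ctx an assumed
+  /// LLVMContext, yields the [3 x i8] constant c"hi\00"; with AddNull=false it
+  /// would be the [2 x i8] constant c"hi".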
+ static Constant *getString(LLVMContext &Context, StringRef Initializer,
+ bool AddNull = true);
+
+ /// getType - Specialize the getType() method to always return an ArrayType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline ArrayType *getType() const {
+ return cast<ArrayType>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataArrayVal;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantDataVector - A vector constant whose element type is a simple
+/// 1/2/4/8-byte integer or float/double, and whose elements are just simple
+/// data values (i.e. ConstantInt/ConstantFP). This Constant node has no
+/// operands because it stores all of the elements of the constant as densely
+/// packed data, instead of as Value*'s.
+class ConstantDataVector : public ConstantDataSequential {
+ void *operator new(size_t, unsigned) = delete;
+ ConstantDataVector(const ConstantDataVector &) = delete;
+ void anchor() override;
+ friend class ConstantDataSequential;
+ explicit ConstantDataVector(Type *ty, const char *Data)
+ : ConstantDataSequential(ty, ConstantDataVectorVal, Data) {}
+protected:
+ // allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+
+ /// get() constructors - Return a constant with vector type with an element
+ /// count and element type matching the ArrayRef passed in. Note that this
+ /// can return a ConstantAggregateZero object.
+ static Constant *get(LLVMContext &Context, ArrayRef<uint8_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<float> Elts);
+ static Constant *get(LLVMContext &Context, ArrayRef<double> Elts);
+
+ /// getFP() constructors - Return a constant with vector type with an element
+ /// count and element type of float with the precision matching the number of
+  /// bits in the ArrayRef passed in (i.e. half for 16 bits, float for 32 bits,
+  /// double for 64 bits). Note that this can return a ConstantAggregateZero
+ /// object.
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint16_t> Elts);
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint32_t> Elts);
+ static Constant *getFP(LLVMContext &Context, ArrayRef<uint64_t> Elts);
+
+ /// getSplat - Return a ConstantVector with the specified constant in each
+  /// element. The specified constant has to be of a compatible type (i8/i16/
+ /// i32/i64/float/double) and must be a ConstantFP or ConstantInt.
+ static Constant *getSplat(unsigned NumElts, Constant *Elt);
+
+ /// getSplatValue - If this is a splat constant, meaning that all of the
+ /// elements have the same value, return that value. Otherwise return NULL.
+ Constant *getSplatValue() const;
+
+ /// getType - Specialize the getType() method to always return a VectorType,
+ /// which reduces the amount of casting needed in parts of the compiler.
+ ///
+ inline VectorType *getType() const {
+ return cast<VectorType>(Value::getType());
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ ///
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantDataVectorVal;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// ConstantTokenNone - a constant token which is empty
+///
+class ConstantTokenNone : public Constant {
+ void *operator new(size_t, unsigned) = delete;
+ ConstantTokenNone(const ConstantTokenNone &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ explicit ConstantTokenNone(LLVMContext &Context)
+ : Constant(Type::getTokenTy(Context), ConstantTokenNoneVal, nullptr, 0) {}
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) { return User::operator new(s, 0); }
+
+public:
+ /// Return the ConstantTokenNone.
+ static ConstantTokenNone *get(LLVMContext &Context);
+
+ /// @brief Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Value *V) {
+ return V->getValueID() == ConstantTokenNoneVal;
+ }
+};
+
+/// BlockAddress - The address of a basic block.
+///
+class BlockAddress : public Constant {
+ void *operator new(size_t, unsigned) = delete;
+ void *operator new(size_t s) { return User::operator new(s, 2); }
+ BlockAddress(Function *F, BasicBlock *BB);
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+public:
+ /// get - Return a BlockAddress for the specified function and basic block.
+ static BlockAddress *get(Function *F, BasicBlock *BB);
+
+ /// get - Return a BlockAddress for the specified basic block. The basic
+ /// block must be embedded into a function.
+ static BlockAddress *get(BasicBlock *BB);
+
+ /// \brief Lookup an existing \c BlockAddress constant for the given
+ /// BasicBlock.
+ ///
+ /// \returns 0 if \c !BB->hasAddressTaken(), otherwise the \c BlockAddress.
+ static BlockAddress *lookup(const BasicBlock *BB);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ Function *getFunction() const { return (Function*)Op<0>().get(); }
+ BasicBlock *getBasicBlock() const { return (BasicBlock*)Op<1>().get(); }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == BlockAddressVal;
+ }
+};
+
+template <>
+struct OperandTraits<BlockAddress> :
+ public FixedNumOperandTraits<BlockAddress, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BlockAddress, Value)
+
+
+//===----------------------------------------------------------------------===//
+/// ConstantExpr - a constant value that is initialized with an expression using
+/// other constant values.
+///
+/// This class uses the standard Instruction opcodes to define the various
+/// constant expressions. The Opcode field for the ConstantExpr class is
+/// maintained in the Value::SubclassData field.
+class ConstantExpr : public Constant {
+ friend struct ConstantExprKeyType;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ ConstantExpr(Type *ty, unsigned Opcode, Use *Ops, unsigned NumOps)
+ : Constant(ty, ConstantExprVal, Ops, NumOps) {
+ // Operation type (an Instruction opcode) is stored as the SubclassData.
+ setValueSubclassData(Opcode);
+ }
+
+public:
+ // Static methods to construct a ConstantExpr of different kinds. Note that
+  // these methods may return an object that is not an instance of the
+ // ConstantExpr class, because they will attempt to fold the constant
+ // expression into something simpler if possible.
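+  //
+  // For example, getAdd() on two ConstantInts folds to a plain ConstantInt
+  // holding the sum (i32 1 + i32 2 yields the ConstantInt i32 3) rather than
+  // producing a ConstantExpr node.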
+
+ /// getAlignOf constant expr - computes the alignment of a type in a target
+ /// independent way (Note: the return type is an i64).
+ static Constant *getAlignOf(Type *Ty);
+
+ /// getSizeOf constant expr - computes the (alloc) size of a type (in
+ /// address-units, not bits) in a target independent way (Note: the return
+ /// type is an i64).
+ ///
+ static Constant *getSizeOf(Type *Ty);
+
+ /// getOffsetOf constant expr - computes the offset of a struct field in a
+ /// target independent way (Note: the return type is an i64).
+ ///
+ static Constant *getOffsetOf(StructType *STy, unsigned FieldNo);
+
+ /// getOffsetOf constant expr - This is a generalized form of getOffsetOf,
+ /// which supports any aggregate type, and any Constant index.
+ ///
+ static Constant *getOffsetOf(Type *Ty, Constant *FieldNo);
+
+ static Constant *getNeg(Constant *C, bool HasNUW = false, bool HasNSW =false);
+ static Constant *getFNeg(Constant *C);
+ static Constant *getNot(Constant *C);
+ static Constant *getAdd(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
+ static Constant *getFAdd(Constant *C1, Constant *C2);
+ static Constant *getSub(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
+ static Constant *getFSub(Constant *C1, Constant *C2);
+ static Constant *getMul(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
+ static Constant *getFMul(Constant *C1, Constant *C2);
+ static Constant *getUDiv(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getSDiv(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getFDiv(Constant *C1, Constant *C2);
+ static Constant *getURem(Constant *C1, Constant *C2);
+ static Constant *getSRem(Constant *C1, Constant *C2);
+ static Constant *getFRem(Constant *C1, Constant *C2);
+ static Constant *getAnd(Constant *C1, Constant *C2);
+ static Constant *getOr(Constant *C1, Constant *C2);
+ static Constant *getXor(Constant *C1, Constant *C2);
+ static Constant *getShl(Constant *C1, Constant *C2,
+ bool HasNUW = false, bool HasNSW = false);
+ static Constant *getLShr(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getAShr(Constant *C1, Constant *C2, bool isExact = false);
+ static Constant *getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getSExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getZExt(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getFPTrunc(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+ static Constant *getFPExtend(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+ static Constant *getUIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getSIToFP(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getFPToUI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getFPToSI(Constant *C, Type *Ty, bool OnlyIfReduced = false);
+ static Constant *getPtrToInt(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+ static Constant *getIntToPtr(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+ static Constant *getBitCast(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+ static Constant *getAddrSpaceCast(Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+
+ static Constant *getNSWNeg(Constant *C) { return getNeg(C, false, true); }
+ static Constant *getNUWNeg(Constant *C) { return getNeg(C, true, false); }
+ static Constant *getNSWAdd(Constant *C1, Constant *C2) {
+ return getAdd(C1, C2, false, true);
+ }
+ static Constant *getNUWAdd(Constant *C1, Constant *C2) {
+ return getAdd(C1, C2, true, false);
+ }
+ static Constant *getNSWSub(Constant *C1, Constant *C2) {
+ return getSub(C1, C2, false, true);
+ }
+ static Constant *getNUWSub(Constant *C1, Constant *C2) {
+ return getSub(C1, C2, true, false);
+ }
+ static Constant *getNSWMul(Constant *C1, Constant *C2) {
+ return getMul(C1, C2, false, true);
+ }
+ static Constant *getNUWMul(Constant *C1, Constant *C2) {
+ return getMul(C1, C2, true, false);
+ }
+ static Constant *getNSWShl(Constant *C1, Constant *C2) {
+ return getShl(C1, C2, false, true);
+ }
+ static Constant *getNUWShl(Constant *C1, Constant *C2) {
+ return getShl(C1, C2, true, false);
+ }
+ static Constant *getExactSDiv(Constant *C1, Constant *C2) {
+ return getSDiv(C1, C2, true);
+ }
+ static Constant *getExactUDiv(Constant *C1, Constant *C2) {
+ return getUDiv(C1, C2, true);
+ }
+ static Constant *getExactAShr(Constant *C1, Constant *C2) {
+ return getAShr(C1, C2, true);
+ }
+ static Constant *getExactLShr(Constant *C1, Constant *C2) {
+ return getLShr(C1, C2, true);
+ }
+
+ /// getBinOpIdentity - Return the identity for the given binary operation,
+ /// i.e. a constant C such that X op C = X and C op X = X for every X. It
+ /// returns null if the operator doesn't have an identity.
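+  /// For example, the identity for Add is the zero of Ty, and the identity
+  /// for Mul is one.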
+ static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty);
+
+ /// getBinOpAbsorber - Return the absorbing element for the given binary
+ /// operation, i.e. a constant C such that X op C = C and C op X = C for
+ /// every X. For example, this returns zero for integer multiplication.
+ /// It returns null if the operator doesn't have an absorbing element.
+ static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+ /// \brief Convenience function for getting a Cast operation.
+ ///
+ /// \param ops The opcode for the conversion
+ /// \param C The constant to be converted
+ /// \param Ty The type to which the constant is converted
+ /// \param OnlyIfReduced see \a getWithOperands() docs.
+ static Constant *getCast(unsigned ops, Constant *C, Type *Ty,
+ bool OnlyIfReduced = false);
+
+ /// @brief Create a ZExt or BitCast cast constant expression
+ static Constant *getZExtOrBitCast(
+ Constant *C, ///< The constant to zext or bitcast
+ Type *Ty ///< The type to zext or bitcast C to
+ );
+
+ /// @brief Create a SExt or BitCast cast constant expression
+ static Constant *getSExtOrBitCast(
+ Constant *C, ///< The constant to sext or bitcast
+ Type *Ty ///< The type to sext or bitcast C to
+ );
+
+ /// @brief Create a Trunc or BitCast cast constant expression
+ static Constant *getTruncOrBitCast(
+ Constant *C, ///< The constant to trunc or bitcast
+ Type *Ty ///< The type to trunc or bitcast C to
+ );
+
+ /// @brief Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant
+ /// expression.
+ static Constant *getPointerCast(
+ Constant *C, ///< The pointer value to be casted (operand 0)
+ Type *Ty ///< The type to which cast should be made
+ );
+
+ /// @brief Create a BitCast or AddrSpaceCast for a pointer type depending on
+ /// the address space.
+ static Constant *getPointerBitCastOrAddrSpaceCast(
+ Constant *C, ///< The constant to addrspacecast or bitcast
+ Type *Ty ///< The type to bitcast or addrspacecast C to
+ );
+
+ /// @brief Create a ZExt, Bitcast or Trunc for integer -> integer casts
+ static Constant *getIntegerCast(
+ Constant *C, ///< The integer constant to be casted
+ Type *Ty, ///< The integer type to cast to
+ bool isSigned ///< Whether C should be treated as signed or not
+ );
+
+ /// @brief Create a FPExt, Bitcast or FPTrunc for fp -> fp casts
+ static Constant *getFPCast(
+ Constant *C, ///< The integer constant to be casted
+ Type *Ty ///< The integer type to cast to
+ );
+
+ /// @brief Return true if this is a convert constant expression
+ bool isCast() const;
+
+ /// @brief Return true if this is a compare constant expression
+ bool isCompare() const;
+
+ /// @brief Return true if this is an insertvalue or extractvalue expression,
+ /// and the getIndices() method may be used.
+ bool hasIndices() const;
+
+ /// @brief Return true if this is a getelementptr expression and all
+ /// the index operands are compile-time known integers within the
+ /// corresponding notional static array extents. Note that this is
+ /// not equivalent to, a subset of, or a superset of the "inbounds"
+ /// property.
+ bool isGEPWithNoNotionalOverIndexing() const;
+
+ /// Select constant expr
+ ///
+ /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+ static Constant *getSelect(Constant *C, Constant *V1, Constant *V2,
+ Type *OnlyIfReducedTy = nullptr);
+
+ /// get - Return a binary or shift operator constant expression,
+ /// folding if possible.
+ ///
+ /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+ static Constant *get(unsigned Opcode, Constant *C1, Constant *C2,
+ unsigned Flags = 0, Type *OnlyIfReducedTy = nullptr);
+
+ /// \brief Return an ICmp or FCmp comparison operator constant expression.
+ ///
+ /// \param OnlyIfReduced see \a getWithOperands() docs.
+ static Constant *getCompare(unsigned short pred, Constant *C1, Constant *C2,
+ bool OnlyIfReduced = false);
+
+ /// get* - Return some common constants without having to
+ /// specify the full Instruction::OPCODE identifier.
+ ///
+ static Constant *getICmp(unsigned short pred, Constant *LHS, Constant *RHS,
+ bool OnlyIfReduced = false);
+ static Constant *getFCmp(unsigned short pred, Constant *LHS, Constant *RHS,
+ bool OnlyIfReduced = false);
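+
+ // Editor's sketch: comparisons between constants typically fold, e.g. with
+ // Two and Four the i32 constants 2 and 4 from above,
+ //   ConstantExpr::getICmp(CmpInst::ICMP_SLT, Two, Four); // yields i1 true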
+
+ /// Getelementptr form. Value* is only accepted for convenience;
+ /// all elements must be Constants.
+ ///
+ /// \param OnlyIfReducedTy see \a getWithOperands() docs.
+ static Constant *getGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList,
+ bool InBounds = false,
+ Type *OnlyIfReducedTy = nullptr) {
+ return getGetElementPtr(
+ Ty, C, makeArrayRef((Value * const *)IdxList.data(), IdxList.size()),
+ InBounds, OnlyIfReducedTy);
+ }
+ static Constant *getGetElementPtr(Type *Ty, Constant *C, Constant *Idx,
+ bool InBounds = false,
+ Type *OnlyIfReducedTy = nullptr) {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return getGetElementPtr(Ty, C, cast<Value>(Idx), InBounds, OnlyIfReducedTy);
+ }
+ static Constant *getGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList,
+ bool InBounds = false,
+ Type *OnlyIfReducedTy = nullptr);
+
+ /// Create an "inbounds" getelementptr. See the documentation for the
+ /// "inbounds" flag in LangRef.html for details.
+ static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) {
+ return getGetElementPtr(Ty, C, IdxList, true);
+ }
+ static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return getGetElementPtr(Ty, C, Idx, true);
+ }
+ static Constant *getInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) {
+ return getGetElementPtr(Ty, C, IdxList, true);
+ }
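+
+ // A hedged usage sketch (editor's illustration; GV is assumed to be a
+ // GlobalVariable whose value type ArrTy is [4 x i32]):
+ //   Constant *Zero = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
+ //   Constant *Two  = ConstantInt::get(Type::getInt32Ty(Ctx), 2);
+ //   Constant *Idxs[] = {Zero, Two};
+ //   Constant *Elt =
+ //       ConstantExpr::getInBoundsGetElementPtr(ArrTy, GV, Idxs); // &GV[0][2]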
+
+ static Constant *getExtractElement(Constant *Vec, Constant *Idx,
+ Type *OnlyIfReducedTy = nullptr);
+ static Constant *getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx,
+ Type *OnlyIfReducedTy = nullptr);
+ static Constant *getShuffleVector(Constant *V1, Constant *V2, Constant *Mask,
+ Type *OnlyIfReducedTy = nullptr);
+ static Constant *getExtractValue(Constant *Agg, ArrayRef<unsigned> Idxs,
+ Type *OnlyIfReducedTy = nullptr);
+ static Constant *getInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> Idxs,
+ Type *OnlyIfReducedTy = nullptr);
+
+ /// getOpcode - Return the opcode at the root of this constant expression
+ unsigned getOpcode() const { return getSubclassDataFromValue(); }
+
+ /// getPredicate - Return the ICMP or FCMP predicate value. Assert if this is
+ /// not an ICMP or FCMP constant expression.
+ unsigned getPredicate() const;
+
+ /// getIndices - Assert that this is an insertvalue or extractvalue
+ /// expression and return the list of indices.
+ ArrayRef<unsigned> getIndices() const;
+
+ /// getOpcodeName - Return a string representation for an opcode.
+ const char *getOpcodeName() const;
+
+ /// getWithOperandReplaced - Return a constant expression identical to this
+ /// one, but with the specified operand set to the specified value.
+ Constant *getWithOperandReplaced(unsigned OpNo, Constant *Op) const;
+
+ /// getWithOperands - This returns the current constant expression with the
+ /// operands replaced with the specified values. The specified array must
+ /// have the same number of operands as our current one.
+ Constant *getWithOperands(ArrayRef<Constant*> Ops) const {
+ return getWithOperands(Ops, getType());
+ }
+
+ /// \brief Get the current expression with the operands replaced.
+ ///
+ /// Return the current constant expression with the operands replaced with \c
+ /// Ops and the type with \c Ty. The new operands must have the same number
+ /// as the current ones.
+ ///
+ /// If \c OnlyIfReduced is \c true, nullptr will be returned unless something
+ /// gets constant-folded, the type changes, or the expression is otherwise
+ /// canonicalized. This parameter should almost always be \c false.
+ Constant *getWithOperands(ArrayRef<Constant *> Ops, Type *Ty,
+ bool OnlyIfReduced = false,
+ Type *SrcTy = nullptr) const;
+
+ /// getAsInstruction - Returns an Instruction which implements the same
+ /// operation as this ConstantExpr. The instruction is not linked to any basic
+ /// block.
+ ///
+ /// A better approach to this could be to have a constructor for Instruction
+ /// which would take a ConstantExpr parameter, but that would have spread
+ /// implementation details of ConstantExpr outside of Constants.cpp, which
+ /// would make it harder to remove ConstantExprs altogether.
+ Instruction *getAsInstruction();
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == ConstantExprVal;
+ }
+
+private:
+ // Shadow Value::setValueSubclassData with a private forwarding method so that
+ // subclasses cannot accidentally use it.
+ void setValueSubclassData(unsigned short D) {
+ Value::setValueSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<ConstantExpr> :
+ public VariadicOperandTraits<ConstantExpr, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant)
+
+//===----------------------------------------------------------------------===//
+/// UndefValue - 'undef' values are things that do not have specified contents.
+/// These are used for a variety of purposes, including global variable
+/// initializers and operands to instructions. 'undef' values can occur with
+/// any first-class type.
+///
+/// Undef values aren't exactly constants; if they have multiple uses, they
+/// can appear to have different bit patterns at each use. See
+/// LangRef.html#undefvalues for details.
+///
+class UndefValue : public Constant {
+ void *operator new(size_t, unsigned) = delete;
+ UndefValue(const UndefValue &) = delete;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ explicit UndefValue(Type *T) : Constant(T, UndefValueVal, nullptr, 0) {}
+
+ // Allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+public:
+ /// get() - Static factory methods - Return an 'undef' object of the specified
+ /// type.
+ ///
+ static UndefValue *get(Type *T);
+
+ /// getSequentialElement - If this undef has array or vector type, return an
+ /// undef with the right element type.
+ UndefValue *getSequentialElement() const;
+
+ /// getStructElement - If this undef has struct type, return an undef with the
+ /// right element type for the specified element.
+ UndefValue *getStructElement(unsigned Elt) const;
+
+ /// getElementValue - Return an undef of the right value for the specified GEP
+ /// index.
+ UndefValue *getElementValue(Constant *C) const;
+
+ /// getElementValue - Return an undef of the right value for the specified GEP
+ /// index.
+ UndefValue *getElementValue(unsigned Idx) const;
+
+ /// \brief Return the number of elements in the array, vector, or struct.
+ unsigned getNumElements() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == UndefValueVal;
+ }
+};
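+
+// Editor's sketch: an 'undef' of any first-class type comes from the factory
+// method, e.g.
+//   UndefValue *U = UndefValue::get(Type::getInt32Ty(Ctx)); // i32 undef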
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DIBuilder.h b/contrib/llvm/include/llvm/IR/DIBuilder.h
new file mode 100644
index 0000000..aeec395
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DIBuilder.h
@@ -0,0 +1,726 @@
+//===- DIBuilder.h - Debug Information Builder ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DIBuilder that is useful for creating debugging
+// information entries in LLVM IR form.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIBUILDER_H
+#define LLVM_IR_DIBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class Function;
+ class Module;
+ class Value;
+ class Constant;
+ class LLVMContext;
+ class StringRef;
+
+ class DIBuilder {
+ Module &M;
+ LLVMContext &VMContext;
+
+ DICompileUnit *CUNode; ///< The one compile unit created by this DIBuilder.
+ Function *DeclareFn; ///< llvm.dbg.declare
+ Function *ValueFn; ///< llvm.dbg.value
+
+ SmallVector<Metadata *, 4> AllEnumTypes;
+ /// Track the RetainTypes, since they can be updated later on.
+ SmallVector<TrackingMDNodeRef, 4> AllRetainTypes;
+ SmallVector<Metadata *, 4> AllSubprograms;
+ SmallVector<Metadata *, 4> AllGVs;
+ SmallVector<TrackingMDNodeRef, 4> AllImportedModules;
+
+ /// Track nodes that may be unresolved.
+ SmallVector<TrackingMDNodeRef, 4> UnresolvedNodes;
+ bool AllowUnresolvedNodes;
+
+ /// Each subprogram's preserved local variables.
+ DenseMap<MDNode *, std::vector<TrackingMDNodeRef>> PreservedVariables;
+
+ DIBuilder(const DIBuilder &) = delete;
+ void operator=(const DIBuilder &) = delete;
+
+ /// Create a temporary.
+ ///
+ /// Create a temporary node and track it in \a UnresolvedNodes.
+ void trackIfUnresolved(MDNode *N);
+
+ public:
+ /// Construct a builder for a module.
+ ///
+ /// If \c AllowUnresolved, collect unresolved nodes attached to the module
+ /// in order to resolve cycles during \a finalize().
+ explicit DIBuilder(Module &M, bool AllowUnresolved = true);
+ enum DebugEmissionKind { FullDebug=1, LineTablesOnly };
+
+ /// Construct any deferred debug info descriptors.
+ void finalize();
+
+ /// A CompileUnit provides an anchor for all debugging
+ /// information generated during this instance of compilation.
+ /// \param Lang Source programming language, e.g. dwarf::DW_LANG_C99
+ /// \param File File name
+ /// \param Dir Directory
+ /// \param Producer Identify the producer of debugging information
+ /// and code. Usually this is a compiler
+ /// version string.
+ /// \param isOptimized A boolean flag which indicates whether optimization
+ /// is enabled or not.
+ /// \param Flags This string lists command line options. This
+ /// string is directly embedded in debug info
+ /// output which may be used by a tool
+ /// analyzing generated debugging information.
+ /// \param RV This indicates runtime version for languages like
+ /// Objective-C.
+ /// \param SplitName The name of the file that we'll split debug info
+ /// out into.
+ /// \param Kind The kind of debug information to generate.
+ /// \param DWOId The DWOId if this is a split skeleton compile unit.
+ /// \param EmitDebugInfo A boolean flag which indicates whether
+ /// debug information should be written to
+ /// the final output or not. When this is
+ /// false, debug information annotations will
+ /// be present in the IL but they are not
+ /// written to the final assembly or object
+ /// file. This supports tracking source
+ /// location information in the back end
+ /// without actually changing the output
+ /// (e.g., when using optimization remarks).
+ DICompileUnit *
+ createCompileUnit(unsigned Lang, StringRef File, StringRef Dir,
+ StringRef Producer, bool isOptimized, StringRef Flags,
+ unsigned RV, StringRef SplitName = StringRef(),
+ DebugEmissionKind Kind = FullDebug, uint64_t DWOId = 0,
+ bool EmitDebugInfo = true);
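+
+ // A minimal usage sketch (editor's illustration; M is assumed to be a valid
+ // Module):
+ //   DIBuilder DIB(M);
+ //   DICompileUnit *CU = DIB.createCompileUnit(
+ //       dwarf::DW_LANG_C99, "a.c", "/tmp", "my compiler",
+ //       /*isOptimized=*/false, /*Flags=*/"", /*RV=*/0);
+ //   ...                // emit types, functions, and variables
+ //   DIB.finalize();    // resolve deferred nodes when done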
+
+ /// Create a file descriptor to hold debugging information
+ /// for a file.
+ DIFile *createFile(StringRef Filename, StringRef Directory);
+
+ /// Create a single enumerator value.
+ DIEnumerator *createEnumerator(StringRef Name, int64_t Val);
+
+ /// Create a DWARF unspecified type.
+ DIBasicType *createUnspecifiedType(StringRef Name);
+
+ /// Create C++11 nullptr type.
+ DIBasicType *createNullPtrType();
+
+ /// Create debugging information entry for a basic
+ /// type.
+ /// \param Name Type name.
+ /// \param SizeInBits Size of the type.
+ /// \param AlignInBits Type alignment.
+ /// \param Encoding DWARF encoding code, e.g. dwarf::DW_ATE_float.
+ DIBasicType *createBasicType(StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding);
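+
+ // For example (editor's sketch, reusing the DIBuilder DIB from above):
+ //   DIBasicType *IntTy =
+ //       DIB.createBasicType("int", 32, 32, dwarf::DW_ATE_signed);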
+
+ /// Create debugging information entry for a qualified
+ /// type, e.g. 'const int'.
+ /// \param Tag Tag identifying the type, e.g. dwarf::TAG_volatile_type
+ /// \param FromTy Base Type.
+ DIDerivedType *createQualifiedType(unsigned Tag, DIType *FromTy);
+
+ /// Create debugging information entry for a pointer.
+ /// \param PointeeTy Type pointed by this pointer.
+ /// \param SizeInBits Size.
+ /// \param AlignInBits Alignment. (optional)
+ /// \param Name Pointer type name. (optional)
+ DIDerivedType *createPointerType(DIType *PointeeTy, uint64_t SizeInBits,
+ uint64_t AlignInBits = 0,
+ StringRef Name = "");
+
+ /// Create debugging information entry for a pointer to member.
+ /// \param PointeeTy Type pointed to by this pointer.
+ /// \param SizeInBits Size.
+ /// \param AlignInBits Alignment. (optional)
+ /// \param Class Type whose members this pointer points to.
+ DIDerivedType *createMemberPointerType(DIType *PointeeTy, DIType *Class,
+ uint64_t SizeInBits,
+ uint64_t AlignInBits = 0);
+
+ /// Create debugging information entry for a c++
+ /// style reference or rvalue reference type.
+ DIDerivedType *createReferenceType(unsigned Tag, DIType *RTy,
+ uint64_t SizeInBits = 0,
+ uint64_t AlignInBits = 0);
+
+ /// Create debugging information entry for a typedef.
+ /// \param Ty Original type.
+ /// \param Name Typedef name.
+ /// \param File File where this type is defined.
+ /// \param LineNo Line number.
+ /// \param Context The surrounding context for the typedef.
+ DIDerivedType *createTypedef(DIType *Ty, StringRef Name, DIFile *File,
+ unsigned LineNo, DIScope *Context);
+
+ /// Create debugging information entry for a 'friend'.
+ DIDerivedType *createFriend(DIType *Ty, DIType *FriendTy);
+
+ /// Create debugging information entry to establish
+ /// inheritance relationship between two types.
+ /// \param Ty Original type.
+ /// \param BaseTy Base type. Ty inherits from BaseTy.
+ /// \param BaseOffset Base offset.
+ /// \param Flags Flags to describe inheritance attribute,
+ /// e.g. private
+ DIDerivedType *createInheritance(DIType *Ty, DIType *BaseTy,
+ uint64_t BaseOffset, unsigned Flags);
+
+ /// Create debugging information entry for a member.
+ /// \param Scope Member scope.
+ /// \param Name Member name.
+ /// \param File File where this member is defined.
+ /// \param LineNo Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param OffsetInBits Member offset.
+ /// \param Flags Flags to encode member attribute, e.g. private
+ /// \param Ty Parent type.
+ DIDerivedType *createMemberType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType *Ty);
+
+ /// Create debugging information entry for a
+ /// C++ static data member.
+ /// \param Scope Member scope.
+ /// \param Name Member name.
+ /// \param File File where this member is declared.
+ /// \param LineNo Line number.
+ /// \param Ty Type of the static member.
+ /// \param Flags Flags to encode member attribute, e.g. private.
+ /// \param Val Const initializer of the member.
+ DIDerivedType *createStaticMemberType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIType *Ty, unsigned Flags,
+ llvm::Constant *Val);
+
+ /// Create debugging information entry for Objective-C
+ /// instance variable.
+ /// \param Name Member name.
+ /// \param File File where this member is defined.
+ /// \param LineNo Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param OffsetInBits Member offset.
+ /// \param Flags Flags to encode member attribute, e.g. private
+ /// \param Ty Parent type.
+ /// \param PropertyNode Property associated with this ivar.
+ DIDerivedType *createObjCIVar(StringRef Name, DIFile *File, unsigned LineNo,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType *Ty, MDNode *PropertyNode);
+
+ /// Create debugging information entry for Objective-C
+ /// property.
+ /// \param Name Property name.
+ /// \param File File where this property is defined.
+ /// \param LineNumber Line number.
+ /// \param GetterName Name of the Objective-C property getter selector.
+ /// \param SetterName Name of the Objective-C property setter selector.
+ /// \param PropertyAttributes Objective-C property attributes.
+ /// \param Ty Type.
+ DIObjCProperty *createObjCProperty(StringRef Name, DIFile *File,
+ unsigned LineNumber,
+ StringRef GetterName,
+ StringRef SetterName,
+ unsigned PropertyAttributes, DIType *Ty);
+
+ /// Create debugging information entry for a class.
+ /// \param Scope Scope in which this class is defined.
+ /// \param Name class name.
+ /// \param File File where this member is defined.
+ /// \param LineNumber Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param OffsetInBits Member offset.
+ /// \param Flags Flags to encode member attribute, e.g. private
+ /// \param Elements class members.
+ /// \param VTableHolder Debug info of the base class that contains vtable
+ /// for this type. This is used in
+ /// DW_AT_containing_type. See DWARF documentation
+ /// for more info.
+ /// \param TemplateParms Template type parameters.
+ /// \param UniqueIdentifier A unique identifier for the class.
+ DICompositeType *createClassType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ DIType *DerivedFrom, DINodeArray Elements,
+ DIType *VTableHolder = nullptr,
+ MDNode *TemplateParms = nullptr,
+ StringRef UniqueIdentifier = "");
+
+ /// Create debugging information entry for a struct.
+ /// \param Scope Scope in which this struct is defined.
+ /// \param Name Struct name.
+ /// \param File File where this member is defined.
+ /// \param LineNumber Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param Flags Flags to encode member attribute, e.g. private
+ /// \param Elements Struct elements.
+ /// \param RunTimeLang Optional parameter, Objective-C runtime version.
+ /// \param UniqueIdentifier A unique identifier for the struct.
+ DICompositeType *createStructType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, unsigned Flags,
+ DIType *DerivedFrom, DINodeArray Elements, unsigned RunTimeLang = 0,
+ DIType *VTableHolder = nullptr, StringRef UniqueIdentifier = "");
+
+ /// Create debugging information entry for a union.
+ /// \param Scope Scope in which this union is defined.
+ /// \param Name Union name.
+ /// \param File File where this member is defined.
+ /// \param LineNumber Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param Flags Flags to encode member attribute, e.g. private
+ /// \param Elements Union elements.
+ /// \param RunTimeLang Optional parameter, Objective-C runtime version.
+ /// \param UniqueIdentifier A unique identifier for the union.
+ DICompositeType *createUnionType(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ unsigned Flags, DINodeArray Elements,
+ unsigned RunTimeLang = 0,
+ StringRef UniqueIdentifier = "");
+
+ /// Create debugging information for template
+ /// type parameter.
+ /// \param Scope Scope in which this type is defined.
+ /// \param Name Type parameter name.
+ /// \param Ty Parameter type.
+ DITemplateTypeParameter *
+ createTemplateTypeParameter(DIScope *Scope, StringRef Name, DIType *Ty);
+
+ /// Create debugging information for template
+ /// value parameter.
+ /// \param Scope Scope in which this type is defined.
+ /// \param Name Value parameter name.
+ /// \param Ty Parameter type.
+ /// \param Val Constant parameter value.
+ DITemplateValueParameter *createTemplateValueParameter(DIScope *Scope,
+ StringRef Name,
+ DIType *Ty,
+ Constant *Val);
+
+ /// Create debugging information for a template template parameter.
+ /// \param Scope Scope in which this type is defined.
+ /// \param Name Value parameter name.
+ /// \param Ty Parameter type.
+ /// \param Val The fully qualified name of the template.
+ DITemplateValueParameter *createTemplateTemplateParameter(DIScope *Scope,
+ StringRef Name,
+ DIType *Ty,
+ StringRef Val);
+
+ /// Create debugging information for a template parameter pack.
+ /// \param Scope Scope in which this type is defined.
+ /// \param Name Value parameter name.
+ /// \param Ty Parameter type.
+ /// \param Val An array of types in the pack.
+ DITemplateValueParameter *createTemplateParameterPack(DIScope *Scope,
+ StringRef Name,
+ DIType *Ty,
+ DINodeArray Val);
+
+ /// Create debugging information entry for an array.
+ /// \param Size Array size.
+ /// \param AlignInBits Alignment.
+ /// \param Ty Element type.
+ /// \param Subscripts Subscripts.
+ DICompositeType *createArrayType(uint64_t Size, uint64_t AlignInBits,
+ DIType *Ty, DINodeArray Subscripts);
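+
+ // Editor's sketch of a 10-element 'int' array (IntTy as created above):
+ //   Metadata *Subs[] = {DIB.getOrCreateSubrange(0, 10)};
+ //   DICompositeType *ArrTy =
+ //       DIB.createArrayType(10 * 32, 32, IntTy, DIB.getOrCreateArray(Subs));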
+
+ /// Create debugging information entry for a vector type.
+ /// \param Size Array size.
+ /// \param AlignInBits Alignment.
+ /// \param Ty Element type.
+ /// \param Subscripts Subscripts.
+ DICompositeType *createVectorType(uint64_t Size, uint64_t AlignInBits,
+ DIType *Ty, DINodeArray Subscripts);
+
+ /// Create debugging information entry for an
+ /// enumeration.
+ /// \param Scope Scope in which this enumeration is defined.
+ /// \param Name Enumeration name.
+ /// \param File File where this member is defined.
+ /// \param LineNumber Line number.
+ /// \param SizeInBits Member size.
+ /// \param AlignInBits Member alignment.
+ /// \param Elements Enumeration elements.
+ /// \param UnderlyingType Underlying type of a C++11/ObjC fixed enum.
+ /// \param UniqueIdentifier A unique identifier for the enum.
+ DICompositeType *createEnumerationType(
+ DIScope *Scope, StringRef Name, DIFile *File, unsigned LineNumber,
+ uint64_t SizeInBits, uint64_t AlignInBits, DINodeArray Elements,
+ DIType *UnderlyingType, StringRef UniqueIdentifier = "");
+
+ /// Create subroutine type.
+ /// \param ParameterTypes An array of subroutine parameter types. This
+ /// includes the return type at the 0th index.
+ /// \param Flags E.g.: LValueReference.
+ /// These flags are used to emit dwarf attributes.
+ DISubroutineType *createSubroutineType(DITypeRefArray ParameterTypes,
+ unsigned Flags = 0);
+
+ /// Create an external type reference.
+ /// \param Tag Dwarf TAG.
+ /// \param File File in which the type is defined.
+ /// \param UniqueIdentifier A unique identifier for the type.
+ DICompositeType *createExternalTypeRef(unsigned Tag, DIFile *File,
+ StringRef UniqueIdentifier);
+
+ /// Create a new DIType* with "artificial" flag set.
+ DIType *createArtificialType(DIType *Ty);
+
+ /// Create a new DIType* with the "object pointer"
+ /// flag set.
+ DIType *createObjectPointerType(DIType *Ty);
+
+ /// Create a permanent forward-declared type.
+ DICompositeType *createForwardDecl(unsigned Tag, StringRef Name,
+ DIScope *Scope, DIFile *F, unsigned Line,
+ unsigned RuntimeLang = 0,
+ uint64_t SizeInBits = 0,
+ uint64_t AlignInBits = 0,
+ StringRef UniqueIdentifier = "");
+
+ /// Create a temporary forward-declared type.
+ DICompositeType *createReplaceableCompositeType(
+ unsigned Tag, StringRef Name, DIScope *Scope, DIFile *F, unsigned Line,
+ unsigned RuntimeLang = 0, uint64_t SizeInBits = 0,
+ uint64_t AlignInBits = 0, unsigned Flags = DINode::FlagFwdDecl,
+ StringRef UniqueIdentifier = "");
+
+ /// Retain DIType* in a module even if it is not referenced
+ /// through debug info anchors.
+ void retainType(DIType *T);
+
+ /// Create unspecified parameter type
+ /// for a subroutine type.
+ DIBasicType *createUnspecifiedParameter();
+
+ /// Get a DINodeArray, create one if required.
+ DINodeArray getOrCreateArray(ArrayRef<Metadata *> Elements);
+
+ /// Get a DITypeRefArray, create one if required.
+ DITypeRefArray getOrCreateTypeArray(ArrayRef<Metadata *> Elements);
+
+ /// Create a descriptor for a value range. This
+ /// implicitly uniques the values returned.
+ DISubrange *getOrCreateSubrange(int64_t Lo, int64_t Count);
+
+ /// Create a new descriptor for the specified
+ /// variable.
+ /// \param Context Variable scope.
+ /// \param Name Name of the variable.
+ /// \param LinkageName Mangled name of the variable.
+ /// \param File File where this variable is defined.
+ /// \param LineNo Line number.
+ /// \param Ty Variable Type.
+ /// \param isLocalToUnit Boolean flag indicating whether this variable is
+ /// externally visible or not.
+ /// \param Val llvm::Value of the variable.
+ /// \param Decl Reference to the corresponding declaration.
+ DIGlobalVariable *createGlobalVariable(DIScope *Context, StringRef Name,
+ StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DIType *Ty,
+ bool isLocalToUnit,
+ llvm::Constant *Val,
+ MDNode *Decl = nullptr);
+
+ /// Identical to createGlobalVariable
+ /// except that the resulting DbgNode is temporary and meant to be RAUWed.
+ DIGlobalVariable *createTempGlobalVariableFwdDecl(
+ DIScope *Context, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DIType *Ty, bool isLocalToUnit, llvm::Constant *Val,
+ MDNode *Decl = nullptr);
+
+ /// Create a new descriptor for an auto variable. This is a local variable
+ /// that is not a subprogram parameter.
+ ///
+ /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
+ /// leads to a \a DISubprogram.
+ ///
+ /// If \c AlwaysPreserve, this variable will be referenced from its
+ /// containing subprogram, and will survive some optimizations.
+ DILocalVariable *createAutoVariable(DIScope *Scope, StringRef Name,
+ DIFile *File, unsigned LineNo,
+ DIType *Ty,
+ bool AlwaysPreserve = false,
+ unsigned Flags = 0);
+
+ /// Create a new descriptor for a parameter variable.
+ ///
+ /// \c Scope must be a \a DILocalScope, and thus its scope chain eventually
+ /// leads to a \a DISubprogram.
+ ///
+ /// \c ArgNo is the index (starting from \c 1) of this variable in the
+ /// subprogram parameters. \c ArgNo should not conflict with other
+ /// parameters of the same subprogram.
+ ///
+ /// If \c AlwaysPreserve, this variable will be referenced from its
+ /// containing subprogram, and will survive some optimizations.
+ DILocalVariable *createParameterVariable(DIScope *Scope, StringRef Name,
+ unsigned ArgNo, DIFile *File,
+ unsigned LineNo, DIType *Ty,
+ bool AlwaysPreserve = false,
+ unsigned Flags = 0);
+
+ /// Create a new descriptor for the specified
+ /// variable which has a complex address expression for its address.
+ /// \param Addr An array of complex address operations.
+ DIExpression *createExpression(ArrayRef<uint64_t> Addr = None);
+ DIExpression *createExpression(ArrayRef<int64_t> Addr);
+
+ /// Create a descriptor to describe one part
+ /// of an aggregate variable that is fragmented across multiple Values.
+ ///
+ /// \param OffsetInBits Offset of the piece in bits.
+ /// \param SizeInBits Size of the piece in bits.
+ DIExpression *createBitPieceExpression(unsigned OffsetInBits,
+ unsigned SizeInBits);
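+
+ // For instance (editor's sketch), the low 32 bits of a 64-bit variable
+ // scattered across two locations would be described by:
+ //   DIExpression *Piece =
+ //       DIB.createBitPieceExpression(/*OffsetInBits=*/0, /*SizeInBits=*/32);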
+
+ /// Create a new descriptor for the specified subprogram.
+ /// See comments in DISubprogram* for descriptions of these fields.
+ /// \param Scope Function scope.
+ /// \param Name Function name.
+ /// \param LinkageName Mangled function name.
+ /// \param File File where this variable is defined.
+ /// \param LineNo Line number.
+ /// \param Ty Function type.
+ /// \param isLocalToUnit True if this function is not externally visible.
+ /// \param isDefinition True if this is a function definition.
+ /// \param ScopeLine Set to the beginning of the scope this starts
+ /// \param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit dwarf attributes.
+ /// \param isOptimized True if optimization is ON.
+ /// \param TParams Function template parameters.
+ DISubprogram *createFunction(DIScope *Scope, StringRef Name,
+ StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty,
+ bool isLocalToUnit, bool isDefinition,
+ unsigned ScopeLine, unsigned Flags = 0,
+ bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr,
+ DISubprogram *Decl = nullptr);
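+
+ // A hedged sketch (editor's illustration; File and FnTy are assumed to come
+ // from earlier createFile/createSubroutineType calls):
+ //   DISubprogram *SP = DIB.createFunction(
+ //       File, "main", /*LinkageName=*/"main", File, /*LineNo=*/1, FnTy,
+ //       /*isLocalToUnit=*/false, /*isDefinition=*/true, /*ScopeLine=*/1);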
+
+ /// Identical to createFunction,
+ /// except that the resulting DbgNode is meant to be RAUWed.
+ DISubprogram *createTempFunctionFwdDecl(
+ DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned ScopeLine, unsigned Flags = 0,
+ bool isOptimized = false, DITemplateParameterArray TParams = nullptr,
+ DISubprogram *Decl = nullptr);
+
+ /// FIXME: this is added for dragonegg. Once we update dragonegg to call the
+ /// resolve function, this will be removed.
+ DISubprogram *createFunction(DIScopeRef Scope, StringRef Name,
+ StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty,
+ bool isLocalToUnit, bool isDefinition,
+ unsigned ScopeLine, unsigned Flags = 0,
+ bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr,
+ DISubprogram *Decl = nullptr);
+
+ /// Create a new descriptor for the specified C++ method.
+ /// See comments in \a DISubprogram* for descriptions of these fields.
+ /// \param Scope Function scope.
+ /// \param Name Function name.
+ /// \param LinkageName Mangled function name.
+ /// \param File File where this variable is defined.
+ /// \param LineNo Line number.
+ /// \param Ty Function type.
+ /// \param isLocalToUnit True if this function is not externally visible.
+ /// \param isDefinition True if this is a function definition.
+ /// \param Virtuality Attributes describing virtualness, e.g. pure
+ /// virtual function.
+ /// \param VTableIndex Index no of this method in virtual table.
+ /// \param VTableHolder Type that holds vtable.
+ /// \param Flags e.g. is this function prototyped or not.
+ /// These flags are used to emit dwarf attributes.
+ /// \param isOptimized True if optimization is ON.
+ /// \param TParams Function template parameters.
+ DISubprogram *
+ createMethod(DIScope *Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned LineNo, DISubroutineType *Ty,
+ bool isLocalToUnit, bool isDefinition, unsigned Virtuality = 0,
+ unsigned VTableIndex = 0, DIType *VTableHolder = nullptr,
+ unsigned Flags = 0, bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr);
+
+ /// This creates a new descriptor for a namespace with the specified
+ /// parent scope.
+ /// \param Scope Namespace scope
+ /// \param Name Name of this namespace
+ /// \param File Source file
+ /// \param LineNo Line number
+ DINamespace *createNameSpace(DIScope *Scope, StringRef Name, DIFile *File,
+ unsigned LineNo);
+
+ /// This creates a new descriptor for a module with the specified
+ /// parent scope.
+ /// \param Scope Parent scope
+ /// \param Name Name of this module
+ /// \param ConfigurationMacros
+ /// A space-separated shell-quoted list of -D macro
+ /// definitions as they would appear on a command line.
+ /// \param IncludePath The path to the module map file.
+ /// \param ISysRoot The clang system root (value of -isysroot).
+ DIModule *createModule(DIScope *Scope, StringRef Name,
+ StringRef ConfigurationMacros,
+ StringRef IncludePath,
+ StringRef ISysRoot);
+
+ /// This creates a descriptor for a lexical block with a new file
+ /// attached. This merely extends the existing
+ /// lexical block as it crosses a file.
+ /// \param Scope Lexical block.
+ /// \param File Source file.
+ /// \param Discriminator DWARF path discriminator value.
+ DILexicalBlockFile *createLexicalBlockFile(DIScope *Scope, DIFile *File,
+ unsigned Discriminator = 0);
+
+ /// This creates a descriptor for a lexical block with the
+ /// specified parent context.
+ /// \param Scope Parent lexical scope.
+ /// \param File Source file.
+ /// \param Line Line number.
+ /// \param Col Column number.
+ DILexicalBlock *createLexicalBlock(DIScope *Scope, DIFile *File,
+ unsigned Line, unsigned Col);
+
+ /// Create a descriptor for an imported module.
+ /// \param Context The scope this module is imported into
+ /// \param NS The namespace being imported here
+ /// \param Line Line number
+ DIImportedEntity *createImportedModule(DIScope *Context, DINamespace *NS,
+ unsigned Line);
+
+ /// Create a descriptor for an imported module.
+ /// \param Context The scope this module is imported into
+ /// \param NS An aliased namespace
+ /// \param Line Line number
+ DIImportedEntity *createImportedModule(DIScope *Context,
+ DIImportedEntity *NS, unsigned Line);
+
+ /// Create a descriptor for an imported module.
+ /// \param Context The scope this module is imported into
+ /// \param M The module being imported here
+ /// \param Line Line number
+ DIImportedEntity *createImportedModule(DIScope *Context, DIModule *M,
+ unsigned Line);
+
+ /// Create a descriptor for an imported function.
+ /// \param Context The scope this module is imported into
+ /// \param Decl The declaration (or definition) of a function, type, or
+ /// variable
+ /// \param Line Line number
+ DIImportedEntity *createImportedDeclaration(DIScope *Context, DINode *Decl,
+ unsigned Line,
+ StringRef Name = "");
+
+ /// Insert a new llvm.dbg.declare intrinsic call.
+ /// \param Storage llvm::Value of the variable
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertAtEnd Location for the new intrinsic.
+ Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ BasicBlock *InsertAtEnd);
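+
+ // A hedged end-to-end sketch (editor's illustration; Alloca, SP, IntTy, BB,
+ // and Ctx are assumed to exist):
+ //   DILocalVariable *Var =
+ //       DIB.createAutoVariable(SP, "x", File, /*LineNo=*/3, IntTy);
+ //   DIB.insertDeclare(Alloca, Var, DIB.createExpression(),
+ //                     DILocation::get(Ctx, 3, 1, SP), BB);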
+
+ /// Insert a new llvm.dbg.declare intrinsic call.
+ /// \param Storage llvm::Value of the variable
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertBefore Location for the new intrinsic.
+ Instruction *insertDeclare(llvm::Value *Storage, DILocalVariable *VarInfo,
+ DIExpression *Expr, const DILocation *DL,
+ Instruction *InsertBefore);
+
+ /// Insert a new llvm.dbg.value intrinsic call.
+ /// \param Val llvm::Value of the variable
+ /// \param Offset Offset
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertAtEnd Location for the new intrinsic.
+ Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ BasicBlock *InsertAtEnd);
+
+ /// Insert a new llvm.dbg.value intrinsic call.
+ /// \param Val llvm::Value of the variable
+ /// \param Offset Offset
+ /// \param VarInfo Variable's debug info descriptor.
+ /// \param Expr A complex location expression.
+ /// \param DL Debug info location.
+ /// \param InsertBefore Location for the new intrinsic.
+ Instruction *insertDbgValueIntrinsic(llvm::Value *Val, uint64_t Offset,
+ DILocalVariable *VarInfo,
+ DIExpression *Expr,
+ const DILocation *DL,
+ Instruction *InsertBefore);
+
+ /// Replace the vtable holder in the given composite type.
+ ///
+ /// If this creates a self reference, it may orphan some unresolved cycles
+ /// in the operands of \c T, so \a DIBuilder needs to track that.
+ void replaceVTableHolder(DICompositeType *&T,
+ DICompositeType *VTableHolder);
+
+ /// Replace arrays on a composite type.
+ ///
+ /// If \c T is resolved, but the arrays aren't -- which can happen if \c T
+ /// has a self-reference -- \a DIBuilder needs to track the array to
+ /// resolve cycles.
+ void replaceArrays(DICompositeType *&T, DINodeArray Elements,
+ DINodeArray TParams = DINodeArray());
+
+ /// Replace a temporary node.
+ ///
+ /// Call \a MDNode::replaceAllUsesWith() on \c N, replacing it with \c
+ /// Replacement.
+ ///
+ /// If \c Replacement is the same as \c N.get(), instead call \a
+ /// MDNode::replaceWithUniqued(). In this case, the uniqued node could
+ /// have a different address, so we return the final address.
+ template <class NodeTy>
+ NodeTy *replaceTemporary(TempMDNode &&N, NodeTy *Replacement) {
+ if (N.get() == Replacement)
+ return cast<NodeTy>(MDNode::replaceWithUniqued(std::move(N)));
+
+ N->replaceAllUsesWith(Replacement);
+ return Replacement;
+ }
+ };
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DataLayout.h b/contrib/llvm/include/llvm/IR/DataLayout.h
new file mode 100644
index 0000000..19a3a66
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DataLayout.h
@@ -0,0 +1,554 @@
+//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines layout properties related to datatype size/offset/alignment
+// information. It uses lazy annotations to cache information about how
+// structure types are laid out and used.
+//
+// This structure should be created once, filled in if the defaults are not
+// correct, and then passed around by const&. None of the member functions
+// require modification of the object.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DATALAYOUT_H
+#define LLVM_IR_DATALAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/DataTypes.h"
+
+// This needs to be outside of the namespace, to avoid conflict with llvm-c
+// decl.
+typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
+
+namespace llvm {
+
+class Value;
+class Type;
+class IntegerType;
+class StructType;
+class StructLayout;
+class Triple;
+class GlobalVariable;
+class LLVMContext;
+template<typename T>
+class ArrayRef;
+
+/// Enum used to categorize the alignment types stored by LayoutAlignElem
+enum AlignTypeEnum {
+ INVALID_ALIGN = 0,
+ INTEGER_ALIGN = 'i',
+ VECTOR_ALIGN = 'v',
+ FLOAT_ALIGN = 'f',
+ AGGREGATE_ALIGN = 'a'
+};
+
+// FIXME: Currently the DataLayout string carries a "preferred alignment"
+// for types. As the DataLayout is module/global, this should likely be
+// sunk down to an FTTI element that is queried rather than a global
+// preference.
+
+/// \brief Layout alignment element.
+///
+/// Stores the alignment data associated with a given alignment type (integer,
+/// vector, float) and type bit width.
+///
+/// \note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct LayoutAlignElem {
+ /// \brief Alignment type from \c AlignTypeEnum
+ unsigned AlignType : 8;
+ unsigned TypeBitWidth : 24;
+ unsigned ABIAlign : 16;
+ unsigned PrefAlign : 16;
+
+ static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
+ bool operator==(const LayoutAlignElem &rhs) const;
+};
+
+/// \brief Layout pointer alignment element.
+///
+/// Stores the alignment data associated with a given pointer and address space.
+///
+/// \note The unusual order of elements in the structure attempts to reduce
+/// padding and make the structure slightly more cache friendly.
+struct PointerAlignElem {
+ unsigned ABIAlign;
+ unsigned PrefAlign;
+ uint32_t TypeByteWidth;
+ uint32_t AddressSpace;
+
+ /// Initializer
+ static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth);
+ bool operator==(const PointerAlignElem &rhs) const;
+};
+
+/// \brief A parsed version of the target data layout string and methods for
+/// querying it.
+///
+/// The target data layout string is specified *by the target* - a frontend
+/// generating LLVM IR is required to generate the right target data for the
+/// target being codegen'd to.
+class DataLayout {
+private:
+ /// Defaults to false.
+ bool BigEndian;
+
+ unsigned StackNaturalAlign;
+
+ enum ManglingModeT {
+ MM_None,
+ MM_ELF,
+ MM_MachO,
+ MM_WinCOFF,
+ MM_WinCOFFX86,
+ MM_Mips
+ };
+ ManglingModeT ManglingMode;
+
+ SmallVector<unsigned char, 8> LegalIntWidths;
+
+ /// \brief Primitive type alignment data.
+ SmallVector<LayoutAlignElem, 16> Alignments;
+
+ /// \brief The string representation used to create this DataLayout
+ std::string StringRepresentation;
+
+ typedef SmallVector<PointerAlignElem, 8> PointersTy;
+ PointersTy Pointers;
+
+ PointersTy::const_iterator
+ findPointerLowerBound(uint32_t AddressSpace) const {
+ return const_cast<DataLayout *>(this)->findPointerLowerBound(AddressSpace);
+ }
+
+ PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
+
+ /// This member is a signal that a requested alignment type and bit width were
+ /// not found in the SmallVector.
+ static const LayoutAlignElem InvalidAlignmentElem;
+
+ /// This member is a signal that a requested pointer type and bit width were
+ /// not found in the pointer alignment list.
+ static const PointerAlignElem InvalidPointerElem;
+
+ // The StructType -> StructLayout map.
+ mutable void *LayoutMap;
+
+ void setAlignment(AlignTypeEnum align_type, unsigned abi_align,
+ unsigned pref_align, uint32_t bit_width);
+ unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width,
+ bool ABIAlign, Type *Ty) const;
+ void setPointerAlignment(uint32_t AddrSpace, unsigned ABIAlign,
+ unsigned PrefAlign, uint32_t TypeByteWidth);
+
+ /// Internal helper method that returns requested alignment for type.
+ unsigned getAlignment(Type *Ty, bool abi_or_pref) const;
+
+ /// \brief Valid alignment predicate.
+ ///
+ /// Predicate that tests a LayoutAlignElem reference returned by get() against
+ /// InvalidAlignmentElem.
+ bool validAlignment(const LayoutAlignElem &align) const {
+ return &align != &InvalidAlignmentElem;
+ }
+
+ /// \brief Valid pointer predicate.
+ ///
+ /// Predicate that tests a PointerAlignElem reference returned by get()
+ /// against \c InvalidPointerElem.
+ bool validPointer(const PointerAlignElem &align) const {
+ return &align != &InvalidPointerElem;
+ }
+
+ /// Parses a target data specification string. Asserts if the string is
+ /// malformed.
+ void parseSpecifier(StringRef LayoutDescription);
+
+ // Free all internal data structures.
+ void clear();
+
+public:
+ /// Constructs a DataLayout from a specification string. See reset().
+ explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
+ reset(LayoutDescription);
+ }
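+
+ // Illustrative (editor's addition): a layout can be parsed directly from a
+ // target string, e.g. the common x86-64 Linux layout:
+ //   DataLayout DL("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
+ //   assert(DL.isLittleEndian() && DL.getPointerSize() == 8);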
+
+ /// Initialize target data from properties stored in the module.
+ explicit DataLayout(const Module *M);
+
+ void init(const Module *M);
+
+ DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }
+
+ DataLayout &operator=(const DataLayout &DL) {
+ clear();
+ StringRepresentation = DL.StringRepresentation;
+ BigEndian = DL.isBigEndian();
+ StackNaturalAlign = DL.StackNaturalAlign;
+ ManglingMode = DL.ManglingMode;
+ LegalIntWidths = DL.LegalIntWidths;
+ Alignments = DL.Alignments;
+ Pointers = DL.Pointers;
+ return *this;
+ }
+
+ bool operator==(const DataLayout &Other) const;
+ bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
+
+ ~DataLayout(); // Not virtual, do not subclass this class
+
+ /// Parse a data layout string (with fallback to default values).
+ void reset(StringRef LayoutDescription);
+
+ /// Layout endianness...
+ bool isLittleEndian() const { return !BigEndian; }
+ bool isBigEndian() const { return BigEndian; }
+
+ /// \brief Returns the string representation of the DataLayout.
+ ///
+ /// This representation is in the same format accepted by the string
+ /// constructor above. It should not be used to compare two DataLayouts, as
+ /// different strings can represent the same layout.
+ const std::string &getStringRepresentation() const {
+ return StringRepresentation;
+ }
+
+ /// \brief Test if the DataLayout was constructed from an empty string.
+ bool isDefault() const { return StringRepresentation.empty(); }
+
+ /// \brief Returns true if the specified type is known to be a native integer
+ /// type supported by the CPU.
+ ///
+ /// For example, i64 is not native on most 32-bit CPUs and i37 is not native
+ /// on any known one. This returns false if the integer width is not legal.
+ ///
+ /// The width is specified in bits.
+ bool isLegalInteger(unsigned Width) const {
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (LegalIntWidth == Width)
+ return true;
+ return false;
+ }
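+
+ // Editor's note: with the x86-64 layout string above (which contains
+ // "n8:16:32:64"), isLegalInteger(64) is true while isLegalInteger(37) is
+ // false.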
+
+ bool isIllegalInteger(unsigned Width) const { return !isLegalInteger(Width); }
+
+ /// Returns true if the given alignment exceeds the natural stack alignment.
+ bool exceedsNaturalStackAlignment(unsigned Align) const {
+ return (StackNaturalAlign != 0) && (Align > StackNaturalAlign);
+ }
+
+ unsigned getStackAlignment() const { return StackNaturalAlign; }
+
+ bool hasMicrosoftFastStdCallMangling() const {
+ return ManglingMode == MM_WinCOFFX86;
+ }
+
+ bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }
+
+ const char *getLinkerPrivateGlobalPrefix() const {
+ if (ManglingMode == MM_MachO)
+ return "l";
+ return "";
+ }
+
+ char getGlobalPrefix() const {
+ switch (ManglingMode) {
+ case MM_None:
+ case MM_ELF:
+ case MM_Mips:
+ case MM_WinCOFF:
+ return '\0';
+ case MM_MachO:
+ case MM_WinCOFFX86:
+ return '_';
+ }
+ llvm_unreachable("invalid mangling mode");
+ }
+
+ const char *getPrivateGlobalPrefix() const {
+ switch (ManglingMode) {
+ case MM_None:
+ return "";
+ case MM_ELF:
+ return ".L";
+ case MM_Mips:
+ return "$";
+ case MM_MachO:
+ case MM_WinCOFF:
+ case MM_WinCOFFX86:
+ return "L";
+ }
+ llvm_unreachable("invalid mangling mode");
+ }
+
+ static const char *getManglingComponent(const Triple &T);
+
+ /// \brief Returns true if the specified type fits in a native integer type
+ /// supported by the CPU.
+ ///
+ /// For example, if the CPU only supports i32 as a native integer type, then
+ /// i27 fits in a legal integer type but i45 does not.
+ bool fitsInLegalInteger(unsigned Width) const {
+ for (unsigned LegalIntWidth : LegalIntWidths)
+ if (Width <= LegalIntWidth)
+ return true;
+ return false;
+ }
+
+ /// Layout pointer alignment
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerABIAlignment(unsigned AS = 0) const;
+
+ /// Return target's alignment for stack-based pointers
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerPrefAlignment(unsigned AS = 0) const;
+
+ /// Layout pointer size
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerSize(unsigned AS = 0) const;
+
+ /// Layout pointer size, in bits
+ /// FIXME: The defaults need to be removed once all of
+ /// the backends/clients are updated.
+ unsigned getPointerSizeInBits(unsigned AS = 0) const {
+ return getPointerSize(AS) * 8;
+ }
+
+ /// Layout pointer size, in bits, based on the type. If this function is
+ /// called with a pointer type or a vector of pointers, the size of a single
+ /// pointer is returned. This should only be called with a pointer or
+ /// vector of pointers.
+ unsigned getPointerTypeSizeInBits(Type *) const;
+
+ unsigned getPointerTypeSize(Type *Ty) const {
+ return getPointerTypeSizeInBits(Ty) / 8;
+ }
+
+ /// Size examples:
+ ///
+ /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
+ /// ---- ---------- --------------- ---------------
+ /// i1 1 8 8
+ /// i8 8 8 8
+ /// i19 19 24 32
+ /// i32 32 32 32
+ /// i100 100 104 128
+ /// i128 128 128 128
+ /// Float 32 32 32
+ /// Double 64 64 64
+ /// X86_FP80 80 80 96
+ ///
+ /// [*] The alloc size depends on the alignment, and thus on the target.
+ /// These values are for x86-32 linux.
+
+ /// \brief Returns the number of bits necessary to hold the specified type.
+ ///
+ /// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
+ /// have a size (Type::isSized() must return true).
+ uint64_t getTypeSizeInBits(Type *Ty) const;
+
+ /// \brief Returns the maximum number of bytes that may be overwritten by
+ /// storing the specified type.
+ ///
+ /// For example, returns 5 for i36 and 10 for x86_fp80.
+ uint64_t getTypeStoreSize(Type *Ty) const {
+ return (getTypeSizeInBits(Ty) + 7) / 8;
+ }
+
+ /// \brief Returns the maximum number of bits that may be overwritten by
+ /// storing the specified type; always a multiple of 8.
+ ///
+ /// For example, returns 40 for i36 and 80 for x86_fp80.
+ uint64_t getTypeStoreSizeInBits(Type *Ty) const {
+ return 8 * getTypeStoreSize(Ty);
+ }
+
+ /// \brief Returns the offset in bytes between successive objects of the
+ /// specified type, including alignment padding.
+ ///
+ /// This is the amount that alloca reserves for this type. For example,
+ /// returns 12 or 16 for x86_fp80, depending on alignment.
+ uint64_t getTypeAllocSize(Type *Ty) const {
+ // Round up to the next alignment boundary.
+ return RoundUpToAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
+ }
+
+ /// \brief Returns the offset in bits between successive objects of the
+ /// specified type, including alignment padding; always a multiple of 8.
+ ///
+ /// This is the amount that alloca reserves for this type. For example,
+ /// returns 96 or 128 for x86_fp80, depending on alignment.
+ uint64_t getTypeAllocSizeInBits(Type *Ty) const {
+ return 8 * getTypeAllocSize(Ty);
+ }
+
+ /// \brief Returns the minimum ABI-required alignment for the specified type.
+ unsigned getABITypeAlignment(Type *Ty) const;
+
+ /// \brief Returns the minimum ABI-required alignment for an integer type of
+ /// the specified bitwidth.
+ unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const;
+
+ /// \brief Returns the preferred stack/global alignment for the specified
+ /// type.
+ ///
+ /// This is always at least as good as the ABI alignment.
+ unsigned getPrefTypeAlignment(Type *Ty) const;
+
+ /// \brief Returns the preferred alignment for the specified type, returned as
+ /// log2 of the value (a shift amount).
+ unsigned getPreferredTypeAlignmentShift(Type *Ty) const;
+
+ /// \brief Returns an integer type with size at least as big as that of a
+ /// pointer in the given address space.
+ IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
+
+ /// \brief Returns an integer (vector of integer) type with size at least as
+ /// big as that of a pointer of the given pointer (vector of pointer) type.
+ Type *getIntPtrType(Type *) const;
+
+ /// \brief Returns the smallest integer type with size at least as big as
+ /// Width bits.
+ Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;
+
+ /// \brief Returns the largest legal integer type, or null if none are set.
+ Type *getLargestLegalIntType(LLVMContext &C) const {
+ unsigned LargestSize = getLargestLegalIntTypeSize();
+ return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
+ }
+
+ /// \brief Returns the size of the largest legal integer type, or 0 if none
+ /// are set.
+ unsigned getLargestLegalIntTypeSize() const;
+
+ /// \brief Returns the offset from the beginning of the type for the specified
+ /// indices.
+ ///
+ /// This is used to implement getelementptr.
+ uint64_t getIndexedOffset(Type *Ty, ArrayRef<Value *> Indices) const;
+
+ /// \brief Returns a StructLayout object, indicating the alignment of the
+ /// struct, its size, and the offsets of its fields.
+ ///
+ /// Note that this information is lazily cached.
+ const StructLayout *getStructLayout(StructType *Ty) const;
+
+ /// \brief Returns the preferred alignment of the specified global.
+ ///
+ /// This includes an explicitly requested alignment (if the global has one).
+ unsigned getPreferredAlignment(const GlobalVariable *GV) const;
+
+ /// \brief Returns the preferred alignment of the specified global, returned
+ /// in log form.
+ ///
+ /// This includes an explicitly requested alignment (if the global has one).
+ unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const;
+};
+
+inline DataLayout *unwrap(LLVMTargetDataRef P) {
+ return reinterpret_cast<DataLayout *>(P);
+}
+
+inline LLVMTargetDataRef wrap(const DataLayout *P) {
+ return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
+}
+
+/// Used to lazily calculate structure layout information for a target machine,
+/// based on the DataLayout structure.
+class StructLayout {
+ uint64_t StructSize;
+ unsigned StructAlignment;
+ bool IsPadded : 1;
+ unsigned NumElements : 31;
+ uint64_t MemberOffsets[1]; // variable sized array!
+public:
+ uint64_t getSizeInBytes() const { return StructSize; }
+
+ uint64_t getSizeInBits() const { return 8 * StructSize; }
+
+ unsigned getAlignment() const { return StructAlignment; }
+
+ /// Returns whether the struct has padding between its fields.
+ /// NB: Padding in nested elements is not taken into account.
+ bool hasPadding() const { return IsPadded; }
+
+ /// \brief Given a valid byte offset into the structure, returns the structure
+ /// index that contains it.
+ unsigned getElementContainingOffset(uint64_t Offset) const;
+
+ uint64_t getElementOffset(unsigned Idx) const {
+ assert(Idx < NumElements && "Invalid element idx!");
+ return MemberOffsets[Idx];
+ }
+
+ uint64_t getElementOffsetInBits(unsigned Idx) const {
+ return getElementOffset(Idx) * 8;
+ }
+
+private:
+ friend class DataLayout; // Only DataLayout can create this class
+ StructLayout(StructType *ST, const DataLayout &DL);
+};
+
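+// Usage sketch (assuming a DataLayout `DL` and an LLVMContext `Ctx`): query
+// the cached layout of { i8, i32 }. On typical targets i8 is padded out to
+// the i32 alignment, so expect size 8, element 1 at offset 4, and padding.
+//
+//   StructType *ST =
+//       StructType::get(Ctx, {Type::getInt8Ty(Ctx), Type::getInt32Ty(Ctx)});
+//   const StructLayout *SL = DL.getStructLayout(ST);
+//   uint64_t Size = SL->getSizeInBytes();     // 8
+//   uint64_t Off1 = SL->getElementOffset(1);  // 4
+//   bool Padded = SL->hasPadding();           // true
+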
+// The implementation of this method is provided inline as it is particularly
+// well suited to constant folding when called on a specific Type subclass.
+inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
+ assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
+ switch (Ty->getTypeID()) {
+ case Type::LabelTyID:
+ return getPointerSizeInBits(0);
+ case Type::PointerTyID:
+ return getPointerSizeInBits(Ty->getPointerAddressSpace());
+ case Type::ArrayTyID: {
+ ArrayType *ATy = cast<ArrayType>(Ty);
+ return ATy->getNumElements() *
+ getTypeAllocSizeInBits(ATy->getElementType());
+ }
+ case Type::StructTyID:
+ // Get the layout annotation... which is lazily created on demand.
+ return getStructLayout(cast<StructType>(Ty))->getSizeInBits();
+ case Type::IntegerTyID:
+ return Ty->getIntegerBitWidth();
+ case Type::HalfTyID:
+ return 16;
+ case Type::FloatTyID:
+ return 32;
+ case Type::DoubleTyID:
+ case Type::X86_MMXTyID:
+ return 64;
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID:
+ return 128;
+ // In memory objects this is always aligned to a higher boundary, but
+ // only 80 bits contain information.
+ case Type::X86_FP80TyID:
+ return 80;
+ case Type::VectorTyID: {
+ VectorType *VTy = cast<VectorType>(Ty);
+ return VTy->getNumElements() * getTypeSizeInBits(VTy->getElementType());
+ }
+ default:
+ llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
+ }
+}
+
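+// The size queries differ for padded types such as x86_fp80. A sketch,
+// assuming an x86 DataLayout `DL` and an LLVMContext `Ctx`:
+//
+//   Type *FP80 = Type::getX86_FP80Ty(Ctx);
+//   DL.getTypeSizeInBits(FP80);  // 80: bits of information
+//   DL.getTypeStoreSize(FP80);   // 10: bytes a store touches
+//   DL.getTypeAllocSize(FP80);   // 12 or 16, depending on ABI alignment
+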
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DebugInfo.h b/contrib/llvm/include/llvm/IR/DebugInfo.h
new file mode 100644
index 0000000..4caceac
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DebugInfo.h
@@ -0,0 +1,148 @@
+//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of datatypes that are useful for creating and
+// walking debug info in LLVM IR form. They essentially provide wrappers
+// around the information in the global variables that's needed when
+// constructing the DWARF information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGINFO_H
+#define LLVM_IR_DEBUGINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <iterator>
+
+namespace llvm {
+class Module;
+class DbgDeclareInst;
+class DbgValueInst;
+
+/// \brief Maps from type identifier to the actual MDNode.
+typedef DenseMap<const MDString *, DIType *> DITypeIdentifierMap;
+
+/// \brief Find the subprogram enclosing this scope.
+DISubprogram *getDISubprogram(const MDNode *Scope);
+
+/// \brief Find debug info for a given function.
+///
+/// \returns a valid subprogram, if found. Otherwise, return \c nullptr.
+DISubprogram *getDISubprogram(const Function *F);
+
+/// \brief Generate map by visiting all retained types.
+DITypeIdentifierMap generateDITypeIdentifierMap(const NamedMDNode *CU_Nodes);
+
+/// \brief Strip debug info in the module if it exists.
+///
+/// To do this, we remove all calls to the debugger intrinsics and any named
+/// metadata for debugging. We also remove debug locations for instructions.
+/// Returns true if the module is modified.
+bool StripDebugInfo(Module &M);
+bool stripDebugInfo(Function &F);
+
+/// \brief Return Debug Info Metadata Version by checking module flags.
+unsigned getDebugMetadataVersionFromModule(const Module &M);
+
+/// \brief Utility to find all debug info in a module.
+///
+/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
+/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
+/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
+/// DbgValueInst and DbgLoc attached to instructions. processModule will go
+/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
+/// used by the CUs.
+class DebugInfoFinder {
+public:
+ DebugInfoFinder() : TypeMapInitialized(false) {}
+
+ /// \brief Process entire module and collect debug info anchors.
+ void processModule(const Module &M);
+
+ /// \brief Process DbgDeclareInst.
+ void processDeclare(const Module &M, const DbgDeclareInst *DDI);
+ /// \brief Process DbgValueInst.
+ void processValue(const Module &M, const DbgValueInst *DVI);
+ /// \brief Process debug info location.
+ void processLocation(const Module &M, const DILocation *Loc);
+
+ /// \brief Clear all lists.
+ void reset();
+
+private:
+ void InitializeTypeMap(const Module &M);
+
+ void processType(DIType *DT);
+ void processSubprogram(DISubprogram *SP);
+ void processScope(DIScope *Scope);
+ bool addCompileUnit(DICompileUnit *CU);
+ bool addGlobalVariable(DIGlobalVariable *DIG);
+ bool addSubprogram(DISubprogram *SP);
+ bool addType(DIType *DT);
+ bool addScope(DIScope *Scope);
+
+public:
+ typedef SmallVectorImpl<DICompileUnit *>::const_iterator
+ compile_unit_iterator;
+ typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
+ typedef SmallVectorImpl<DIGlobalVariable *>::const_iterator
+ global_variable_iterator;
+ typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
+ typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;
+
+ iterator_range<compile_unit_iterator> compile_units() const {
+ return make_range(CUs.begin(), CUs.end());
+ }
+
+ iterator_range<subprogram_iterator> subprograms() const {
+ return make_range(SPs.begin(), SPs.end());
+ }
+
+ iterator_range<global_variable_iterator> global_variables() const {
+ return make_range(GVs.begin(), GVs.end());
+ }
+
+ iterator_range<type_iterator> types() const {
+ return make_range(TYs.begin(), TYs.end());
+ }
+
+ iterator_range<scope_iterator> scopes() const {
+ return make_range(Scopes.begin(), Scopes.end());
+ }
+
+ unsigned compile_unit_count() const { return CUs.size(); }
+ unsigned global_variable_count() const { return GVs.size(); }
+ unsigned subprogram_count() const { return SPs.size(); }
+ unsigned type_count() const { return TYs.size(); }
+ unsigned scope_count() const { return Scopes.size(); }
+
+private:
+ SmallVector<DICompileUnit *, 8> CUs;
+ SmallVector<DISubprogram *, 8> SPs;
+ SmallVector<DIGlobalVariable *, 8> GVs;
+ SmallVector<DIType *, 8> TYs;
+ SmallVector<DIScope *, 8> Scopes;
+ SmallPtrSet<const MDNode *, 64> NodesSeen;
+ DITypeIdentifierMap TypeIdentifierMap;
+
+ /// \brief Specify if TypeIdentifierMap is initialized.
+ bool TypeMapInitialized;
+};
+
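+// Usage sketch: collect all debug info reachable from a Module `M`, then
+// walk the subprograms (errs() is from llvm/Support/raw_ostream.h).
+//
+//   DebugInfoFinder Finder;
+//   Finder.processModule(M);
+//   for (DISubprogram *SP : Finder.subprograms())
+//     errs() << SP->getName() << "\n";
+//   Finder.reset();  // clear the lists before reusing the finder
+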
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DebugInfoFlags.def b/contrib/llvm/include/llvm/IR/DebugInfoFlags.def
new file mode 100644
index 0000000..9756c12
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DebugInfoFlags.def
@@ -0,0 +1,37 @@
+//===- llvm/IR/DebugInfoFlags.def - Debug info flag definitions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through debug info flags.
+//
+//===----------------------------------------------------------------------===//
+
+// TODO: Add other DW-based macros.
+#ifndef HANDLE_DI_FLAG
+#error "Missing macro definition of HANDLE_DI_FLAG"
+#endif
+
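+// Expansion sketch: clients define HANDLE_DI_FLAG and include this file, as
+// DINode::DIFlags in DebugInfoMetadata.h does:
+//
+//   #define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
+//   #include "llvm/IR/DebugInfoFlags.def"
+//
+// yielding enumerators FlagPrivate = 1, FlagProtected = 2, and so on; the
+// macro is undefined again at the end of each inclusion.
+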
+HANDLE_DI_FLAG(1, Private)
+HANDLE_DI_FLAG(2, Protected)
+HANDLE_DI_FLAG(3, Public)
+HANDLE_DI_FLAG((1 << 2), FwdDecl)
+HANDLE_DI_FLAG((1 << 3), AppleBlock)
+HANDLE_DI_FLAG((1 << 4), BlockByrefStruct)
+HANDLE_DI_FLAG((1 << 5), Virtual)
+HANDLE_DI_FLAG((1 << 6), Artificial)
+HANDLE_DI_FLAG((1 << 7), Explicit)
+HANDLE_DI_FLAG((1 << 8), Prototyped)
+HANDLE_DI_FLAG((1 << 9), ObjcClassComplete)
+HANDLE_DI_FLAG((1 << 10), ObjectPointer)
+HANDLE_DI_FLAG((1 << 11), Vector)
+HANDLE_DI_FLAG((1 << 12), StaticMember)
+HANDLE_DI_FLAG((1 << 13), LValueReference)
+HANDLE_DI_FLAG((1 << 14), RValueReference)
+HANDLE_DI_FLAG((1 << 15), ExternalTypeRef)
+
+#undef HANDLE_DI_FLAG
diff --git a/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
new file mode 100644
index 0000000..456313a
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DebugInfoMetadata.h
@@ -0,0 +1,2375 @@
+//===- llvm/IR/DebugInfoMetadata.h - Debug info metadata --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declarations for metadata specific to debug info.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGINFOMETADATA_H
+#define LLVM_IR_DEBUGINFOMETADATA_H
+
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Dwarf.h"
+
+// Helper macros for defining get() overrides.
+#define DEFINE_MDNODE_GET_UNPACK_IMPL(...) __VA_ARGS__
+#define DEFINE_MDNODE_GET_UNPACK(ARGS) DEFINE_MDNODE_GET_UNPACK_IMPL ARGS
+#define DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS) \
+ static CLASS *getDistinct(LLVMContext &Context, \
+ DEFINE_MDNODE_GET_UNPACK(FORMAL)) { \
+ return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Distinct); \
+ } \
+ static Temp##CLASS getTemporary(LLVMContext &Context, \
+ DEFINE_MDNODE_GET_UNPACK(FORMAL)) { \
+ return Temp##CLASS( \
+ getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Temporary)); \
+ }
+#define DEFINE_MDNODE_GET(CLASS, FORMAL, ARGS) \
+ static CLASS *get(LLVMContext &Context, DEFINE_MDNODE_GET_UNPACK(FORMAL)) { \
+ return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued); \
+ } \
+ static CLASS *getIfExists(LLVMContext &Context, \
+ DEFINE_MDNODE_GET_UNPACK(FORMAL)) { \
+ return getImpl(Context, DEFINE_MDNODE_GET_UNPACK(ARGS), Uniqued, \
+ /* ShouldCreate */ false); \
+ } \
+ DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(CLASS, FORMAL, ARGS)
+
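+// Expansion sketch: DEFINE_MDNODE_GET(DISubrange, (int64_t Count), (Count))
+// would generate, among other factories:
+//
+//   static DISubrange *get(LLVMContext &Context, int64_t Count) {
+//     return getImpl(Context, Count, Uniqued);
+//   }
+//   static DISubrange *getIfExists(LLVMContext &Context, int64_t Count) {
+//     return getImpl(Context, Count, Uniqued, /* ShouldCreate */ false);
+//   }
+//
+// plus getDistinct() and getTemporary() from
+// DEFINE_MDNODE_GET_DISTINCT_TEMPORARY.
+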
+namespace llvm {
+
+/// \brief Pointer union between a subclass of DINode and MDString.
+///
+/// \a DICompositeType can be referenced via an \a MDString unique identifier.
+/// This class allows some type safety in the face of that, requiring either a
+/// node of a particular type or an \a MDString.
+template <class T> class TypedDINodeRef {
+ const Metadata *MD = nullptr;
+
+public:
+ TypedDINodeRef() = default;
+ TypedDINodeRef(std::nullptr_t) {}
+
+ /// \brief Construct from a raw pointer.
+ explicit TypedDINodeRef(const Metadata *MD) : MD(MD) {
+ assert((!MD || isa<MDString>(MD) || isa<T>(MD)) && "Expected valid ref");
+ }
+
+ template <class U>
+ TypedDINodeRef(
+ const TypedDINodeRef<U> &X,
+ typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
+ nullptr)
+ : MD(X) {}
+
+ operator Metadata *() const { return const_cast<Metadata *>(MD); }
+
+ bool operator==(const TypedDINodeRef<T> &X) const { return MD == X.MD; }
+ bool operator!=(const TypedDINodeRef<T> &X) const { return MD != X.MD; }
+
+ /// \brief Create a reference.
+ ///
+ /// Get a reference to \c N, using an \a MDString reference if available.
+ static TypedDINodeRef get(const T *N);
+
+ template <class MapTy> T *resolve(const MapTy &Map) const {
+ if (!MD)
+ return nullptr;
+
+ if (auto *Typed = dyn_cast<T>(MD))
+ return const_cast<T *>(Typed);
+
+ auto *S = cast<MDString>(MD);
+ auto I = Map.find(S);
+ assert(I != Map.end() && "Missing identifier in type map");
+ return cast<T>(I->second);
+ }
+};
+
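+// Resolution sketch (assuming `CUNodes` is the module's llvm.dbg.cu named
+// metadata and `DT` is a DIDerivedType): an MDString-based ref is looked up
+// in the identifier map (see DebugInfo.h); a node ref resolves in place.
+//
+//   DITypeIdentifierMap Map = generateDITypeIdentifierMap(CUNodes);
+//   DITypeRef Ref = DT->getBaseType();
+//   DIType *Ty = Ref.resolve(Map);
+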
+typedef TypedDINodeRef<DINode> DINodeRef;
+typedef TypedDINodeRef<DIScope> DIScopeRef;
+typedef TypedDINodeRef<DIType> DITypeRef;
+
+class DITypeRefArray {
+ const MDTuple *N = nullptr;
+
+public:
+ DITypeRefArray() = default;
+ DITypeRefArray(const MDTuple *N) : N(N) {}
+
+ explicit operator bool() const { return get(); }
+ explicit operator MDTuple *() const { return get(); }
+
+ MDTuple *get() const { return const_cast<MDTuple *>(N); }
+ MDTuple *operator->() const { return get(); }
+ MDTuple &operator*() const { return *get(); }
+
+ // FIXME: Fix callers and remove condition on N.
+ unsigned size() const { return N ? N->getNumOperands() : 0u; }
+ DITypeRef operator[](unsigned I) const { return DITypeRef(N->getOperand(I)); }
+
+ class iterator : std::iterator<std::input_iterator_tag, DITypeRef,
+ std::ptrdiff_t, void, DITypeRef> {
+ MDNode::op_iterator I = nullptr;
+
+ public:
+ iterator() = default;
+ explicit iterator(MDNode::op_iterator I) : I(I) {}
+ DITypeRef operator*() const { return DITypeRef(*I); }
+ iterator &operator++() {
+ ++I;
+ return *this;
+ }
+ iterator operator++(int) {
+ iterator Temp(*this);
+ ++I;
+ return Temp;
+ }
+ bool operator==(const iterator &X) const { return I == X.I; }
+ bool operator!=(const iterator &X) const { return I != X.I; }
+ };
+
+ // FIXME: Fix callers and remove condition on N.
+ iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
+ iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
+};
+
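+// Iteration sketch (assuming a DISubroutineType `SubTy`): element 0 is the
+// return type (null for void), followed by the parameter types.
+//
+//   DITypeRefArray Types = SubTy->getTypeArray();
+//   for (DITypeRef Ref : Types)
+//     if (Metadata *MD = Ref)  // refs convert to Metadata *
+//       MD->dump();
+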
+/// \brief Tagged DWARF-like metadata node.
+///
+/// A metadata node with a DWARF tag (i.e., a constant named \c DW_TAG_*,
+/// defined in llvm/Support/Dwarf.h). Called \a DINode because it's
+/// potentially used for non-DWARF output.
+class DINode : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+protected:
+ DINode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+ ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None)
+ : MDNode(C, ID, Storage, Ops1, Ops2) {
+ assert(Tag < 1u << 16);
+ SubclassData16 = Tag;
+ }
+ ~DINode() = default;
+
+ template <class Ty> Ty *getOperandAs(unsigned I) const {
+ return cast_or_null<Ty>(getOperand(I));
+ }
+
+ StringRef getStringOperand(unsigned I) const {
+ if (auto *S = getOperandAs<MDString>(I))
+ return S->getString();
+ return StringRef();
+ }
+
+ static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
+ if (S.empty())
+ return nullptr;
+ return MDString::get(Context, S);
+ }
+
+public:
+ unsigned getTag() const { return SubclassData16; }
+
+ /// \brief Debug info flags.
+ ///
+ /// The three accessibility flags are mutually exclusive and rolled together
+ /// in the first two bits.
+ enum DIFlags {
+#define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
+#include "llvm/IR/DebugInfoFlags.def"
+ FlagAccessibility = FlagPrivate | FlagProtected | FlagPublic
+ };
+
+ static unsigned getFlag(StringRef Flag);
+ static const char *getFlagString(unsigned Flag);
+
+ /// \brief Split up a flags bitfield.
+ ///
+ /// Split \c Flags into \c SplitFlags, a vector of its components. Returns
+ /// any remaining (unrecognized) bits.
+ static unsigned splitFlags(unsigned Flags,
+ SmallVectorImpl<unsigned> &SplitFlags);
+
+ DINodeRef getRef() const { return DINodeRef::get(this); }
+
+ static bool classof(const Metadata *MD) {
+ switch (MD->getMetadataID()) {
+ default:
+ return false;
+ case GenericDINodeKind:
+ case DISubrangeKind:
+ case DIEnumeratorKind:
+ case DIBasicTypeKind:
+ case DIDerivedTypeKind:
+ case DICompositeTypeKind:
+ case DISubroutineTypeKind:
+ case DIFileKind:
+ case DICompileUnitKind:
+ case DISubprogramKind:
+ case DILexicalBlockKind:
+ case DILexicalBlockFileKind:
+ case DINamespaceKind:
+ case DITemplateTypeParameterKind:
+ case DITemplateValueParameterKind:
+ case DIGlobalVariableKind:
+ case DILocalVariableKind:
+ case DIObjCPropertyKind:
+ case DIImportedEntityKind:
+ case DIModuleKind:
+ return true;
+ }
+ }
+};
+
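+// Flag-splitting sketch: decompose a bitfield into its component flags. The
+// accessibility subfield is expected to come out as a single value.
+//
+//   SmallVector<unsigned, 8> Split;
+//   unsigned Rest =
+//       DINode::splitFlags(DINode::FlagPublic | DINode::FlagVirtual, Split);
+//   // Split == {FlagPublic, FlagVirtual}; Rest == 0 (no unknown bits)
+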
+template <class T> struct simplify_type<const TypedDINodeRef<T>> {
+ typedef Metadata *SimpleType;
+ static SimpleType getSimplifiedValue(const TypedDINodeRef<T> &MD) {
+ return MD;
+ }
+};
+
+template <class T>
+struct simplify_type<TypedDINodeRef<T>>
+ : simplify_type<const TypedDINodeRef<T>> {};
+
+/// \brief Generic tagged DWARF-like metadata node.
+///
+/// An un-specialized DWARF-like metadata node. The first operand is a
+/// (possibly empty) null-separated \a MDString header that contains arbitrary
+/// fields. The remaining operands are \a dwarf_operands(), and are pointers
+/// to other metadata.
+class GenericDINode : public DINode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ GenericDINode(LLVMContext &C, StorageType Storage, unsigned Hash,
+ unsigned Tag, ArrayRef<Metadata *> Ops1,
+ ArrayRef<Metadata *> Ops2)
+ : DINode(C, GenericDINodeKind, Storage, Tag, Ops1, Ops2) {
+ setHash(Hash);
+ }
+ ~GenericDINode() { dropAllReferences(); }
+
+ void setHash(unsigned Hash) { SubclassData32 = Hash; }
+ void recalculateHash();
+
+ static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
+ StringRef Header, ArrayRef<Metadata *> DwarfOps,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Tag, getCanonicalMDString(Context, Header),
+ DwarfOps, Storage, ShouldCreate);
+ }
+
+ static GenericDINode *getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Header, ArrayRef<Metadata *> DwarfOps,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempGenericDINode cloneImpl() const {
+ return getTemporary(
+ getContext(), getTag(), getHeader(),
+ SmallVector<Metadata *, 4>(dwarf_op_begin(), dwarf_op_end()));
+ }
+
+public:
+ unsigned getHash() const { return SubclassData32; }
+
+ DEFINE_MDNODE_GET(GenericDINode, (unsigned Tag, StringRef Header,
+ ArrayRef<Metadata *> DwarfOps),
+ (Tag, Header, DwarfOps))
+ DEFINE_MDNODE_GET(GenericDINode, (unsigned Tag, MDString *Header,
+ ArrayRef<Metadata *> DwarfOps),
+ (Tag, Header, DwarfOps))
+
+ /// \brief Return a (temporary) clone of this.
+ TempGenericDINode clone() const { return cloneImpl(); }
+
+ unsigned getTag() const { return SubclassData16; }
+ StringRef getHeader() const { return getStringOperand(0); }
+
+ op_iterator dwarf_op_begin() const { return op_begin() + 1; }
+ op_iterator dwarf_op_end() const { return op_end(); }
+ op_range dwarf_operands() const {
+ return op_range(dwarf_op_begin(), dwarf_op_end());
+ }
+
+ unsigned getNumDwarfOperands() const { return getNumOperands() - 1; }
+ const MDOperand &getDwarfOperand(unsigned I) const {
+ return getOperand(I + 1);
+ }
+ void replaceDwarfOperandWith(unsigned I, Metadata *New) {
+ replaceOperandWith(I + 1, New);
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == GenericDINodeKind;
+ }
+};
+
+/// \brief Array subrange.
+///
+/// TODO: Merge into node for DW_TAG_array_type, which should have a custom
+/// type.
+class DISubrange : public DINode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ int64_t Count;
+ int64_t LowerBound;
+
+ DISubrange(LLVMContext &C, StorageType Storage, int64_t Count,
+ int64_t LowerBound)
+ : DINode(C, DISubrangeKind, Storage, dwarf::DW_TAG_subrange_type, None),
+ Count(Count), LowerBound(LowerBound) {}
+ ~DISubrange() = default;
+
+ static DISubrange *getImpl(LLVMContext &Context, int64_t Count,
+ int64_t LowerBound, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDISubrange cloneImpl() const {
+ return getTemporary(getContext(), getCount(), getLowerBound());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DISubrange, (int64_t Count, int64_t LowerBound = 0),
+ (Count, LowerBound))
+
+ TempDISubrange clone() const { return cloneImpl(); }
+
+ int64_t getLowerBound() const { return LowerBound; }
+ int64_t getCount() const { return Count; }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DISubrangeKind;
+ }
+};
+
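+// Usage sketch: a C array `int x[10]` is described by a subrange with
+// Count = 10 and the default LowerBound = 0 (assuming an LLVMContext `Ctx`).
+//
+//   DISubrange *R = DISubrange::get(Ctx, 10);
+//   R->getCount();       // 10
+//   R->getLowerBound();  // 0
+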
+/// \brief Enumeration value.
+///
+/// TODO: Add a pointer to the context (DW_TAG_enumeration_type) once that no
+/// longer creates a type cycle.
+class DIEnumerator : public DINode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ int64_t Value;
+
+ DIEnumerator(LLVMContext &C, StorageType Storage, int64_t Value,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, DIEnumeratorKind, Storage, dwarf::DW_TAG_enumerator, Ops),
+ Value(Value) {}
+ ~DIEnumerator() = default;
+
+ static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+ StringRef Name, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Value, getCanonicalMDString(Context, Name), Storage,
+ ShouldCreate);
+ }
+ static DIEnumerator *getImpl(LLVMContext &Context, int64_t Value,
+ MDString *Name, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIEnumerator cloneImpl() const {
+ return getTemporary(getContext(), getValue(), getName());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, StringRef Name),
+ (Value, Name))
+ DEFINE_MDNODE_GET(DIEnumerator, (int64_t Value, MDString *Name),
+ (Value, Name))
+
+ TempDIEnumerator clone() const { return cloneImpl(); }
+
+ int64_t getValue() const { return Value; }
+ StringRef getName() const { return getStringOperand(0); }
+
+ MDString *getRawName() const { return getOperandAs<MDString>(0); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIEnumeratorKind;
+ }
+};
+
+/// \brief Base class for scope-like contexts.
+///
+/// Base class for lexical scopes and types (which are also declaration
+/// contexts).
+///
+/// TODO: Separate the concepts of declaration contexts and lexical scopes.
+class DIScope : public DINode {
+protected:
+ DIScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, ID, Storage, Tag, Ops) {}
+ ~DIScope() = default;
+
+public:
+ DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+
+ inline StringRef getFilename() const;
+ inline StringRef getDirectory() const;
+
+ StringRef getName() const;
+ DIScopeRef getScope() const;
+
+ /// \brief Return the raw underlying file.
+ ///
+ /// An \a DIFile is an \a DIScope, but it doesn't point at a separate file
+/// (it \em is the file). If \c this is an \a DIFile, we need to return \c
+ /// this. Otherwise, return the first operand, which is where all other
+ /// subclasses store their file pointer.
+ Metadata *getRawFile() const {
+ return isa<DIFile>(this) ? const_cast<DIScope *>(this)
+ : static_cast<Metadata *>(getOperand(0));
+ }
+
+ DIScopeRef getRef() const { return DIScopeRef::get(this); }
+
+ static bool classof(const Metadata *MD) {
+ switch (MD->getMetadataID()) {
+ default:
+ return false;
+ case DIBasicTypeKind:
+ case DIDerivedTypeKind:
+ case DICompositeTypeKind:
+ case DISubroutineTypeKind:
+ case DIFileKind:
+ case DICompileUnitKind:
+ case DISubprogramKind:
+ case DILexicalBlockKind:
+ case DILexicalBlockFileKind:
+ case DINamespaceKind:
+ case DIModuleKind:
+ return true;
+ }
+ }
+};
+
+/// \brief File.
+///
+/// TODO: Merge with directory/file node (including users).
+/// TODO: Canonicalize paths on creation.
+class DIFile : public DIScope {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DIFile(LLVMContext &C, StorageType Storage, ArrayRef<Metadata *> Ops)
+ : DIScope(C, DIFileKind, Storage, dwarf::DW_TAG_file_type, Ops) {}
+ ~DIFile() = default;
+
+ static DIFile *getImpl(LLVMContext &Context, StringRef Filename,
+ StringRef Directory, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, getCanonicalMDString(Context, Filename),
+ getCanonicalMDString(Context, Directory), Storage,
+ ShouldCreate);
+ }
+ static DIFile *getImpl(LLVMContext &Context, MDString *Filename,
+ MDString *Directory, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIFile cloneImpl() const {
+ return getTemporary(getContext(), getFilename(), getDirectory());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIFile, (StringRef Filename, StringRef Directory),
+ (Filename, Directory))
+ DEFINE_MDNODE_GET(DIFile, (MDString * Filename, MDString *Directory),
+ (Filename, Directory))
+
+ TempDIFile clone() const { return cloneImpl(); }
+
+ StringRef getFilename() const { return getStringOperand(0); }
+ StringRef getDirectory() const { return getStringOperand(1); }
+
+ MDString *getRawFilename() const { return getOperandAs<MDString>(0); }
+ MDString *getRawDirectory() const { return getOperandAs<MDString>(1); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIFileKind;
+ }
+};
+
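+// Usage sketch (assuming an LLVMContext `Ctx`): files are uniqued on the
+// (filename, directory) pair, so equal strings yield the same node.
+//
+//   DIFile *F = DIFile::get(Ctx, "a.c", "/tmp");
+//   assert(F == DIFile::get(Ctx, "a.c", "/tmp"));
+//   F->getFilename();  // "a.c"
+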
+StringRef DIScope::getFilename() const {
+ if (auto *F = getFile())
+ return F->getFilename();
+ return "";
+}
+
+StringRef DIScope::getDirectory() const {
+ if (auto *F = getFile())
+ return F->getDirectory();
+ return "";
+}
+
+/// \brief Base class for types.
+///
+/// TODO: Remove the hardcoded name and context, since many types don't use
+/// them.
+/// TODO: Split up flags.
+class DIType : public DIScope {
+ unsigned Line;
+ unsigned Flags;
+ uint64_t SizeInBits;
+ uint64_t AlignInBits;
+ uint64_t OffsetInBits;
+
+protected:
+ DIType(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+ unsigned Line, uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, ArrayRef<Metadata *> Ops)
+ : DIScope(C, ID, Storage, Tag, Ops), Line(Line), Flags(Flags),
+ SizeInBits(SizeInBits), AlignInBits(AlignInBits),
+ OffsetInBits(OffsetInBits) {}
+ ~DIType() = default;
+
+public:
+ TempDIType clone() const {
+ return TempDIType(cast<DIType>(MDNode::clone().release()));
+ }
+
+ unsigned getLine() const { return Line; }
+ uint64_t getSizeInBits() const { return SizeInBits; }
+ uint64_t getAlignInBits() const { return AlignInBits; }
+ uint64_t getOffsetInBits() const { return OffsetInBits; }
+ unsigned getFlags() const { return Flags; }
+
+ DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
+ StringRef getName() const { return getStringOperand(2); }
+
+ Metadata *getRawScope() const { return getOperand(1); }
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+
+ void setFlags(unsigned NewFlags) {
+ assert(!isUniqued() && "Cannot set flags on uniqued nodes");
+ Flags = NewFlags;
+ }
+
+ bool isPrivate() const {
+ return (getFlags() & FlagAccessibility) == FlagPrivate;
+ }
+ bool isProtected() const {
+ return (getFlags() & FlagAccessibility) == FlagProtected;
+ }
+ bool isPublic() const {
+ return (getFlags() & FlagAccessibility) == FlagPublic;
+ }
+ bool isForwardDecl() const { return getFlags() & FlagFwdDecl; }
+ bool isAppleBlockExtension() const { return getFlags() & FlagAppleBlock; }
+ bool isBlockByrefStruct() const { return getFlags() & FlagBlockByrefStruct; }
+ bool isVirtual() const { return getFlags() & FlagVirtual; }
+ bool isArtificial() const { return getFlags() & FlagArtificial; }
+ bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
+ bool isObjcClassComplete() const {
+ return getFlags() & FlagObjcClassComplete;
+ }
+ bool isVector() const { return getFlags() & FlagVector; }
+ bool isStaticMember() const { return getFlags() & FlagStaticMember; }
+ bool isLValueReference() const { return getFlags() & FlagLValueReference; }
+ bool isRValueReference() const { return getFlags() & FlagRValueReference; }
+ bool isExternalTypeRef() const { return getFlags() & FlagExternalTypeRef; }
+
+ DITypeRef getRef() const { return DITypeRef::get(this); }
+
+ static bool classof(const Metadata *MD) {
+ switch (MD->getMetadataID()) {
+ default:
+ return false;
+ case DIBasicTypeKind:
+ case DIDerivedTypeKind:
+ case DICompositeTypeKind:
+ case DISubroutineTypeKind:
+ return true;
+ }
+ }
+};
+
+/// \brief Basic type, like 'int' or 'float'.
+///
+/// TODO: Split out DW_TAG_unspecified_type.
+/// TODO: Drop unused accessors.
+class DIBasicType : public DIType {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Encoding;
+
+ DIBasicType(LLVMContext &C, StorageType Storage, unsigned Tag,
+ uint64_t SizeInBits, uint64_t AlignInBits, unsigned Encoding,
+ ArrayRef<Metadata *> Ops)
+ : DIType(C, DIBasicTypeKind, Storage, Tag, 0, SizeInBits, AlignInBits, 0,
+ 0, Ops),
+ Encoding(Encoding) {}
+ ~DIBasicType() = default;
+
+ static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
+ StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Tag, getCanonicalMDString(Context, Name),
+ SizeInBits, AlignInBits, Encoding, Storage, ShouldCreate);
+ }
+ static DIBasicType *getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIBasicType cloneImpl() const {
+ return getTemporary(getContext(), getTag(), getName(), getSizeInBits(),
+ getAlignInBits(), getEncoding());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIBasicType, (unsigned Tag, StringRef Name),
+ (Tag, Name, 0, 0, 0))
+ DEFINE_MDNODE_GET(DIBasicType,
+ (unsigned Tag, StringRef Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding),
+ (Tag, Name, SizeInBits, AlignInBits, Encoding))
+ DEFINE_MDNODE_GET(DIBasicType,
+ (unsigned Tag, MDString *Name, uint64_t SizeInBits,
+ uint64_t AlignInBits, unsigned Encoding),
+ (Tag, Name, SizeInBits, AlignInBits, Encoding))
+
+ TempDIBasicType clone() const { return cloneImpl(); }
+
+ unsigned getEncoding() const { return Encoding; }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIBasicTypeKind;
+ }
+};
+
+/// \brief Derived types.
+///
+/// This includes qualified types, pointers, references, friends, typedefs, and
+/// class members.
+///
+/// TODO: Split out members (inheritance, fields, methods, etc.).
+class DIDerivedType : public DIType {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DIDerivedType(LLVMContext &C, StorageType Storage, unsigned Tag,
+ unsigned Line, uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, ArrayRef<Metadata *> Ops)
+ : DIType(C, DIDerivedTypeKind, Storage, Tag, Line, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Ops) {}
+ ~DIDerivedType() = default;
+
+ static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
+ StringRef Name, DIFile *File, unsigned Line,
+ DIScopeRef Scope, DITypeRef BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ Metadata *ExtraData, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Tag, getCanonicalMDString(Context, Name), File,
+ Line, Scope, BaseType, SizeInBits, AlignInBits, OffsetInBits,
+ Flags, ExtraData, Storage, ShouldCreate);
+ }
+ static DIDerivedType *getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ Metadata *ExtraData, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIDerivedType cloneImpl() const {
+ return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(),
+ getScope(), getBaseType(), getSizeInBits(),
+ getAlignInBits(), getOffsetInBits(), getFlags(),
+ getExtraData());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIDerivedType,
+ (unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags,
+ Metadata *ExtraData = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, ExtraData))
+ DEFINE_MDNODE_GET(DIDerivedType,
+ (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
+ DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, Metadata *ExtraData = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, ExtraData))
+
+ TempDIDerivedType clone() const { return cloneImpl(); }
+
+ /// \brief Get the base type this is derived from.
+ DITypeRef getBaseType() const { return DITypeRef(getRawBaseType()); }
+ Metadata *getRawBaseType() const { return getOperand(3); }
+
+ /// \brief Get extra data associated with this derived type.
+ ///
+ /// Class type for pointer-to-members, objective-c property node for ivars,
+ /// or global constant wrapper for static members.
+ ///
+ /// TODO: Separate out types that need this extra operand: pointer-to-member
+ /// types and member fields (static members and ivars).
+ Metadata *getExtraData() const { return getRawExtraData(); }
+ Metadata *getRawExtraData() const { return getOperand(4); }
+
+ /// \brief Get casted version of extra data.
+ /// @{
+ DITypeRef getClassType() const {
+ assert(getTag() == dwarf::DW_TAG_ptr_to_member_type);
+ return DITypeRef(getExtraData());
+ }
+ DIObjCProperty *getObjCProperty() const {
+ return dyn_cast_or_null<DIObjCProperty>(getExtraData());
+ }
+ Constant *getConstant() const {
+ assert(getTag() == dwarf::DW_TAG_member && isStaticMember());
+ if (auto *C = cast_or_null<ConstantAsMetadata>(getExtraData()))
+ return C->getValue();
+ return nullptr;
+ }
+ /// @}
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIDerivedTypeKind;
+ }
+};
+
+/// \brief Composite types.
+///
+/// TODO: Detach from DerivedTypeBase (split out MDEnumType?).
+/// TODO: Create a custom, unrelated node for DW_TAG_array_type.
+class DICompositeType : public DIType {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned RuntimeLang;
+
+ DICompositeType(LLVMContext &C, StorageType Storage, unsigned Tag,
+ unsigned Line, unsigned RuntimeLang, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits, unsigned Flags,
+ ArrayRef<Metadata *> Ops)
+ : DIType(C, DICompositeTypeKind, Storage, Tag, Line, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Ops),
+ RuntimeLang(RuntimeLang) {}
+ ~DICompositeType() = default;
+
+ static DICompositeType *
+ getImpl(LLVMContext &Context, unsigned Tag, StringRef Name, Metadata *File,
+ unsigned Line, DIScopeRef Scope, DITypeRef BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DINodeArray Elements, unsigned RuntimeLang,
+ DITypeRef VTableHolder, DITemplateParameterArray TemplateParams,
+ StringRef Identifier, StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(
+ Context, Tag, getCanonicalMDString(Context, Name), File, Line, Scope,
+ BaseType, SizeInBits, AlignInBits, OffsetInBits, Flags, Elements.get(),
+ RuntimeLang, VTableHolder, TemplateParams.get(),
+ getCanonicalMDString(Context, Identifier), Storage, ShouldCreate);
+ }
+ static DICompositeType *
+ getImpl(LLVMContext &Context, unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, Metadata *Elements, unsigned RuntimeLang,
+ Metadata *VTableHolder, Metadata *TemplateParams,
+ MDString *Identifier, StorageType Storage, bool ShouldCreate = true);
+
+ TempDICompositeType cloneImpl() const {
+ return getTemporary(getContext(), getTag(), getName(), getFile(), getLine(),
+ getScope(), getBaseType(), getSizeInBits(),
+ getAlignInBits(), getOffsetInBits(), getFlags(),
+ getElements(), getRuntimeLang(), getVTableHolder(),
+ getTemplateParams(), getIdentifier());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DICompositeType,
+ (unsigned Tag, StringRef Name, DIFile *File, unsigned Line,
+ DIScopeRef Scope, DITypeRef BaseType, uint64_t SizeInBits,
+ uint64_t AlignInBits, uint64_t OffsetInBits,
+ unsigned Flags, DINodeArray Elements, unsigned RuntimeLang,
+ DITypeRef VTableHolder,
+ DITemplateParameterArray TemplateParams = nullptr,
+ StringRef Identifier = ""),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+ VTableHolder, TemplateParams, Identifier))
+ DEFINE_MDNODE_GET(DICompositeType,
+ (unsigned Tag, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Scope, Metadata *BaseType,
+ uint64_t SizeInBits, uint64_t AlignInBits,
+ uint64_t OffsetInBits, unsigned Flags, Metadata *Elements,
+ unsigned RuntimeLang, Metadata *VTableHolder,
+ Metadata *TemplateParams = nullptr,
+ MDString *Identifier = nullptr),
+ (Tag, Name, File, Line, Scope, BaseType, SizeInBits,
+ AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+ VTableHolder, TemplateParams, Identifier))
+
+ TempDICompositeType clone() const { return cloneImpl(); }
+
+ DITypeRef getBaseType() const { return DITypeRef(getRawBaseType()); }
+ DINodeArray getElements() const {
+ return cast_or_null<MDTuple>(getRawElements());
+ }
+ DITypeRef getVTableHolder() const { return DITypeRef(getRawVTableHolder()); }
+ DITemplateParameterArray getTemplateParams() const {
+ return cast_or_null<MDTuple>(getRawTemplateParams());
+ }
+ StringRef getIdentifier() const { return getStringOperand(7); }
+ unsigned getRuntimeLang() const { return RuntimeLang; }
+
+ Metadata *getRawBaseType() const { return getOperand(3); }
+ Metadata *getRawElements() const { return getOperand(4); }
+ Metadata *getRawVTableHolder() const { return getOperand(5); }
+ Metadata *getRawTemplateParams() const { return getOperand(6); }
+ MDString *getRawIdentifier() const { return getOperandAs<MDString>(7); }
+
+ /// \brief Replace operands.
+ ///
+ /// If this \a isUniqued() and not \a isResolved(), on a uniquing collision
+ /// this will be RAUW'ed and deleted. Use a \a TrackingMDRef to keep track
+ /// of its movement if necessary.
+ /// @{
+ void replaceElements(DINodeArray Elements) {
+#ifndef NDEBUG
+ for (DINode *Op : getElements())
+ assert(std::find(Elements->op_begin(), Elements->op_end(), Op) !=
+ Elements->op_end() &&
+ "Lost a member during member list replacement");
+#endif
+ replaceOperandWith(4, Elements.get());
+ }
+ void replaceVTableHolder(DITypeRef VTableHolder) {
+ replaceOperandWith(5, VTableHolder);
+ }
+ void replaceTemplateParams(DITemplateParameterArray TemplateParams) {
+ replaceOperandWith(6, TemplateParams.get());
+ }
+ /// @}
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DICompositeTypeKind;
+ }
+};
+
+template <class T> TypedDINodeRef<T> TypedDINodeRef<T>::get(const T *N) {
+ if (N)
+ if (auto *Composite = dyn_cast<DICompositeType>(N))
+ if (auto *S = Composite->getRawIdentifier())
+ return TypedDINodeRef<T>(S);
+ return TypedDINodeRef<T>(N);
+}
+
+/// \brief Type array for a subprogram.
+///
+/// TODO: Fold the array of types in directly as operands.
+class DISubroutineType : public DIType {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DISubroutineType(LLVMContext &C, StorageType Storage, unsigned Flags,
+ ArrayRef<Metadata *> Ops)
+ : DIType(C, DISubroutineTypeKind, Storage, dwarf::DW_TAG_subroutine_type,
+ 0, 0, 0, 0, Flags, Ops) {}
+ ~DISubroutineType() = default;
+
+ static DISubroutineType *getImpl(LLVMContext &Context, unsigned Flags,
+ DITypeRefArray TypeArray,
+ StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Flags, TypeArray.get(), Storage, ShouldCreate);
+ }
+ static DISubroutineType *getImpl(LLVMContext &Context, unsigned Flags,
+ Metadata *TypeArray, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDISubroutineType cloneImpl() const {
+ return getTemporary(getContext(), getFlags(), getTypeArray());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DISubroutineType,
+ (unsigned Flags, DITypeRefArray TypeArray),
+ (Flags, TypeArray))
+ DEFINE_MDNODE_GET(DISubroutineType, (unsigned Flags, Metadata *TypeArray),
+ (Flags, TypeArray))
+
+ TempDISubroutineType clone() const { return cloneImpl(); }
+
+ DITypeRefArray getTypeArray() const {
+ return cast_or_null<MDTuple>(getRawTypeArray());
+ }
+ Metadata *getRawTypeArray() const { return getOperand(3); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DISubroutineTypeKind;
+ }
+};
+
+/// \brief Compile unit.
+class DICompileUnit : public DIScope {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned SourceLanguage;
+ bool IsOptimized;
+ unsigned RuntimeVersion;
+ unsigned EmissionKind;
+ uint64_t DWOId;
+
+ DICompileUnit(LLVMContext &C, StorageType Storage, unsigned SourceLanguage,
+ bool IsOptimized, unsigned RuntimeVersion,
+ unsigned EmissionKind, uint64_t DWOId, ArrayRef<Metadata *> Ops)
+ : DIScope(C, DICompileUnitKind, Storage, dwarf::DW_TAG_compile_unit, Ops),
+ SourceLanguage(SourceLanguage), IsOptimized(IsOptimized),
+ RuntimeVersion(RuntimeVersion), EmissionKind(EmissionKind),
+ DWOId(DWOId) {
+ assert(Storage != Uniqued);
+ }
+ ~DICompileUnit() = default;
+
+ static DICompileUnit *
+ getImpl(LLVMContext &Context, unsigned SourceLanguage, DIFile *File,
+ StringRef Producer, bool IsOptimized, StringRef Flags,
+ unsigned RuntimeVersion, StringRef SplitDebugFilename,
+ unsigned EmissionKind, DICompositeTypeArray EnumTypes,
+ DITypeArray RetainedTypes, DISubprogramArray Subprograms,
+ DIGlobalVariableArray GlobalVariables,
+ DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
+ uint64_t DWOId, StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, SourceLanguage, File,
+ getCanonicalMDString(Context, Producer), IsOptimized,
+ getCanonicalMDString(Context, Flags), RuntimeVersion,
+ getCanonicalMDString(Context, SplitDebugFilename),
+ EmissionKind, EnumTypes.get(), RetainedTypes.get(),
+ Subprograms.get(), GlobalVariables.get(),
+ ImportedEntities.get(), Macros.get(), DWOId, Storage,
+ ShouldCreate);
+ }
+ static DICompileUnit *
+ getImpl(LLVMContext &Context, unsigned SourceLanguage, Metadata *File,
+ MDString *Producer, bool IsOptimized, MDString *Flags,
+ unsigned RuntimeVersion, MDString *SplitDebugFilename,
+ unsigned EmissionKind, Metadata *EnumTypes, Metadata *RetainedTypes,
+ Metadata *Subprograms, Metadata *GlobalVariables,
+ Metadata *ImportedEntities, Metadata *Macros, uint64_t DWOId,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDICompileUnit cloneImpl() const {
+ return getTemporary(
+ getContext(), getSourceLanguage(), getFile(), getProducer(),
+ isOptimized(), getFlags(), getRuntimeVersion(), getSplitDebugFilename(),
+ getEmissionKind(), getEnumTypes(), getRetainedTypes(), getSubprograms(),
+ getGlobalVariables(), getImportedEntities(), getMacros(), DWOId);
+ }
+
+ static void get() = delete;
+ static void getIfExists() = delete;
+
+public:
+ DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
+ DICompileUnit,
+ (unsigned SourceLanguage, DIFile *File, StringRef Producer,
+ bool IsOptimized, StringRef Flags, unsigned RuntimeVersion,
+ StringRef SplitDebugFilename, unsigned EmissionKind,
+ DICompositeTypeArray EnumTypes, DITypeArray RetainedTypes,
+ DISubprogramArray Subprograms, DIGlobalVariableArray GlobalVariables,
+ DIImportedEntityArray ImportedEntities, DIMacroNodeArray Macros,
+ uint64_t DWOId),
+ (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
+ SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes, Subprograms,
+ GlobalVariables, ImportedEntities, Macros, DWOId))
+ DEFINE_MDNODE_GET_DISTINCT_TEMPORARY(
+ DICompileUnit,
+ (unsigned SourceLanguage, Metadata *File, MDString *Producer,
+ bool IsOptimized, MDString *Flags, unsigned RuntimeVersion,
+ MDString *SplitDebugFilename, unsigned EmissionKind, Metadata *EnumTypes,
+ Metadata *RetainedTypes, Metadata *Subprograms,
+ Metadata *GlobalVariables, Metadata *ImportedEntities, Metadata *Macros,
+ uint64_t DWOId),
+ (SourceLanguage, File, Producer, IsOptimized, Flags, RuntimeVersion,
+ SplitDebugFilename, EmissionKind, EnumTypes, RetainedTypes, Subprograms,
+ GlobalVariables, ImportedEntities, Macros, DWOId))
+
+ TempDICompileUnit clone() const { return cloneImpl(); }
+
+ unsigned getSourceLanguage() const { return SourceLanguage; }
+ bool isOptimized() const { return IsOptimized; }
+ unsigned getRuntimeVersion() const { return RuntimeVersion; }
+ unsigned getEmissionKind() const { return EmissionKind; }
+ StringRef getProducer() const { return getStringOperand(1); }
+ StringRef getFlags() const { return getStringOperand(2); }
+ StringRef getSplitDebugFilename() const { return getStringOperand(3); }
+ DICompositeTypeArray getEnumTypes() const {
+ return cast_or_null<MDTuple>(getRawEnumTypes());
+ }
+ DITypeArray getRetainedTypes() const {
+ return cast_or_null<MDTuple>(getRawRetainedTypes());
+ }
+ DISubprogramArray getSubprograms() const {
+ return cast_or_null<MDTuple>(getRawSubprograms());
+ }
+ DIGlobalVariableArray getGlobalVariables() const {
+ return cast_or_null<MDTuple>(getRawGlobalVariables());
+ }
+ DIImportedEntityArray getImportedEntities() const {
+ return cast_or_null<MDTuple>(getRawImportedEntities());
+ }
+ DIMacroNodeArray getMacros() const {
+ return cast_or_null<MDTuple>(getRawMacros());
+ }
+ uint64_t getDWOId() const { return DWOId; }
+ void setDWOId(uint64_t DwoId) { DWOId = DwoId; }
+
+ MDString *getRawProducer() const { return getOperandAs<MDString>(1); }
+ MDString *getRawFlags() const { return getOperandAs<MDString>(2); }
+ MDString *getRawSplitDebugFilename() const {
+ return getOperandAs<MDString>(3);
+ }
+ Metadata *getRawEnumTypes() const { return getOperand(4); }
+ Metadata *getRawRetainedTypes() const { return getOperand(5); }
+ Metadata *getRawSubprograms() const { return getOperand(6); }
+ Metadata *getRawGlobalVariables() const { return getOperand(7); }
+ Metadata *getRawImportedEntities() const { return getOperand(8); }
+ Metadata *getRawMacros() const { return getOperand(9); }
+
+ /// \brief Replace arrays.
+ ///
+ /// If this \a isUniqued() and not \a isResolved(), it will be RAUW'ed and
+ /// deleted on a uniquing collision. In practice, uniquing collisions on \a
+ /// DICompileUnit should be fairly rare.
+ /// @{
+ void replaceEnumTypes(DICompositeTypeArray N) {
+ replaceOperandWith(4, N.get());
+ }
+ void replaceRetainedTypes(DITypeArray N) {
+ replaceOperandWith(5, N.get());
+ }
+ void replaceSubprograms(DISubprogramArray N) {
+ replaceOperandWith(6, N.get());
+ }
+ void replaceGlobalVariables(DIGlobalVariableArray N) {
+ replaceOperandWith(7, N.get());
+ }
+ void replaceImportedEntities(DIImportedEntityArray N) {
+ replaceOperandWith(8, N.get());
+ }
+ void replaceMacros(DIMacroNodeArray N) { replaceOperandWith(9, N.get()); }
+ /// @}
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DICompileUnitKind;
+ }
+};
+
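+// Note that get() and getIfExists() are deleted above: compile units are
+// always distinct, never uniqued (see the assert in the constructor).
+// Construction goes through getDistinct() or, more typically, through
+// DIBuilder::createCompileUnit.
+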
+/// \brief A scope for locals.
+///
+/// A legal scope for lexical blocks, local variables, and debug info
+/// locations. Subclasses are \a DISubprogram, \a DILexicalBlock, and \a
+/// DILexicalBlockFile.
+class DILocalScope : public DIScope {
+protected:
+ DILocalScope(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Tag,
+ ArrayRef<Metadata *> Ops)
+ : DIScope(C, ID, Storage, Tag, Ops) {}
+ ~DILocalScope() = default;
+
+public:
+ /// \brief Get the subprogram for this scope.
+ ///
+ /// Return this if it's an \a DISubprogram; otherwise, look up the scope
+ /// chain.
+ DISubprogram *getSubprogram() const;
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DISubprogramKind ||
+ MD->getMetadataID() == DILexicalBlockKind ||
+ MD->getMetadataID() == DILexicalBlockFileKind;
+ }
+};
+
+/// \brief Debug location.
+///
+/// A debug location in source code, used for debug info and otherwise.
+class DILocation : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DILocation(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Column, ArrayRef<Metadata *> MDs);
+ ~DILocation() { dropAllReferences(); }
+
+ static DILocation *getImpl(LLVMContext &Context, unsigned Line,
+ unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt, StorageType Storage,
+ bool ShouldCreate = true);
+ static DILocation *getImpl(LLVMContext &Context, unsigned Line,
+ unsigned Column, DILocalScope *Scope,
+ DILocation *InlinedAt, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Line, Column, static_cast<Metadata *>(Scope),
+ static_cast<Metadata *>(InlinedAt), Storage, ShouldCreate);
+ }
+
+ TempDILocation cloneImpl() const {
+ // Get the raw scope/inlinedAt since it is possible to invoke this on
+ // a DILocation containing temporary metadata.
+ return getTemporary(getContext(), getLine(), getColumn(), getRawScope(),
+ getRawInlinedAt());
+ }
+
+ // Disallow replacing operands.
+ void replaceOperandWith(unsigned I, Metadata *New) = delete;
+
+public:
+ DEFINE_MDNODE_GET(DILocation,
+ (unsigned Line, unsigned Column, Metadata *Scope,
+ Metadata *InlinedAt = nullptr),
+ (Line, Column, Scope, InlinedAt))
+ DEFINE_MDNODE_GET(DILocation,
+ (unsigned Line, unsigned Column, DILocalScope *Scope,
+ DILocation *InlinedAt = nullptr),
+ (Line, Column, Scope, InlinedAt))
+
+ /// \brief Return a (temporary) clone of this.
+ TempDILocation clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return SubclassData32; }
+ unsigned getColumn() const { return SubclassData16; }
+ DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }
+ DILocation *getInlinedAt() const {
+ return cast_or_null<DILocation>(getRawInlinedAt());
+ }
+
+ DIFile *getFile() const { return getScope()->getFile(); }
+ StringRef getFilename() const { return getScope()->getFilename(); }
+ StringRef getDirectory() const { return getScope()->getDirectory(); }
+
+ /// \brief Get the scope where this is inlined.
+ ///
+ /// Walk through \a getInlinedAt() and return \a getScope() from the deepest
+ /// location.
+ DILocalScope *getInlinedAtScope() const {
+ if (auto *IA = getInlinedAt())
+ return IA->getInlinedAtScope();
+ return getScope();
+ }
+
+ /// \brief Check whether this can be discriminated from another location.
+ ///
+ /// Check \c this can be discriminated from \c RHS in a linetable entry.
+ /// Scope and inlined-at chains are not recorded in the linetable, so they
+ /// cannot be used to distinguish basic blocks.
+ ///
+ /// The current implementation is weaker than it should be, since it just
+ /// checks filename and line.
+ ///
+ /// FIXME: Add a check for getDiscriminator().
+ /// FIXME: Add a check for getColumn().
+ /// FIXME: Change the getFilename() check to getFile() (or add one for
+ /// getDirectory()).
+ bool canDiscriminate(const DILocation &RHS) const {
+ return getFilename() != RHS.getFilename() || getLine() != RHS.getLine();
+ }
+
+ /// \brief Get the DWARF discriminator.
+ ///
+ /// DWARF discriminators distinguish identical file locations between
+ /// instructions that are on different basic blocks.
+ inline unsigned getDiscriminator() const;
+
+ /// \brief Compute new discriminator in the given context.
+ ///
+ /// This modifies the \a LLVMContext that \c this is in to increment the next
+ /// discriminator for \c this's line/filename combination.
+ ///
+ /// FIXME: Delete this. See comments in implementation and at the only call
+ /// site in \a AddDiscriminators::runOnFunction().
+ unsigned computeNewDiscriminator() const;
+
+ Metadata *getRawScope() const { return getOperand(0); }
+ Metadata *getRawInlinedAt() const {
+ if (getNumOperands() == 2)
+ return getOperand(1);
+ return nullptr;
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILocationKind;
+ }
+};
+
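+// Usage sketch (assuming an LLVMContext `Ctx` and DISubprogram values `SP`
+// and `Caller`): a location at line 7, column 3 in SP, inlined at line 12
+// of Caller.
+//
+//   DILocation *CallSite = DILocation::get(Ctx, 12, 0, Caller);
+//   DILocation *Loc = DILocation::get(Ctx, 7, 3, SP, CallSite);
+//   Loc->getInlinedAtScope();  // Caller, the deepest inlined-at scope
+//   Loc->getLine();            // 7
+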
+/// \brief Subprogram description.
+///
+/// TODO: Remove DisplayName. It's always equal to Name.
+/// TODO: Split up flags.
+class DISubprogram : public DILocalScope {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+ unsigned ScopeLine;
+ unsigned Virtuality;
+ unsigned VirtualIndex;
+ unsigned Flags;
+ bool IsLocalToUnit;
+ bool IsDefinition;
+ bool IsOptimized;
+
+ DISubprogram(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned ScopeLine, unsigned Virtuality, unsigned VirtualIndex,
+ unsigned Flags, bool IsLocalToUnit, bool IsDefinition,
+ bool IsOptimized, ArrayRef<Metadata *> Ops)
+ : DILocalScope(C, DISubprogramKind, Storage, dwarf::DW_TAG_subprogram,
+ Ops),
+ Line(Line), ScopeLine(ScopeLine), Virtuality(Virtuality),
+ VirtualIndex(VirtualIndex), Flags(Flags), IsLocalToUnit(IsLocalToUnit),
+ IsDefinition(IsDefinition), IsOptimized(IsOptimized) {}
+ ~DISubprogram() = default;
+
+ static DISubprogram *
+ getImpl(LLVMContext &Context, DIScopeRef Scope, StringRef Name,
+ StringRef LinkageName, DIFile *File, unsigned Line,
+ DISubroutineType *Type, bool IsLocalToUnit, bool IsDefinition,
+ unsigned ScopeLine, DITypeRef ContainingType, unsigned Virtuality,
+ unsigned VirtualIndex, unsigned Flags, bool IsOptimized,
+ DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
+ DILocalVariableArray Variables, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ getCanonicalMDString(Context, LinkageName), File, Line, Type,
+ IsLocalToUnit, IsDefinition, ScopeLine, ContainingType,
+ Virtuality, VirtualIndex, Flags, IsOptimized,
+ TemplateParams.get(), Declaration, Variables.get(), Storage,
+ ShouldCreate);
+ }
+ static DISubprogram *
+ getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+ Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
+ unsigned Flags, bool IsOptimized, Metadata *TemplateParams,
+ Metadata *Declaration, Metadata *Variables, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDISubprogram cloneImpl() const {
+ return getTemporary(
+ getContext(), getScope(), getName(), getLinkageName(), getFile(),
+ getLine(), getType(), isLocalToUnit(), isDefinition(), getScopeLine(),
+ getContainingType(), getVirtuality(), getVirtualIndex(), getFlags(),
+ isOptimized(), getTemplateParams(), getDeclaration(), getVariables());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DISubprogram,
+ (DIScopeRef Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned Line, DISubroutineType *Type,
+ bool IsLocalToUnit, bool IsDefinition, unsigned ScopeLine,
+ DITypeRef ContainingType, unsigned Virtuality,
+ unsigned VirtualIndex, unsigned Flags, bool IsOptimized,
+ DITemplateParameterArray TemplateParams = nullptr,
+ DISubprogram *Declaration = nullptr,
+ DILocalVariableArray Variables = nullptr),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, ScopeLine, ContainingType, Virtuality,
+ VirtualIndex, Flags, IsOptimized, TemplateParams,
+ Declaration, Variables))
+ DEFINE_MDNODE_GET(
+ DISubprogram,
+ (Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
+ unsigned Line, Metadata *Type, bool IsLocalToUnit, bool IsDefinition,
+ unsigned ScopeLine, Metadata *ContainingType, unsigned Virtuality,
+ unsigned VirtualIndex, unsigned Flags, bool IsOptimized,
+ Metadata *TemplateParams = nullptr, Metadata *Declaration = nullptr,
+ Metadata *Variables = nullptr),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
+ ScopeLine, ContainingType, Virtuality, VirtualIndex, Flags, IsOptimized,
+ TemplateParams, Declaration, Variables))
+
+ TempDISubprogram clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+ unsigned getVirtuality() const { return Virtuality; }
+ unsigned getVirtualIndex() const { return VirtualIndex; }
+ unsigned getScopeLine() const { return ScopeLine; }
+ unsigned getFlags() const { return Flags; }
+ bool isLocalToUnit() const { return IsLocalToUnit; }
+ bool isDefinition() const { return IsDefinition; }
+ bool isOptimized() const { return IsOptimized; }
+
+ bool isArtificial() const { return getFlags() & FlagArtificial; }
+ bool isPrivate() const {
+ return (getFlags() & FlagAccessibility) == FlagPrivate;
+ }
+ bool isProtected() const {
+ return (getFlags() & FlagAccessibility) == FlagProtected;
+ }
+ bool isPublic() const {
+ return (getFlags() & FlagAccessibility) == FlagPublic;
+ }
+ bool isExplicit() const { return getFlags() & FlagExplicit; }
+ bool isPrototyped() const { return getFlags() & FlagPrototyped; }
+
+ /// \brief Check if this is reference-qualified.
+ ///
+ /// Return true if this subprogram is a C++11 reference-qualified non-static
+ /// member function (void foo() &).
+  bool isLValueReference() const {
+ return getFlags() & FlagLValueReference;
+ }
+
+ /// \brief Check if this is rvalue-reference-qualified.
+ ///
+ /// Return true if this subprogram is a C++11 rvalue-reference-qualified
+ /// non-static member function (void foo() &&).
+  bool isRValueReference() const {
+ return getFlags() & FlagRValueReference;
+ }
+
+ DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
+
+ StringRef getName() const { return getStringOperand(2); }
+ StringRef getDisplayName() const { return getStringOperand(3); }
+ StringRef getLinkageName() const { return getStringOperand(4); }
+
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+ MDString *getRawLinkageName() const { return getOperandAs<MDString>(4); }
+
+ DISubroutineType *getType() const {
+ return cast_or_null<DISubroutineType>(getRawType());
+ }
+ DITypeRef getContainingType() const {
+ return DITypeRef(getRawContainingType());
+ }
+
+ DITemplateParameterArray getTemplateParams() const {
+ return cast_or_null<MDTuple>(getRawTemplateParams());
+ }
+ DISubprogram *getDeclaration() const {
+ return cast_or_null<DISubprogram>(getRawDeclaration());
+ }
+ DILocalVariableArray getVariables() const {
+ return cast_or_null<MDTuple>(getRawVariables());
+ }
+
+ Metadata *getRawScope() const { return getOperand(1); }
+ Metadata *getRawType() const { return getOperand(5); }
+ Metadata *getRawContainingType() const { return getOperand(6); }
+ Metadata *getRawTemplateParams() const { return getOperand(7); }
+ Metadata *getRawDeclaration() const { return getOperand(8); }
+ Metadata *getRawVariables() const { return getOperand(9); }
+
+ /// \brief Check if this subprogram describes the given function.
+ ///
+ /// FIXME: Should this be looking through bitcasts?
+ bool describes(const Function *F) const;
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DISubprogramKind;
+ }
+};
+
+class DILexicalBlockBase : public DILocalScope {
+protected:
+ DILexicalBlockBase(LLVMContext &C, unsigned ID, StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : DILocalScope(C, ID, Storage, dwarf::DW_TAG_lexical_block, Ops) {}
+ ~DILexicalBlockBase() = default;
+
+public:
+ DILocalScope *getScope() const { return cast<DILocalScope>(getRawScope()); }
+
+ Metadata *getRawScope() const { return getOperand(1); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILexicalBlockKind ||
+ MD->getMetadataID() == DILexicalBlockFileKind;
+ }
+};
+
+class DILexicalBlock : public DILexicalBlockBase {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+ uint16_t Column;
+
+ DILexicalBlock(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Column, ArrayRef<Metadata *> Ops)
+ : DILexicalBlockBase(C, DILexicalBlockKind, Storage, Ops), Line(Line),
+ Column(Column) {
+ assert(Column < (1u << 16) && "Expected 16-bit column");
+ }
+ ~DILexicalBlock() = default;
+
+ static DILexicalBlock *getImpl(LLVMContext &Context, DILocalScope *Scope,
+ DIFile *File, unsigned Line, unsigned Column,
+ StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, static_cast<Metadata *>(Scope),
+ static_cast<Metadata *>(File), Line, Column, Storage,
+ ShouldCreate);
+ }
+
+ static DILexicalBlock *getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, unsigned Line, unsigned Column,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDILexicalBlock cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getFile(), getLine(),
+ getColumn());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DILexicalBlock, (DILocalScope * Scope, DIFile *File,
+ unsigned Line, unsigned Column),
+ (Scope, File, Line, Column))
+ DEFINE_MDNODE_GET(DILexicalBlock, (Metadata * Scope, Metadata *File,
+ unsigned Line, unsigned Column),
+ (Scope, File, Line, Column))
+
+ TempDILexicalBlock clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+ unsigned getColumn() const { return Column; }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILexicalBlockKind;
+ }
+};
+
+class DILexicalBlockFile : public DILexicalBlockBase {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Discriminator;
+
+ DILexicalBlockFile(LLVMContext &C, StorageType Storage,
+ unsigned Discriminator, ArrayRef<Metadata *> Ops)
+ : DILexicalBlockBase(C, DILexicalBlockFileKind, Storage, Ops),
+ Discriminator(Discriminator) {}
+ ~DILexicalBlockFile() = default;
+
+ static DILexicalBlockFile *getImpl(LLVMContext &Context, DILocalScope *Scope,
+ DIFile *File, unsigned Discriminator,
+ StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, static_cast<Metadata *>(Scope),
+ static_cast<Metadata *>(File), Discriminator, Storage,
+ ShouldCreate);
+ }
+
+ static DILexicalBlockFile *getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, unsigned Discriminator,
+ StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDILexicalBlockFile cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getFile(),
+ getDiscriminator());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DILexicalBlockFile, (DILocalScope * Scope, DIFile *File,
+ unsigned Discriminator),
+ (Scope, File, Discriminator))
+ DEFINE_MDNODE_GET(DILexicalBlockFile,
+ (Metadata * Scope, Metadata *File, unsigned Discriminator),
+ (Scope, File, Discriminator))
+
+ TempDILexicalBlockFile clone() const { return cloneImpl(); }
+
+ // TODO: Remove these once they're gone from DILexicalBlockBase.
+ unsigned getLine() const = delete;
+ unsigned getColumn() const = delete;
+
+ unsigned getDiscriminator() const { return Discriminator; }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILexicalBlockFileKind;
+ }
+};
+
+unsigned DILocation::getDiscriminator() const {
+ if (auto *F = dyn_cast<DILexicalBlockFile>(getScope()))
+ return F->getDiscriminator();
+ return 0;
+}
+
+class DINamespace : public DIScope {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+
+ DINamespace(LLVMContext &Context, StorageType Storage, unsigned Line,
+ ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace,
+ Ops),
+ Line(Line) {}
+ ~DINamespace() = default;
+
+ static DINamespace *getImpl(LLVMContext &Context, DIScope *Scope,
+ DIFile *File, StringRef Name, unsigned Line,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Scope, File, getCanonicalMDString(Context, Name),
+ Line, Storage, ShouldCreate);
+ }
+ static DINamespace *getImpl(LLVMContext &Context, Metadata *Scope,
+ Metadata *File, MDString *Name, unsigned Line,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDINamespace cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getFile(), getName(),
+ getLine());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DINamespace, (DIScope * Scope, DIFile *File, StringRef Name,
+ unsigned Line),
+ (Scope, File, Name, Line))
+ DEFINE_MDNODE_GET(DINamespace, (Metadata * Scope, Metadata *File,
+ MDString *Name, unsigned Line),
+ (Scope, File, Name, Line))
+
+ TempDINamespace clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+ DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+ StringRef getName() const { return getStringOperand(2); }
+
+ Metadata *getRawScope() const { return getOperand(1); }
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DINamespaceKind;
+ }
+};
+
+/// \brief A (clang) module that has been imported by the compile unit.
+///
+class DIModule : public DIScope {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DIModule(LLVMContext &Context, StorageType Storage, ArrayRef<Metadata *> Ops)
+ : DIScope(Context, DIModuleKind, Storage, dwarf::DW_TAG_module, Ops) {}
+  ~DIModule() = default;
+
+ static DIModule *getImpl(LLVMContext &Context, DIScope *Scope,
+ StringRef Name, StringRef ConfigurationMacros,
+ StringRef IncludePath, StringRef ISysRoot,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ getCanonicalMDString(Context, ConfigurationMacros),
+ getCanonicalMDString(Context, IncludePath),
+ getCanonicalMDString(Context, ISysRoot),
+ Storage, ShouldCreate);
+ }
+ static DIModule *getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, MDString *ConfigurationMacros,
+ MDString *IncludePath, MDString *ISysRoot,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIModule cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getName(),
+ getConfigurationMacros(), getIncludePath(),
+ getISysRoot());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIModule, (DIScope *Scope, StringRef Name,
+ StringRef ConfigurationMacros, StringRef IncludePath,
+ StringRef ISysRoot),
+ (Scope, Name, ConfigurationMacros, IncludePath, ISysRoot))
+ DEFINE_MDNODE_GET(DIModule,
+ (Metadata *Scope, MDString *Name, MDString *ConfigurationMacros,
+ MDString *IncludePath, MDString *ISysRoot),
+ (Scope, Name, ConfigurationMacros, IncludePath, ISysRoot))
+
+ TempDIModule clone() const { return cloneImpl(); }
+
+ DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+ StringRef getName() const { return getStringOperand(1); }
+ StringRef getConfigurationMacros() const { return getStringOperand(2); }
+ StringRef getIncludePath() const { return getStringOperand(3); }
+ StringRef getISysRoot() const { return getStringOperand(4); }
+
+ Metadata *getRawScope() const { return getOperand(0); }
+ MDString *getRawName() const { return getOperandAs<MDString>(1); }
+ MDString *getRawConfigurationMacros() const { return getOperandAs<MDString>(2); }
+ MDString *getRawIncludePath() const { return getOperandAs<MDString>(3); }
+ MDString *getRawISysRoot() const { return getOperandAs<MDString>(4); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIModuleKind;
+ }
+};
+
+/// \brief Base class for template parameters.
+class DITemplateParameter : public DINode {
+protected:
+ DITemplateParameter(LLVMContext &Context, unsigned ID, StorageType Storage,
+ unsigned Tag, ArrayRef<Metadata *> Ops)
+ : DINode(Context, ID, Storage, Tag, Ops) {}
+ ~DITemplateParameter() = default;
+
+public:
+ StringRef getName() const { return getStringOperand(0); }
+ DITypeRef getType() const { return DITypeRef(getRawType()); }
+
+ MDString *getRawName() const { return getOperandAs<MDString>(0); }
+ Metadata *getRawType() const { return getOperand(1); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DITemplateTypeParameterKind ||
+ MD->getMetadataID() == DITemplateValueParameterKind;
+ }
+};
+
+class DITemplateTypeParameter : public DITemplateParameter {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DITemplateTypeParameter(LLVMContext &Context, StorageType Storage,
+ ArrayRef<Metadata *> Ops)
+ : DITemplateParameter(Context, DITemplateTypeParameterKind, Storage,
+ dwarf::DW_TAG_template_type_parameter, Ops) {}
+ ~DITemplateTypeParameter() = default;
+
+ static DITemplateTypeParameter *getImpl(LLVMContext &Context, StringRef Name,
+ DITypeRef Type, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, getCanonicalMDString(Context, Name), Type, Storage,
+ ShouldCreate);
+ }
+ static DITemplateTypeParameter *getImpl(LLVMContext &Context, MDString *Name,
+ Metadata *Type, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDITemplateTypeParameter cloneImpl() const {
+ return getTemporary(getContext(), getName(), getType());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DITemplateTypeParameter, (StringRef Name, DITypeRef Type),
+ (Name, Type))
+ DEFINE_MDNODE_GET(DITemplateTypeParameter, (MDString * Name, Metadata *Type),
+ (Name, Type))
+
+ TempDITemplateTypeParameter clone() const { return cloneImpl(); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DITemplateTypeParameterKind;
+ }
+};
+
+class DITemplateValueParameter : public DITemplateParameter {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ DITemplateValueParameter(LLVMContext &Context, StorageType Storage,
+ unsigned Tag, ArrayRef<Metadata *> Ops)
+ : DITemplateParameter(Context, DITemplateValueParameterKind, Storage, Tag,
+ Ops) {}
+ ~DITemplateValueParameter() = default;
+
+ static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
+ StringRef Name, DITypeRef Type,
+ Metadata *Value, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Tag, getCanonicalMDString(Context, Name), Type,
+ Value, Storage, ShouldCreate);
+ }
+ static DITemplateValueParameter *getImpl(LLVMContext &Context, unsigned Tag,
+ MDString *Name, Metadata *Type,
+ Metadata *Value, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDITemplateValueParameter cloneImpl() const {
+ return getTemporary(getContext(), getTag(), getName(), getType(),
+ getValue());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DITemplateValueParameter, (unsigned Tag, StringRef Name,
+ DITypeRef Type, Metadata *Value),
+ (Tag, Name, Type, Value))
+ DEFINE_MDNODE_GET(DITemplateValueParameter, (unsigned Tag, MDString *Name,
+ Metadata *Type, Metadata *Value),
+ (Tag, Name, Type, Value))
+
+ TempDITemplateValueParameter clone() const { return cloneImpl(); }
+
+ Metadata *getValue() const { return getOperand(2); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DITemplateValueParameterKind;
+ }
+};
+
+/// \brief Base class for variables.
+class DIVariable : public DINode {
+ unsigned Line;
+
+protected:
+ DIVariable(LLVMContext &C, unsigned ID, StorageType Storage, unsigned Line,
+ ArrayRef<Metadata *> Ops)
+ : DINode(C, ID, Storage, dwarf::DW_TAG_variable, Ops), Line(Line) {}
+ ~DIVariable() = default;
+
+public:
+ unsigned getLine() const { return Line; }
+ DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+ StringRef getName() const { return getStringOperand(1); }
+ DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+ DITypeRef getType() const { return DITypeRef(getRawType()); }
+
+ StringRef getFilename() const {
+ if (auto *F = getFile())
+ return F->getFilename();
+ return "";
+ }
+ StringRef getDirectory() const {
+ if (auto *F = getFile())
+ return F->getDirectory();
+ return "";
+ }
+
+ Metadata *getRawScope() const { return getOperand(0); }
+ MDString *getRawName() const { return getOperandAs<MDString>(1); }
+ Metadata *getRawFile() const { return getOperand(2); }
+ Metadata *getRawType() const { return getOperand(3); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILocalVariableKind ||
+ MD->getMetadataID() == DIGlobalVariableKind;
+ }
+};
+
+/// \brief Global variables.
+///
+/// TODO: Remove DisplayName. It's always equal to Name.
+class DIGlobalVariable : public DIVariable {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ bool IsLocalToUnit;
+ bool IsDefinition;
+
+ DIGlobalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+ bool IsLocalToUnit, bool IsDefinition,
+ ArrayRef<Metadata *> Ops)
+ : DIVariable(C, DIGlobalVariableKind, Storage, Line, Ops),
+ IsLocalToUnit(IsLocalToUnit), IsDefinition(IsDefinition) {}
+ ~DIGlobalVariable() = default;
+
+ static DIGlobalVariable *
+ getImpl(LLVMContext &Context, DIScope *Scope, StringRef Name,
+ StringRef LinkageName, DIFile *File, unsigned Line, DITypeRef Type,
+ bool IsLocalToUnit, bool IsDefinition, Constant *Variable,
+ DIDerivedType *StaticDataMemberDeclaration, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ getCanonicalMDString(Context, LinkageName), File, Line, Type,
+ IsLocalToUnit, IsDefinition,
+ Variable ? ConstantAsMetadata::get(Variable) : nullptr,
+ StaticDataMemberDeclaration, Storage, ShouldCreate);
+ }
+ static DIGlobalVariable *
+ getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
+ MDString *LinkageName, Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, Metadata *Variable,
+ Metadata *StaticDataMemberDeclaration, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIGlobalVariable cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
+ getFile(), getLine(), getType(), isLocalToUnit(),
+ isDefinition(), getVariable(),
+ getStaticDataMemberDeclaration());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIGlobalVariable,
+ (DIScope * Scope, StringRef Name, StringRef LinkageName,
+ DIFile *File, unsigned Line, DITypeRef Type,
+ bool IsLocalToUnit, bool IsDefinition, Constant *Variable,
+ DIDerivedType *StaticDataMemberDeclaration),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, Variable, StaticDataMemberDeclaration))
+ DEFINE_MDNODE_GET(DIGlobalVariable,
+ (Metadata * Scope, MDString *Name, MDString *LinkageName,
+ Metadata *File, unsigned Line, Metadata *Type,
+ bool IsLocalToUnit, bool IsDefinition, Metadata *Variable,
+ Metadata *StaticDataMemberDeclaration),
+ (Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
+ IsDefinition, Variable, StaticDataMemberDeclaration))
+
+ TempDIGlobalVariable clone() const { return cloneImpl(); }
+
+ bool isLocalToUnit() const { return IsLocalToUnit; }
+ bool isDefinition() const { return IsDefinition; }
+ StringRef getDisplayName() const { return getStringOperand(4); }
+ StringRef getLinkageName() const { return getStringOperand(5); }
+ Constant *getVariable() const {
+ if (auto *C = cast_or_null<ConstantAsMetadata>(getRawVariable()))
+ return dyn_cast<Constant>(C->getValue());
+ return nullptr;
+ }
+ DIDerivedType *getStaticDataMemberDeclaration() const {
+ return cast_or_null<DIDerivedType>(getRawStaticDataMemberDeclaration());
+ }
+
+ MDString *getRawLinkageName() const { return getOperandAs<MDString>(5); }
+ Metadata *getRawVariable() const { return getOperand(6); }
+ Metadata *getRawStaticDataMemberDeclaration() const { return getOperand(7); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIGlobalVariableKind;
+ }
+};
+
+/// \brief Local variable.
+///
+/// TODO: Split up flags.
+class DILocalVariable : public DIVariable {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Arg;
+ unsigned Flags;
+
+ DILocalVariable(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Arg, unsigned Flags, ArrayRef<Metadata *> Ops)
+ : DIVariable(C, DILocalVariableKind, Storage, Line, Ops), Arg(Arg),
+ Flags(Flags) {}
+ ~DILocalVariable() = default;
+
+ static DILocalVariable *getImpl(LLVMContext &Context, DIScope *Scope,
+ StringRef Name, DIFile *File, unsigned Line,
+ DITypeRef Type, unsigned Arg, unsigned Flags,
+ StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name), File,
+ Line, Type, Arg, Flags, Storage, ShouldCreate);
+ }
+ static DILocalVariable *getImpl(LLVMContext &Context, Metadata *Scope,
+ MDString *Name, Metadata *File, unsigned Line,
+ Metadata *Type, unsigned Arg, unsigned Flags,
+ StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDILocalVariable cloneImpl() const {
+ return getTemporary(getContext(), getScope(), getName(), getFile(),
+ getLine(), getType(), getArg(), getFlags());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DILocalVariable,
+ (DILocalScope * Scope, StringRef Name, DIFile *File,
+ unsigned Line, DITypeRef Type, unsigned Arg,
+ unsigned Flags),
+ (Scope, Name, File, Line, Type, Arg, Flags))
+ DEFINE_MDNODE_GET(DILocalVariable,
+ (Metadata * Scope, MDString *Name, Metadata *File,
+ unsigned Line, Metadata *Type, unsigned Arg,
+ unsigned Flags),
+ (Scope, Name, File, Line, Type, Arg, Flags))
+
+ TempDILocalVariable clone() const { return cloneImpl(); }
+
+ /// \brief Get the local scope for this variable.
+ ///
+ /// Variables must be defined in a local scope.
+ DILocalScope *getScope() const {
+ return cast<DILocalScope>(DIVariable::getScope());
+ }
+
+ bool isParameter() const { return Arg; }
+ unsigned getArg() const { return Arg; }
+ unsigned getFlags() const { return Flags; }
+
+ bool isArtificial() const { return getFlags() & FlagArtificial; }
+ bool isObjectPointer() const { return getFlags() & FlagObjectPointer; }
+
+ /// \brief Check that a location is valid for this variable.
+ ///
+ /// Check that \c DL exists, is in the same subprogram, and has the same
+ /// inlined-at location as \c this. (Otherwise, it's not a valid attachment
+ /// to a \a DbgInfoIntrinsic.)
+ bool isValidLocationForIntrinsic(const DILocation *DL) const {
+ return DL && getScope()->getSubprogram() == DL->getScope()->getSubprogram();
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DILocalVariableKind;
+ }
+};
+
+/// \brief DWARF expression.
+///
+/// This is (almost) a DWARF expression that modifies the location of a
+/// variable (or the location of a single piece of a variable).
+///
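+/// For illustration, a sketch of the element encoding under the current
+/// scheme (operator constants come from llvm/Support/Dwarf.h; the
+/// LLVMContext \c Ctx is assumed):
+/// \code
+///   // Add 4 to the variable's location, then dereference the result.
+///   DIExpression *Expr =
+///       DIExpression::get(Ctx, {dwarf::DW_OP_plus, 4, dwarf::DW_OP_deref});
+/// \endcode
+///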
+/// FIXME: Instead of DW_OP_plus taking an argument, this should use DW_OP_const
+/// and have DW_OP_plus consume the topmost elements on the stack.
+///
+/// TODO: Co-allocate the expression elements.
+/// TODO: Separate from MDNode, or otherwise drop Distinct and Temporary
+/// storage types.
+class DIExpression : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ std::vector<uint64_t> Elements;
+
+ DIExpression(LLVMContext &C, StorageType Storage, ArrayRef<uint64_t> Elements)
+ : MDNode(C, DIExpressionKind, Storage, None),
+ Elements(Elements.begin(), Elements.end()) {}
+ ~DIExpression() = default;
+
+ static DIExpression *getImpl(LLVMContext &Context,
+ ArrayRef<uint64_t> Elements, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIExpression cloneImpl() const {
+ return getTemporary(getContext(), getElements());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIExpression, (ArrayRef<uint64_t> Elements), (Elements))
+
+ TempDIExpression clone() const { return cloneImpl(); }
+
+ ArrayRef<uint64_t> getElements() const { return Elements; }
+
+ unsigned getNumElements() const { return Elements.size(); }
+ uint64_t getElement(unsigned I) const {
+ assert(I < Elements.size() && "Index out of range");
+ return Elements[I];
+ }
+
+ /// \brief Return whether this is a piece of an aggregate variable.
+ bool isBitPiece() const;
+
+ /// \brief Return the offset of this piece in bits.
+ uint64_t getBitPieceOffset() const;
+
+ /// \brief Return the size of this piece in bits.
+ uint64_t getBitPieceSize() const;
+
+ typedef ArrayRef<uint64_t>::iterator element_iterator;
+ element_iterator elements_begin() const { return getElements().begin(); }
+ element_iterator elements_end() const { return getElements().end(); }
+
+ /// \brief A lightweight wrapper around an expression operand.
+ ///
+ /// TODO: Store arguments directly and change \a DIExpression to store a
+ /// range of these.
+ class ExprOperand {
+ const uint64_t *Op;
+
+ public:
+ explicit ExprOperand(const uint64_t *Op) : Op(Op) {}
+
+ const uint64_t *get() const { return Op; }
+
+ /// \brief Get the operand code.
+ uint64_t getOp() const { return *Op; }
+
+ /// \brief Get an argument to the operand.
+ ///
+ /// Never returns the operand itself.
+ uint64_t getArg(unsigned I) const { return Op[I + 1]; }
+
+ unsigned getNumArgs() const { return getSize() - 1; }
+
+ /// \brief Return the size of the operand.
+ ///
+ /// Return the number of elements in the operand (1 + args).
+ unsigned getSize() const;
+ };
+
+ /// \brief An iterator for expression operands.
+ class expr_op_iterator
+ : public std::iterator<std::input_iterator_tag, ExprOperand> {
+ ExprOperand Op;
+
+ public:
+ explicit expr_op_iterator(element_iterator I) : Op(I) {}
+
+ element_iterator getBase() const { return Op.get(); }
+ const ExprOperand &operator*() const { return Op; }
+ const ExprOperand *operator->() const { return &Op; }
+
+ expr_op_iterator &operator++() {
+ increment();
+ return *this;
+ }
+ expr_op_iterator operator++(int) {
+ expr_op_iterator T(*this);
+ increment();
+ return T;
+ }
+
+ /// \brief Get the next iterator.
+ ///
+ /// \a std::next() doesn't work because this is technically an
+ /// input_iterator, but it's a perfectly valid operation. This is an
+ /// accessor to provide the same functionality.
+ expr_op_iterator getNext() const { return ++expr_op_iterator(*this); }
+
+ bool operator==(const expr_op_iterator &X) const {
+ return getBase() == X.getBase();
+ }
+ bool operator!=(const expr_op_iterator &X) const {
+ return getBase() != X.getBase();
+ }
+
+ private:
+ void increment() { Op = ExprOperand(getBase() + Op.getSize()); }
+ };
+
+ /// \brief Visit the elements via ExprOperand wrappers.
+ ///
+ /// These range iterators visit elements through \a ExprOperand wrappers.
+ /// This is not guaranteed to be a valid range unless \a isValid() gives \c
+ /// true.
+ ///
+ /// \pre \a isValid() gives \c true.
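+  ///
+  /// For example, a sketch that counts operand arguments (\c Expr is assumed
+  /// to be a valid \a DIExpression):
+  /// \code
+  ///   unsigned NumArgs = 0;
+  ///   for (auto I = Expr->expr_op_begin(), E = Expr->expr_op_end(); I != E;
+  ///        ++I)
+  ///     NumArgs += I->getNumArgs();
+  /// \endcode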
+ /// @{
+ expr_op_iterator expr_op_begin() const {
+ return expr_op_iterator(elements_begin());
+ }
+ expr_op_iterator expr_op_end() const {
+ return expr_op_iterator(elements_end());
+ }
+ /// @}
+
+ bool isValid() const;
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIExpressionKind;
+ }
+};
+
+class DIObjCProperty : public DINode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+ unsigned Attributes;
+
+ DIObjCProperty(LLVMContext &C, StorageType Storage, unsigned Line,
+ unsigned Attributes, ArrayRef<Metadata *> Ops)
+ : DINode(C, DIObjCPropertyKind, Storage, dwarf::DW_TAG_APPLE_property,
+ Ops),
+ Line(Line), Attributes(Attributes) {}
+ ~DIObjCProperty() = default;
+
+ static DIObjCProperty *
+ getImpl(LLVMContext &Context, StringRef Name, DIFile *File, unsigned Line,
+ StringRef GetterName, StringRef SetterName, unsigned Attributes,
+ DITypeRef Type, StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, getCanonicalMDString(Context, Name), File, Line,
+ getCanonicalMDString(Context, GetterName),
+ getCanonicalMDString(Context, SetterName), Attributes, Type,
+ Storage, ShouldCreate);
+ }
+ static DIObjCProperty *getImpl(LLVMContext &Context, MDString *Name,
+ Metadata *File, unsigned Line,
+ MDString *GetterName, MDString *SetterName,
+ unsigned Attributes, Metadata *Type,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIObjCProperty cloneImpl() const {
+ return getTemporary(getContext(), getName(), getFile(), getLine(),
+ getGetterName(), getSetterName(), getAttributes(),
+ getType());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIObjCProperty,
+ (StringRef Name, DIFile *File, unsigned Line,
+ StringRef GetterName, StringRef SetterName,
+ unsigned Attributes, DITypeRef Type),
+ (Name, File, Line, GetterName, SetterName, Attributes,
+ Type))
+ DEFINE_MDNODE_GET(DIObjCProperty,
+ (MDString * Name, Metadata *File, unsigned Line,
+ MDString *GetterName, MDString *SetterName,
+ unsigned Attributes, Metadata *Type),
+ (Name, File, Line, GetterName, SetterName, Attributes,
+ Type))
+
+ TempDIObjCProperty clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+ unsigned getAttributes() const { return Attributes; }
+ StringRef getName() const { return getStringOperand(0); }
+ DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+ StringRef getGetterName() const { return getStringOperand(2); }
+ StringRef getSetterName() const { return getStringOperand(3); }
+ DITypeRef getType() const { return DITypeRef(getRawType()); }
+
+ StringRef getFilename() const {
+ if (auto *F = getFile())
+ return F->getFilename();
+ return "";
+ }
+ StringRef getDirectory() const {
+ if (auto *F = getFile())
+ return F->getDirectory();
+ return "";
+ }
+
+ MDString *getRawName() const { return getOperandAs<MDString>(0); }
+ Metadata *getRawFile() const { return getOperand(1); }
+ MDString *getRawGetterName() const { return getOperandAs<MDString>(2); }
+ MDString *getRawSetterName() const { return getOperandAs<MDString>(3); }
+ Metadata *getRawType() const { return getOperand(4); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIObjCPropertyKind;
+ }
+};
+
+/// \brief An imported module (C++ using directive or similar).
+class DIImportedEntity : public DINode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+
+ DIImportedEntity(LLVMContext &C, StorageType Storage, unsigned Tag,
+ unsigned Line, ArrayRef<Metadata *> Ops)
+ : DINode(C, DIImportedEntityKind, Storage, Tag, Ops), Line(Line) {}
+ ~DIImportedEntity() = default;
+
+ static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
+ DIScope *Scope, DINodeRef Entity,
+ unsigned Line, StringRef Name,
+ StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, Tag, Scope, Entity, Line,
+ getCanonicalMDString(Context, Name), Storage, ShouldCreate);
+ }
+ static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
+ Metadata *Scope, Metadata *Entity,
+ unsigned Line, MDString *Name,
+ StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIImportedEntity cloneImpl() const {
+ return getTemporary(getContext(), getTag(), getScope(), getEntity(),
+ getLine(), getName());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIImportedEntity,
+ (unsigned Tag, DIScope *Scope, DINodeRef Entity,
+ unsigned Line, StringRef Name = ""),
+ (Tag, Scope, Entity, Line, Name))
+ DEFINE_MDNODE_GET(DIImportedEntity,
+ (unsigned Tag, Metadata *Scope, Metadata *Entity,
+ unsigned Line, MDString *Name),
+ (Tag, Scope, Entity, Line, Name))
+
+ TempDIImportedEntity clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+ DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
+ DINodeRef getEntity() const { return DINodeRef(getRawEntity()); }
+ StringRef getName() const { return getStringOperand(2); }
+
+ Metadata *getRawScope() const { return getOperand(0); }
+ Metadata *getRawEntity() const { return getOperand(1); }
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIImportedEntityKind;
+ }
+};
+
+/// \brief Macro Info DWARF-like metadata node.
+///
+/// A metadata node with a DWARF macro info (i.e., a constant named
+/// \c DW_MACINFO_*, defined in llvm/Support/Dwarf.h). Called \a DIMacroNode
+/// because it's potentially used for non-DWARF output.
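+///
+/// For illustration, a sketch of creating a \c #define entry via the \a
+/// DIMacro subclass below (the LLVMContext \c Ctx is assumed):
+/// \code
+///   DIMacro *M = DIMacro::get(Ctx, dwarf::DW_MACINFO_define, /*Line=*/7,
+///                             "VERSION", "3");
+/// \endcode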
+class DIMacroNode : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+protected:
+ DIMacroNode(LLVMContext &C, unsigned ID, StorageType Storage, unsigned MIType,
+ ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None)
+ : MDNode(C, ID, Storage, Ops1, Ops2) {
+ assert(MIType < 1u << 16);
+ SubclassData16 = MIType;
+ }
+ ~DIMacroNode() = default;
+
+ template <class Ty> Ty *getOperandAs(unsigned I) const {
+ return cast_or_null<Ty>(getOperand(I));
+ }
+
+ StringRef getStringOperand(unsigned I) const {
+ if (auto *S = getOperandAs<MDString>(I))
+ return S->getString();
+ return StringRef();
+ }
+
+ static MDString *getCanonicalMDString(LLVMContext &Context, StringRef S) {
+ if (S.empty())
+ return nullptr;
+ return MDString::get(Context, S);
+ }
+
+public:
+ unsigned getMacinfoType() const { return SubclassData16; }
+
+ static bool classof(const Metadata *MD) {
+ switch (MD->getMetadataID()) {
+ default:
+ return false;
+ case DIMacroKind:
+ case DIMacroFileKind:
+ return true;
+ }
+ }
+};
+
+class DIMacro : public DIMacroNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+
+ DIMacro(LLVMContext &C, StorageType Storage, unsigned MIType, unsigned Line,
+ ArrayRef<Metadata *> Ops)
+ : DIMacroNode(C, DIMacroKind, Storage, MIType, Ops), Line(Line) {}
+ ~DIMacro() = default;
+
+ static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
+ StringRef Name, StringRef Value, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, MIType, Line, getCanonicalMDString(Context, Name),
+ getCanonicalMDString(Context, Value), Storage, ShouldCreate);
+ }
+ static DIMacro *getImpl(LLVMContext &Context, unsigned MIType, unsigned Line,
+ MDString *Name, MDString *Value, StorageType Storage,
+ bool ShouldCreate = true);
+
+ TempDIMacro cloneImpl() const {
+ return getTemporary(getContext(), getMacinfoType(), getLine(), getName(),
+ getValue());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIMacro, (unsigned MIType, unsigned Line, StringRef Name,
+ StringRef Value = ""),
+ (MIType, Line, Name, Value))
+ DEFINE_MDNODE_GET(DIMacro, (unsigned MIType, unsigned Line, MDString *Name,
+ MDString *Value),
+ (MIType, Line, Name, Value))
+
+ TempDIMacro clone() const { return cloneImpl(); }
+
+ unsigned getLine() const { return Line; }
+
+ StringRef getName() const { return getStringOperand(0); }
+ StringRef getValue() const { return getStringOperand(1); }
+
+ MDString *getRawName() const { return getOperandAs<MDString>(0); }
+ MDString *getRawValue() const { return getOperandAs<MDString>(1); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIMacroKind;
+ }
+};
+
+class DIMacroFile : public DIMacroNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ unsigned Line;
+
+ DIMacroFile(LLVMContext &C, StorageType Storage, unsigned MIType,
+ unsigned Line, ArrayRef<Metadata *> Ops)
+ : DIMacroNode(C, DIMacroFileKind, Storage, MIType, Ops), Line(Line) {}
+ ~DIMacroFile() = default;
+
+ static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
+ unsigned Line, DIFile *File,
+ DIMacroNodeArray Elements, StorageType Storage,
+ bool ShouldCreate = true) {
+ return getImpl(Context, MIType, Line, static_cast<Metadata *>(File),
+ Elements.get(), Storage, ShouldCreate);
+ }
+
+ static DIMacroFile *getImpl(LLVMContext &Context, unsigned MIType,
+ unsigned Line, Metadata *File, Metadata *Elements,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempDIMacroFile cloneImpl() const {
+ return getTemporary(getContext(), getMacinfoType(), getLine(), getFile(),
+ getElements());
+ }
+
+public:
+ DEFINE_MDNODE_GET(DIMacroFile, (unsigned MIType, unsigned Line, DIFile *File,
+ DIMacroNodeArray Elements),
+ (MIType, Line, File, Elements))
+ DEFINE_MDNODE_GET(DIMacroFile, (unsigned MIType, unsigned Line,
+ Metadata *File, Metadata *Elements),
+ (MIType, Line, File, Elements))
+
+ TempDIMacroFile clone() const { return cloneImpl(); }
+
+ void replaceElements(DIMacroNodeArray Elements) {
+#ifndef NDEBUG
+ for (DIMacroNode *Op : getElements())
+      assert(std::find(Elements->op_begin(), Elements->op_end(), Op) !=
+                 Elements->op_end() &&
+             "Lost a macro node during macro node list replacement");
+#endif
+ replaceOperandWith(1, Elements.get());
+ }
+
+ unsigned getLine() const { return Line; }
+ DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
+
+ DIMacroNodeArray getElements() const {
+ return cast_or_null<MDTuple>(getRawElements());
+ }
+
+ Metadata *getRawFile() const { return getOperand(0); }
+ Metadata *getRawElements() const { return getOperand(1); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == DIMacroFileKind;
+ }
+};
+
+} // end namespace llvm
+
+#undef DEFINE_MDNODE_GET_UNPACK_IMPL
+#undef DEFINE_MDNODE_GET_UNPACK
+#undef DEFINE_MDNODE_GET
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DebugLoc.h b/contrib/llvm/include/llvm/IR/DebugLoc.h
new file mode 100644
index 0000000..8ea5875
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DebugLoc.h
@@ -0,0 +1,126 @@
+//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a number of lightweight data structures used
+// to describe and track debug location information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DEBUGLOC_H
+#define LLVM_IR_DEBUGLOC_H
+
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+ class LLVMContext;
+ class raw_ostream;
+ class DILocation;
+
+ /// \brief A debug info location.
+ ///
+  /// This class is a wrapper around a tracking reference to a \a DILocation
+  /// pointer.
+  ///
+  /// To avoid extra includes, \a DebugLoc doubles the \a DILocation API with
+  /// one based on relatively opaque \a MDNode pointers.
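+  ///
+  /// For illustration, a minimal usage sketch (\c Scope is assumed to be a
+  /// scope MDNode, and \c dbgs() comes from llvm/Support/Debug.h; \a
+  /// DebugLoc::get forwards to \a DILocation::get()):
+  /// \code
+  ///   DebugLoc DL = DebugLoc::get(/*Line=*/7, /*Col=*/3, Scope);
+  ///   if (DL)
+  ///     dbgs() << DL.getLine() << ":" << DL.getCol() << "\n";
+  /// \endcode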
+ class DebugLoc {
+ TrackingMDNodeRef Loc;
+
+ public:
+ DebugLoc() {}
+ DebugLoc(DebugLoc &&X) : Loc(std::move(X.Loc)) {}
+ DebugLoc(const DebugLoc &X) : Loc(X.Loc) {}
+ DebugLoc &operator=(DebugLoc &&X) {
+ Loc = std::move(X.Loc);
+ return *this;
+ }
+ DebugLoc &operator=(const DebugLoc &X) {
+ Loc = X.Loc;
+ return *this;
+ }
+
+    /// \brief Construct from a \a DILocation.
+ DebugLoc(const DILocation *L);
+
+ /// \brief Construct from an \a MDNode.
+ ///
+    /// Note: if \c N is not a \a DILocation, a verifier check will fail, and
+ /// accessors will crash. However, construction from other nodes is
+ /// supported in order to handle forward references when reading textual
+ /// IR.
+ explicit DebugLoc(const MDNode *N);
+
+ /// \brief Get the underlying \a DILocation.
+ ///
+ /// \pre !*this or \c isa<DILocation>(getAsMDNode()).
+ /// @{
+ DILocation *get() const;
+ operator DILocation *() const { return get(); }
+ DILocation *operator->() const { return get(); }
+ DILocation &operator*() const { return *get(); }
+ /// @}
+
+ /// \brief Check for null.
+ ///
+ /// Check for null in a way that is safe with broken debug info. Unlike
+ /// the conversion to \c DILocation, this doesn't require that \c Loc is of
+ /// the right type. Important for cases like \a llvm::StripDebugInfo() and
+ /// \a Instruction::hasMetadata().
+ explicit operator bool() const { return Loc; }
+
+ /// \brief Check whether this has a trivial destructor.
+ bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); }
+
+ /// \brief Create a new DebugLoc.
+ ///
+ /// Create a new DebugLoc at the specified line/col and scope/inline. This
+ /// forwards to \a DILocation::get().
+ ///
+ /// If \c !Scope, returns a default-constructed \a DebugLoc.
+ ///
+ /// FIXME: Remove this. Users should use DILocation::get().
+ static DebugLoc get(unsigned Line, unsigned Col, const MDNode *Scope,
+ const MDNode *InlinedAt = nullptr);
+
+ unsigned getLine() const;
+ unsigned getCol() const;
+ MDNode *getScope() const;
+ DILocation *getInlinedAt() const;
+
+    /// \brief Get the fully inlined-at scope for a DebugLoc.
+    ///
+    /// Walks the \a getInlinedAt() chain and returns the scope of the
+    /// deepest location.
+ MDNode *getInlinedAtScope() const;
+
+ /// \brief Find the debug info location for the start of the function.
+ ///
+ /// Walk up the scope chain of given debug loc and find line number info
+ /// for the function.
+ ///
+ /// FIXME: Remove this. Users should use DILocation/DILocalScope API to
+ /// find the subprogram, and then DILocation::get().
+ DebugLoc getFnDebugLoc() const;
+
+    /// \brief Return \c this as a bare \a MDNode.
+ MDNode *getAsMDNode() const { return Loc; }
+
+ bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
+ bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }
+
+ void dump() const;
+
+    /// \brief Prints the source location /path/to/file.exe:line:col @[inlined at].
+ void print(raw_ostream &OS) const;
+ };
+
+} // end namespace llvm
+
+#endif /* LLVM_IR_DEBUGLOC_H */
diff --git a/contrib/llvm/include/llvm/IR/DerivedTypes.h b/contrib/llvm/include/llvm/IR/DerivedTypes.h
new file mode 100644
index 0000000..071e69b
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DerivedTypes.h
@@ -0,0 +1,522 @@
+//===-- llvm/DerivedTypes.h - Classes for handling data types ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of classes that represent "derived
+// types". These are things like "arrays of x" or "structure of x, y, z" or
+// "function returning x taking (y,z) as parameters", etc...
+//
+// The implementations of these classes live in the Type.cpp file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DERIVEDTYPES_H
+#define LLVM_IR_DERIVEDTYPES_H
+
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class Value;
+class APInt;
+class LLVMContext;
+template<typename T> class ArrayRef;
+class StringRef;
+
+/// Class to represent integer types. Note that this class is also used to
+/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
+/// Int64Ty.
+/// @brief Integer representation type
+class IntegerType : public Type {
+ friend class LLVMContextImpl;
+
+protected:
+  explicit IntegerType(LLVMContext &C, unsigned NumBits) : Type(C, IntegerTyID) {
+ setSubclassData(NumBits);
+ }
+
+public:
+ /// This enum is just used to hold constants we need for IntegerType.
+ enum {
+ MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified
+ MAX_INT_BITS = (1<<23)-1 ///< Maximum number of bits that can be specified
+ ///< Note that bit width is stored in the Type classes SubclassData field
+ ///< which has 23 bits. This yields a maximum bit width of 8,388,607 bits.
+ };
+
+ /// This static method is the primary way of constructing an IntegerType.
+ /// If an IntegerType with the same NumBits value was previously instantiated,
+ /// that instance will be returned. Otherwise a new one will be created. Only
+ /// one instance with a given NumBits value is ever created.
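+  ///
+  /// For example (a sketch; \c Ctx is some LLVMContext):
+  /// \code
+  ///   IntegerType *I32 = IntegerType::get(Ctx, 32); // the i32 type
+  ///   assert(I32 == IntegerType::get(Ctx, 32) && "instances are uniqued");
+  /// \endcode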
+ /// @brief Get or create an IntegerType instance.
+ static IntegerType *get(LLVMContext &C, unsigned NumBits);
+
+ /// @brief Get the number of bits in this IntegerType
+ unsigned getBitWidth() const { return getSubclassData(); }
+
+ /// getBitMask - Return a bitmask with ones set for all of the bits
+ /// that can be set by an unsigned version of this type. This is 0xFF for
+ /// i8, 0xFFFF for i16, etc.
+ uint64_t getBitMask() const {
+ return ~uint64_t(0UL) >> (64-getBitWidth());
+ }
+
+ /// getSignBit - Return a uint64_t with just the most significant bit set (the
+ /// sign bit, if the value is treated as a signed number).
+ uint64_t getSignBit() const {
+ return 1ULL << (getBitWidth()-1);
+ }
+
+ /// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
+ /// @returns a bit mask with ones set for all the bits of this type.
+ /// @brief Get a bit mask for this type.
+ APInt getMask() const;
+
+  /// This method determines if the width of this IntegerType is a power of 2
+  /// in terms of 8-bit bytes.
+  /// @returns true if this is a power-of-2 byte width.
+  /// @brief Is this a power-of-2 byte-width IntegerType?
+ bool isPowerOf2ByteWidth() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == IntegerTyID;
+ }
+};
+
+unsigned Type::getIntegerBitWidth() const {
+ return cast<IntegerType>(this)->getBitWidth();
+}
+
+/// FunctionType - Class to represent function types
+///
+class FunctionType : public Type {
+ FunctionType(const FunctionType &) = delete;
+ const FunctionType &operator=(const FunctionType &) = delete;
+ FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
+
+public:
+ /// FunctionType::get - This static method is the primary way of constructing
+ /// a FunctionType.
+ ///
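+  /// For example, a sketch building the type \c i32 (i8*, ...) (assuming an
+  /// LLVMContext \c Ctx):
+  /// \code
+  ///   Type *ArgTys[] = {Type::getInt8PtrTy(Ctx)};
+  ///   FunctionType *FTy =
+  ///       FunctionType::get(Type::getInt32Ty(Ctx), ArgTys, /*isVarArg=*/true);
+  /// \endcode
+  ///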
+ static FunctionType *get(Type *Result,
+ ArrayRef<Type*> Params, bool isVarArg);
+
+ /// FunctionType::get - Create a FunctionType taking no parameters.
+ ///
+ static FunctionType *get(Type *Result, bool isVarArg);
+
+ /// isValidReturnType - Return true if the specified type is valid as a return
+ /// type.
+ static bool isValidReturnType(Type *RetTy);
+
+ /// isValidArgumentType - Return true if the specified type is valid as an
+ /// argument type.
+ static bool isValidArgumentType(Type *ArgTy);
+
+ bool isVarArg() const { return getSubclassData()!=0; }
+ Type *getReturnType() const { return ContainedTys[0]; }
+
+ typedef Type::subtype_iterator param_iterator;
+ param_iterator param_begin() const { return ContainedTys + 1; }
+ param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
+ ArrayRef<Type *> params() const {
+ return makeArrayRef(param_begin(), param_end());
+ }
+
+ /// Parameter type accessors.
+ Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }
+
+ /// getNumParams - Return the number of fixed parameters this function type
+ /// requires. This does not consider varargs.
+ ///
+ unsigned getNumParams() const { return NumContainedTys - 1; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == FunctionTyID;
+ }
+};
+static_assert(AlignOf<FunctionType>::Alignment >= AlignOf<Type *>::Alignment,
+ "Alignment sufficient for objects appended to FunctionType");
+
+bool Type::isFunctionVarArg() const {
+ return cast<FunctionType>(this)->isVarArg();
+}
+
+Type *Type::getFunctionParamType(unsigned i) const {
+ return cast<FunctionType>(this)->getParamType(i);
+}
+
+unsigned Type::getFunctionNumParams() const {
+ return cast<FunctionType>(this)->getNumParams();
+}
+
+/// CompositeType - Common super class of ArrayType, StructType, PointerType
+/// and VectorType.
+class CompositeType : public Type {
+protected:
+ explicit CompositeType(LLVMContext &C, TypeID tid) : Type(C, tid) {}
+
+public:
+ /// getTypeAtIndex - Given an index value into the type, return the type of
+ /// the element.
+ ///
+ Type *getTypeAtIndex(const Value *V) const;
+ Type *getTypeAtIndex(unsigned Idx) const;
+ bool indexValid(const Value *V) const;
+ bool indexValid(unsigned Idx) const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == ArrayTyID ||
+ T->getTypeID() == StructTyID ||
+ T->getTypeID() == PointerTyID ||
+ T->getTypeID() == VectorTyID;
+ }
+};
+
+/// StructType - Class to represent struct types. There are two different kinds
+/// of struct types: Literal structs and Identified structs.
+///
+/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
+/// always have a body when created. You can get one of these by using one of
+/// the StructType::get() forms.
+///
+/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
+/// uniqued. The names for identified structs are managed at the LLVMContext
+/// level, so there can only be a single identified struct with a given name in
+/// a particular LLVMContext. Identified structs may also optionally be opaque
+/// (have no body specified). You get one of these by using one of the
+/// StructType::create() forms.
+///
+/// Independent of what kind of struct you have, its body is laid out in
+/// memory consecutively with the elements directly one after the other (if
+/// the struct is packed) or (if not packed) with padding between the elements
+/// as defined by DataLayout (which is required to match what the code
+/// generator for a target expects).
+///
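+/// For illustration, a sketch of creating both kinds (assuming an LLVMContext
+/// \c Ctx):
+/// \code
+///   Type *Elts[] = {Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx)};
+///   // Literal struct: uniqued structurally.
+///   StructType *Pair = StructType::get(Ctx, Elts);
+///   // Identified struct: created (opaque) and named, then given a body.
+///   StructType *Node = StructType::create(Ctx, "foo");
+///   Node->setBody(Elts, /*isPacked=*/false);
+/// \endcode
+///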
+class StructType : public CompositeType {
+ StructType(const StructType &) = delete;
+ const StructType &operator=(const StructType &) = delete;
+ StructType(LLVMContext &C)
+ : CompositeType(C, StructTyID), SymbolTableEntry(nullptr) {}
+ enum {
+ /// This is the contents of the SubClassData field.
+ SCDB_HasBody = 1,
+ SCDB_Packed = 2,
+ SCDB_IsLiteral = 4,
+ SCDB_IsSized = 8
+ };
+
+ /// SymbolTableEntry - For a named struct that actually has a name, this is a
+ /// pointer to the symbol table entry (maintained by LLVMContext) for the
+  /// struct. This is null if the type is a literal struct or if it is an
+  /// identified type that has an empty name.
+ ///
+ void *SymbolTableEntry;
+
+public:
+ /// StructType::create - This creates an identified struct.
+ static StructType *create(LLVMContext &Context, StringRef Name);
+ static StructType *create(LLVMContext &Context);
+
+ static StructType *create(ArrayRef<Type *> Elements, StringRef Name,
+ bool isPacked = false);
+ static StructType *create(ArrayRef<Type *> Elements);
+ static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements,
+ StringRef Name, bool isPacked = false);
+ static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
+ static StructType *create(StringRef Name, Type *elt1, ...) LLVM_END_WITH_NULL;
+
+ /// StructType::get - This static method is the primary way to create a
+ /// literal StructType.
+ static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
+ bool isPacked = false);
+
+ /// StructType::get - Create an empty structure type.
+ ///
+ static StructType *get(LLVMContext &Context, bool isPacked = false);
+
+ /// StructType::get - This static method is a convenience method for creating
+ /// structure types by specifying the elements as arguments. Note that this
+ /// method always returns a non-packed struct, and requires at least one
+ /// element type.
+ static StructType *get(Type *elt1, ...) LLVM_END_WITH_NULL;
+
+ bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }
+
+ /// isLiteral - Return true if this type is uniqued by structural
+ /// equivalence, false if it is a struct definition.
+ bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }
+
+ /// isOpaque - Return true if this is a type with an identity that has no body
+  /// specified yet. These print as 'opaque' in .ll files.
+ bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
+
+ /// isSized - Return true if this is a sized type.
+ bool isSized(SmallPtrSetImpl<Type *> *Visited = nullptr) const;
+
+ /// hasName - Return true if this is a named struct that has a non-empty name.
+ bool hasName() const { return SymbolTableEntry != nullptr; }
+
+ /// getName - Return the name for this struct type if it has an identity.
+ /// This may return an empty string for an unnamed struct type. Do not call
+  /// this on a literal type.
+ StringRef getName() const;
+
+ /// setName - Change the name of this type to the specified name, or to a name
+  /// with a suffix if there is a collision. Do not call this on a literal
+  /// type.
+ void setName(StringRef Name);
+
+ /// setBody - Specify a body for an opaque identified type.
+ void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
+ void setBody(Type *elt1, ...) LLVM_END_WITH_NULL;
+
+  /// isValidElementType - Return true if the specified type is valid as an
+  /// element type.
+ static bool isValidElementType(Type *ElemTy);
+
+ // Iterator access to the elements.
+ typedef Type::subtype_iterator element_iterator;
+ element_iterator element_begin() const { return ContainedTys; }
+ element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
+ ArrayRef<Type *> const elements() const {
+ return makeArrayRef(element_begin(), element_end());
+ }
+
+ /// isLayoutIdentical - Return true if this is layout identical to the
+ /// specified struct.
+ bool isLayoutIdentical(StructType *Other) const;
+
+ /// Random access to the elements
+ unsigned getNumElements() const { return NumContainedTys; }
+ Type *getElementType(unsigned N) const {
+ assert(N < NumContainedTys && "Element number out of range!");
+ return ContainedTys[N];
+ }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == StructTyID;
+ }
+};
+
+StringRef Type::getStructName() const {
+ return cast<StructType>(this)->getName();
+}
+
+unsigned Type::getStructNumElements() const {
+ return cast<StructType>(this)->getNumElements();
+}
+
+Type *Type::getStructElementType(unsigned N) const {
+ return cast<StructType>(this)->getElementType(N);
+}
+
+/// SequentialType - This is the superclass of the array, pointer and vector
+/// type classes. All of these represent "arrays" in memory. The array type
+/// represents a specifically sized array, pointer types are unsized/unknown
+/// size arrays, vector types represent specifically sized arrays that
+/// allow for use of SIMD instructions. SequentialType holds the common
+/// features of all, which stem from the fact that all three lay their
+/// components out in memory identically.
+///
+class SequentialType : public CompositeType {
+ Type *ContainedType; ///< Storage for the single contained type.
+ SequentialType(const SequentialType &) = delete;
+ const SequentialType &operator=(const SequentialType &) = delete;
+
+protected:
+ SequentialType(TypeID TID, Type *ElType)
+ : CompositeType(ElType->getContext(), TID), ContainedType(ElType) {
+ ContainedTys = &ContainedType;
+ NumContainedTys = 1;
+ }
+
+public:
+ Type *getElementType() const { return ContainedTys[0]; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == ArrayTyID ||
+ T->getTypeID() == PointerTyID ||
+ T->getTypeID() == VectorTyID;
+ }
+};
+
+Type *Type::getSequentialElementType() const {
+ return cast<SequentialType>(this)->getElementType();
+}
+
+/// ArrayType - Class to represent array types.
+///
+class ArrayType : public SequentialType {
+ uint64_t NumElements;
+
+ ArrayType(const ArrayType &) = delete;
+ const ArrayType &operator=(const ArrayType &) = delete;
+ ArrayType(Type *ElType, uint64_t NumEl);
+
+public:
+ /// ArrayType::get - This static method is the primary way to construct an
+  /// ArrayType.
+ ///
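+  /// For example, \c [10 x i32] (a sketch; \c Ctx is some LLVMContext):
+  /// \code
+  ///   ArrayType *ATy = ArrayType::get(Type::getInt32Ty(Ctx), 10);
+  /// \endcode
+  ///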
+ static ArrayType *get(Type *ElementType, uint64_t NumElements);
+
+  /// isValidElementType - Return true if the specified type is valid as an
+  /// element type.
+ static bool isValidElementType(Type *ElemTy);
+
+ uint64_t getNumElements() const { return NumElements; }
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == ArrayTyID;
+ }
+};
+
+uint64_t Type::getArrayNumElements() const {
+ return cast<ArrayType>(this)->getNumElements();
+}
+
+/// VectorType - Class to represent vector types.
+///
+class VectorType : public SequentialType {
+ unsigned NumElements;
+
+ VectorType(const VectorType &) = delete;
+ const VectorType &operator=(const VectorType &) = delete;
+ VectorType(Type *ElType, unsigned NumEl);
+
+public:
+  /// VectorType::get - This static method is the primary way to construct a
+  /// VectorType.
+ ///
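+  /// For example, \c <4 x float> (a sketch; \c Ctx is some LLVMContext):
+  /// \code
+  ///   VectorType *V4F = VectorType::get(Type::getFloatTy(Ctx), 4);
+  /// \endcode
+  ///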
+ static VectorType *get(Type *ElementType, unsigned NumElements);
+
+ /// VectorType::getInteger - This static method gets a VectorType with the
+ /// same number of elements as the input type, and the element type is an
+ /// integer type of the same width as the input element type.
+ ///
+ static VectorType *getInteger(VectorType *VTy) {
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ assert(EltBits && "Element size must be of a non-zero size");
+ Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
+ return VectorType::get(EltTy, VTy->getNumElements());
+ }
+
+ /// VectorType::getExtendedElementVectorType - This static method is like
+ /// getInteger except that the element types are twice as wide as the
+ /// elements in the input type.
+ ///
+ static VectorType *getExtendedElementVectorType(VectorType *VTy) {
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ Type *EltTy = IntegerType::get(VTy->getContext(), EltBits * 2);
+ return VectorType::get(EltTy, VTy->getNumElements());
+ }
+
+ /// VectorType::getTruncatedElementVectorType - This static method is like
+ /// getInteger except that the element types are half as wide as the
+ /// elements in the input type.
+ ///
+ static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ assert((EltBits & 1) == 0 &&
+ "Cannot truncate vector element with odd bit-width");
+ Type *EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
+ return VectorType::get(EltTy, VTy->getNumElements());
+ }
+
+ /// VectorType::getHalfElementsVectorType - This static method returns
+ /// a VectorType with half as many elements as the input type and the
+ /// same element type.
+ ///
+ static VectorType *getHalfElementsVectorType(VectorType *VTy) {
+ unsigned NumElts = VTy->getNumElements();
+ assert((NumElts & 1) == 0 &&
+ "Cannot halve vector with odd number of elements.");
+ return VectorType::get(VTy->getElementType(), NumElts / 2);
+ }
+
+ /// VectorType::getDoubleElementsVectorType - This static method returns
+ /// a VectorType with twice as many elements as the input type and the
+ /// same element type.
+ ///
+ static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
+ unsigned NumElts = VTy->getNumElements();
+ return VectorType::get(VTy->getElementType(), NumElts * 2);
+ }
+
+ /// isValidElementType - Return true if the specified type is valid as an
+ /// element type.
+ static bool isValidElementType(Type *ElemTy);
+
+ /// @brief Return the number of elements in the Vector type.
+ unsigned getNumElements() const { return NumElements; }
+
+ /// @brief Return the number of bits in the Vector type.
+ /// Returns zero when the vector is a vector of pointers.
+ unsigned getBitWidth() const {
+ return NumElements * getElementType()->getPrimitiveSizeInBits();
+ }
+
+ /// Methods to support type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == VectorTyID;
+ }
+};
+
+unsigned Type::getVectorNumElements() const {
+ return cast<VectorType>(this)->getNumElements();
+}
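+
+// A minimal sketch of the shape-changing helpers above, assuming an
+// LLVMContext `Ctx` supplied by the caller:
+//
+//   VectorType *V4F = VectorType::get(Type::getFloatTy(Ctx), 4);     // <4 x float>
+//   VectorType *V4I = VectorType::getInteger(V4F);                   // <4 x i32>
+//   VectorType *V4L = VectorType::getExtendedElementVectorType(V4I); // <4 x i64>
+//   VectorType *V2F = VectorType::getHalfElementsVectorType(V4F);    // <2 x float>
+//   unsigned Bits = V4F->getBitWidth();                              // 4 * 32 = 128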
+
+/// PointerType - Class to represent pointers.
+///
+class PointerType : public SequentialType {
+ PointerType(const PointerType &) = delete;
+ const PointerType &operator=(const PointerType &) = delete;
+ explicit PointerType(Type *ElType, unsigned AddrSpace);
+
+public:
+ /// PointerType::get - This constructs a pointer to an object of the specified
+ /// type in a numbered address space.
+ static PointerType *get(Type *ElementType, unsigned AddressSpace);
+
+ /// PointerType::getUnqual - This constructs a pointer to an object of the
+ /// specified type in the generic address space (address space zero).
+ static PointerType *getUnqual(Type *ElementType) {
+ return PointerType::get(ElementType, 0);
+ }
+
+ /// isValidElementType - Return true if the specified type is valid as an
+ /// element type.
+ static bool isValidElementType(Type *ElemTy);
+
+ /// Return true if we can load or store from a pointer to this type.
+ static bool isLoadableOrStorableType(Type *ElemTy);
+
+ /// @brief Return the address space of the Pointer type.
+ inline unsigned getAddressSpace() const { return getSubclassData(); }
+
+ /// Implement support for type inquiry through isa, cast, and dyn_cast.
+ static inline bool classof(const Type *T) {
+ return T->getTypeID() == PointerTyID;
+ }
+};
+
+unsigned Type::getPointerAddressSpace() const {
+ return cast<PointerType>(getScalarType())->getAddressSpace();
+}
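+
+// A minimal sketch, assuming an LLVMContext `Ctx`; address space 3 is chosen
+// purely for illustration:
+//
+//   PointerType *P0 = PointerType::getUnqual(Type::getInt8Ty(Ctx)); // i8*
+//   PointerType *P3 = PointerType::get(Type::getInt8Ty(Ctx), 3);    // i8 addrspace(3)*
+//   unsigned AS = P3->getAddressSpace();                            // 3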
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DiagnosticInfo.h b/contrib/llvm/include/llvm/IR/DiagnosticInfo.h
new file mode 100644
index 0000000..f69955e
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DiagnosticInfo.h
@@ -0,0 +1,589 @@
+//===- llvm/IR/DiagnosticInfo.h - Diagnostic Declaration --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the different classes involved in low level diagnostics.
+//
+// Diagnostics reporting is still done as part of the LLVMContext.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICINFO_H
+#define LLVM_IR_DIAGNOSTICINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Casting.h"
+#include <functional>
+
+namespace llvm {
+
+// Forward declarations.
+class DiagnosticPrinter;
+class Function;
+class Instruction;
+class LLVMContextImpl;
+class Twine;
+class Value;
+class DebugLoc;
+class SMDiagnostic;
+
+/// \brief Defines the different supported severities of a diagnostic.
+enum DiagnosticSeverity {
+ DS_Error,
+ DS_Warning,
+ DS_Remark,
+ // A note attaches additional information to one of the previous diagnostic
+ // types.
+ DS_Note
+};
+
+/// \brief Defines the different supported kinds of a diagnostic.
+/// This enum should be extended with a new ID for each added concrete subclass.
+enum DiagnosticKind {
+ DK_Bitcode,
+ DK_InlineAsm,
+ DK_StackSize,
+ DK_Linker,
+ DK_DebugMetadataVersion,
+ DK_SampleProfile,
+ DK_OptimizationRemark,
+ DK_OptimizationRemarkMissed,
+ DK_OptimizationRemarkAnalysis,
+ DK_OptimizationRemarkAnalysisFPCommute,
+ DK_OptimizationRemarkAnalysisAliasing,
+ DK_OptimizationFailure,
+ DK_MIRParser,
+ DK_PGOProfile,
+ DK_FirstPluginKind
+};
+
+/// \brief Get the next available kind ID for a plugin diagnostic.
+/// Each time this function is called, it returns a different number.
+/// Therefore, a plugin that wants to "identify" its own classes
+/// with a dynamic identifier just has to call this method to get a new ID
+/// and assign it to each of its classes.
+/// The returned ID will be greater than or equal to DK_FirstPluginKind.
+/// Thus, the plugin identifiers will not conflict with the
+/// DiagnosticKind values.
+int getNextAvailablePluginDiagnosticKind();
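+
+// For illustration, a plugin might reserve its kind once and key its classof
+// on it (a sketch; `MyPluginDiag` is hypothetical, and DiagnosticInfo is
+// declared below):
+//
+//   static const int MyPluginKind = getNextAvailablePluginDiagnosticKind();
+//
+//   struct MyPluginDiag : DiagnosticInfo {
+//     MyPluginDiag() : DiagnosticInfo(MyPluginKind, DS_Warning) {}
+//     void print(DiagnosticPrinter &DP) const override;
+//     static bool classof(const DiagnosticInfo *DI) {
+//       return DI->getKind() == MyPluginKind;
+//     }
+//   };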
+
+/// \brief This is the base abstract class for diagnostic reporting in
+/// the backend.
+/// The print method must be overridden by subclasses to print a
+/// user-friendly message in the client of the backend (let us call it a
+/// frontend).
+class DiagnosticInfo {
+private:
+ /// Kind defines the kind of report this is about.
+ const /* DiagnosticKind */ int Kind;
+ /// Severity gives the severity of the diagnostic.
+ const DiagnosticSeverity Severity;
+
+public:
+ DiagnosticInfo(/* DiagnosticKind */ int Kind, DiagnosticSeverity Severity)
+ : Kind(Kind), Severity(Severity) {}
+
+ virtual ~DiagnosticInfo() {}
+
+ /* DiagnosticKind */ int getKind() const { return Kind; }
+ DiagnosticSeverity getSeverity() const { return Severity; }
+
+ /// Print using the given \p DP a user-friendly message.
+ /// This is the default message that will be printed to the user.
+ /// It is used when the frontend does not directly take advantage
+ /// of the information contained in fields of the subclasses.
+ /// The printed message must not end with '.' nor start with a severity
+ /// keyword.
+ virtual void print(DiagnosticPrinter &DP) const = 0;
+
+ static const char *AlwaysPrint;
+};
+
+typedef std::function<void(const DiagnosticInfo &)> DiagnosticHandlerFunction;
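+
+// A handler conforming to this typedef can be a plain lambda; the machinery
+// that installs it on the LLVMContext is assumed to exist in the caller:
+//
+//   DiagnosticHandlerFunction Handler = [&](const DiagnosticInfo &DI) {
+//     if (DI.getSeverity() == DS_Error)
+//       ++NumErrors; // NumErrors: a counter owned by the caller
+//   };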
+
+/// Diagnostic information for inline asm reporting.
+/// This is basically a message and an optional location.
+class DiagnosticInfoInlineAsm : public DiagnosticInfo {
+private:
+ /// Optional line information. 0 if not set.
+ unsigned LocCookie;
+ /// Message to be reported.
+ const Twine &MsgStr;
+ /// Optional origin of the problem.
+ const Instruction *Instr;
+
+public:
+ /// \p MsgStr is the message to be reported to the frontend.
+ /// This class does not copy \p MsgStr, therefore the reference must be valid
+ /// for the whole lifetime of the diagnostic.
+ DiagnosticInfoInlineAsm(const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(0), MsgStr(MsgStr),
+ Instr(nullptr) {}
+
+ /// \p LocCookie if non-zero gives the line number for this report.
+ /// \p MsgStr gives the message.
+ /// This class does not copy \p MsgStr, therefore the reference must be valid
+ /// for the whole lifetime of the diagnostic.
+ DiagnosticInfoInlineAsm(unsigned LocCookie, const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_InlineAsm, Severity), LocCookie(LocCookie),
+ MsgStr(MsgStr), Instr(nullptr) {}
+
+ /// \p Instr gives the original instruction that triggered the diagnostic.
+ /// \p MsgStr gives the message.
+ /// This class does not copy \p MsgStr, therefore the reference must be valid
+ /// for the whole lifetime of the diagnostic.
+ /// Same for \p I.
+ DiagnosticInfoInlineAsm(const Instruction &I, const Twine &MsgStr,
+ DiagnosticSeverity Severity = DS_Error);
+
+ unsigned getLocCookie() const { return LocCookie; }
+ const Twine &getMsgStr() const { return MsgStr; }
+ const Instruction *getInstruction() const { return Instr; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_InlineAsm;
+ }
+};
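+
+// Reporting sketch, assuming an LLVMContext `Ctx`; LLVMContext::diagnose
+// dispatches to the installed handler synchronously, so a temporary Twine
+// message is safe here:
+//
+//   Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, "invalid asm operand"));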
+
+/// Diagnostic information for stack size reporting.
+/// This is basically a function and a size.
+class DiagnosticInfoStackSize : public DiagnosticInfo {
+private:
+ /// The function that is concerned by this stack size diagnostic.
+ const Function &Fn;
+ /// The computed stack size.
+ unsigned StackSize;
+
+public:
+ /// \p Fn is the function concerned by this stack size diagnostic.
+ /// \p StackSize is the computed stack size.
+ DiagnosticInfoStackSize(const Function &Fn, unsigned StackSize,
+ DiagnosticSeverity Severity = DS_Warning)
+ : DiagnosticInfo(DK_StackSize, Severity), Fn(Fn), StackSize(StackSize) {}
+
+ const Function &getFunction() const { return Fn; }
+ unsigned getStackSize() const { return StackSize; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_StackSize;
+ }
+};
+
+/// Diagnostic information for debug metadata version reporting.
+/// This is basically a module and a version.
+class DiagnosticInfoDebugMetadataVersion : public DiagnosticInfo {
+private:
+ /// The module that is concerned by this debug metadata version diagnostic.
+ const Module &M;
+ /// The actual metadata version.
+ unsigned MetadataVersion;
+
+public:
+ /// \p M is the module concerned by this debug metadata version diagnostic.
+ /// \p MetadataVersion is the actual metadata version.
+ DiagnosticInfoDebugMetadataVersion(const Module &M, unsigned MetadataVersion,
+ DiagnosticSeverity Severity = DS_Warning)
+ : DiagnosticInfo(DK_DebugMetadataVersion, Severity), M(M),
+ MetadataVersion(MetadataVersion) {}
+
+ const Module &getModule() const { return M; }
+ unsigned getMetadataVersion() const { return MetadataVersion; }
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_DebugMetadataVersion;
+ }
+};
+
+/// Diagnostic information for the sample profiler.
+class DiagnosticInfoSampleProfile : public DiagnosticInfo {
+public:
+ DiagnosticInfoSampleProfile(StringRef FileName, unsigned LineNum,
+ const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+ LineNum(LineNum), Msg(Msg) {}
+ DiagnosticInfoSampleProfile(StringRef FileName, const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), FileName(FileName),
+ LineNum(0), Msg(Msg) {}
+ DiagnosticInfoSampleProfile(const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_SampleProfile, Severity), LineNum(0), Msg(Msg) {}
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_SampleProfile;
+ }
+
+ StringRef getFileName() const { return FileName; }
+ unsigned getLineNum() const { return LineNum; }
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Name of the input file associated with this diagnostic.
+ StringRef FileName;
+
+ /// Line number where the diagnostic occurred. If 0, no line number will
+ /// be emitted in the message.
+ unsigned LineNum;
+
+ /// Message to report.
+ const Twine &Msg;
+};
+
+/// Diagnostic information for the PGO profiler.
+class DiagnosticInfoPGOProfile : public DiagnosticInfo {
+public:
+ DiagnosticInfoPGOProfile(const char *FileName, const Twine &Msg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_PGOProfile, Severity), FileName(FileName), Msg(Msg) {}
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_PGOProfile;
+ }
+
+ const char *getFileName() const { return FileName; }
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Name of the input file associated with this diagnostic.
+ const char *FileName;
+
+ /// Message to report.
+ const Twine &Msg;
+};
+
+/// Common features for diagnostics dealing with optimization remarks.
+class DiagnosticInfoOptimizationBase : public DiagnosticInfo {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic.
+ /// \p Fn is the function where the diagnostic is being emitted. \p DLoc is
+ /// the location information to use in the diagnostic. If line table
+ /// information is available, the diagnostic will include the source code
+ /// location. \p Msg is the message to show. Note that this class does not
+ /// copy this message, so this reference must be valid for the whole lifetime
+ /// of the diagnostic.
+ DiagnosticInfoOptimizationBase(enum DiagnosticKind Kind,
+ enum DiagnosticSeverity Severity,
+ const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfo(Kind, Severity), PassName(PassName), Fn(Fn), DLoc(DLoc),
+ Msg(Msg) {}
+
+ /// \see DiagnosticInfo::print.
+ void print(DiagnosticPrinter &DP) const override;
+
+ /// Return true if this optimization remark is enabled by one of
+ /// the LLVM command line flags (-pass-remarks, -pass-remarks-missed,
+ /// or -pass-remarks-analysis). Note that this only handles the LLVM
+ /// flags. We cannot access Clang flags from here (they are handled
+ /// in BackendConsumer::OptimizationRemarkHandler).
+ virtual bool isEnabled() const = 0;
+
+ /// Return true if location information is available for this diagnostic.
+ bool isLocationAvailable() const;
+
+ /// Return a string with the location information for this diagnostic
+ /// in the format "file:line:col". If location information is not available,
+ /// it returns "<unknown>:0:0".
+ std::string getLocationStr() const;
+
+ /// Return location information for this diagnostic in three parts:
+ /// the source file name, line number and column.
+ void getLocation(StringRef *Filename, unsigned *Line, unsigned *Column) const;
+
+ const char *getPassName() const { return PassName; }
+ const Function &getFunction() const { return Fn; }
+ const DebugLoc &getDebugLoc() const { return DLoc; }
+ const Twine &getMsg() const { return Msg; }
+
+private:
+ /// Name of the pass that triggers this report. If this matches the
+ /// regular expression given in -Rpass=regexp, then the remark will
+ /// be emitted.
+ const char *PassName;
+
+ /// Function where this diagnostic is triggered.
+ const Function &Fn;
+
+ /// Debug location where this diagnostic is triggered.
+ DebugLoc DLoc;
+
+ /// Message to report.
+ const Twine &Msg;
+};
+
+/// Diagnostic information for applied optimization remarks.
+class DiagnosticInfoOptimizationRemark : public DiagnosticInfoOptimizationBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass=, then the
+ /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic
+ /// will include the source code location. \p Msg is the message to show.
+ /// Note that this class does not copy this message, so this reference
+ /// must be valid for the whole lifetime of the diagnostic.
+ DiagnosticInfoOptimizationRemark(const char *PassName, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfoOptimizationBase(DK_OptimizationRemark, DS_Remark,
+ PassName, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemark;
+ }
+
+ /// \see DiagnosticInfoOptimizationBase::isEnabled.
+ bool isEnabled() const override;
+};
+
+/// Diagnostic information for missed-optimization remarks.
+class DiagnosticInfoOptimizationRemarkMissed
+ : public DiagnosticInfoOptimizationBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-missed=, then the
+ /// diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic
+ /// will include the source code location. \p Msg is the message to show.
+ /// Note that this class does not copy this message, so this reference
+ /// must be valid for the whole lifetime of the diagnostic.
+ DiagnosticInfoOptimizationRemarkMissed(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg)
+ : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkMissed, DS_Remark,
+ PassName, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkMissed;
+ }
+
+ /// \see DiagnosticInfoOptimizationBase::isEnabled.
+ bool isEnabled() const override;
+};
+
+/// Diagnostic information for optimization analysis remarks.
+class DiagnosticInfoOptimizationRemarkAnalysis
+ : public DiagnosticInfoOptimizationBase {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-analysis=, then
+ /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic will
+ /// include the source code location. \p Msg is the message to show. Note that
+ /// this class does not copy this message, so this reference must be valid for
+ /// the whole lifetime of the diagnostic.
+ DiagnosticInfoOptimizationRemarkAnalysis(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationBase(DK_OptimizationRemarkAnalysis, DS_Remark,
+ PassName, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkAnalysis;
+ }
+
+ /// \see DiagnosticInfoOptimizationBase::isEnabled.
+ bool isEnabled() const override;
+
+protected:
+ DiagnosticInfoOptimizationRemarkAnalysis(enum DiagnosticKind Kind,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, Fn, DLoc,
+ Msg) {}
+};
+
+/// Diagnostic information for optimization analysis remarks related to
+/// floating-point non-commutativity.
+class DiagnosticInfoOptimizationRemarkAnalysisFPCommute
+ : public DiagnosticInfoOptimizationRemarkAnalysis {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-analysis=, then
+ /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic will
+ /// include the source code location. \p Msg is the message to show. The
+ /// front-end will append its own message related to options that address
+ /// floating-point non-commutativity. Note that this class does not copy this
+ /// message, so this reference must be valid for the whole lifetime of the
+ /// diagnostic.
+ DiagnosticInfoOptimizationRemarkAnalysisFPCommute(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationRemarkAnalysis(
+ DK_OptimizationRemarkAnalysisFPCommute, PassName, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkAnalysisFPCommute;
+ }
+};
+
+/// Diagnostic information for optimization analysis remarks related to
+/// pointer aliasing.
+class DiagnosticInfoOptimizationRemarkAnalysisAliasing
+ : public DiagnosticInfoOptimizationRemarkAnalysis {
+public:
+ /// \p PassName is the name of the pass emitting this diagnostic. If
+ /// this name matches the regular expression given in -Rpass-analysis=, then
+ /// the diagnostic will be emitted. \p Fn is the function where the diagnostic
+ /// is being emitted. \p DLoc is the location information to use in the
+ /// diagnostic. If line table information is available, the diagnostic will
+ /// include the source code location. \p Msg is the message to show. The
+ /// front-end will append its own message related to options that address
+ /// pointer aliasing legality. Note that this class does not copy this
+ /// message, so this reference must be valid for the whole lifetime of the
+ /// diagnostic.
+ DiagnosticInfoOptimizationRemarkAnalysisAliasing(const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationRemarkAnalysis(
+ DK_OptimizationRemarkAnalysisAliasing, PassName, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationRemarkAnalysisAliasing;
+ }
+};
+
+/// Diagnostic information for machine IR parser.
+class DiagnosticInfoMIRParser : public DiagnosticInfo {
+ const SMDiagnostic &Diagnostic;
+
+public:
+ DiagnosticInfoMIRParser(DiagnosticSeverity Severity,
+ const SMDiagnostic &Diagnostic)
+ : DiagnosticInfo(DK_MIRParser, Severity), Diagnostic(Diagnostic) {}
+
+ const SMDiagnostic &getDiagnostic() const { return Diagnostic; }
+
+ void print(DiagnosticPrinter &DP) const override;
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_MIRParser;
+ }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DiagnosticInfo, LLVMDiagnosticInfoRef)
+
+/// Emit an optimization-applied message. \p PassName is the name of the pass
+/// emitting the message. If -Rpass= is given and \p PassName matches the
+/// regular expression in -Rpass, then the remark will be emitted. \p Fn is
+/// the function triggering the remark, \p DLoc is the debug location where
+/// the diagnostic is generated. \p Msg is the message string to use.
+void emitOptimizationRemark(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
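+
+// Emission sketch from inside a transform, assuming a Function `F` and an
+// Instruction `I` carrying a debug location; the pass name "my-pass" is
+// illustrative:
+//
+//   emitOptimizationRemark(F.getContext(), "my-pass", F, I.getDebugLoc(),
+//                          "transformation applied");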
+
+/// Emit an optimization-missed message. \p PassName is the name of the
+/// pass emitting the message. If -Rpass-missed= is given and \p PassName
+/// matches the regular expression in -Rpass-missed, then the remark will be
+/// emitted. \p Fn is the function triggering the remark, \p DLoc is the
+/// debug location where the diagnostic is generated. \p Msg is the
+/// message string to use.
+void emitOptimizationRemarkMissed(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Emit an optimization analysis remark message. \p PassName is the name of
+/// the pass emitting the message. If -Rpass-analysis= is given and \p
+/// PassName matches the regular expression in -Rpass-analysis, then the remark will be
+/// emitted. \p Fn is the function triggering the remark, \p DLoc is the debug
+/// location where the diagnostic is generated. \p Msg is the message string
+/// to use.
+void emitOptimizationRemarkAnalysis(LLVMContext &Ctx, const char *PassName,
+ const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Emit an optimization analysis remark related to messages about
+/// floating-point non-commutativity. \p PassName is the name of the pass
+/// emitting the message. If -Rpass-analysis= is given and \p PassName matches
+/// the regular expression in -Rpass-analysis, then the remark will be emitted. \p Fn is
+/// the function triggering the remark, \p DLoc is the debug location where the
+/// diagnostic is generated. \p Msg is the message string to use.
+void emitOptimizationRemarkAnalysisFPCommute(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Emit an optimization analysis remark related to messages about
+/// pointer aliasing. \p PassName is the name of the pass emitting the message.
+/// If -Rpass-analysis= is given and \p PassName matches the regular expression
+/// in -Rpass-analysis, then the remark will be emitted. \p Fn is the function triggering
+/// the remark, \p DLoc is the debug location where the diagnostic is generated.
+/// \p Msg is the message string to use.
+void emitOptimizationRemarkAnalysisAliasing(LLVMContext &Ctx,
+ const char *PassName,
+ const Function &Fn,
+ const DebugLoc &DLoc,
+ const Twine &Msg);
+
+/// Diagnostic information for optimization failures.
+class DiagnosticInfoOptimizationFailure
+ : public DiagnosticInfoOptimizationBase {
+public:
+ /// \p Fn is the function where the diagnostic is being emitted. \p DLoc is
+ /// the location information to use in the diagnostic. If line table
+ /// information is available, the diagnostic will include the source code
+ /// location. \p Msg is the message to show. Note that this class does not
+ /// copy this message, so this reference must be valid for the whole lifetime
+ /// of the diagnostic.
+ DiagnosticInfoOptimizationFailure(const Function &Fn, const DebugLoc &DLoc,
+ const Twine &Msg)
+ : DiagnosticInfoOptimizationBase(DK_OptimizationFailure, DS_Warning,
+ nullptr, Fn, DLoc, Msg) {}
+
+ static bool classof(const DiagnosticInfo *DI) {
+ return DI->getKind() == DK_OptimizationFailure;
+ }
+
+ /// \see DiagnosticInfoOptimizationBase::isEnabled.
+ bool isEnabled() const override;
+};
+
+/// Emit a warning when loop vectorization is specified but fails. \p Fn is the
+/// function triggering the warning, \p DLoc is the debug location where the
+/// diagnostic is generated. \p Msg is the message string to use.
+void emitLoopVectorizeWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg);
+
+/// Emit a warning when loop interleaving is specified but fails. \p Fn is the
+/// function triggering the warning, \p DLoc is the debug location where the
+/// diagnostic is generated. \p Msg is the message string to use.
+void emitLoopInterleaveWarning(LLVMContext &Ctx, const Function &Fn,
+ const DebugLoc &DLoc, const Twine &Msg);
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/DiagnosticPrinter.h b/contrib/llvm/include/llvm/IR/DiagnosticPrinter.h
new file mode 100644
index 0000000..1bcd737
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/DiagnosticPrinter.h
@@ -0,0 +1,94 @@
+//===- llvm/IR/DiagnosticPrinter.h - Diagnostic Printer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the main interface for printing backend diagnostics.
+//
+// Clients of the backend diagnostics should implement this interface based
+// on their needs.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
+#define LLVM_IR_DIAGNOSTICPRINTER_H
+
+#include <string>
+
+namespace llvm {
+// Forward declarations.
+class Module;
+class raw_ostream;
+class SMDiagnostic;
+class StringRef;
+class Twine;
+class Value;
+
+/// \brief Interface for custom diagnostic printing.
+class DiagnosticPrinter {
+public:
+ virtual ~DiagnosticPrinter() {}
+
+ // Simple types.
+ virtual DiagnosticPrinter &operator<<(char C) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
+ virtual DiagnosticPrinter &operator<<(signed char C) = 0;
+ virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
+ virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
+ virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
+ virtual DiagnosticPrinter &operator<<(long N) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
+ virtual DiagnosticPrinter &operator<<(long long N) = 0;
+ virtual DiagnosticPrinter &operator<<(const void *P) = 0;
+ virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
+ virtual DiagnosticPrinter &operator<<(int N) = 0;
+ virtual DiagnosticPrinter &operator<<(double N) = 0;
+ virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;
+
+ // IR related types.
+ virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
+ virtual DiagnosticPrinter &operator<<(const Module &M) = 0;
+
+ // Other types.
+ virtual DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) = 0;
+};
+
+/// \brief Basic diagnostic printer that uses an underlying raw_ostream.
+class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
+protected:
+ raw_ostream &Stream;
+
+public:
+ DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}
+
+ // Simple types.
+ DiagnosticPrinter &operator<<(char C) override;
+ DiagnosticPrinter &operator<<(unsigned char C) override;
+ DiagnosticPrinter &operator<<(signed char C) override;
+ DiagnosticPrinter &operator<<(StringRef Str) override;
+ DiagnosticPrinter &operator<<(const char *Str) override;
+ DiagnosticPrinter &operator<<(const std::string &Str) override;
+ DiagnosticPrinter &operator<<(unsigned long N) override;
+ DiagnosticPrinter &operator<<(long N) override;
+ DiagnosticPrinter &operator<<(unsigned long long N) override;
+ DiagnosticPrinter &operator<<(long long N) override;
+ DiagnosticPrinter &operator<<(const void *P) override;
+ DiagnosticPrinter &operator<<(unsigned int N) override;
+ DiagnosticPrinter &operator<<(int N) override;
+ DiagnosticPrinter &operator<<(double N) override;
+ DiagnosticPrinter &operator<<(const Twine &Str) override;
+
+ // IR related types.
+ DiagnosticPrinter &operator<<(const Value &V) override;
+ DiagnosticPrinter &operator<<(const Module &M) override;
+
+ // Other types.
+ DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
+};
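+
+// Typical usage sketch, assuming `DI` is some DiagnosticInfo and that the
+// including file pulls in raw_ostream for errs():
+//
+//   DiagnosticPrinterRawOStream DP(errs());
+//   DI.print(DP);
+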
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Dominators.h b/contrib/llvm/include/llvm/IR/Dominators.h
new file mode 100644
index 0000000..37447c3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Dominators.h
@@ -0,0 +1,254 @@
+//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DominatorTree class, which provides fast and efficient
+// dominance queries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_DOMINATORS_H
+#define LLVM_IR_DOMINATORS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/GenericDomTree.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+
+// FIXME: Replace this brittle forward declaration with the include of the new
+// PassManager.h when doing so doesn't break the PassManagerBuilder.
+template <typename IRUnitT> class AnalysisManager;
+class PreservedAnalyses;
+
+extern template class DomTreeNodeBase<BasicBlock>;
+extern template class DominatorTreeBase<BasicBlock>;
+
+extern template void Calculate<Function, BasicBlock *>(
+ DominatorTreeBase<GraphTraits<BasicBlock *>::NodeType> &DT, Function &F);
+extern template void Calculate<Function, Inverse<BasicBlock *>>(
+ DominatorTreeBase<GraphTraits<Inverse<BasicBlock *>>::NodeType> &DT,
+ Function &F);
+
+typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
+
+class BasicBlockEdge {
+ const BasicBlock *Start;
+ const BasicBlock *End;
+public:
+ BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
+ Start(Start_), End(End_) { }
+ const BasicBlock *getStart() const {
+ return Start;
+ }
+ const BasicBlock *getEnd() const {
+ return End;
+ }
+ bool isSingleEdge() const;
+};
+
+/// \brief Concrete subclass of DominatorTreeBase that is used to compute a
+/// normal dominator tree.
+///
+/// Definition: A block is said to be forward statically reachable if there is
+/// a path from the entry of the function to the block. A statically reachable
+/// block may become statically unreachable during optimization.
+///
+/// A forward unreachable block may appear in the dominator tree, or it may
+/// not. If it does, dominance queries will return results as if all reachable
+/// blocks dominate it. When asking for a Node corresponding to a potentially
+/// unreachable block, calling code must handle the case where the block was
+/// unreachable and the result of getNode() is nullptr.
+///
+/// Generally, a block known to be unreachable when the dominator tree is
+/// constructed will not be in the tree. One which becomes unreachable after
+/// the dominator tree is initially constructed may still exist in the tree,
+/// even if the tree is properly updated. Calling code should not rely on the
+/// preceding statements; this is stated only to assist human understanding.
+class DominatorTree : public DominatorTreeBase<BasicBlock> {
+public:
+ typedef DominatorTreeBase<BasicBlock> Base;
+
+ DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
+ explicit DominatorTree(Function &F) : DominatorTreeBase<BasicBlock>(false) {
+ recalculate(F);
+ }
+
+ DominatorTree(DominatorTree &&Arg)
+ : Base(std::move(static_cast<Base &>(Arg))) {}
+ DominatorTree &operator=(DominatorTree &&RHS) {
+ Base::operator=(std::move(static_cast<Base &>(RHS)));
+ return *this;
+ }
+
+ /// \brief Compare against another dominator tree: returns *false* if the
+ /// other tree matches this one, and *true* otherwise.
+ inline bool compare(const DominatorTree &Other) const {
+ const DomTreeNode *R = getRootNode();
+ const DomTreeNode *OtherR = Other.getRootNode();
+
+ if (!R || !OtherR || R->getBlock() != OtherR->getBlock())
+ return true;
+
+ if (Base::compare(Other))
+ return true;
+
+ return false;
+ }
+
+ // Ensure base-class overloads are visible.
+ using Base::dominates;
+
+ /// \brief Return true if Def dominates a use in User.
+ ///
+ /// This performs the special checks necessary if Def and User are in the same
+ /// basic block. Note that Def doesn't dominate a use in Def itself!
+ bool dominates(const Instruction *Def, const Use &U) const;
+ bool dominates(const Instruction *Def, const Instruction *User) const;
+ bool dominates(const Instruction *Def, const BasicBlock *BB) const;
+ bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
+ bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
+
+ // Ensure base class overloads are visible.
+ using Base::isReachableFromEntry;
+
+ /// \brief Provide an overload for a Use.
+ bool isReachableFromEntry(const Use &U) const;
+
+ /// \brief Verify the correctness of the domtree by re-computing it.
+ ///
+ /// This should only be used for debugging as it aborts the program if the
+ /// verification fails.
+ void verifyDomTree() const;
+};
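+
+// Query sketch, assuming a Function `F` with instructions `Def` and `UseInst`
+// and a block `BB` taken from it:
+//
+//   DominatorTree DT(F);
+//   if (DT.dominates(Def, UseInst))
+//     ...;
+//   DomTreeNode *N = DT.getNode(BB); // may be null if BB is unreachable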
+
+//===-------------------------------------
+// DominatorTree GraphTraits specializations so the DominatorTree can be
+// iterable by generic graph iterators.
+
+template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
+ typedef Node NodeType;
+ typedef ChildIterator ChildIteratorType;
+ typedef df_iterator<Node *, SmallPtrSet<NodeType *, 8>> nodes_iterator;
+
+ static NodeType *getEntryNode(NodeType *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) { return N->end(); }
+
+ static nodes_iterator nodes_begin(NodeType *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(NodeType *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+template <>
+struct GraphTraits<DomTreeNode *>
+ : public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::iterator> {};
+
+template <>
+struct GraphTraits<const DomTreeNode *>
+ : public DomTreeGraphTraitsBase<const DomTreeNode,
+ DomTreeNode::const_iterator> {};
+
+template <> struct GraphTraits<DominatorTree*>
+ : public GraphTraits<DomTreeNode*> {
+ static NodeType *getEntryNode(DominatorTree *DT) {
+ return DT->getRootNode();
+ }
+
+ static nodes_iterator nodes_begin(DominatorTree *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(DominatorTree *N) {
+ return df_end(getEntryNode(N));
+ }
+};
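+
+// These specializations let generic graph iterators walk the tree, e.g.
+// (sketch, with `DT` a DominatorTree built by the caller):
+//
+//   for (DomTreeNode *N : depth_first(DT.getRootNode()))
+//     visit(N->getBlock()); // visit: a caller-provided function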
+
+/// \brief Analysis pass which computes a \c DominatorTree.
+class DominatorTreeAnalysis {
+public:
+ /// \brief Provide the result typedef for this analysis pass.
+ typedef DominatorTree Result;
+
+ /// \brief Opaque, unique identifier for this analysis pass.
+ static void *ID() { return (void *)&PassID; }
+
+ /// \brief Run the analysis pass over a function and produce a dominator tree.
+ DominatorTree run(Function &F);
+
+ /// \brief Provide access to a name for this pass for debugging purposes.
+ static StringRef name() { return "DominatorTreeAnalysis"; }
+
+private:
+ static char PassID;
+};
+
+/// \brief Printer pass for the \c DominatorTree.
+class DominatorTreePrinterPass {
+ raw_ostream &OS;
+
+public:
+ explicit DominatorTreePrinterPass(raw_ostream &OS);
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+ static StringRef name() { return "DominatorTreePrinterPass"; }
+};
+
+/// \brief Verifier pass for the \c DominatorTree.
+struct DominatorTreeVerifierPass {
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+ static StringRef name() { return "DominatorTreeVerifierPass"; }
+};
+
+/// \brief Legacy analysis pass which computes a \c DominatorTree.
+class DominatorTreeWrapperPass : public FunctionPass {
+ DominatorTree DT;
+
+public:
+ static char ID;
+
+ DominatorTreeWrapperPass() : FunctionPass(ID) {
+ initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ DominatorTree &getDomTree() { return DT; }
+ const DominatorTree &getDomTree() const { return DT; }
+
+ bool runOnFunction(Function &F) override;
+
+ void verifyAnalysis() const override;
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesAll();
+ }
+
+ void releaseMemory() override { DT.releaseMemory(); }
+
+ void print(raw_ostream &OS, const Module *M = nullptr) const override;
+};
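+
+// Consumption sketch from another legacy pass (the standard pattern):
+//
+//   void getAnalysisUsage(AnalysisUsage &AU) const override {
+//     AU.addRequired<DominatorTreeWrapperPass>();
+//   }
+//   bool runOnFunction(Function &F) override {
+//     DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+//     ...
+//   }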
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Function.h b/contrib/llvm/include/llvm/IR/Function.h
new file mode 100644
index 0000000..2a98393
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Function.h
@@ -0,0 +1,665 @@
+//===-- llvm/IR/Function.h - Class to represent a single function ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Function class, which represents a
+// single function/procedure in LLVM.
+//
+// A function basically consists of a list of basic blocks, a list of arguments,
+// and a symbol table.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_FUNCTION_H
+#define LLVM_IR_FUNCTION_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+class FunctionType;
+class LLVMContext;
+class DISubprogram;
+
+template <>
+struct SymbolTableListSentinelTraits<Argument>
+ : public ilist_half_embedded_sentinel_traits<Argument> {};
+
+class Function : public GlobalObject, public ilist_node<Function> {
+public:
+ typedef SymbolTableList<Argument> ArgumentListType;
+ typedef SymbolTableList<BasicBlock> BasicBlockListType;
+
+ // BasicBlock iterators...
+ typedef BasicBlockListType::iterator iterator;
+ typedef BasicBlockListType::const_iterator const_iterator;
+
+ typedef ArgumentListType::iterator arg_iterator;
+ typedef ArgumentListType::const_iterator const_arg_iterator;
+
+private:
+ // Important things that make up a function!
+ BasicBlockListType BasicBlocks; ///< The basic blocks
+ mutable ArgumentListType ArgumentList; ///< The formal arguments
+ ValueSymbolTable *SymTab; ///< Symbol table of args/instructions
+ AttributeSet AttributeSets; ///< Parameter attributes
+ FunctionType *Ty;
+
+ /*
+ * Value::SubclassData
+ *
+ * bit 0 : HasLazyArguments
+ * bit 1 : HasPrefixData
+ * bit 2 : HasPrologueData
+ * bit 3 : HasPersonalityFn
+ * bits 4-13 : CallingConvention
+ * bits 14-15 : [reserved]
+ */
+
+ /// Bits from GlobalObject::GlobalObjectSubclassData.
+ enum {
+ /// Whether this function is materializable.
+ IsMaterializableBit = 1 << 0,
+ HasMetadataHashEntryBit = 1 << 1
+ };
+ void setGlobalObjectBit(unsigned Mask, bool Value) {
+ setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
+ (Value ? Mask : 0u));
+ }
+
+ friend class SymbolTableListTraits<Function>;
+
+ void setParent(Module *parent);
+
+ /// hasLazyArguments/CheckLazyArguments - The argument list of a function is
+ /// built on demand, so that the list isn't allocated until the first client
+ /// needs it. The hasLazyArguments predicate returns true if the arg list
+ /// hasn't been set up yet.
+ bool hasLazyArguments() const {
+ return getSubclassDataFromValue() & (1<<0);
+ }
+ void CheckLazyArguments() const {
+ if (hasLazyArguments())
+ BuildLazyArguments();
+ }
+ void BuildLazyArguments() const;
+
+ Function(const Function&) = delete;
+ void operator=(const Function&) = delete;
+
+ /// Function ctor - If the (optional) Module argument is specified, the
+ /// function is automatically inserted into the end of the function list for
+ /// the module.
+ ///
+ Function(FunctionType *Ty, LinkageTypes Linkage,
+ const Twine &N = "", Module *M = nullptr);
+
+public:
+ static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
+ const Twine &N = "", Module *M = nullptr) {
+ return new Function(Ty, Linkage, N, M);
+ }
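+
+ // Construction sketch, assuming a Module `M` and a FunctionType `FT` from
+ // the caller; the symbol name "my_func" is illustrative:
+ //
+ //   Function *F = Function::Create(FT, Function::ExternalLinkage,
+ //                                  "my_func", &M);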
+
+ ~Function() override;
+
+ /// \brief Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ Type *getReturnType() const; // Return the type of the ret val
+ FunctionType *getFunctionType() const; // Return the FunctionType for me
+
+ /// getContext - Return a reference to the LLVMContext associated with this
+ /// function.
+ LLVMContext &getContext() const;
+
+ /// isVarArg - Return true if this function takes a variable number of
+ /// arguments.
+ bool isVarArg() const;
+
+ bool isMaterializable() const;
+ void setIsMaterializable(bool V);
+
+ /// getIntrinsicID - This method returns the ID number of this function, or
+ /// Intrinsic::not_intrinsic if the function is not an intrinsic.
+ /// Intrinsic::not_intrinsic is always defined to be zero, to allow easy
+ /// checking for whether a function is an intrinsic or not. The particular
+ /// intrinsic functions which correspond to this value are defined in
+ /// llvm/Intrinsics.h.
+ Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }
+ bool isIntrinsic() const { return getName().startswith("llvm."); }
+
+ /// \brief Recalculate the ID for this function if it is an Intrinsic defined
+ /// in llvm/Intrinsics.h. Sets the intrinsic ID to Intrinsic::not_intrinsic
+ /// if the name of this function does not match an intrinsic in that header.
+ /// Note, this method does not need to be called directly, as it is called
+ /// from Value::setName() whenever the name of this function changes.
+ void recalculateIntrinsicID();
+
+ /// getCallingConv()/setCallingConv(CC) - These methods get and set the
+ /// calling convention of this function. The enum values for the known
+ /// calling conventions are defined in CallingConv.h.
+ CallingConv::ID getCallingConv() const {
+ return static_cast<CallingConv::ID>((getSubclassDataFromValue() >> 4) &
+ CallingConv::MaxID);
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ auto ID = static_cast<unsigned>(CC);
+ assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+ setValueSubclassData((getSubclassDataFromValue() & 0xc00f) | (ID << 4));
+ }
+
+ /// @brief Return the attribute list for this Function.
+ AttributeSet getAttributes() const { return AttributeSets; }
+
+ /// @brief Set the attribute list for this Function.
+ void setAttributes(AttributeSet attrs) { AttributeSets = attrs; }
+
+ /// @brief Add function attributes to this function.
+ void addFnAttr(Attribute::AttrKind N) {
+ setAttributes(AttributeSets.addAttribute(getContext(),
+ AttributeSet::FunctionIndex, N));
+ }
+
+ /// @brief Remove function attributes from this function.
+ void removeFnAttr(Attribute::AttrKind N) {
+ setAttributes(AttributeSets.removeAttribute(
+ getContext(), AttributeSet::FunctionIndex, N));
+ }
+
+ /// @brief Add function attributes to this function.
+ void addFnAttr(StringRef Kind) {
+ setAttributes(
+ AttributeSets.addAttribute(getContext(),
+ AttributeSet::FunctionIndex, Kind));
+ }
+ void addFnAttr(StringRef Kind, StringRef Value) {
+ setAttributes(
+ AttributeSets.addAttribute(getContext(),
+ AttributeSet::FunctionIndex, Kind, Value));
+ }
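+
+ // Attribute sketch on an existing Function `F`; the string attribute shown
+ // is illustrative of the key/value form:
+ //
+ //   F->addFnAttr(Attribute::NoUnwind);
+ //   F->addFnAttr("no-frame-pointer-elim", "true");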
+
+ /// Set the entry count for this function.
+ void setEntryCount(uint64_t Count);
+
+ /// Get the entry count for this function.
+ Optional<uint64_t> getEntryCount() const;
+
+ /// @brief Return true if the function has the attribute.
+ bool hasFnAttribute(Attribute::AttrKind Kind) const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex, Kind);
+ }
+ bool hasFnAttribute(StringRef Kind) const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex, Kind);
+ }
+
+ /// @brief Return the attribute for the given attribute kind.
+ Attribute getFnAttribute(Attribute::AttrKind Kind) const {
+ return AttributeSets.getAttribute(AttributeSet::FunctionIndex, Kind);
+ }
+ Attribute getFnAttribute(StringRef Kind) const {
+ return AttributeSets.getAttribute(AttributeSet::FunctionIndex, Kind);
+ }
+
+ /// \brief Return the stack alignment for the function.
+ unsigned getFnStackAlignment() const {
+ return AttributeSets.getStackAlignment(AttributeSet::FunctionIndex);
+ }
+
+ /// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
+ /// to use during code generation.
+ bool hasGC() const;
+ const char *getGC() const;
+ void setGC(const char *Str);
+ void clearGC();
+
+ /// @brief adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, Attribute::AttrKind attr);
+
+ /// @brief adds the attributes to the list of attributes.
+ void addAttributes(unsigned i, AttributeSet attrs);
+
+ /// @brief removes the attributes from the list of attributes.
+ void removeAttributes(unsigned i, AttributeSet attr);
+
+ /// @brief adds the dereferenceable attribute to the list of attributes.
+ void addDereferenceableAttr(unsigned i, uint64_t Bytes);
+
+ /// @brief adds the dereferenceable_or_null attribute to the list of
+ /// attributes.
+ void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
+
+ /// @brief Extract the alignment for a call or parameter (0=unknown).
+ unsigned getParamAlignment(unsigned i) const {
+ return AttributeSets.getParamAlignment(i);
+ }
+
+ /// @brief Extract the number of dereferenceable bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableBytes(unsigned i) const {
+ return AttributeSets.getDereferenceableBytes(i);
+ }
+
+ /// @brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+ return AttributeSets.getDereferenceableOrNullBytes(i);
+ }
+
+ /// @brief Determine if the function does not access memory.
+ bool doesNotAccessMemory() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::ReadNone);
+ }
+ void setDoesNotAccessMemory() {
+ addFnAttr(Attribute::ReadNone);
+ }
+
+ /// @brief Determine if the function does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ return doesNotAccessMemory() ||
+ AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::ReadOnly);
+ }
+ void setOnlyReadsMemory() {
+ addFnAttr(Attribute::ReadOnly);
+ }
+
+ /// @brief Determine if the function can access memory only using pointers
+ /// based on its arguments.
+ bool onlyAccessesArgMemory() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::ArgMemOnly);
+ }
+ void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
+
+ /// @brief Determine if the function may only access memory that is
+ /// inaccessible from the IR.
+ bool onlyAccessesInaccessibleMemory() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::InaccessibleMemOnly);
+ }
+ void setOnlyAccessesInaccessibleMemory() {
+ addFnAttr(Attribute::InaccessibleMemOnly);
+ }
+
+ /// @brief Determine if the function may only access memory that is
+ /// either inaccessible from the IR or pointed to by its arguments.
+ bool onlyAccessesInaccessibleMemOrArgMem() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::InaccessibleMemOrArgMemOnly);
+ }
+ void setOnlyAccessesInaccessibleMemOrArgMem() {
+ addFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
+ }
+
+ /// @brief Determine if the function cannot return.
+ bool doesNotReturn() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoReturn);
+ }
+ void setDoesNotReturn() {
+ addFnAttr(Attribute::NoReturn);
+ }
+
+ /// @brief Determine if the function cannot unwind.
+ bool doesNotThrow() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoUnwind);
+ }
+ void setDoesNotThrow() {
+ addFnAttr(Attribute::NoUnwind);
+ }
+
+ /// @brief Determine if calls to this function cannot be duplicated.
+ bool cannotDuplicate() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoDuplicate);
+ }
+ void setCannotDuplicate() {
+ addFnAttr(Attribute::NoDuplicate);
+ }
+
+ /// @brief Determine if the function is convergent.
+ bool isConvergent() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::Convergent);
+ }
+ void setConvergent() {
+ addFnAttr(Attribute::Convergent);
+ }
+
+ /// Determine if the function is known not to recurse, directly or
+ /// indirectly.
+ bool doesNotRecurse() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoRecurse);
+ }
+ void setDoesNotRecurse() {
+ addFnAttr(Attribute::NoRecurse);
+ }
+
+ /// @brief True if the ABI mandates (or the user requested) that this
+ /// function be in an unwind table.
+ bool hasUWTable() const {
+ return AttributeSets.hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::UWTable);
+ }
+ void setHasUWTable() {
+ addFnAttr(Attribute::UWTable);
+ }
+
+ /// @brief True if this function needs an unwind table.
+ bool needsUnwindTableEntry() const {
+ return hasUWTable() || !doesNotThrow();
+ }
+
+ /// @brief Determine if the function returns a structure through its first
+ /// or second pointer argument.
+ bool hasStructRetAttr() const {
+ return AttributeSets.hasAttribute(1, Attribute::StructRet) ||
+ AttributeSets.hasAttribute(2, Attribute::StructRet);
+ }
+
+ /// @brief Determine if the parameter or return value is marked with NoAlias
+ /// attribute.
+ /// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
+ bool doesNotAlias(unsigned n) const {
+ return AttributeSets.hasAttribute(n, Attribute::NoAlias);
+ }
+ void setDoesNotAlias(unsigned n) {
+ addAttribute(n, Attribute::NoAlias);
+ }
+
+ /// @brief Determine if the parameter can be captured.
+ /// @param n The parameter to check. 1 is the first parameter, 0 is the return value.
+ bool doesNotCapture(unsigned n) const {
+ return AttributeSets.hasAttribute(n, Attribute::NoCapture);
+ }
+ void setDoesNotCapture(unsigned n) {
+ addAttribute(n, Attribute::NoCapture);
+ }
+
+ bool doesNotAccessMemory(unsigned n) const {
+ return AttributeSets.hasAttribute(n, Attribute::ReadNone);
+ }
+ void setDoesNotAccessMemory(unsigned n) {
+ addAttribute(n, Attribute::ReadNone);
+ }
+
+ bool onlyReadsMemory(unsigned n) const {
+ return doesNotAccessMemory(n) ||
+ AttributeSets.hasAttribute(n, Attribute::ReadOnly);
+ }
+ void setOnlyReadsMemory(unsigned n) {
+ addAttribute(n, Attribute::ReadOnly);
+ }
+
+ /// Optimize this function for minimum size (-Oz).
+ bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); }
+
+ /// Optimize this function for size (-Os) or minimum size (-Oz).
+ bool optForSize() const {
+ return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize();
+ }
+
+ /// copyAttributesFrom - copy all additional attributes (those not needed to
+ /// create a Function) from the Function Src to this one.
+ void copyAttributesFrom(const GlobalValue *Src) override;
+
+ /// deleteBody - This method deletes the body of the function, and converts
+ /// the linkage to external.
+ ///
+ void deleteBody() {
+ dropAllReferences();
+ setLinkage(ExternalLinkage);
+ }
+
+ /// removeFromParent - This method unlinks 'this' from the containing module,
+ /// but does not delete it.
+ ///
+ void removeFromParent() override;
+
+ /// eraseFromParent - This method unlinks 'this' from the containing module
+ /// and deletes it.
+ ///
+ void eraseFromParent() override;
+
+ /// Get the underlying elements of the Function... the basic block list is
+ /// empty for external functions.
+ ///
+ const ArgumentListType &getArgumentList() const {
+ CheckLazyArguments();
+ return ArgumentList;
+ }
+ ArgumentListType &getArgumentList() {
+ CheckLazyArguments();
+ return ArgumentList;
+ }
+ static ArgumentListType Function::*getSublistAccess(Argument*) {
+ return &Function::ArgumentList;
+ }
+
+ const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
+ BasicBlockListType &getBasicBlockList() { return BasicBlocks; }
+ static BasicBlockListType Function::*getSublistAccess(BasicBlock*) {
+ return &Function::BasicBlocks;
+ }
+
+ const BasicBlock &getEntryBlock() const { return front(); }
+ BasicBlock &getEntryBlock() { return front(); }
+
+ //===--------------------------------------------------------------------===//
+ // Symbol Table Accessing functions...
+
+ /// getSymbolTable() - Return the symbol table...
+ ///
+ inline ValueSymbolTable &getValueSymbolTable() { return *SymTab; }
+ inline const ValueSymbolTable &getValueSymbolTable() const { return *SymTab; }
+
+ //===--------------------------------------------------------------------===//
+ // BasicBlock iterator forwarding functions
+ //
+ iterator begin() { return BasicBlocks.begin(); }
+ const_iterator begin() const { return BasicBlocks.begin(); }
+ iterator end() { return BasicBlocks.end(); }
+ const_iterator end() const { return BasicBlocks.end(); }
+
+ size_t size() const { return BasicBlocks.size(); }
+ bool empty() const { return BasicBlocks.empty(); }
+ const BasicBlock &front() const { return BasicBlocks.front(); }
+ BasicBlock &front() { return BasicBlocks.front(); }
+ const BasicBlock &back() const { return BasicBlocks.back(); }
+ BasicBlock &back() { return BasicBlocks.back(); }
+
+/// @name Function Argument Iteration
+/// @{
+
+ arg_iterator arg_begin() {
+ CheckLazyArguments();
+ return ArgumentList.begin();
+ }
+ const_arg_iterator arg_begin() const {
+ CheckLazyArguments();
+ return ArgumentList.begin();
+ }
+ arg_iterator arg_end() {
+ CheckLazyArguments();
+ return ArgumentList.end();
+ }
+ const_arg_iterator arg_end() const {
+ CheckLazyArguments();
+ return ArgumentList.end();
+ }
+
+ iterator_range<arg_iterator> args() {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ iterator_range<const_arg_iterator> args() const {
+ return make_range(arg_begin(), arg_end());
+ }
+
+/// @}
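+
+ // Iteration sketch over the formal arguments of a Function `F`:
+ //
+ //   for (Argument &A : F->args())
+ //     if (A.getType()->isPointerTy())
+ //       ++NumPtrArgs; // NumPtrArgs: a counter owned by the caller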
+
+ size_t arg_size() const;
+ bool arg_empty() const;
+
+ /// \brief Check whether this function has a personality function.
+ bool hasPersonalityFn() const {
+ return getSubclassDataFromValue() & (1<<3);
+ }
+
+ /// \brief Get the personality function associated with this function.
+ Constant *getPersonalityFn() const;
+ void setPersonalityFn(Constant *Fn);
+
+ /// \brief Check whether this function has prefix data.
+ bool hasPrefixData() const {
+ return getSubclassDataFromValue() & (1<<1);
+ }
+
+ /// \brief Get the prefix data associated with this function.
+ Constant *getPrefixData() const;
+ void setPrefixData(Constant *PrefixData);
+
+ /// \brief Check whether this function has prologue data.
+ bool hasPrologueData() const {
+ return getSubclassDataFromValue() & (1<<2);
+ }
+
+ /// \brief Get the prologue data associated with this function.
+ Constant *getPrologueData() const;
+ void setPrologueData(Constant *PrologueData);
+
+ /// viewCFG - This function is meant for use from the debugger. You can just
+ /// say 'call F->viewCFG()' and a ghostview window should pop up, displaying
+ /// the CFG of the current function with the code for each basic block inside.
+ /// This depends on the 'dot' and 'gv' programs being in your path.
+ ///
+ ///
+ void viewCFG() const;
+
+ /// viewCFGOnly - This function is meant for use from the debugger. It works
+ /// just like viewCFG, but it does not include the contents of basic blocks
+ /// in the nodes, just their labels. If you are only interested in the CFG,
+ /// this can make the graph smaller.
+ ///
+ void viewCFGOnly() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::FunctionVal;
+ }
+
+ /// dropAllReferences() - This method causes all the subinstructions to "let
+ /// go" of all references that they are maintaining. This allows one to
+ /// 'delete' a whole module at a time, even though there may be circular
+ /// references... first all references are dropped, and all use counts go to
+ /// zero. Then everything is deleted for real. Note that no operations are
+ /// valid on an object that has "dropped all references", except operator
+ /// delete.
+ ///
+ /// Since no other object in the module can have references into the body of a
+ /// function, dropping all references deletes the entire body of the function,
+ /// including any contained basic blocks.
+ ///
+ void dropAllReferences();
+
+ /// hasAddressTaken - returns true if there are any uses of this function
+ /// other than direct calls or invokes to it, or blockaddress expressions.
+ /// Optionally passes back an offending user for diagnostic purposes.
+ ///
+ bool hasAddressTaken(const User** = nullptr) const;
+
+ /// isDefTriviallyDead - Return true if it is trivially safe to remove
+ /// this function definition from the module (because it isn't externally
+ /// visible, does not have its address taken, and has no callers). To make
+ /// this more accurate, call removeDeadConstantUsers first.
+ bool isDefTriviallyDead() const;
+
+ /// callsFunctionThatReturnsTwice - Return true if the function has a call to
+ /// setjmp or other function that gcc recognizes as "returning twice".
+ bool callsFunctionThatReturnsTwice() const;
+
+ /// \brief Check if this has any metadata.
+ bool hasMetadata() const { return hasMetadataHashEntry(); }
+
+ /// \brief Get the current metadata attachment, if any.
+ ///
+ /// Returns \c nullptr if such an attachment is missing.
+ /// @{
+ MDNode *getMetadata(unsigned KindID) const;
+ MDNode *getMetadata(StringRef Kind) const;
+ /// @}
+
+ /// \brief Set a particular kind of metadata attachment.
+ ///
+ /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
+ /// replacing it if it already exists.
+ /// @{
+ void setMetadata(unsigned KindID, MDNode *MD);
+ void setMetadata(StringRef Kind, MDNode *MD);
+ /// @}
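+
+ /// Example (an illustrative sketch, assuming a Function *F and an
+ /// MDNode *MD valid in the caller):
+ /// \code
+ ///   F->setMetadata("my.kind", MD);             // attach
+ ///   if (MDNode *N = F->getMetadata("my.kind")) // read back, may be null
+ ///     use(N);                                  // hypothetical consumer
+ ///   F->setMetadata("my.kind", nullptr);        // erase
+ /// \endcode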
+
+ /// \brief Get all current metadata attachments.
+ void
+ getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;
+
+ /// \brief Drop metadata not in the given list.
+ ///
+ /// Drop all metadata from \c this not included in \c KnownIDs.
+ void dropUnknownMetadata(ArrayRef<unsigned> KnownIDs);
+
+ /// \brief Set the attached subprogram.
+ ///
+ /// Calls \a setMetadata() with \a LLVMContext::MD_dbg.
+ void setSubprogram(DISubprogram *SP);
+
+ /// \brief Get the attached subprogram.
+ ///
+ /// Calls \a getMetadata() with \a LLVMContext::MD_dbg and casts the result
+ /// to \a DISubprogram.
+ DISubprogram *getSubprogram() const;
+
+private:
+ void allocHungoffUselist();
+ template<int Idx> void setHungoffOperand(Constant *C);
+
+ // Shadow Value::setValueSubclassData with a private forwarding method so that
+ // subclasses cannot accidentally use it.
+ void setValueSubclassData(unsigned short D) {
+ Value::setValueSubclassData(D);
+ }
+ void setValueSubclassDataBit(unsigned Bit, bool On);
+
+ bool hasMetadataHashEntry() const {
+ return getGlobalObjectSubClassData() & HasMetadataHashEntryBit;
+ }
+ void setHasMetadataHashEntry(bool HasEntry) {
+ setGlobalObjectBit(HasMetadataHashEntryBit, HasEntry);
+ }
+
+ void clearMetadata();
+};
+
+template <>
+struct OperandTraits<Function> : public HungoffOperandTraits<3> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/FunctionInfo.h b/contrib/llvm/include/llvm/IR/FunctionInfo.h
new file mode 100644
index 0000000..eba088a
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/FunctionInfo.h
@@ -0,0 +1,241 @@
+//===-- llvm/FunctionInfo.h - Function Info Index ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// FunctionInfo.h This file contains the declarations of the classes that
+/// hold the function info index and summary.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_FUNCTIONINFO_H
+#define LLVM_IR_FUNCTIONINFO_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// \brief Function summary information to aid decisions and implementation of
+/// importing.
+///
+/// This is a separate class from FunctionInfo to enable lazy reading of this
+/// function summary information from the combined index file during importing.
+class FunctionSummary {
+private:
+ /// \brief Path of module containing function IR, used to locate module when
+ /// importing this function.
+ ///
+ /// This is only used during parsing of the combined function index, or when
+ /// parsing the per-module index for creation of the combined function index,
+ /// not during writing of the per-module index which doesn't contain a
+ /// module path string table.
+ StringRef ModulePath;
+
+ /// \brief Used to flag functions that have local linkage types and need to
+ /// have the module identifier appended before being placed into the combined
+ /// index, to disambiguate them from other functions with the same name.
+ ///
+ /// This is only used in the per-module function index, as it is consumed
+ /// while creating the combined index.
+ bool IsLocalFunction;
+
+ // The rest of the information is used to help decide whether importing
+ // is likely to be profitable.
+ // Other information will be added as importing is tuned, such as hotness
+ // (when profile data is available) and other function characteristics.
+
+ /// Number of instructions (ignoring, e.g., debug instructions) computed
+ /// during the initial compile step when the function index is first built.
+ unsigned InstCount;
+
+public:
+ /// Construct a summary object from summary data expected for all
+ /// summary records.
+ FunctionSummary(unsigned NumInsts) : InstCount(NumInsts) {}
+
+ /// Set the path to the module containing this function, for use in
+ /// the combined index.
+ void setModulePath(StringRef ModPath) { ModulePath = ModPath; }
+
+ /// Get the path to the module containing this function.
+ StringRef modulePath() const { return ModulePath; }
+
+ /// Record whether this is a local function in the per-module index.
+ void setLocalFunction(bool IsLocal) { IsLocalFunction = IsLocal; }
+
+ /// Check whether this was a local function, for use in creating
+ /// the combined index.
+ bool isLocalFunction() const { return IsLocalFunction; }
+
+ /// Get the instruction count recorded for this function.
+ unsigned instCount() const { return InstCount; }
+};
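+
+// Illustrative sketch (not part of the original header): building a summary
+// during the per-module compile step, assuming a Function &F and a computed
+// instruction count NumInsts:
+//
+//   auto Summary = llvm::make_unique<FunctionSummary>(NumInsts);
+//   Summary->setLocalFunction(F.hasLocalLinkage());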
+
+/// \brief Class to hold a pointer to a function summary and the information
+/// required for parsing it.
+///
+/// For the per-module index, this holds the bitcode offset
+/// of the corresponding function block. For the combined index,
+/// after parsing of the \a ValueSymbolTable, this initially
+/// holds the offset of the corresponding function summary bitcode
+/// record. After parsing the associated summary information from the summary
+/// block the \a FunctionSummary is populated and stored here.
+class FunctionInfo {
+private:
+ /// Function summary information used to help make ThinLTO importing
+ /// decisions.
+ std::unique_ptr<FunctionSummary> Summary;
+
+ /// \brief The bitcode offset corresponding to either the associated
+ /// function's function body record, or its function summary record,
+ /// depending on whether this is a per-module or combined index.
+ ///
+ /// This bitcode offset is written to or read from the associated
+ /// \a ValueSymbolTable entry for the function.
+ /// For the per-module index this holds the bitcode offset of the
+ /// function's body record within bitcode module block in its module,
+ /// which is used during lazy function parsing or ThinLTO importing.
+ /// For the combined index this holds the offset of the corresponding
+ /// function summary record, to enable associating the combined index
+ /// VST records with the summary records.
+ uint64_t BitcodeIndex;
+
+public:
+ /// Constructor used during parsing of VST entries.
+ FunctionInfo(uint64_t FuncOffset)
+ : Summary(nullptr), BitcodeIndex(FuncOffset) {}
+
+ /// Constructor used for per-module index bitcode writing.
+ FunctionInfo(uint64_t FuncOffset,
+ std::unique_ptr<FunctionSummary> FuncSummary)
+ : Summary(std::move(FuncSummary)), BitcodeIndex(FuncOffset) {}
+
+ /// Record the function summary information parsed out of the function
+ /// summary block during parsing or combined index creation.
+ void setFunctionSummary(std::unique_ptr<FunctionSummary> FuncSummary) {
+ Summary = std::move(FuncSummary);
+ }
+
+ /// Get the function summary recorded for this function.
+ FunctionSummary *functionSummary() const { return Summary.get(); }
+
+ /// Get the bitcode index recorded for this function, depending on
+ /// the index type.
+ uint64_t bitcodeIndex() const { return BitcodeIndex; }
+
+ /// Record the bitcode index for this function, depending on
+ /// the index type.
+ void setBitcodeIndex(uint64_t FuncOffset) { BitcodeIndex = FuncOffset; }
+};
+
+/// List of function info structures for a particular function name held
+/// in the FunctionMap. Requires a vector in the case of multiple
+/// COMDAT functions of the same name.
+typedef std::vector<std::unique_ptr<FunctionInfo>> FunctionInfoList;
+
+/// Map from function name to corresponding function info structures.
+typedef StringMap<FunctionInfoList> FunctionInfoMapTy;
+
+/// Type used for iterating through the function info map.
+typedef FunctionInfoMapTy::const_iterator const_funcinfo_iterator;
+typedef FunctionInfoMapTy::iterator funcinfo_iterator;
+
+/// String table to hold/own module path strings, which additionally holds the
+/// module ID assigned to each module during the plugin step. The StringMap
+/// makes a copy of and owns inserted strings.
+typedef StringMap<uint64_t> ModulePathStringTableTy;
+
+/// Class to hold module path string table and function map,
+/// and encapsulate methods for operating on them.
+class FunctionInfoIndex {
+private:
+ /// Map from function name to list of function information instances
+ /// for functions of that name (may be duplicates in the COMDAT case, e.g.).
+ FunctionInfoMapTy FunctionMap;
+
+ /// Holds strings for combined index, mapping to the corresponding module ID.
+ ModulePathStringTableTy ModulePathStringTable;
+
+public:
+ FunctionInfoIndex() = default;
+
+ // Disable the copy constructor and assignment operators, so
+ // no unexpected copying/moving occurs.
+ FunctionInfoIndex(const FunctionInfoIndex &) = delete;
+ void operator=(const FunctionInfoIndex &) = delete;
+
+ funcinfo_iterator begin() { return FunctionMap.begin(); }
+ const_funcinfo_iterator begin() const { return FunctionMap.begin(); }
+ funcinfo_iterator end() { return FunctionMap.end(); }
+ const_funcinfo_iterator end() const { return FunctionMap.end(); }
+
+ /// Get the list of function info objects for a given function.
+ const FunctionInfoList &getFunctionInfoList(StringRef FuncName) {
+ return FunctionMap[FuncName];
+ }
+
+ /// Find the function info list for the given function, returning an
+ /// iterator into the function map (end() if not found).
+ const_funcinfo_iterator findFunctionInfoList(StringRef FuncName) const {
+ return FunctionMap.find(FuncName);
+ }
+
+ /// Add a function info for a function of the given name.
+ void addFunctionInfo(StringRef FuncName, std::unique_ptr<FunctionInfo> Info) {
+ FunctionMap[FuncName].push_back(std::move(Info));
+ }
+
+ /// Iterator range to allow the writer to walk the module path string table
+ /// during emission.
+ iterator_range<StringMap<uint64_t>::const_iterator>
+ modPathStringEntries() const {
+ return llvm::make_range(ModulePathStringTable.begin(),
+ ModulePathStringTable.end());
+ }
+
+ /// Get the module ID recorded for the given module path.
+ uint64_t getModuleId(const StringRef ModPath) const {
+ return ModulePathStringTable.lookup(ModPath);
+ }
+
+ /// Add the given per-module index into this function index/summary,
+ /// assigning it the given module ID. Each module merged in should have
+ /// a unique ID, necessary for consistent renaming of promoted
+ /// static (local) variables.
+ void mergeFrom(std::unique_ptr<FunctionInfoIndex> Other,
+ uint64_t NextModuleId);
+
+ /// Convenience method for creating a promoted global name
+ /// for the given value name of a local, and its original module's ID.
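+ /// For example (illustrative), getGlobalNameForLocal("foo", 42) yields
+ /// "foo.llvm.42".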
+ static std::string getGlobalNameForLocal(StringRef Name, uint64_t ModId) {
+ SmallString<256> NewName(Name);
+ NewName += ".llvm.";
+ raw_svector_ostream(NewName) << ModId;
+ return NewName.str();
+ }
+
+ /// Add a new module path, mapped to the given module ID, and return a
+ /// StringRef owned by the string table map.
+ StringRef addModulePath(StringRef ModPath, uint64_t ModId) {
+ return ModulePathStringTable.insert(std::make_pair(ModPath, ModId))
+ .first->first();
+ }
+
+ /// Check if the given Module has any functions available for exporting
+ /// in the index. We consider any module present in the ModulePathStringTable
+ /// to have exported functions.
+ bool hasExportedFunctions(const Module &M) const {
+ return ModulePathStringTable.count(M.getModuleIdentifier());
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GVMaterializer.h b/contrib/llvm/include/llvm/IR/GVMaterializer.h
new file mode 100644
index 0000000..6cb593c
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GVMaterializer.h
@@ -0,0 +1,63 @@
+//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an abstract interface for loading a module from some
+// place. This interface allows incremental or random access loading of
+// functions from the file. This is useful for applications like JIT compilers
+// or interprocedural optimizers that do not need the entire program in memory
+// at the same time.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GVMATERIALIZER_H
+#define LLVM_IR_GVMATERIALIZER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include <system_error>
+#include <vector>
+
+namespace llvm {
+class Function;
+class GlobalValue;
+class Metadata;
+class Module;
+class StructType;
+
+class GVMaterializer {
+protected:
+ GVMaterializer() {}
+
+public:
+ virtual ~GVMaterializer();
+
+ /// Make sure the given GlobalValue is fully read.
+ ///
+ virtual std::error_code materialize(GlobalValue *GV) = 0;
+
+ /// Make sure the entire Module has been completely read.
+ ///
+ virtual std::error_code materializeModule() = 0;
+
+ virtual std::error_code materializeMetadata() = 0;
+ virtual void setStripDebugInfo() = 0;
+
+ /// Clients should override this method if the mapping between metadata
+ /// values and value IDs needs to be preserved, e.g. across materializer
+ /// instantiations. If OnlyTempMD is true, only metadata that has remained
+ /// temporary is recorded in the map.
+ virtual void
+ saveMetadataList(DenseMap<const Metadata *, unsigned> &MetadataToIDs,
+ bool OnlyTempMD) {}
+
+ virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/contrib/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
new file mode 100644
index 0000000..7cb13fa
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -0,0 +1,139 @@
+//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements an iterator for walking through the types indexed by
+// getelementptr instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
+
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/User.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+ template<typename ItTy = User::const_op_iterator>
+ class generic_gep_type_iterator
+ : public std::iterator<std::forward_iterator_tag, Type *, ptrdiff_t> {
+ typedef std::iterator<std::forward_iterator_tag,
+ Type *, ptrdiff_t> super;
+
+ ItTy OpIt;
+ PointerIntPair<Type *, 1> CurTy;
+ unsigned AddrSpace;
+ generic_gep_type_iterator() {}
+ public:
+
+ static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
+ generic_gep_type_iterator I;
+ I.CurTy.setPointer(Ty);
+ I.OpIt = It;
+ return I;
+ }
+ static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
+ ItTy It) {
+ generic_gep_type_iterator I;
+ I.CurTy.setPointer(Ty);
+ I.CurTy.setInt(true);
+ I.AddrSpace = AddrSpace;
+ I.OpIt = It;
+ return I;
+ }
+ static generic_gep_type_iterator end(ItTy It) {
+ generic_gep_type_iterator I;
+ I.OpIt = It;
+ return I;
+ }
+
+ bool operator==(const generic_gep_type_iterator& x) const {
+ return OpIt == x.OpIt;
+ }
+ bool operator!=(const generic_gep_type_iterator& x) const {
+ return !operator==(x);
+ }
+
+ Type *operator*() const {
+ if (CurTy.getInt())
+ return CurTy.getPointer()->getPointerTo(AddrSpace);
+ return CurTy.getPointer();
+ }
+
+ Type *getIndexedType() const {
+ if (CurTy.getInt())
+ return CurTy.getPointer();
+ CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
+ return CT->getTypeAtIndex(getOperand());
+ }
+
+ // This is a non-standard operator->. It allows you to call methods on the
+ // current type directly.
+ Type *operator->() const { return operator*(); }
+
+ Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
+
+ generic_gep_type_iterator& operator++() { // Preincrement
+ if (CurTy.getInt()) {
+ CurTy.setInt(false);
+ } else if (CompositeType *CT =
+ dyn_cast<CompositeType>(CurTy.getPointer())) {
+ CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
+ } else {
+ CurTy.setPointer(nullptr);
+ }
+ ++OpIt;
+ return *this;
+ }
+
+ generic_gep_type_iterator operator++(int) { // Postincrement
+ generic_gep_type_iterator tmp = *this; ++*this; return tmp;
+ }
+ };
+
+ typedef generic_gep_type_iterator<> gep_type_iterator;
+
+ inline gep_type_iterator gep_type_begin(const User *GEP) {
+ auto *GEPOp = cast<GEPOperator>(GEP);
+ return gep_type_iterator::begin(
+ GEPOp->getSourceElementType(),
+ cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
+ ->getAddressSpace(),
+ GEP->op_begin() + 1);
+ }
+ inline gep_type_iterator gep_type_end(const User *GEP) {
+ return gep_type_iterator::end(GEP->op_end());
+ }
+ inline gep_type_iterator gep_type_begin(const User &GEP) {
+ auto &GEPOp = cast<GEPOperator>(GEP);
+ return gep_type_iterator::begin(
+ GEPOp.getSourceElementType(),
+ cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
+ ->getAddressSpace(),
+ GEP.op_begin() + 1);
+ }
+ inline gep_type_iterator gep_type_end(const User &GEP) {
+ return gep_type_iterator::end(GEP.op_end());
+ }
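+
+ // Illustrative sketch (not part of the original header): walking the types
+ // indexed by a GEP, assuming a valid GetElementPtrInst *GEP:
+ //
+ //   for (gep_type_iterator GTI = gep_type_begin(GEP),
+ //                          E = gep_type_end(GEP);
+ //        GTI != E; ++GTI) {
+ //     Type *Ty = *GTI;               // aggregate type indexed at this step
+ //     Value *Idx = GTI.getOperand(); // the corresponding index value
+ //   }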
+
+ template<typename T>
+ inline generic_gep_type_iterator<const T *>
+ gep_type_begin(Type *Op0, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
+ }
+
+ template<typename T>
+ inline generic_gep_type_iterator<const T *>
+ gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::end(A.end());
+ }
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GlobalAlias.h b/contrib/llvm/include/llvm/IR/GlobalAlias.h
new file mode 100644
index 0000000..b077214
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GlobalAlias.h
@@ -0,0 +1,124 @@
+//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the GlobalAlias class, which
+// represents a single function or variable alias in the IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALALIAS_H
+#define LLVM_IR_GLOBALALIAS_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/OperandTraits.h"
+
+namespace llvm {
+
+class Module;
+template <typename ValueSubClass> class SymbolTableListTraits;
+
+class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
+ friend class SymbolTableListTraits<GlobalAlias>;
+ void operator=(const GlobalAlias &) = delete;
+ GlobalAlias(const GlobalAlias &) = delete;
+
+ void setParent(Module *parent);
+
+ GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
+ const Twine &Name, Constant *Aliasee, Module *Parent);
+
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+
+ /// If a parent module is specified, the alias is automatically inserted into
+ /// the end of the specified module's alias list.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Constant *Aliasee, Module *Parent);
+
+ // Without the Aliasee.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ Module *Parent);
+
+ // The module is taken from the Aliasee.
+ static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
+ LinkageTypes Linkage, const Twine &Name,
+ GlobalValue *Aliasee);
+
+ // Type, Parent and AddressSpace taken from the Aliasee.
+ static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
+ GlobalValue *Aliasee);
+
+ // Linkage, Type, Parent and AddressSpace taken from the Aliasee.
+ static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);
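+
+ /// Example (an illustrative sketch): given a GlobalValue *Target already
+ /// placed in a module, the most compact overload infers linkage, type,
+ /// parent and address space from the aliasee:
+ /// \code
+ ///   GlobalAlias *GA = GlobalAlias::create("target_alias", Target);
+ /// \endcode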
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
+
+ /// removeFromParent - This method unlinks 'this' from the containing module,
+ /// but does not delete it.
+ ///
+ void removeFromParent() override;
+
+ /// eraseFromParent - This method unlinks 'this' from the containing module
+ /// and deletes it.
+ ///
+ void eraseFromParent() override;
+
+ /// These methods retrieve and set the alias target.
+ void setAliasee(Constant *Aliasee);
+ const Constant *getAliasee() const {
+ return const_cast<GlobalAlias *>(this)->getAliasee();
+ }
+ Constant *getAliasee() {
+ return getOperand(0);
+ }
+
+ const GlobalObject *getBaseObject() const {
+ return const_cast<GlobalAlias *>(this)->getBaseObject();
+ }
+ GlobalObject *getBaseObject() {
+ return dyn_cast<GlobalObject>(getAliasee()->stripInBoundsOffsets());
+ }
+
+ const GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) const {
+ return const_cast<GlobalAlias *>(this)->getBaseObject(DL, Offset);
+ }
+ GlobalObject *getBaseObject(const DataLayout &DL, APInt &Offset) {
+ return dyn_cast<GlobalObject>(
+ getAliasee()->stripAndAccumulateInBoundsConstantOffsets(DL, Offset));
+ }
+
+ static bool isValidLinkage(LinkageTypes L) {
+ return isExternalLinkage(L) || isLocalLinkage(L) ||
+ isWeakLinkage(L) || isLinkOnceLinkage(L);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::GlobalAliasVal;
+ }
+};
+
+template <>
+struct OperandTraits<GlobalAlias> :
+ public FixedNumOperandTraits<GlobalAlias, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Constant)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GlobalObject.h b/contrib/llvm/include/llvm/IR/GlobalObject.h
new file mode 100644
index 0000000..ee111a0
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GlobalObject.h
@@ -0,0 +1,78 @@
+//===-- llvm/GlobalObject.h - Class to represent global objects -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This represents an independent object. That is, a function or a global
+// variable, but not an alias.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALOBJECT_H
+#define LLVM_IR_GLOBALOBJECT_H
+
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GlobalValue.h"
+
+namespace llvm {
+class Comdat;
+class Module;
+
+class GlobalObject : public GlobalValue {
+ GlobalObject(const GlobalObject &) = delete;
+
+protected:
+ GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+ LinkageTypes Linkage, const Twine &Name,
+ unsigned AddressSpace = 0)
+ : GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name, AddressSpace),
+ ObjComdat(nullptr) {
+ setGlobalValueSubClassData(0);
+ }
+
+ std::string Section; // Section to emit this into, empty means default
+ Comdat *ObjComdat;
+ static const unsigned AlignmentBits = 5;
+ static const unsigned GlobalObjectSubClassDataBits =
+ GlobalValueSubClassDataBits - AlignmentBits;
+
+private:
+ static const unsigned AlignmentMask = (1 << AlignmentBits) - 1;
+
+public:
+ unsigned getAlignment() const {
+ unsigned Data = getGlobalValueSubClassData();
+ unsigned AlignmentData = Data & AlignmentMask;
+ return (1u << AlignmentData) >> 1;
+ }
+ void setAlignment(unsigned Align);
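+
+ // The alignment is kept log2-encoded in the low AlignmentBits of the
+ // subclass data: a stored value of 0 decodes to "no alignment", and a
+ // stored value of N decodes to 1 << (N - 1). For example, a stored value
+ // of 4 decodes to (1u << 4) >> 1 == 8 bytes.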
+
+ unsigned getGlobalObjectSubClassData() const;
+ void setGlobalObjectSubClassData(unsigned Val);
+
+ bool hasSection() const { return !StringRef(getSection()).empty(); }
+ const char *getSection() const { return Section.c_str(); }
+ void setSection(StringRef S);
+
+ bool hasComdat() const { return getComdat() != nullptr; }
+ const Comdat *getComdat() const { return ObjComdat; }
+ Comdat *getComdat() { return ObjComdat; }
+ void setComdat(Comdat *C) { ObjComdat = C; }
+
+ void copyAttributesFrom(const GlobalValue *Src) override;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::FunctionVal ||
+ V->getValueID() == Value::GlobalVariableVal;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GlobalValue.h b/contrib/llvm/include/llvm/IR/GlobalValue.h
new file mode 100644
index 0000000..4fa4e7d
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GlobalValue.h
@@ -0,0 +1,370 @@
+//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a common base class of all globally definable objects. As such,
+// it is subclassed by GlobalVariable, GlobalAlias and by Function. This is
+// used because you can do certain things with these global objects that you
+// can't do to anything else. For example, use the address of one as a
+// constant.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALVALUE_H
+#define LLVM_IR_GLOBALVALUE_H
+
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DerivedTypes.h"
+#include <system_error>
+
+namespace llvm {
+
+class Comdat;
+class PointerType;
+class Module;
+
+namespace Intrinsic {
+ enum ID : unsigned;
+}
+
+class GlobalValue : public Constant {
+ GlobalValue(const GlobalValue &) = delete;
+public:
+ /// @brief An enumeration for the kinds of linkage for global values.
+ enum LinkageTypes {
+ ExternalLinkage = 0,///< Externally visible function
+ AvailableExternallyLinkage, ///< Available for inspection, not emission.
+ LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
+ LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
+ WeakAnyLinkage, ///< Keep one copy of named function when linking (weak)
+ WeakODRLinkage, ///< Same, but only replaced by something equivalent.
+ AppendingLinkage, ///< Special purpose, only applies to global arrays
+ InternalLinkage, ///< Rename collisions when linking (static functions).
+ PrivateLinkage, ///< Like Internal, but omit from symbol table.
+ ExternalWeakLinkage,///< ExternalWeak linkage description.
+ CommonLinkage ///< Tentative definitions.
+ };
+
+ /// @brief An enumeration for the kinds of visibility of global values.
+ enum VisibilityTypes {
+ DefaultVisibility = 0, ///< The GV is visible
+ HiddenVisibility, ///< The GV is hidden
+ ProtectedVisibility ///< The GV is protected
+ };
+
+ /// @brief Storage classes of global values for PE targets.
+ enum DLLStorageClassTypes {
+ DefaultStorageClass = 0,
+ DLLImportStorageClass = 1, ///< Function to be imported from DLL
+ DLLExportStorageClass = 2 ///< Function to be accessible from DLL.
+ };
+
+protected:
+ GlobalValue(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
+ LinkageTypes Linkage, const Twine &Name, unsigned AddressSpace)
+ : Constant(PointerType::get(Ty, AddressSpace), VTy, Ops, NumOps),
+ ValueType(Ty), Linkage(Linkage), Visibility(DefaultVisibility),
+ UnnamedAddr(0), DllStorageClass(DefaultStorageClass),
+ ThreadLocal(NotThreadLocal), IntID((Intrinsic::ID)0U), Parent(nullptr) {
+ setName(Name);
+ }
+
+ Type *ValueType;
+ // Note: VC++ treats enums as signed, so an extra bit is required to prevent
+ // Linkage and Visibility from turning into negative values.
+ LinkageTypes Linkage : 5; // The linkage of this global
+ unsigned Visibility : 2; // The visibility style of this global
+ unsigned UnnamedAddr : 1; // This value's address is not significant
+ unsigned DllStorageClass : 2; // DLL storage class
+
+ unsigned ThreadLocal : 3; // Is this symbol "Thread Local"? If so, what is
+ // the desired model?
+ static const unsigned GlobalValueSubClassDataBits = 19;
+
+private:
+ // Give subclasses access to what otherwise would be wasted padding.
+ // (19 + 3 + 2 + 1 + 2 + 5) == 32.
+ unsigned SubClassData : GlobalValueSubClassDataBits;
+
+ friend class Constant;
+ void destroyConstantImpl();
+ Value *handleOperandChangeImpl(Value *From, Value *To, Use *U);
+
+protected:
+ /// \brief The intrinsic ID for this subclass (which must be a Function).
+ ///
+ /// This member is defined by this class, but not used for anything.
+ /// Subclasses can use it to store their intrinsic ID, if they have one.
+ ///
+ /// This is stored here to save space in Function on 64-bit hosts.
+ Intrinsic::ID IntID;
+
+ unsigned getGlobalValueSubClassData() const {
+ return SubClassData;
+ }
+ void setGlobalValueSubClassData(unsigned V) {
+ assert(V < (1 << GlobalValueSubClassDataBits) && "It will not fit");
+ SubClassData = V;
+ }
+
+ Module *Parent; // The containing module.
+public:
+ enum ThreadLocalMode {
+ NotThreadLocal = 0,
+ GeneralDynamicTLSModel,
+ LocalDynamicTLSModel,
+ InitialExecTLSModel,
+ LocalExecTLSModel
+ };
+
+ ~GlobalValue() override {
+ removeDeadConstantUsers(); // remove any dead constants using this.
+ }
+
+ unsigned getAlignment() const;
+
+ bool hasUnnamedAddr() const { return UnnamedAddr; }
+ void setUnnamedAddr(bool Val) { UnnamedAddr = Val; }
+
+ bool hasComdat() const { return getComdat() != nullptr; }
+ Comdat *getComdat();
+ const Comdat *getComdat() const {
+ return const_cast<GlobalValue *>(this)->getComdat();
+ }
+
+ VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
+ bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
+ bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
+ bool hasProtectedVisibility() const {
+ return Visibility == ProtectedVisibility;
+ }
+ void setVisibility(VisibilityTypes V) {
+ assert((!hasLocalLinkage() || V == DefaultVisibility) &&
+ "local linkage requires default visibility");
+ Visibility = V;
+ }
+
+ /// If the value is "Thread Local", its value isn't shared by the threads.
+ bool isThreadLocal() const { return getThreadLocalMode() != NotThreadLocal; }
+ void setThreadLocal(bool Val) {
+ setThreadLocalMode(Val ? GeneralDynamicTLSModel : NotThreadLocal);
+ }
+ void setThreadLocalMode(ThreadLocalMode Val) {
+ assert(Val == NotThreadLocal || getValueID() != Value::FunctionVal);
+ ThreadLocal = Val;
+ }
+ ThreadLocalMode getThreadLocalMode() const {
+ return static_cast<ThreadLocalMode>(ThreadLocal);
+ }
+
+ DLLStorageClassTypes getDLLStorageClass() const {
+ return DLLStorageClassTypes(DllStorageClass);
+ }
+ bool hasDLLImportStorageClass() const {
+ return DllStorageClass == DLLImportStorageClass;
+ }
+ bool hasDLLExportStorageClass() const {
+ return DllStorageClass == DLLExportStorageClass;
+ }
+ void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }
+
+ bool hasSection() const { return !StringRef(getSection()).empty(); }
+ // It is unfortunate that we have to use "char *" in here since this is
+ // always non-null, but:
+ // * The C API expects a null terminated string, so we cannot use StringRef.
+ // * The C API expects us to own it, so we cannot use a std::string.
+ // * For GlobalAliases we can fail to find the section and we have to
+ // return "", so we cannot use a "const std::string &".
+ const char *getSection() const;
+
+ /// Global values are always pointers.
+ PointerType *getType() const { return cast<PointerType>(User::getType()); }
+
+ Type *getValueType() const { return ValueType; }
+
+ static LinkageTypes getLinkOnceLinkage(bool ODR) {
+ return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
+ }
+ static LinkageTypes getWeakLinkage(bool ODR) {
+ return ODR ? WeakODRLinkage : WeakAnyLinkage;
+ }
+
+ static bool isExternalLinkage(LinkageTypes Linkage) {
+ return Linkage == ExternalLinkage;
+ }
+ static bool isAvailableExternallyLinkage(LinkageTypes Linkage) {
+ return Linkage == AvailableExternallyLinkage;
+ }
+ static bool isLinkOnceODRLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkOnceODRLinkage;
+ }
+ static bool isLinkOnceLinkage(LinkageTypes Linkage) {
+ return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage;
+ }
+ static bool isWeakAnyLinkage(LinkageTypes Linkage) {
+ return Linkage == WeakAnyLinkage;
+ }
+ static bool isWeakODRLinkage(LinkageTypes Linkage) {
+ return Linkage == WeakODRLinkage;
+ }
+ static bool isWeakLinkage(LinkageTypes Linkage) {
+ return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
+ }
+ static bool isAppendingLinkage(LinkageTypes Linkage) {
+ return Linkage == AppendingLinkage;
+ }
+ static bool isInternalLinkage(LinkageTypes Linkage) {
+ return Linkage == InternalLinkage;
+ }
+ static bool isPrivateLinkage(LinkageTypes Linkage) {
+ return Linkage == PrivateLinkage;
+ }
+ static bool isLocalLinkage(LinkageTypes Linkage) {
+ return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
+ }
+ static bool isExternalWeakLinkage(LinkageTypes Linkage) {
+ return Linkage == ExternalWeakLinkage;
+ }
+ static bool isCommonLinkage(LinkageTypes Linkage) {
+ return Linkage == CommonLinkage;
+ }
+
+ /// Whether the definition of this global may be discarded if it is not used
+ /// in its compilation unit.
+ static bool isDiscardableIfUnused(LinkageTypes Linkage) {
+ return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) ||
+ isAvailableExternallyLinkage(Linkage);
+ }
+
+ /// Whether the definition of this global may be replaced by something
+ /// non-equivalent at link time. For example, if a function has weak linkage
+ /// then the code defining it may be replaced by different code.
+ static bool mayBeOverridden(LinkageTypes Linkage) {
+ return Linkage == WeakAnyLinkage || Linkage == LinkOnceAnyLinkage ||
+ Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
+ }
+
+ /// Whether the definition of this global may be replaced at link time. NB:
+ /// Using this method outside of the code generators is almost always a
+ /// mistake: when working at the IR level use mayBeOverridden instead as it
+ /// knows about ODR semantics.
+ static bool isWeakForLinker(LinkageTypes Linkage) {
+ return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage ||
+ Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage ||
+ Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
+ }
+
+ bool hasExternalLinkage() const { return isExternalLinkage(Linkage); }
+ bool hasAvailableExternallyLinkage() const {
+ return isAvailableExternallyLinkage(Linkage);
+ }
+ bool hasLinkOnceLinkage() const {
+ return isLinkOnceLinkage(Linkage);
+ }
+ bool hasLinkOnceODRLinkage() const { return isLinkOnceODRLinkage(Linkage); }
+ bool hasWeakLinkage() const {
+ return isWeakLinkage(Linkage);
+ }
+ bool hasWeakAnyLinkage() const {
+ return isWeakAnyLinkage(Linkage);
+ }
+ bool hasWeakODRLinkage() const {
+ return isWeakODRLinkage(Linkage);
+ }
+ bool hasAppendingLinkage() const { return isAppendingLinkage(Linkage); }
+ bool hasInternalLinkage() const { return isInternalLinkage(Linkage); }
+ bool hasPrivateLinkage() const { return isPrivateLinkage(Linkage); }
+ bool hasLocalLinkage() const { return isLocalLinkage(Linkage); }
+ bool hasExternalWeakLinkage() const { return isExternalWeakLinkage(Linkage); }
+ bool hasCommonLinkage() const { return isCommonLinkage(Linkage); }
+
+ void setLinkage(LinkageTypes LT) {
+ if (isLocalLinkage(LT))
+ Visibility = DefaultVisibility;
+ Linkage = LT;
+ }
+ LinkageTypes getLinkage() const { return Linkage; }
+
+ bool isDiscardableIfUnused() const {
+ return isDiscardableIfUnused(Linkage);
+ }
+
+ bool mayBeOverridden() const { return mayBeOverridden(Linkage); }
+
+ bool isWeakForLinker() const { return isWeakForLinker(Linkage); }
+
+ /// Copy all additional attributes (those not needed to create a GlobalValue)
+ /// from the GlobalValue Src to this one.
+ virtual void copyAttributesFrom(const GlobalValue *Src);
+
+ /// If the name begins with the special LLVM prefix ('\1'), which tells the
+ /// asm printer not to emit the usual symbol prefix before the symbol name,
+ /// return the linkage name with that special prefix stripped.
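+ /// For example, getRealLinkageName("\1foo") returns "foo", while
+ /// getRealLinkageName("bar") is returned unchanged.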
+ static StringRef getRealLinkageName(StringRef Name) {
+ if (!Name.empty() && Name[0] == '\1')
+ return Name.substr(1);
+ return Name;
+ }
+
+/// @name Materialization
+/// Materialization is used to construct functions only as they're needed. This
+/// is useful to reduce memory usage in LLVM or parsing work done by the
+/// BitcodeReader to load the Module.
+/// @{
+
+ /// If this function's Module is being lazily streamed in functions from disk
+ /// or some other source, this method can be used to check to see if the
+ /// function has been read in yet or not.
+ bool isMaterializable() const;
+
+ /// Make sure this GlobalValue is fully read. Returns an error code describing
+ /// the problem if the module is corrupt, or a success value otherwise.
+ std::error_code materialize();
+
+/// @}
+
+ /// Return true if the primary definition of this global value is outside of
+ /// the current translation unit.
+ bool isDeclaration() const;
+
+ bool isDeclarationForLinker() const {
+ if (hasAvailableExternallyLinkage())
+ return true;
+
+ return isDeclaration();
+ }
+
+ /// Returns true if this global's definition will be the one chosen by the
+ /// linker.
+ bool isStrongDefinitionForLinker() const {
+ return !(isDeclarationForLinker() || isWeakForLinker());
+ }
+
+ /// This method unlinks 'this' from the containing module, but does not delete
+ /// it.
+ virtual void removeFromParent() = 0;
+
+ /// This method unlinks 'this' from the containing module and deletes it.
+ virtual void eraseFromParent() = 0;
+
+ /// Get the module that this global value is contained inside of...
+ Module *getParent() { return Parent; }
+ const Module *getParent() const { return Parent; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Value *V) {
+ return V->getValueID() == Value::FunctionVal ||
+ V->getValueID() == Value::GlobalVariableVal ||
+ V->getValueID() == Value::GlobalAliasVal;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/GlobalVariable.h b/contrib/llvm/include/llvm/IR/GlobalVariable.h
new file mode 100644
index 0000000..342bdc0
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/GlobalVariable.h
@@ -0,0 +1,177 @@
+//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the GlobalVariable class, which
+// represents a single global variable (or constant) in the VM.
+//
+// Global variables are constant pointers that refer to hunks of space that are
+// allocated by either the VM, or by the linker in a static compiler. A global
+// variable may have an initial value, which is copied into the executable's
+// .data area. Global Constants are required to have initializers.
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_GLOBALVARIABLE_H
+#define LLVM_IR_GLOBALVARIABLE_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/OperandTraits.h"
+
+namespace llvm {
+
+class Module;
+class Constant;
+template <typename ValueSubClass> class SymbolTableListTraits;
+
+class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
+ friend class SymbolTableListTraits<GlobalVariable>;
+ void *operator new(size_t, unsigned) = delete;
+ void operator=(const GlobalVariable &) = delete;
+ GlobalVariable(const GlobalVariable &) = delete;
+
+ void setParent(Module *parent);
+
+ bool isConstantGlobal : 1; // Is this a global constant?
+ bool isExternallyInitializedConstant : 1; // Is this a global whose value
+ // can change from its initial
+ // value before global
+ // initializers are run?
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+
+ /// GlobalVariable ctor - If a parent module is specified, the global is
+ /// automatically inserted into the end of the specified modules global list.
+ GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
+ Constant *Initializer = nullptr, const Twine &Name = "",
+ ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
+ bool isExternallyInitialized = false);
+ /// GlobalVariable ctor - This creates a global and inserts it before the
+ /// specified other global.
+ GlobalVariable(Module &M, Type *Ty, bool isConstant,
+ LinkageTypes Linkage, Constant *Initializer,
+ const Twine &Name = "", GlobalVariable *InsertBefore = nullptr,
+ ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
+ bool isExternallyInitialized = false);
+
+ ~GlobalVariable() override {
+ // FIXME: needed by operator delete
+ setGlobalVariableNumOperands(1);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Definitions have initializers, declarations don't.
+ ///
+ inline bool hasInitializer() const { return !isDeclaration(); }
+
+ /// hasDefinitiveInitializer - Whether the global variable has an initializer,
+ /// and any other instances of the global (this can happen due to weak
+ /// linkage) are guaranteed to have the same initializer.
+ ///
+ /// Note that if you want to transform a global, you must use
+ /// hasUniqueInitializer() instead, because of the *_odr linkage type.
+ ///
+ /// Example:
+ ///
+ /// @a = global SomeType* null - Initializer is both definitive and unique.
+ ///
+ /// @b = global weak SomeType* null - Initializer is neither definitive nor
+ /// unique.
+ ///
+ /// @c = global weak_odr SomeType* null - Initializer is definitive, but not
+ /// unique.
+ inline bool hasDefinitiveInitializer() const {
+ return hasInitializer() &&
+ // The initializer of a global variable with weak linkage may change at
+ // link time.
+ !mayBeOverridden() &&
+ // The initializer of a global variable with the externally_initialized
+ // marker may change at runtime before C++ initializers are evaluated.
+ !isExternallyInitialized();
+ }
+
+ /// hasUniqueInitializer - Whether the global variable has an initializer, and
+ /// any changes made to the initializer will turn up in the final executable.
+ inline bool hasUniqueInitializer() const {
+ return
+ // We need to be sure this is the definition that will actually be used
+ isStrongDefinitionForLinker() &&
+ // It is not safe to modify initializers of global variables with the
+ // external_initializer marker since the value may be changed at runtime
+ // before C++ initializers are evaluated.
+ !isExternallyInitialized();
+ }
+
+ /// getInitializer - Return the initializer for this global variable. It is
+ /// illegal to call this method if the global is external, because we cannot
+ /// tell what the value is initialized to!
+ ///
+ inline const Constant *getInitializer() const {
+ assert(hasInitializer() && "GV doesn't have initializer!");
+ return static_cast<Constant*>(Op<0>().get());
+ }
+ inline Constant *getInitializer() {
+ assert(hasInitializer() && "GV doesn't have initializer!");
+ return static_cast<Constant*>(Op<0>().get());
+ }
+ /// setInitializer - Sets the initializer for this global variable, removing
+ /// any existing initializer if InitVal == nullptr. If this GV has type T*, the
+ /// initializer must have type T.
+ void setInitializer(Constant *InitVal);
+
+ /// If the value is a global constant, its value is immutable throughout the
+ /// runtime execution of the program. Assigning a value into the constant
+ /// leads to undefined behavior.
+ ///
+ bool isConstant() const { return isConstantGlobal; }
+ void setConstant(bool Val) { isConstantGlobal = Val; }
+
+ bool isExternallyInitialized() const {
+ return isExternallyInitializedConstant;
+ }
+ void setExternallyInitialized(bool Val) {
+ isExternallyInitializedConstant = Val;
+ }
+
+ /// copyAttributesFrom - copy all additional attributes (those not needed to
+ /// create a GlobalVariable) from the GlobalVariable Src to this one.
+ void copyAttributesFrom(const GlobalValue *Src) override;
+
+ /// removeFromParent - This method unlinks 'this' from the containing module,
+ /// but does not delete it.
+ ///
+ void removeFromParent() override;
+
+ /// eraseFromParent - This method unlinks 'this' from the containing module
+ /// and deletes it.
+ ///
+ void eraseFromParent() override;
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::GlobalVariableVal;
+ }
+};
+
+template <>
+struct OperandTraits<GlobalVariable> :
+ public OptionalOperandTraits<GlobalVariable> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/IRBuilder.h b/contrib/llvm/include/llvm/IR/IRBuilder.h
new file mode 100644
index 0000000..7fe04f2
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IRBuilder.h
@@ -0,0 +1,1782 @@
+//===---- llvm/IRBuilder.h - Builder for LLVM Instructions ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IRBuilder class, which is used as a convenient way
+// to create LLVM instructions with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IRBUILDER_H
+#define LLVM_IR_IRBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/ConstantFolder.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/CBindingWrapping.h"
+
+namespace llvm {
+class MDNode;
+
+/// \brief This provides the default implementation of the IRBuilder
+/// 'InsertHelper' method that is called whenever an instruction is created by
+/// IRBuilder and needs to be inserted.
+///
+/// By default, this inserts the instruction at the insertion point.
+template <bool preserveNames = true>
+class IRBuilderDefaultInserter {
+protected:
+ void InsertHelper(Instruction *I, const Twine &Name,
+ BasicBlock *BB, BasicBlock::iterator InsertPt) const {
+ if (BB) BB->getInstList().insert(InsertPt, I);
+ if (preserveNames)
+ I->setName(Name);
+ }
+};
+
+/// \brief Common base class shared among various IRBuilders.
+class IRBuilderBase {
+ DebugLoc CurDbgLocation;
+
+protected:
+ BasicBlock *BB;
+ BasicBlock::iterator InsertPt;
+ LLVMContext &Context;
+
+ MDNode *DefaultFPMathTag;
+ FastMathFlags FMF;
+
+public:
+ IRBuilderBase(LLVMContext &context, MDNode *FPMathTag = nullptr)
+ : Context(context), DefaultFPMathTag(FPMathTag), FMF() {
+ ClearInsertionPoint();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Builder configuration methods
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Clear the insertion point: created instructions will not be
+ /// inserted into a block.
+ void ClearInsertionPoint() {
+ BB = nullptr;
+ InsertPt.reset(nullptr);
+ }
+
+ BasicBlock *GetInsertBlock() const { return BB; }
+ BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
+ LLVMContext &getContext() const { return Context; }
+
+ /// \brief This specifies that created instructions should be appended to the
+ /// end of the specified block.
+ void SetInsertPoint(BasicBlock *TheBB) {
+ BB = TheBB;
+ InsertPt = BB->end();
+ }
+
+ /// \brief This specifies that created instructions should be inserted before
+ /// the specified instruction.
+ void SetInsertPoint(Instruction *I) {
+ BB = I->getParent();
+ InsertPt = I->getIterator();
+ assert(InsertPt != BB->end() && "Can't read debug loc from end()");
+ SetCurrentDebugLocation(I->getDebugLoc());
+ }
+
+ /// \brief This specifies that created instructions should be inserted at the
+ /// specified point.
+ void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
+ BB = TheBB;
+ InsertPt = IP;
+ if (IP != TheBB->end())
+ SetCurrentDebugLocation(IP->getDebugLoc());
+ }
+
+ /// \brief Set location information used by debugging information.
+ void SetCurrentDebugLocation(DebugLoc L) { CurDbgLocation = std::move(L); }
+
+ /// \brief Get location information used by debugging information.
+ const DebugLoc &getCurrentDebugLocation() const { return CurDbgLocation; }
+
+ /// \brief If this builder has a current debug location, set it on the
+ /// specified instruction.
+ void SetInstDebugLocation(Instruction *I) const {
+ if (CurDbgLocation)
+ I->setDebugLoc(CurDbgLocation);
+ }
+
+ /// \brief Get the return type of the current function that we're emitting
+ /// into.
+ Type *getCurrentFunctionReturnType() const;
+
+ /// InsertPoint - A saved insertion point.
+ class InsertPoint {
+ BasicBlock *Block;
+ BasicBlock::iterator Point;
+
+ public:
+ /// \brief Creates a new insertion point which doesn't point to anything.
+ InsertPoint() : Block(nullptr) {}
+
+ /// \brief Creates a new insertion point at the given location.
+ InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
+ : Block(InsertBlock), Point(InsertPoint) {}
+
+ /// \brief Returns true if this insert point is set.
+ bool isSet() const { return (Block != nullptr); }
+
+ llvm::BasicBlock *getBlock() const { return Block; }
+ llvm::BasicBlock::iterator getPoint() const { return Point; }
+ };
+
+ /// \brief Returns the current insert point.
+ InsertPoint saveIP() const {
+ return InsertPoint(GetInsertBlock(), GetInsertPoint());
+ }
+
+ /// \brief Returns the current insert point, clearing it in the process.
+ InsertPoint saveAndClearIP() {
+ InsertPoint IP(GetInsertBlock(), GetInsertPoint());
+ ClearInsertionPoint();
+ return IP;
+ }
+
+ /// \brief Sets the current insert point to a previously-saved location.
+ void restoreIP(InsertPoint IP) {
+ if (IP.isSet())
+ SetInsertPoint(IP.getBlock(), IP.getPoint());
+ else
+ ClearInsertionPoint();
+ }
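+
+ /// Example (an illustrative sketch, assuming an IRBuilderBase &Builder and
+ /// a scratch BasicBlock *Tmp valid in the caller):
+ /// \code
+ ///   IRBuilderBase::InsertPoint IP = Builder.saveIP(); // remember position
+ ///   Builder.SetInsertPoint(Tmp);
+ ///   // ... emit instructions into Tmp ...
+ ///   Builder.restoreIP(IP);                            // go back
+ /// \endcode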
+
+ /// \brief Get the floating point math metadata being used.
+ MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
+
+ /// \brief Get the flags to be applied to created floating point ops
+ FastMathFlags getFastMathFlags() const { return FMF; }
+
+ /// \brief Clear the fast-math flags.
+ void clearFastMathFlags() { FMF.clear(); }
+
+ /// \brief Set the floating point math metadata to be used.
+ void SetDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
+
+ /// \brief Set the fast-math flags to be used with generated fp-math operators
+ void SetFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
+
+ //===--------------------------------------------------------------------===//
+ // RAII helpers.
+ //===--------------------------------------------------------------------===//
+
+ // \brief RAII object that stores the current insertion point and restores it
+ // when the object is destroyed. This includes the debug location.
+ class InsertPointGuard {
+ IRBuilderBase &Builder;
+ AssertingVH<BasicBlock> Block;
+ BasicBlock::iterator Point;
+ DebugLoc DbgLoc;
+
+ InsertPointGuard(const InsertPointGuard &) = delete;
+ InsertPointGuard &operator=(const InsertPointGuard &) = delete;
+
+ public:
+ InsertPointGuard(IRBuilderBase &B)
+ : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
+ DbgLoc(B.getCurrentDebugLocation()) {}
+
+ ~InsertPointGuard() {
+ Builder.restoreIP(InsertPoint(Block, Point));
+ Builder.SetCurrentDebugLocation(DbgLoc);
+ }
+ };
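+
+ // Illustrative sketch (not part of the original header): the guard restores
+ // both the insertion point and the debug location on scope exit, assuming
+ // an IRBuilderBase &B and a BasicBlock *Scratch:
+ //
+ //   {
+ //     IRBuilderBase::InsertPointGuard Guard(B);
+ //     B.SetInsertPoint(Scratch);
+ //     // ... emit temporary instructions ...
+ //   } // insertion point and debug location restored here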
+
+ // \brief RAII object that stores the current fast math settings and restores
+ // them when the object is destroyed.
+ class FastMathFlagGuard {
+ IRBuilderBase &Builder;
+ FastMathFlags FMF;
+ MDNode *FPMathTag;
+
+ FastMathFlagGuard(const FastMathFlagGuard &) = delete;
+ FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
+
+ public:
+ FastMathFlagGuard(IRBuilderBase &B)
+ : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag) {}
+
+ ~FastMathFlagGuard() {
+ Builder.FMF = FMF;
+ Builder.DefaultFPMathTag = FPMathTag;
+ }
+ };
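+
+ // A minimal usage sketch (Builder, L, and R are assumed placeholders):
+ //   {
+ //     FastMathFlagGuard FMFG(Builder);
+ //     FastMathFlags Flags;
+ //     Flags.setNoNaNs();
+ //     Builder.SetFastMathFlags(Flags);
+ //     Builder.CreateFAdd(L, R);  // carries the nnan flag
+ //   } // previous fast-math state restored here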
+
+ //===--------------------------------------------------------------------===//
+ // Miscellaneous creation methods.
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Make a new global variable containing a string constant.
+ ///
+ /// Make a new global variable whose initializer is an array of i8 filled in
+ /// with the null-terminated string value specified. The new global variable
+ /// will be marked mergeable with any others of the same contents. If Name is
+ /// specified, it is the name of the global variable created.
+ GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
+ unsigned AddressSpace = 0);
+
+ /// \brief Get a constant value representing either true or false.
+ ConstantInt *getInt1(bool V) {
+ return ConstantInt::get(getInt1Ty(), V);
+ }
+
+ /// \brief Get the constant value for i1 true.
+ ConstantInt *getTrue() {
+ return ConstantInt::getTrue(Context);
+ }
+
+ /// \brief Get the constant value for i1 false.
+ ConstantInt *getFalse() {
+ return ConstantInt::getFalse(Context);
+ }
+
+ /// \brief Get a constant 8-bit value.
+ ConstantInt *getInt8(uint8_t C) {
+ return ConstantInt::get(getInt8Ty(), C);
+ }
+
+ /// \brief Get a constant 16-bit value.
+ ConstantInt *getInt16(uint16_t C) {
+ return ConstantInt::get(getInt16Ty(), C);
+ }
+
+ /// \brief Get a constant 32-bit value.
+ ConstantInt *getInt32(uint32_t C) {
+ return ConstantInt::get(getInt32Ty(), C);
+ }
+
+ /// \brief Get a constant 64-bit value.
+ ConstantInt *getInt64(uint64_t C) {
+ return ConstantInt::get(getInt64Ty(), C);
+ }
+
+ /// \brief Get a constant N-bit value, zero extended or truncated from
+ /// a 64-bit value.
+ ConstantInt *getIntN(unsigned N, uint64_t C) {
+ return ConstantInt::get(getIntNTy(N), C);
+ }
+
+ /// \brief Get a constant integer value.
+ ConstantInt *getInt(const APInt &AI) {
+ return ConstantInt::get(Context, AI);
+ }
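+
+ // Illustrative sketch of the constant helpers (B is an assumed builder):
+ //   ConstantInt *C1 = B.getTrue();        // i1 1
+ //   ConstantInt *C32 = B.getInt32(42);    // i32 42
+ //   ConstantInt *C24 = B.getIntN(24, 7);  // i24 7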
+
+ //===--------------------------------------------------------------------===//
+ // Type creation methods
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Fetch the type representing a single bit.
+ IntegerType *getInt1Ty() {
+ return Type::getInt1Ty(Context);
+ }
+
+ /// \brief Fetch the type representing an 8-bit integer.
+ IntegerType *getInt8Ty() {
+ return Type::getInt8Ty(Context);
+ }
+
+ /// \brief Fetch the type representing a 16-bit integer.
+ IntegerType *getInt16Ty() {
+ return Type::getInt16Ty(Context);
+ }
+
+ /// \brief Fetch the type representing a 32-bit integer.
+ IntegerType *getInt32Ty() {
+ return Type::getInt32Ty(Context);
+ }
+
+ /// \brief Fetch the type representing a 64-bit integer.
+ IntegerType *getInt64Ty() {
+ return Type::getInt64Ty(Context);
+ }
+
+ /// \brief Fetch the type representing a 128-bit integer.
+ IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
+
+ /// \brief Fetch the type representing an N-bit integer.
+ IntegerType *getIntNTy(unsigned N) {
+ return Type::getIntNTy(Context, N);
+ }
+
+ /// \brief Fetch the type representing a 16-bit floating point value.
+ Type *getHalfTy() {
+ return Type::getHalfTy(Context);
+ }
+
+ /// \brief Fetch the type representing a 32-bit floating point value.
+ Type *getFloatTy() {
+ return Type::getFloatTy(Context);
+ }
+
+ /// \brief Fetch the type representing a 64-bit floating point value.
+ Type *getDoubleTy() {
+ return Type::getDoubleTy(Context);
+ }
+
+ /// \brief Fetch the type representing void.
+ Type *getVoidTy() {
+ return Type::getVoidTy(Context);
+ }
+
+ /// \brief Fetch the type representing a pointer to an 8-bit integer value.
+ PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
+ return Type::getInt8PtrTy(Context, AddrSpace);
+ }
+
+ /// \brief Fetch the type of an integer with the same width as a pointer in
+ /// the given address space.
+ IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
+ return DL.getIntPtrType(Context, AddrSpace);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Intrinsic creation methods
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Create and insert a memset to the specified pointer and the
+ /// specified value.
+ ///
+ /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
+ /// specified, it will be added to the instruction. Likewise with alias.scope
+ /// and noalias tags.
+ CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr) {
+ return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
+ TBAATag, ScopeTag, NoAliasTag);
+ }
+
+ CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr);
+
+ /// \brief Create and insert a memcpy between the specified pointers.
+ ///
+ /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
+ /// specified, it will be added to the instruction. Likewise with alias.scope
+ /// and noalias tags.
+ CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr) {
+ return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag,
+ TBAAStructTag, ScopeTag, NoAliasTag);
+ }
+
+ CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr);
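+
+ // Illustrative sketch (Dst and Src are assumed pointers; the size and
+ // alignment are example values):
+ //   B.CreateMemCpy(Dst, Src, /*Size=*/16, /*Align=*/8);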
+
+ /// \brief Create and insert a memmove between the specified
+ /// pointers.
+ ///
+ /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
+ /// specified, it will be added to the instruction. Likewise with alias.scope
+ /// and noalias tags.
+ CallInst *CreateMemMove(Value *Dst, Value *Src, uint64_t Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr) {
+ return CreateMemMove(Dst, Src, getInt64(Size), Align, isVolatile,
+ TBAATag, ScopeTag, NoAliasTag);
+ }
+
+ CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
+ bool isVolatile = false, MDNode *TBAATag = nullptr,
+ MDNode *ScopeTag = nullptr,
+ MDNode *NoAliasTag = nullptr);
+
+ /// \brief Create a lifetime.start intrinsic.
+ ///
+ /// If the pointer isn't i8* it will be converted.
+ CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
+
+ /// \brief Create a lifetime.end intrinsic.
+ ///
+ /// If the pointer isn't i8* it will be converted.
+ CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
+
+ /// \brief Create a call to Masked Load intrinsic
+ CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+ Value *PassThru = nullptr, const Twine &Name = "");
+
+ /// \brief Create a call to Masked Store intrinsic
+ CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
+ Value *Mask);
+
+ /// \brief Create an assume intrinsic call that allows the optimizer to
+ /// assume that the provided condition will be true.
+ CallInst *CreateAssumption(Value *Cond);
+
+ /// \brief Create a call to the experimental.gc.statepoint intrinsic to
+ /// start a new statepoint sequence.
+ CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee,
+ ArrayRef<Value *> CallArgs,
+ ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs,
+ const Twine &Name = "");
+
+ /// \brief Create a call to the experimental.gc.statepoint intrinsic to
+ /// start a new statepoint sequence.
+ CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee, uint32_t Flags,
+ ArrayRef<Use> CallArgs,
+ ArrayRef<Use> TransitionArgs,
+ ArrayRef<Use> DeoptArgs,
+ ArrayRef<Value *> GCArgs,
+ const Twine &Name = "");
+
+ // \brief Convenience function for the common case when CallArgs are filled
+ // in using makeArrayRef(CS.arg_begin(), CS.arg_end()); each Use must be
+ // .get()'ed to obtain the Value pointer.
+ CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualCallee, ArrayRef<Use> CallArgs,
+ ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs,
+ const Twine &Name = "");
+
+ /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
+ /// start a new statepoint sequence.
+ InvokeInst *
+ CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualInvokee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
+ ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name = "");
+
+ /// \brief Create an invoke to the experimental.gc.statepoint intrinsic to
+ /// start a new statepoint sequence.
+ InvokeInst *CreateGCStatepointInvoke(
+ uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
+ BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
+ ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
+ ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs,
+ const Twine &Name = "");
+
+ // Convenience function for the common case when CallArgs are filled in
+ // using makeArrayRef(CS.arg_begin(), CS.arg_end()); each Use must be
+ // .get()'ed to obtain the Value *.
+ InvokeInst *
+ CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
+ Value *ActualInvokee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
+ ArrayRef<Value *> DeoptArgs,
+ ArrayRef<Value *> GCArgs, const Twine &Name = "");
+
+ /// \brief Create a call to the experimental.gc.result intrinsic to extract
+ /// the result from a call wrapped in a statepoint.
+ CallInst *CreateGCResult(Instruction *Statepoint,
+ Type *ResultType,
+ const Twine &Name = "");
+
+ /// \brief Create a call to the experimental.gc.relocate intrinsics to
+ /// project the relocated value of one pointer from the statepoint.
+ CallInst *CreateGCRelocate(Instruction *Statepoint,
+ int BaseOffset,
+ int DerivedOffset,
+ Type *ResultType,
+ const Twine &Name = "");
+
+private:
+ /// \brief Create a call to a masked intrinsic with the given Id.
+ /// A masked intrinsic has only one overloaded type: the data type.
+ CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
+ Type *DataTy, const Twine &Name = "");
+
+ Value *getCastedInt8PtrValue(Value *Ptr);
+};
+
+/// \brief This provides a uniform API for creating instructions and inserting
+/// them into a basic block: either at the end of a BasicBlock, or at a specific
+/// iterator location in a block.
+///
+/// Note that the builder does not expose the full generality of LLVM
+/// instructions. For access to extra instruction properties, use the mutators
+/// (e.g. setVolatile) on the instructions after they have been
+/// created. Convenience state exists to specify fast-math flags and fp-math
+/// tags.
+///
+/// The first template argument controls whether to preserve names in the
+/// final instruction output. This defaults to on. The second template argument
+/// specifies a class to use for creating constants. This defaults to creating
+/// minimally folded constants. The third template argument allows clients to
+/// specify custom insertion hooks that are called on every newly created
+/// instruction.
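+///
+/// A minimal usage sketch (illustrative only; F is an assumed Function * with
+/// an entry block):
+/// \code
+///   IRBuilder<> B(&F->getEntryBlock());
+///   Value *Sum = B.CreateAdd(B.getInt32(1), B.getInt32(2), "sum");
+///   B.CreateRet(Sum);
+/// \endcode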
+template<bool preserveNames = true, typename T = ConstantFolder,
+ typename Inserter = IRBuilderDefaultInserter<preserveNames> >
+class IRBuilder : public IRBuilderBase, public Inserter {
+ T Folder;
+
+public:
+ IRBuilder(LLVMContext &C, const T &F, Inserter I = Inserter(),
+ MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(C, FPMathTag), Inserter(std::move(I)), Folder(F) {}
+
+ explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(C, FPMathTag), Folder() {
+ }
+
+ explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
+ SetInsertPoint(TheBB);
+ }
+
+ explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
+ SetInsertPoint(TheBB);
+ }
+
+ explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(IP->getContext(), FPMathTag), Folder() {
+ SetInsertPoint(IP);
+ }
+
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F,
+ MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder(F) {
+ SetInsertPoint(TheBB, IP);
+ }
+
+ IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
+ MDNode *FPMathTag = nullptr)
+ : IRBuilderBase(TheBB->getContext(), FPMathTag), Folder() {
+ SetInsertPoint(TheBB, IP);
+ }
+
+ /// \brief Get the constant folder being used.
+ const T &getFolder() { return Folder; }
+
+ /// \brief Return true if this builder is configured to actually add the
+ /// requested names to IR created through it.
+ bool isNamePreserving() const { return preserveNames; }
+
+ /// \brief Insert and return the specified instruction.
+ template<typename InstTy>
+ InstTy *Insert(InstTy *I, const Twine &Name = "") const {
+ this->InsertHelper(I, Name, BB, InsertPt);
+ this->SetInstDebugLocation(I);
+ return I;
+ }
+
+ /// \brief No-op overload to handle constants.
+ Constant *Insert(Constant *C, const Twine& = "") const {
+ return C;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Terminators
+ //===--------------------------------------------------------------------===//
+
+private:
+ /// \brief Helper to add branch weight and unpredictable metadata onto an
+ /// instruction.
+ /// \returns The annotated instruction.
+ template <typename InstTy>
+ InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
+ if (Weights)
+ I->setMetadata(LLVMContext::MD_prof, Weights);
+ if (Unpredictable)
+ I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
+ return I;
+ }
+
+public:
+ /// \brief Create a 'ret void' instruction.
+ ReturnInst *CreateRetVoid() {
+ return Insert(ReturnInst::Create(Context));
+ }
+
+ /// \brief Create a 'ret <val>' instruction.
+ ReturnInst *CreateRet(Value *V) {
+ return Insert(ReturnInst::Create(Context, V));
+ }
+
+ /// \brief Create a sequence of N insertvalue instructions,
+ /// each with one Value from the retVals array, that build an aggregate
+ /// return value one value at a time, and a ret instruction to return
+ /// the resulting aggregate value.
+ ///
+ /// This is a convenience function for code that uses aggregate return values
+ /// as a vehicle for having multiple return values.
+ ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
+ Value *V = UndefValue::get(getCurrentFunctionReturnType());
+ for (unsigned i = 0; i != N; ++i)
+ V = CreateInsertValue(V, retVals[i], i, "mrv");
+ return Insert(ReturnInst::Create(Context, V));
+ }
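+
+ // Illustrative sketch (X and Y are assumed values matching the elements of
+ // the function's aggregate return type):
+ //   Value *RetVals[] = { X, Y };
+ //   B.CreateAggregateRet(RetVals, 2);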
+
+ /// \brief Create an unconditional 'br label X' instruction.
+ BranchInst *CreateBr(BasicBlock *Dest) {
+ return Insert(BranchInst::Create(Dest));
+ }
+
+ /// \brief Create a conditional 'br Cond, TrueDest, FalseDest'
+ /// instruction.
+ BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
+ MDNode *BranchWeights = nullptr,
+ MDNode *Unpredictable = nullptr) {
+ return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
+ BranchWeights, Unpredictable));
+ }
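+
+ // Illustrative sketch (Cond, ThenBB, and ElseBB are assumed placeholders):
+ //   B.CreateCondBr(Cond, ThenBB, ElseBB);
+ // Branch-weight metadata, if desired, can be built with MDBuilder and
+ // passed as the BranchWeights argument.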
+
+ /// \brief Create a switch instruction with the specified value, default dest,
+ /// and with a hint for the number of cases that will be added (for efficient
+ /// allocation).
+ SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
+ MDNode *BranchWeights = nullptr,
+ MDNode *Unpredictable = nullptr) {
+ return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
+ BranchWeights, Unpredictable));
+ }
+
+ /// \brief Create an indirect branch instruction with the specified address
+ /// operand, with an optional hint for the number of destinations that will be
+ /// added (for efficient allocation).
+ IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
+ return Insert(IndirectBrInst::Create(Addr, NumDests));
+ }
+
+ InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, const Twine &Name = "") {
+ return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, None),
+ Name);
+ }
+ InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, Value *Arg1,
+ const Twine &Name = "") {
+ return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Arg1),
+ Name);
+ }
+ InvokeInst *CreateInvoke3(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, Value *Arg1,
+ Value *Arg2, Value *Arg3,
+ const Twine &Name = "") {
+ Value *Args[] = { Arg1, Arg2, Arg3 };
+ return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
+ Name);
+ }
+ /// \brief Create an invoke instruction.
+ InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, ArrayRef<Value *> Args,
+ const Twine &Name = "") {
+ return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args),
+ Name);
+ }
+ InvokeInst *CreateInvoke(Value *Callee, BasicBlock *NormalDest,
+ BasicBlock *UnwindDest, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name = "") {
+ return Insert(InvokeInst::Create(Callee, NormalDest, UnwindDest, Args,
+ OpBundles), Name);
+ }
+
+ ResumeInst *CreateResume(Value *Exn) {
+ return Insert(ResumeInst::Create(Exn));
+ }
+
+ CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
+ BasicBlock *UnwindBB = nullptr) {
+ return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
+ }
+
+ CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
+ unsigned NumHandlers,
+ const Twine &Name = "") {
+ return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
+ Name);
+ }
+
+ CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
+ const Twine &Name = "") {
+ return Insert(CatchPadInst::Create(ParentPad, Args), Name);
+ }
+
+ CleanupPadInst *CreateCleanupPad(Value *ParentPad,
+ ArrayRef<Value *> Args = None,
+ const Twine &Name = "") {
+ return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
+ }
+
+ CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
+ return Insert(CatchReturnInst::Create(CatchPad, BB));
+ }
+
+ UnreachableInst *CreateUnreachable() {
+ return Insert(new UnreachableInst(Context));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Binary Operators
+ //===--------------------------------------------------------------------===//
+private:
+ BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
+ Value *LHS, Value *RHS,
+ const Twine &Name,
+ bool HasNUW, bool HasNSW) {
+ BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+
+ Instruction *AddFPMathAttributes(Instruction *I,
+ MDNode *FPMathTag,
+ FastMathFlags FMF) const {
+ if (!FPMathTag)
+ FPMathTag = DefaultFPMathTag;
+ if (FPMathTag)
+ I->setMetadata(LLVMContext::MD_fpmath, FPMathTag);
+ I->setFastMathFlags(FMF);
+ return I;
+ }
+
+public:
+ Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
+ HasNUW, HasNSW);
+ }
+ Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateAdd(LHS, RHS, Name, false, true);
+ }
+ Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateAdd(LHS, RHS, Name, true, false);
+ }
+ Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFAdd(LC, RC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFAdd(LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
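+
+ // Illustrative sketch of the wrap-flag variants (X and Y are assumed i32
+ // values):
+ //   Value *A = B.CreateNSWAdd(X, Y);  // add nsw
+ //   Value *C = B.CreateAdd(X, Y, "t", /*HasNUW=*/true, /*HasNSW=*/false);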
+ Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
+ HasNUW, HasNSW);
+ }
+ Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateSub(LHS, RHS, Name, false, true);
+ }
+ Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateSub(LHS, RHS, Name, true, false);
+ }
+ Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFSub(LC, RC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFSub(LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
+ Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
+ HasNUW, HasNSW);
+ }
+ Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateMul(LHS, RHS, Name, false, true);
+ }
+ Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateMul(LHS, RHS, Name, true, false);
+ }
+ Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFMul(LC, RC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFMul(LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
+ Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
+ }
+ Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateUDiv(LHS, RHS, Name, true);
+ }
+ Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
+ }
+ Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateSDiv(LHS, RHS, Name, true);
+ }
+ Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFDiv(LC, RC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFDiv(LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
+ Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateURem(LC, RC), Name);
+ return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
+ }
+ Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateSRem(LC, RC), Name);
+ return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
+ }
+ Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFRem(LC, RC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFRem(LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
+
+ Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
+ return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
+ HasNUW, HasNSW);
+ }
+ Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+ HasNUW, HasNSW);
+ }
+ Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
+ HasNUW, HasNSW);
+ }
+
+ Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
+ }
+ Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
+ }
+ Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
+ }
+
+ Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
+ bool isExact = false) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
+ if (!isExact)
+ return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
+ return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
+ }
+ Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
+ }
+ Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
+ bool isExact = false) {
+ return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
+ }
+
+ Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
+ if (Constant *RC = dyn_cast<Constant>(RHS)) {
+ if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isAllOnesValue())
+ return LHS; // LHS & -1 -> LHS
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Insert(Folder.CreateAnd(LC, RC), Name);
+ }
+ return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
+ }
+ Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+ Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+
+ Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
+ if (Constant *RC = dyn_cast<Constant>(RHS)) {
+ if (RC->isNullValue())
+ return LHS; // LHS | 0 -> LHS
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ return Insert(Folder.CreateOr(LC, RC), Name);
+ }
+ return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
+ }
+ Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+ Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+
+ Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateXor(LC, RC), Name);
+ return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
+ }
+ Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
+ return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+ Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
+ return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
+ }
+
+ Value *CreateBinOp(Instruction::BinaryOps Opc,
+ Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateBinOp(Opc, LC, RC), Name);
+ llvm::Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
+ if (isa<FPMathOperator>(BinOp))
+ BinOp = AddFPMathAttributes(BinOp, FPMathTag, FMF);
+ return Insert(BinOp, Name);
+ }
+
+ Value *CreateNeg(Value *V, const Twine &Name = "",
+ bool HasNUW = false, bool HasNSW = false) {
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
+ BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
+ return CreateNeg(V, Name, false, true);
+ }
+ Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
+ return CreateNeg(V, Name, true, false);
+ }
+ Value *CreateFNeg(Value *V, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateFNeg(VC), Name);
+ return Insert(AddFPMathAttributes(BinaryOperator::CreateFNeg(V),
+ FPMathTag, FMF), Name);
+ }
+ Value *CreateNot(Value *V, const Twine &Name = "") {
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateNot(VC), Name);
+ return Insert(BinaryOperator::CreateNot(V), Name);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
+ const Twine &Name = "") {
+ return Insert(new AllocaInst(Ty, ArraySize), Name);
+ }
+ // \brief Provided to resolve 'CreateLoad(Ptr, "...")' correctly, instead of
+ // converting the string to 'bool' for the isVolatile parameter.
+ LoadInst *CreateLoad(Value *Ptr, const char *Name) {
+ return Insert(new LoadInst(Ptr), Name);
+ }
+ LoadInst *CreateLoad(Value *Ptr, const Twine &Name = "") {
+ return Insert(new LoadInst(Ptr), Name);
+ }
+ LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
+ return Insert(new LoadInst(Ty, Ptr), Name);
+ }
+ LoadInst *CreateLoad(Value *Ptr, bool isVolatile, const Twine &Name = "") {
+ return Insert(new LoadInst(Ptr, nullptr, isVolatile), Name);
+ }
+ StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
+ return Insert(new StoreInst(Val, Ptr, isVolatile));
+ }
+ // \brief Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")'
+ // correctly, instead of converting the string to 'bool' for the isVolatile
+ // parameter.
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile,
+ const Twine &Name = "") {
+ LoadInst *LI = CreateLoad(Ptr, isVolatile, Name);
+ LI->setAlignment(Align);
+ return LI;
+ }
+ StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align,
+ bool isVolatile = false) {
+ StoreInst *SI = CreateStore(Val, Ptr, isVolatile);
+ SI->setAlignment(Align);
+ return SI;
+ }
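+
+ // Illustrative sketch of stack traffic (B is an assumed builder):
+ //   AllocaInst *Slot = B.CreateAlloca(B.getInt32Ty(), nullptr, "slot");
+ //   B.CreateStore(B.getInt32(0), Slot);
+ //   LoadInst *V = B.CreateLoad(Slot, "val");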
+ FenceInst *CreateFence(AtomicOrdering Ordering,
+ SynchronizationScope SynchScope = CrossThread,
+ const Twine &Name = "") {
+ return Insert(new FenceInst(Context, Ordering, SynchScope), Name);
+ }
+ AtomicCmpXchgInst *
+ CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope = CrossThread) {
+ return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering,
+ FailureOrdering, SynchScope));
+ }
+ AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering,
+ SynchronizationScope SynchScope = CrossThread) {
+ return Insert(new AtomicRMWInst(Op, Ptr, Val, Ordering, SynchScope));
+ }
+ Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name = "") {
+ return CreateGEP(nullptr, Ptr, IdxList, Name);
+ }
+ Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name = "") {
+ if (Constant *PC = dyn_cast<Constant>(Ptr)) {
+ // Every index must be constant.
+ size_t i, e;
+ for (i = 0, e = IdxList.size(); i != e; ++i)
+ if (!isa<Constant>(IdxList[i]))
+ break;
+ if (i == e)
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
+ }
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
+ }
+ Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name = "") {
+ return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
+ }
+ Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &Name = "") {
+ if (Constant *PC = dyn_cast<Constant>(Ptr)) {
+ // Every index must be constant.
+ size_t i, e;
+ for (i = 0, e = IdxList.size(); i != e; ++i)
+ if (!isa<Constant>(IdxList[i]))
+ break;
+ if (i == e)
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
+ Name);
+ }
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
+ }
+ Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
+ return CreateGEP(nullptr, Ptr, Idx, Name);
+ }
+ Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ if (Constant *IC = dyn_cast<Constant>(Idx))
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
+ }
+ Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
+ const Twine &Name = "") {
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ if (Constant *IC = dyn_cast<Constant>(Idx))
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
+ }
+ Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
+ return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
+ }
+ Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
+ const Twine &Name = "") {
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
+
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
+ }
+ Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
+ const Twine &Name = "") {
+ Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
+
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
+ }
+ Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
+ const Twine &Name = "") {
+ Value *Idxs[] = {
+ ConstantInt::get(Type::getInt32Ty(Context), Idx0),
+ ConstantInt::get(Type::getInt32Ty(Context), Idx1)
+ };
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
+
+ return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
+ }
+ Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
+ unsigned Idx1, const Twine &Name = "") {
+ Value *Idxs[] = {
+ ConstantInt::get(Type::getInt32Ty(Context), Idx0),
+ ConstantInt::get(Type::getInt32Ty(Context), Idx1)
+ };
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
+
+ return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
+ }
+ Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
+ Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idx), Name);
+
+ return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idx), Name);
+ }
+ Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
+ const Twine &Name = "") {
+ Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idx), Name);
+
+ return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idx), Name);
+ }
+ Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+ const Twine &Name = "") {
+ Value *Idxs[] = {
+ ConstantInt::get(Type::getInt64Ty(Context), Idx0),
+ ConstantInt::get(Type::getInt64Ty(Context), Idx1)
+ };
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateGetElementPtr(nullptr, PC, Idxs), Name);
+
+ return Insert(GetElementPtrInst::Create(nullptr, Ptr, Idxs), Name);
+ }
+ Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
+ const Twine &Name = "") {
+ Value *Idxs[] = {
+ ConstantInt::get(Type::getInt64Ty(Context), Idx0),
+ ConstantInt::get(Type::getInt64Ty(Context), Idx1)
+ };
+
+ if (Constant *PC = dyn_cast<Constant>(Ptr))
+ return Insert(Folder.CreateInBoundsGetElementPtr(nullptr, PC, Idxs),
+ Name);
+
+ return Insert(GetElementPtrInst::CreateInBounds(nullptr, Ptr, Idxs), Name);
+ }
+ Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
+ const Twine &Name = "") {
+ return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
+ }
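+
+ // Illustrative sketch (STy is an assumed struct type and Obj a pointer to
+ // it; the two forms below compute the same address):
+ //   Value *Field1 = B.CreateStructGEP(STy, Obj, 1, "f1");
+ //   Value *Idxs[] = { B.getInt32(0), B.getInt32(1) };
+ //   Value *Same = B.CreateInBoundsGEP(STy, Obj, Idxs);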
+
+ /// \brief Same as CreateGlobalString, but return a pointer with "i8*" type
+ /// instead of a pointer to array of i8.
+ Value *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
+ unsigned AddressSpace = 0) {
+ GlobalVariable *gv = CreateGlobalString(Str, Name, AddressSpace);
+ Value *zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
+ Value *Args[] = { zero, zero };
+ return CreateInBoundsGEP(gv->getValueType(), gv, Args, Name);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
+ return CreateCast(Instruction::Trunc, V, DestTy, Name);
+ }
+ Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
+ return CreateCast(Instruction::ZExt, V, DestTy, Name);
+ }
+ Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
+ return CreateCast(Instruction::SExt, V, DestTy, Name);
+ }
+ /// \brief Create a ZExt or Trunc from the integer value V to DestTy. Return
+ /// the value untouched if the type of V is already DestTy.
+ Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ assert(V->getType()->isIntOrIntVectorTy() &&
+ DestTy->isIntOrIntVectorTy() &&
+ "Can only zero extend/truncate integers!");
+ Type *VTy = V->getType();
+ if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
+ return CreateZExt(V, DestTy, Name);
+ if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
+ /// \brief Create a SExt or Trunc from the integer value V to DestTy. Return
+ /// the value untouched if the type of V is already DestTy.
+ Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ assert(V->getType()->isIntOrIntVectorTy() &&
+ DestTy->isIntOrIntVectorTy() &&
+ "Can only sign extend/truncate integers!");
+ Type *VTy = V->getType();
+ if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
+ return CreateSExt(V, DestTy, Name);
+ if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
+ return CreateTrunc(V, DestTy, Name);
+ return V;
+ }
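+
+ // Illustrative sketch (V is an assumed integer value):
+ //   Value *V64 = B.CreateZExtOrTrunc(V, B.getInt64Ty());  // no-op if i64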
+ Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){
+ return CreateCast(Instruction::FPToUI, V, DestTy, Name);
+ }
+ Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = ""){
+ return CreateCast(Instruction::FPToSI, V, DestTy, Name);
+ }
+ Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
+ return CreateCast(Instruction::UIToFP, V, DestTy, Name);
+ }
+ Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
+ return CreateCast(Instruction::SIToFP, V, DestTy, Name);
+ }
+ Value *CreateFPTrunc(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
+ }
+ Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
+ return CreateCast(Instruction::FPExt, V, DestTy, Name);
+ }
+ Value *CreatePtrToInt(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
+ }
+ Value *CreateIntToPtr(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
+ }
+ Value *CreateBitCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ return CreateCast(Instruction::BitCast, V, DestTy, Name);
+ }
+ Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
+ }
+ Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
+ return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
+ }
+ Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
+ return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
+ }
+ Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
+ return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
+ }
+ Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
+ return Insert(CastInst::Create(Op, V, DestTy), Name);
+ }
+ Value *CreatePointerCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
+ return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
+ }
+
+ Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+
+ if (Constant *VC = dyn_cast<Constant>(V)) {
+ return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
+ Name);
+ }
+
+ return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
+ Name);
+ }
+
+ Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
+ return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
+ }
+
+ Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
+ const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (V->getType()->getScalarType()->isPointerTy() &&
+ DestTy->getScalarType()->isIntegerTy())
+ return CreatePtrToInt(V, DestTy, Name);
+ if (V->getType()->getScalarType()->isIntegerTy() &&
+ DestTy->getScalarType()->isPointerTy())
+ return CreateIntToPtr(V, DestTy, Name);
+
+ return CreateBitCast(V, DestTy, Name);
+ }
+
+private:
+ // \brief Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
+ // compile time error, instead of converting the string to bool for the
+ // isSigned parameter.
+ Value *CreateIntCast(Value *, Type *, const char *) = delete;
+
+public:
+ Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
+ if (V->getType() == DestTy)
+ return V;
+ if (Constant *VC = dyn_cast<Constant>(V))
+ return Insert(Folder.CreateFPCast(VC, DestTy), Name);
+ return Insert(CastInst::CreateFPCast(V, DestTy), Name);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
+ }
+ Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
+ }
+ Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
+ }
+ Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
+ }
+ Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
+ }
+ Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
+ }
+ Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
+ }
+ Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
+ }
+ Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
+ }
+ Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
+ return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
+ }
+
+ Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
+ }
+ Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
+ }
+
+ Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
+ const Twine &Name = "") {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateICmp(P, LC, RC), Name);
+ return Insert(new ICmpInst(P, LHS, RHS), Name);
+ }
+ Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ if (Constant *LC = dyn_cast<Constant>(LHS))
+ if (Constant *RC = dyn_cast<Constant>(RHS))
+ return Insert(Folder.CreateFCmp(P, LC, RC), Name);
+ return Insert(AddFPMathAttributes(new FCmpInst(P, LHS, RHS),
+ FPMathTag, FMF), Name);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Instruction creation methods: Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
+ const Twine &Name = "") {
+ return Insert(PHINode::Create(Ty, NumReservedValues), Name);
+ }
+
+ CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args = None,
+ ArrayRef<OperandBundleDef> OpBundles = None,
+ const Twine &Name = "") {
+ return Insert(CallInst::Create(Callee, Args, OpBundles), Name);
+ }
+
+ CallInst *CreateCall(Value *Callee, ArrayRef<Value *> Args,
+ const Twine &Name, MDNode *FPMathTag = nullptr) {
+ PointerType *PTy = cast<PointerType>(Callee->getType());
+ FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
+ return CreateCall(FTy, Callee, Args, Name, FPMathTag);
+ }
+
+ CallInst *CreateCall(llvm::FunctionType *FTy, Value *Callee,
+ ArrayRef<Value *> Args, const Twine &Name = "",
+ MDNode *FPMathTag = nullptr) {
+ CallInst *CI = CallInst::Create(FTy, Callee, Args);
+ if (isa<FPMathOperator>(CI))
+ CI = cast<CallInst>(AddFPMathAttributes(CI, FPMathTag, FMF));
+ return Insert(CI, Name);
+ }
+
+ CallInst *CreateCall(Function *Callee, ArrayRef<Value *> Args,
+ const Twine &Name = "", MDNode *FPMathTag = nullptr) {
+ return CreateCall(Callee->getFunctionType(), Callee, Args, Name, FPMathTag);
+ }
+
+ Value *CreateSelect(Value *C, Value *True, Value *False,
+ const Twine &Name = "") {
+ if (Constant *CC = dyn_cast<Constant>(C))
+ if (Constant *TC = dyn_cast<Constant>(True))
+ if (Constant *FC = dyn_cast<Constant>(False))
+ return Insert(Folder.CreateSelect(CC, TC, FC), Name);
+ return Insert(SelectInst::Create(C, True, False), Name);
+ }
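+
+ // Illustrative sketch of a branch-free minimum (X and Y are assumed values
+ // of the same integer type):
+ //   Value *C = B.CreateICmpSLT(X, Y, "cmp");
+ //   Value *Min = B.CreateSelect(C, X, Y, "min");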
+
+ VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
+ return Insert(new VAArgInst(List, Ty), Name);
+ }
+
+ Value *CreateExtractElement(Value *Vec, Value *Idx,
+ const Twine &Name = "") {
+ if (Constant *VC = dyn_cast<Constant>(Vec))
+ if (Constant *IC = dyn_cast<Constant>(Idx))
+ return Insert(Folder.CreateExtractElement(VC, IC), Name);
+ return Insert(ExtractElementInst::Create(Vec, Idx), Name);
+ }
+
+ Value *CreateExtractElement(Value *Vec, uint64_t Idx,
+ const Twine &Name = "") {
+ return CreateExtractElement(Vec, getInt64(Idx), Name);
+ }
+
+ Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
+ const Twine &Name = "") {
+ if (Constant *VC = dyn_cast<Constant>(Vec))
+ if (Constant *NC = dyn_cast<Constant>(NewElt))
+ if (Constant *IC = dyn_cast<Constant>(Idx))
+ return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
+ return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
+ }
+
+ Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
+ const Twine &Name = "") {
+ return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
+ }
+
+ Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
+ const Twine &Name = "") {
+ if (Constant *V1C = dyn_cast<Constant>(V1))
+ if (Constant *V2C = dyn_cast<Constant>(V2))
+ if (Constant *MC = dyn_cast<Constant>(Mask))
+ return Insert(Folder.CreateShuffleVector(V1C, V2C, MC), Name);
+ return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
+ }
+
+ Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> IntMask,
+ const Twine &Name = "") {
+ size_t MaskSize = IntMask.size();
+ SmallVector<Constant*, 8> MaskVec(MaskSize);
+ for (size_t i = 0; i != MaskSize; ++i)
+ MaskVec[i] = getInt32(IntMask[i]);
+ Value *Mask = ConstantVector::get(MaskVec);
+ return CreateShuffleVector(V1, V2, Mask, Name);
+ }
+
+ Value *CreateExtractValue(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &Name = "") {
+ if (Constant *AggC = dyn_cast<Constant>(Agg))
+ return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
+ return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
+ }
+
+ Value *CreateInsertValue(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &Name = "") {
+ if (Constant *AggC = dyn_cast<Constant>(Agg))
+ if (Constant *ValC = dyn_cast<Constant>(Val))
+ return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
+ return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
+ }
+
+ LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
+ const Twine &Name = "") {
+ return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utility creation methods
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return an i1 value testing if \p Arg is null.
+ Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
+ return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
+ Name);
+ }
+
+ /// \brief Return an i1 value testing if \p Arg is not null.
+ Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
+ return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
+ Name);
+ }
+
+ /// \brief Return the i64 difference between two pointer values, dividing out
+ /// the size of the pointed-to objects.
+ ///
+ /// This is intended to implement C-style pointer subtraction. As such, the
+ /// pointers must be appropriately aligned for their element types and
+ /// pointing into the same object.
+ Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "") {
+ assert(LHS->getType() == RHS->getType() &&
+ "Pointer subtraction operand types must match!");
+ PointerType *ArgType = cast<PointerType>(LHS->getType());
+ Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
+ Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
+ Value *Difference = CreateSub(LHS_int, RHS_int);
+ return CreateExactSDiv(Difference,
+ ConstantExpr::getSizeOf(ArgType->getElementType()),
+ Name);
+ }
+
+ /// \brief Create an invariant.group.barrier intrinsic call that stops the
+ /// optimizer from propagating equality using invariant.group metadata.
+ /// If Ptr's type differs from i8*, it is cast to i8* before the call and
+ /// the result is cast back to Ptr's type afterwards.
+ Value *CreateInvariantGroupBarrier(Value *Ptr) {
+ Module *M = BB->getParent()->getParent();
+ Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(M,
+ Intrinsic::invariant_group_barrier);
+
+ Type *ArgumentAndReturnType = FnInvariantGroupBarrier->getReturnType();
+ assert(ArgumentAndReturnType ==
+ FnInvariantGroupBarrier->getFunctionType()->getParamType(0) &&
+ "InvariantGroupBarrier should take and return the same type");
+ Type *PtrType = Ptr->getType();
+
+ bool PtrTypeConversionNeeded = PtrType != ArgumentAndReturnType;
+ if (PtrTypeConversionNeeded)
+ Ptr = CreateBitCast(Ptr, ArgumentAndReturnType);
+
+ CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});
+
+ if (PtrTypeConversionNeeded)
+ return CreateBitCast(Fn, PtrType);
+ return Fn;
+ }
+
+  /// \brief Return a vector value that contains \p V broadcast to \p
+  /// NumElts elements.
+ Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "") {
+ assert(NumElts > 0 && "Cannot splat to an empty vector!");
+
+ // First insert it into an undef vector so we can shuffle it.
+ Type *I32Ty = getInt32Ty();
+ Value *Undef = UndefValue::get(VectorType::get(V->getType(), NumElts));
+ V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
+ Name + ".splatinsert");
+
+ // Shuffle the value across the desired number of elements.
+ Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32Ty, NumElts));
+ return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
+ }
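+
+  // Usage sketch (illustrative; assumes B is an IRBuilder<> and X is a
+  // scalar float):
+  //
+  //   Value *V4 = B.CreateVectorSplat(4, X, "x");
+  //
+  // X is inserted into lane 0 of an undef <4 x float>, then shuffled with an
+  // all-zero mask so that every lane of the result holds X.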
+
+ /// \brief Return a value that has been extracted from a larger integer type.
+ Value *CreateExtractInteger(const DataLayout &DL, Value *From,
+ IntegerType *ExtractedTy, uint64_t Offset,
+ const Twine &Name) {
+ IntegerType *IntTy = cast<IntegerType>(From->getType());
+ assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
+ DL.getTypeStoreSize(IntTy) &&
+ "Element extends past full value");
+ uint64_t ShAmt = 8 * Offset;
+ Value *V = From;
+ if (DL.isBigEndian())
+ ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
+ DL.getTypeStoreSize(ExtractedTy) - Offset);
+ if (ShAmt) {
+ V = CreateLShr(V, ShAmt, Name + ".shift");
+ }
+ assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
+ "Cannot extract to a larger integer!");
+ if (ExtractedTy != IntTy) {
+ V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
+ }
+ return V;
+ }
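+
+  // Worked example: on a little-endian target, extracting an i16 at Offset 2
+  // from an i64 shifts right by 8 * 2 = 16 bits and truncates to i16. On a
+  // big-endian target the shift amount is 8 * (8 - 2 - 2) = 32 bits instead,
+  // counting the store sizes of i64 (8 bytes) and i16 (2 bytes).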
+
+ /// \brief Create an assume intrinsic call that represents an alignment
+ /// assumption on the provided pointer.
+ ///
+ /// An optional offset can be provided, and if it is provided, the offset
+ /// must be subtracted from the provided pointer to get the pointer with the
+ /// specified alignment.
+ CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
+ unsigned Alignment,
+ Value *OffsetValue = nullptr) {
+ assert(isa<PointerType>(PtrValue->getType()) &&
+ "trying to create an alignment assumption on a non-pointer?");
+
+ PointerType *PtrTy = cast<PointerType>(PtrValue->getType());
+ Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
+ Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
+
+ Value *Mask = ConstantInt::get(IntPtrTy,
+ Alignment > 0 ? Alignment - 1 : 0);
+ if (OffsetValue) {
+ bool IsOffsetZero = false;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(OffsetValue))
+ IsOffsetZero = CI->isZero();
+
+ if (!IsOffsetZero) {
+ if (OffsetValue->getType() != IntPtrTy)
+ OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
+ "offsetcast");
+ PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
+ }
+ }
+
+ Value *Zero = ConstantInt::get(IntPtrTy, 0);
+ Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
+ Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
+
+ return CreateAssumption(InvCond);
+ }
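+
+  // Worked example: for Alignment == 16 the mask is 15, so the emitted
+  // condition is ((ptrtoint Ptr) & 15) == 0, handed to the assume intrinsic.
+  // A nonzero OffsetValue is first subtracted from the pointer's integer
+  // value, so the alignment assumption applies to Ptr - Offset.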
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
+
+} // end namespace llvm
+
+#endif // LLVM_IR_IRBUILDER_H
diff --git a/contrib/llvm/include/llvm/IR/IRPrintingPasses.h b/contrib/llvm/include/llvm/IR/IRPrintingPasses.h
new file mode 100644
index 0000000..88b18e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IRPrintingPasses.h
@@ -0,0 +1,94 @@
+//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines passes to print out IR in various granularities. The
+/// PrintModulePass pass simply prints out the entire module when it is
+/// executed. The PrintFunctionPass class is designed to be pipelined with
+/// other FunctionPasses, and prints out the functions of the module as they
+/// are processed.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_IRPRINTINGPASSES_H
+#define LLVM_IR_IRPRINTINGPASSES_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class Module;
+class ModulePass;
+class PreservedAnalyses;
+class raw_ostream;
+
+/// \brief Create and return a pass that writes the module to the specified
+/// \c raw_ostream.
+ModulePass *createPrintModulePass(raw_ostream &OS,
+ const std::string &Banner = "",
+ bool ShouldPreserveUseListOrder = false);
+
+/// \brief Create and return a pass that prints functions to the specified
+/// \c raw_ostream as they are processed.
+FunctionPass *createPrintFunctionPass(raw_ostream &OS,
+ const std::string &Banner = "");
+
+/// \brief Create and return a pass that writes the BB to the specified
+/// \c raw_ostream.
+BasicBlockPass *createPrintBasicBlockPass(raw_ostream &OS,
+ const std::string &Banner = "");
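+
+// Usage sketch (illustrative, legacy pass manager; assumes M is a Module and
+// errs() is the raw_ostream for standard error):
+//
+//   legacy::PassManager PM;
+//   PM.add(createPrintModulePass(errs(), "; After my pass"));
+//   PM.run(M);
+//
+// A non-empty Banner string is printed before the IR itself.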
+
+/// Print out a name of an LLVM value without any prefixes.
+///
+/// The name is surrounded with double quotes and escaped if it has any
+/// special or non-printable characters in it.
+void printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name);
+
+/// \brief Pass for printing a Module as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintModulePass {
+ raw_ostream &OS;
+ std::string Banner;
+ bool ShouldPreserveUseListOrder;
+
+public:
+ PrintModulePass();
+ PrintModulePass(raw_ostream &OS, const std::string &Banner = "",
+ bool ShouldPreserveUseListOrder = false);
+
+ PreservedAnalyses run(Module &M);
+
+ static StringRef name() { return "PrintModulePass"; }
+};
+
+/// \brief Pass for printing a Function as LLVM's text IR assembly.
+///
+/// Note: This pass is for use with the new pass manager. Use the create...Pass
+/// functions above to create passes for use with the legacy pass manager.
+class PrintFunctionPass {
+ raw_ostream &OS;
+ std::string Banner;
+
+public:
+ PrintFunctionPass();
+ PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");
+
+ PreservedAnalyses run(Function &F);
+
+ static StringRef name() { return "PrintFunctionPass"; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/InlineAsm.h b/contrib/llvm/include/llvm/IR/InlineAsm.h
new file mode 100644
index 0000000..d2e9e48
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/InlineAsm.h
@@ -0,0 +1,362 @@
+//===-- llvm/InlineAsm.h - Class to represent inline asm strings-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents inline asm strings, which are Value*'s that are
+// used as the callee operand of call instructions. InlineAsms are uniqued
+// like constants and created via InlineAsm::get(...).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INLINEASM_H
+#define LLVM_IR_INLINEASM_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Value.h"
+#include <vector>
+
+namespace llvm {
+
+class PointerType;
+class FunctionType;
+class Module;
+
+struct InlineAsmKeyType;
+template <class ConstantClass> class ConstantUniqueMap;
+
+class InlineAsm : public Value {
+public:
+ enum AsmDialect {
+ AD_ATT,
+ AD_Intel
+ };
+
+private:
+ friend struct InlineAsmKeyType;
+ friend class ConstantUniqueMap<InlineAsm>;
+
+ InlineAsm(const InlineAsm &) = delete;
+ void operator=(const InlineAsm&) = delete;
+
+ std::string AsmString, Constraints;
+ FunctionType *FTy;
+ bool HasSideEffects;
+ bool IsAlignStack;
+ AsmDialect Dialect;
+
+ InlineAsm(FunctionType *Ty, const std::string &AsmString,
+ const std::string &Constraints, bool hasSideEffects,
+ bool isAlignStack, AsmDialect asmDialect);
+ ~InlineAsm() override;
+
+ /// When the ConstantUniqueMap merges two types and makes two InlineAsms
+ /// identical, it destroys one of them with this method.
+ void destroyConstant();
+
+public:
+ /// InlineAsm::get - Return the specified uniqued inline asm string.
+ ///
+ static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
+ StringRef Constraints, bool hasSideEffects,
+ bool isAlignStack = false,
+ AsmDialect asmDialect = AD_ATT);
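+
+  // Usage sketch (illustrative): a side-effecting "nop" with no operands,
+  // assuming Ctx is an LLVMContext:
+  //
+  //   FunctionType *FTy =
+  //       FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
+  //   InlineAsm *IA = InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true);
+  //
+  // IA can then be used as the callee operand of a call instruction.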
+
+ bool hasSideEffects() const { return HasSideEffects; }
+ bool isAlignStack() const { return IsAlignStack; }
+ AsmDialect getDialect() const { return Dialect; }
+
+  /// getType - InlineAsms are always pointers.
+ ///
+ PointerType *getType() const {
+ return reinterpret_cast<PointerType*>(Value::getType());
+ }
+
+  /// getFunctionType - InlineAsms are always pointers to functions.
+ ///
+ FunctionType *getFunctionType() const;
+
+ const std::string &getAsmString() const { return AsmString; }
+ const std::string &getConstraintString() const { return Constraints; }
+
+ /// Verify - This static method can be used by the parser to check to see if
+ /// the specified constraint string is legal for the type. This returns true
+ /// if legal, false if not.
+ ///
+ static bool Verify(FunctionType *Ty, StringRef Constraints);
+
+ // Constraint String Parsing
+ enum ConstraintPrefix {
+ isInput, // 'x'
+ isOutput, // '=x'
+ isClobber // '~x'
+ };
+
+ typedef std::vector<std::string> ConstraintCodeVector;
+
+ struct SubConstraintInfo {
+ /// MatchingInput - If this is not -1, this is an output constraint where an
+ /// input constraint is required to match it (e.g. "0"). The value is the
+ /// constraint number that matches this one (for example, if this is
+ /// constraint #0 and constraint #4 has the value "0", this will be 4).
+ signed char MatchingInput;
+ /// Code - The constraint code, either the register name (in braces) or the
+ /// constraint letter/number.
+ ConstraintCodeVector Codes;
+ /// Default constructor.
+ SubConstraintInfo() : MatchingInput(-1) {}
+ };
+
+ typedef std::vector<SubConstraintInfo> SubConstraintInfoVector;
+ struct ConstraintInfo;
+ typedef std::vector<ConstraintInfo> ConstraintInfoVector;
+
+ struct ConstraintInfo {
+ /// Type - The basic type of the constraint: input/output/clobber
+ ///
+ ConstraintPrefix Type;
+
+ /// isEarlyClobber - "&": output operand writes result before inputs are all
+ /// read. This is only ever set for an output operand.
+ bool isEarlyClobber;
+
+ /// MatchingInput - If this is not -1, this is an output constraint where an
+ /// input constraint is required to match it (e.g. "0"). The value is the
+ /// constraint number that matches this one (for example, if this is
+ /// constraint #0 and constraint #4 has the value "0", this will be 4).
+ signed char MatchingInput;
+
+ /// hasMatchingInput - Return true if this is an output constraint that has
+ /// a matching input constraint.
+ bool hasMatchingInput() const { return MatchingInput != -1; }
+
+ /// isCommutative - This is set to true for a constraint that is commutative
+ /// with the next operand.
+ bool isCommutative;
+
+ /// isIndirect - True if this operand is an indirect operand. This means
+ /// that the address of the source or destination is present in the call
+ /// instruction, instead of it being returned or passed in explicitly. This
+ /// is represented with a '*' in the asm string.
+ bool isIndirect;
+
+ /// Code - The constraint code, either the register name (in braces) or the
+ /// constraint letter/number.
+ ConstraintCodeVector Codes;
+
+ /// isMultipleAlternative - '|': has multiple-alternative constraints.
+ bool isMultipleAlternative;
+
+ /// multipleAlternatives - If there are multiple alternative constraints,
+ /// this array will contain them. Otherwise it will be empty.
+ SubConstraintInfoVector multipleAlternatives;
+
+ /// The currently selected alternative constraint index.
+ unsigned currentAlternativeIndex;
+
+ /// Default constructor.
+ ConstraintInfo();
+
+ /// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
+ /// fields in this structure. If the constraint string is not understood,
+ /// return true, otherwise return false.
+ bool Parse(StringRef Str, ConstraintInfoVector &ConstraintsSoFar);
+
+ /// selectAlternative - Point this constraint to the alternative constraint
+ /// indicated by the index.
+ void selectAlternative(unsigned index);
+ };
+
+ /// ParseConstraints - Split up the constraint string into the specific
+ /// constraints and their prefixes. If this returns an empty vector, and if
+ /// the constraint string itself isn't empty, there was an error parsing.
+ static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);
+
+  /// ParseConstraints - Parse the constraints of this InlineAsm object,
+ /// returning them the same way that ParseConstraints(str) does.
+ ConstraintInfoVector ParseConstraints() const {
+ return ParseConstraints(Constraints);
+ }
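+
+  // Example (illustrative): ParseConstraints("=r,r,~{memory}") yields three
+  // ConstraintInfos: an isOutput with code "r", an isInput with code "r",
+  // and an isClobber with code "{memory}".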
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == Value::InlineAsmVal;
+ }
+
+ // These are helper methods for dealing with flags in the INLINEASM SDNode
+ // in the backend.
+ //
+ // The encoding of the flag word is currently:
+ // Bits 2-0 - A Kind_* value indicating the kind of the operand.
+ // Bits 15-3 - The number of SDNode operands associated with this inline
+ // assembly operand.
+ // If bit 31 is set:
+ // Bit 30-16 - The operand number that this operand must match.
+ // When bits 2-0 are Kind_Mem, the Constraint_* value must be
+ // obtained from the flags for this operand number.
+ // Else if bits 2-0 are Kind_Mem:
+ // Bit 30-16 - A Constraint_* value indicating the original constraint
+ // code.
+ // Else:
+ // Bit 30-16 - The register class ID to use for the operand.
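+  //
+  // Worked example: an input register operand ("r") spanning two registers
+  // and tied to output operand #0 encodes as
+  //   getFlagWordForMatchingOp(getFlagWord(Kind_RegUse, 2), 0)
+  //     == Kind_RegUse | (2 << 3) | Flag_MatchingOperand == 0x80000011.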
+
+ enum : uint32_t {
+ // Fixed operands on an INLINEASM SDNode.
+ Op_InputChain = 0,
+ Op_AsmString = 1,
+ Op_MDNode = 2,
+ Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack, AsmDialect.
+ Op_FirstOperand = 4,
+
+ // Fixed operands on an INLINEASM MachineInstr.
+ MIOp_AsmString = 0,
+ MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack, AsmDialect.
+ MIOp_FirstOperand = 2,
+
+ // Interpretation of the MIOp_ExtraInfo bit field.
+ Extra_HasSideEffects = 1,
+ Extra_IsAlignStack = 2,
+ Extra_AsmDialect = 4,
+ Extra_MayLoad = 8,
+ Extra_MayStore = 16,
+
+ // Inline asm operands map to multiple SDNode / MachineInstr operands.
+  // The first operand is an immediate describing the asm operand; the low
+  // bits give the kind:
+ Kind_RegUse = 1, // Input register, "r".
+ Kind_RegDef = 2, // Output register, "=r".
+ Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r".
+ Kind_Clobber = 4, // Clobbered register, "~r".
+ Kind_Imm = 5, // Immediate.
+ Kind_Mem = 6, // Memory operand, "m".
+
+ // Memory constraint codes.
+ // These could be tablegenerated but there's little need to do that since
+ // there's plenty of space in the encoding to support the union of all
+ // constraint codes for all targets.
+ Constraint_Unknown = 0,
+ Constraint_es,
+ Constraint_i,
+ Constraint_m,
+ Constraint_o,
+ Constraint_v,
+ Constraint_Q,
+ Constraint_R,
+ Constraint_S,
+ Constraint_T,
+ Constraint_Um,
+ Constraint_Un,
+ Constraint_Uq,
+ Constraint_Us,
+ Constraint_Ut,
+ Constraint_Uv,
+ Constraint_Uy,
+ Constraint_X,
+ Constraint_Z,
+ Constraint_ZC,
+ Constraint_Zy,
+ Constraints_Max = Constraint_Zy,
+ Constraints_ShiftAmount = 16,
+
+ Flag_MatchingOperand = 0x80000000
+ };
+
+ static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
+ assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
+ assert(Kind >= Kind_RegUse && Kind <= Kind_Mem && "Invalid Kind");
+ return Kind | (NumOps << 3);
+ }
+
+ /// getFlagWordForMatchingOp - Augment an existing flag word returned by
+ /// getFlagWord with information indicating that this input operand is tied
+ /// to a previous output operand.
+ static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
+ unsigned MatchedOperandNo) {
+ assert(MatchedOperandNo <= 0x7fff && "Too big matched operand");
+ assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+ return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16);
+ }
+
+ /// getFlagWordForRegClass - Augment an existing flag word returned by
+ /// getFlagWord with the required register class for the following register
+ /// operands.
+  /// A tied use operand cannot have a register class; use the register class
+ /// from the def operand instead.
+ static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
+ // Store RC + 1, reserve the value 0 to mean 'no register class'.
+ ++RC;
+ assert(RC <= 0x7fff && "Too large register class ID");
+ assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+ return InputFlag | (RC << 16);
+ }
+
+ /// Augment an existing flag word returned by getFlagWord with the constraint
+ /// code for a memory constraint.
+ static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
+ assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
+ assert(Constraint <= Constraints_Max && "Unknown constraint ID");
+ assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
+ return InputFlag | (Constraint << Constraints_ShiftAmount);
+ }
+
+ static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag) {
+ assert(isMemKind(InputFlag));
+ return InputFlag & ~(0x7fff << Constraints_ShiftAmount);
+ }
+
+ static unsigned getKind(unsigned Flags) {
+ return Flags & 7;
+ }
+
+ static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
+ static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
+ static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+ static bool isRegDefEarlyClobberKind(unsigned Flag) {
+ return getKind(Flag) == Kind_RegDefEarlyClobber;
+ }
+ static bool isClobberKind(unsigned Flag) {
+ return getKind(Flag) == Kind_Clobber;
+ }
+
+ static unsigned getMemoryConstraintID(unsigned Flag) {
+ assert(isMemKind(Flag));
+ return (Flag >> Constraints_ShiftAmount) & 0x7fff;
+ }
+
+ /// getNumOperandRegisters - Extract the number of registers field from the
+ /// inline asm operand flag.
+ static unsigned getNumOperandRegisters(unsigned Flag) {
+ return (Flag & 0xffff) >> 3;
+ }
+
+ /// isUseOperandTiedToDef - Return true if the flag of the inline asm
+  /// operand indicates it is a use operand that's matched to a def operand.
+ static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) {
+ if ((Flag & Flag_MatchingOperand) == 0)
+ return false;
+ Idx = (Flag & ~Flag_MatchingOperand) >> 16;
+ return true;
+ }
+
+ /// hasRegClassConstraint - Returns true if the flag contains a register
+ /// class constraint. Sets RC to the register class ID.
+ static bool hasRegClassConstraint(unsigned Flag, unsigned &RC) {
+ if (Flag & Flag_MatchingOperand)
+ return false;
+ unsigned High = Flag >> 16;
+ // getFlagWordForRegClass() uses 0 to mean no register class, and otherwise
+ // stores RC + 1.
+ if (!High)
+ return false;
+ RC = High - 1;
+ return true;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/InstIterator.h b/contrib/llvm/include/llvm/IR/InstIterator.h
new file mode 100644
index 0000000..1baca21
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/InstIterator.h
@@ -0,0 +1,157 @@
+//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions of two iterators for iterating over the
+// instructions in a function. This is effectively a wrapper around a two level
+// iterator that can probably be genericized later.
+//
+// Note that this iterator gets invalidated any time that basic blocks or
+// instructions are moved around.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTITERATOR_H
+#define LLVM_IR_INSTITERATOR_H
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+// This class implements inst_begin() & inst_end() for
+// inst_iterator and const_inst_iterator.
+//
+template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
+ typedef BB_t BBty;
+ typedef BB_i_t BBIty;
+ typedef BI_t BIty;
+ typedef II_t IIty;
+ BB_t *BBs; // BasicBlocksType
+ BB_i_t BB; // BasicBlocksType::iterator
+ BI_t BI; // BasicBlock::iterator
+public:
+ typedef std::bidirectional_iterator_tag iterator_category;
+ typedef IIty value_type;
+ typedef signed difference_type;
+ typedef IIty* pointer;
+ typedef IIty& reference;
+
+ // Default constructor
+ InstIterator() {}
+
+ // Copy constructor...
+ template<typename A, typename B, typename C, typename D>
+ InstIterator(const InstIterator<A,B,C,D> &II)
+ : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+ template<typename A, typename B, typename C, typename D>
+ InstIterator(InstIterator<A,B,C,D> &II)
+ : BBs(II.BBs), BB(II.BB), BI(II.BI) {}
+
+ template<class M> InstIterator(M &m)
+ : BBs(&m.getBasicBlockList()), BB(BBs->begin()) { // begin ctor
+ if (BB != BBs->end()) {
+ BI = BB->begin();
+ advanceToNextBB();
+ }
+ }
+
+ template<class M> InstIterator(M &m, bool)
+ : BBs(&m.getBasicBlockList()), BB(BBs->end()) { // end ctor
+ }
+
+ // Accessors to get at the underlying iterators...
+ inline BBIty &getBasicBlockIterator() { return BB; }
+ inline BIty &getInstructionIterator() { return BI; }
+
+ inline reference operator*() const { return *BI; }
+ inline pointer operator->() const { return &operator*(); }
+
+ inline bool operator==(const InstIterator &y) const {
+ return BB == y.BB && (BB == BBs->end() || BI == y.BI);
+ }
+ inline bool operator!=(const InstIterator& y) const {
+ return !operator==(y);
+ }
+
+ InstIterator& operator++() {
+ ++BI;
+ advanceToNextBB();
+ return *this;
+ }
+ inline InstIterator operator++(int) {
+ InstIterator tmp = *this; ++*this; return tmp;
+ }
+
+ InstIterator& operator--() {
+ while (BB == BBs->end() || BI == BB->begin()) {
+ --BB;
+ BI = BB->end();
+ }
+ --BI;
+ return *this;
+ }
+ inline InstIterator operator--(int) {
+ InstIterator tmp = *this; --*this; return tmp;
+ }
+
+ inline bool atEnd() const { return BB == BBs->end(); }
+
+private:
+ inline void advanceToNextBB() {
+ // The only way that the II could be broken is if it is now pointing to
+ // the end() of the current BasicBlock and there are successor BBs.
+ while (BI == BB->end()) {
+ ++BB;
+ if (BB == BBs->end()) break;
+ BI = BB->begin();
+ }
+ }
+};
+
+typedef InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
+ BasicBlock::iterator, Instruction> inst_iterator;
+typedef InstIterator<const SymbolTableList<BasicBlock>,
+ Function::const_iterator, BasicBlock::const_iterator,
+ const Instruction> const_inst_iterator;
+typedef iterator_range<inst_iterator> inst_range;
+typedef iterator_range<const_inst_iterator> const_inst_range;
+
+inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
+inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
+inline inst_range instructions(Function *F) {
+ return inst_range(inst_begin(F), inst_end(F));
+}
+inline const_inst_iterator inst_begin(const Function *F) {
+ return const_inst_iterator(*F);
+}
+inline const_inst_iterator inst_end(const Function *F) {
+ return const_inst_iterator(*F, true);
+}
+inline const_inst_range instructions(const Function *F) {
+ return const_inst_range(inst_begin(F), inst_end(F));
+}
+inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); }
+inline inst_iterator inst_end(Function &F) { return inst_iterator(F, true); }
+inline inst_range instructions(Function &F) {
+ return inst_range(inst_begin(F), inst_end(F));
+}
+inline const_inst_iterator inst_begin(const Function &F) {
+ return const_inst_iterator(F);
+}
+inline const_inst_iterator inst_end(const Function &F) {
+ return const_inst_iterator(F, true);
+}
+inline const_inst_range instructions(const Function &F) {
+ return const_inst_range(inst_begin(F), inst_end(F));
+}
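+
+// Usage sketch (illustrative): count the loads in a function F without an
+// explicit two-level loop over blocks and instructions:
+//
+//   unsigned NumLoads = 0;
+//   for (Instruction &I : instructions(F))
+//     if (isa<LoadInst>(&I))
+//       ++NumLoads;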
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/InstVisitor.h b/contrib/llvm/include/llvm/IR/InstVisitor.h
new file mode 100644
index 0000000..088d3e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/InstVisitor.h
@@ -0,0 +1,295 @@
+//===- InstVisitor.h - Instruction visitor templates ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_IR_INSTVISITOR_H
+#define LLVM_IR_INSTVISITOR_H
+
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+// We operate on opaque instruction classes, so forward declare all instruction
+// types now...
+//
+#define HANDLE_INST(NUM, OPCODE, CLASS) class CLASS;
+#include "llvm/IR/Instruction.def"
+
+#define DELEGATE(CLASS_TO_VISIT) \
+ return static_cast<SubClass*>(this)-> \
+ visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I))
+
+
+/// @brief Base class for instruction visitors
+///
+/// Instruction visitors are used when you want to perform different actions
+/// for different kinds of instructions without having to use lots of casts
+/// and a big switch statement (in your code, that is).
+///
+/// To define your own visitor, inherit from this class, specifying your
+/// new type for the 'SubClass' template parameter, and "override" visitXXX
+/// functions in your class. I say "override" because this class is defined
+/// in terms of statically resolved overloading, not virtual functions.
+///
+/// For example, here is a visitor that counts the number of alloca
+/// instructions processed:
+///
+/// /// Declare the class. Note that we derive from InstVisitor instantiated
+/// /// with _our new subclass's_ type.
+/// ///
+/// struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
+/// unsigned Count;
+/// CountAllocaVisitor() : Count(0) {}
+///
+/// void visitAllocaInst(AllocaInst &AI) { ++Count; }
+/// };
+///
+/// And this class would be used like this:
+/// CountAllocaVisitor CAV;
+/// CAV.visit(function);
+/// NumAllocas = CAV.Count;
+///
+/// The resulting class has 'visit' methods for Instruction, and also for
+/// BasicBlock, Function, and Module, which recursively process all contained
+/// instructions.
+///
+/// Note that if you don't implement visitXXX for some instruction type,
+/// the visitXXX method for instruction superclass will be invoked. So
+/// if instructions are added in the future, they will be automatically
+/// supported, if you handle one of their superclasses.
+///
+/// The optional second template argument specifies the type that instruction
+/// visitation functions should return. If you specify this, you *MUST*
+/// provide an implementation of visitInstruction!
+///
+/// Note that this class is specifically designed as a template to avoid
+/// virtual function call overhead. Defining and using an InstVisitor is just
+/// as efficient as having your own switch statement over the instruction
+/// opcode.
+template<typename SubClass, typename RetTy=void>
+class InstVisitor {
+ //===--------------------------------------------------------------------===//
+ // Interface code - This is the public interface of the InstVisitor that you
+ // use to visit instructions...
+ //
+
+public:
+ // Generic visit method - Allow visitation to all instructions in a range
+ template<class Iterator>
+ void visit(Iterator Start, Iterator End) {
+ while (Start != End)
+ static_cast<SubClass*>(this)->visit(*Start++);
+ }
+
+ // Define visitors for functions and basic blocks...
+ //
+ void visit(Module &M) {
+ static_cast<SubClass*>(this)->visitModule(M);
+ visit(M.begin(), M.end());
+ }
+ void visit(Function &F) {
+ static_cast<SubClass*>(this)->visitFunction(F);
+ visit(F.begin(), F.end());
+ }
+ void visit(BasicBlock &BB) {
+ static_cast<SubClass*>(this)->visitBasicBlock(BB);
+ visit(BB.begin(), BB.end());
+ }
+
+ // Forwarding functions so that the user can visit with pointers AND refs.
+ void visit(Module *M) { visit(*M); }
+ void visit(Function *F) { visit(*F); }
+ void visit(BasicBlock *BB) { visit(*BB); }
+ RetTy visit(Instruction *I) { return visit(*I); }
+
+ // visit - Finally, code to visit an instruction...
+ //
+ RetTy visit(Instruction &I) {
+ switch (I.getOpcode()) {
+ default: llvm_unreachable("Unknown instruction type encountered!");
+ // Build the switch statement using the Instruction.def file...
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ case Instruction::OPCODE: return \
+ static_cast<SubClass*>(this)-> \
+ visit##OPCODE(static_cast<CLASS&>(I));
+#include "llvm/IR/Instruction.def"
+ }
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitation functions... these functions provide default fallbacks in case
+ // the user does not specify what to do for a particular instruction type.
+  // The default behavior is to generalize the instruction type to its
+  // superclass and try visiting the superclass. All of this should be
+  // inlined perfectly,
+ // because there are no virtual functions to get in the way.
+ //
+
+ // When visiting a module, function or basic block directly, these methods get
+ // called to indicate when transitioning into a new unit.
+ //
+ void visitModule (Module &M) {}
+ void visitFunction (Function &F) {}
+ void visitBasicBlock(BasicBlock &BB) {}
+
+ // Define instruction specific visitor functions that can be overridden to
+  // handle SPECIFIC instructions. For instance, these functions automatically
+  // define visitMul to proxy to visitBinaryOperator in case the user does
+  // not need this generality.
+ //
+ // These functions can also implement fan-out, when a single opcode and
+ // instruction have multiple more specific Instruction subclasses. The Call
+ // instruction currently supports this. We implement that by redirecting that
+ // instruction to a special delegation helper.
+#define HANDLE_INST(NUM, OPCODE, CLASS) \
+ RetTy visit##OPCODE(CLASS &I) { \
+ if (NUM == Instruction::Call) \
+ return delegateCallInst(I); \
+ else \
+ DELEGATE(CLASS); \
+ }
+#include "llvm/IR/Instruction.def"
+
+ // Specific Instruction type classes... note that all of the casts are
+ // necessary because we use the instruction classes as opaque types...
+ //
+ RetTy visitReturnInst(ReturnInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitBranchInst(BranchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitSwitchInst(SwitchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitIndirectBrInst(IndirectBrInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitResumeInst(ResumeInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitCleanupReturnInst(CleanupReturnInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitCatchReturnInst(CatchReturnInst &I) { DELEGATE(TerminatorInst); }
+ RetTy visitCatchSwitchInst(CatchSwitchInst &I) { DELEGATE(TerminatorInst);}
+ RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
+ RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitLoadInst(LoadInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
+ RetTy visitAtomicRMWInst(AtomicRMWInst &I) { DELEGATE(Instruction);}
+ RetTy visitFenceInst(FenceInst &I) { DELEGATE(Instruction);}
+ RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
+ RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction);}
+ RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst);}
+ RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst);}
+ RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst);}
+ RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst);}
+ RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst);}
+ RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst);}
+ RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
+ RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction);}
+ RetTy visitVAArgInst(VAArgInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
+ RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
+ RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
+ RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
+ RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
+ RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
+ RetTy visitFuncletPadInst(FuncletPadInst &I) { DELEGATE(Instruction); }
+ RetTy visitCleanupPadInst(CleanupPadInst &I) { DELEGATE(FuncletPadInst); }
+ RetTy visitCatchPadInst(CatchPadInst &I) { DELEGATE(FuncletPadInst); }
+
+  // Handle the special intrinsic instruction classes.
+ RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgInfoIntrinsic);}
+ RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
+ RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
+ RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
+ RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
+
+ // Call and Invoke are slightly different as they delegate first through
+ // a generic CallSite visitor.
+ RetTy visitCallInst(CallInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
+ RetTy visitInvokeInst(InvokeInst &I) {
+ return static_cast<SubClass*>(this)->visitCallSite(&I);
+ }
+
+ // Next level propagators: If the user does not overload a specific
+ // instruction type, they can overload one of these to get the whole class
+ // of instructions...
+ //
+ RetTy visitCastInst(CastInst &I) { DELEGATE(UnaryInstruction);}
+ RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction);}
+ RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction);}
+ RetTy visitTerminatorInst(TerminatorInst &I) { DELEGATE(Instruction);}
+ RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
+
+ // Provide a special visitor for a 'callsite' that visits both calls and
+ // invokes. When unimplemented, properly delegates to either the terminator or
+ // regular instruction visitor.
+ RetTy visitCallSite(CallSite CS) {
+ assert(CS);
+ Instruction &I = *CS.getInstruction();
+ if (CS.isCall())
+ DELEGATE(Instruction);
+
+ assert(CS.isInvoke());
+ DELEGATE(TerminatorInst);
+ }
+
+ // If the user wants a 'default' case, they can choose to override this
+ // function. If this function is not overloaded in the user's subclass, then
+ // this instruction just gets ignored.
+ //
+ // Note that you MUST override this function if your return type is not void.
+ //
+ void visitInstruction(Instruction &I) {} // Ignore unhandled instructions
+
+private:
+ // Special helper function to delegate to CallInst subclass visitors.
+ RetTy delegateCallInst(CallInst &I) {
+ if (const Function *F = I.getCalledFunction()) {
+ switch (F->getIntrinsicID()) {
+ default: DELEGATE(IntrinsicInst);
+ case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
+ case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
+ case Intrinsic::memcpy: DELEGATE(MemCpyInst);
+ case Intrinsic::memmove: DELEGATE(MemMoveInst);
+ case Intrinsic::memset: DELEGATE(MemSetInst);
+ case Intrinsic::vastart: DELEGATE(VAStartInst);
+ case Intrinsic::vaend: DELEGATE(VAEndInst);
+ case Intrinsic::vacopy: DELEGATE(VACopyInst);
+ case Intrinsic::not_intrinsic: break;
+ }
+ }
+ DELEGATE(CallInst);
+ }
+
+  // An overload that will never actually be called; it is referenced only
+  // from dead code in the dispatch from opcodes to instruction subclasses.
+ RetTy delegateCallInst(Instruction &I) {
+ llvm_unreachable("delegateCallInst called for non-CallInst");
+ }
+};
+
+#undef DELEGATE
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/InstrTypes.h b/contrib/llvm/include/llvm/IR/InstrTypes.h
new file mode 100644
index 0000000..5091bb4
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/InstrTypes.h
@@ -0,0 +1,1633 @@
+//===-- llvm/InstrTypes.h - Important Instruction subclasses ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various meta classes of instructions that exist in the VM
+// representation. Specific concrete subclasses of these may be found in the
+// i*.h files...
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRTYPES_H
+#define LLVM_IR_INSTRTYPES_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/OperandTraits.h"
+
+namespace llvm {
+
+class LLVMContext;
+
+//===----------------------------------------------------------------------===//
+// TerminatorInst Class
+//===----------------------------------------------------------------------===//
+
+/// Subclasses of this class are all able to terminate a basic
+/// block. Thus, these are all the flow-control operations.
+///
+class TerminatorInst : public Instruction {
+protected:
+ TerminatorInst(Type *Ty, Instruction::TermOps iType,
+ Use *Ops, unsigned NumOps,
+ Instruction *InsertBefore = nullptr)
+ : Instruction(Ty, iType, Ops, NumOps, InsertBefore) {}
+
+ TerminatorInst(Type *Ty, Instruction::TermOps iType,
+ Use *Ops, unsigned NumOps, BasicBlock *InsertAtEnd)
+ : Instruction(Ty, iType, Ops, NumOps, InsertAtEnd) {}
+
+ // Out of line virtual method, so the vtable, etc has a home.
+ ~TerminatorInst() override;
+
+ /// Virtual methods - Terminators should overload these and provide inline
+ /// overrides of non-V methods.
+ virtual BasicBlock *getSuccessorV(unsigned idx) const = 0;
+ virtual unsigned getNumSuccessorsV() const = 0;
+ virtual void setSuccessorV(unsigned idx, BasicBlock *B) = 0;
+
+public:
+ /// Return the number of successors that this terminator has.
+ unsigned getNumSuccessors() const {
+ return getNumSuccessorsV();
+ }
+
+ /// Return the specified successor.
+ BasicBlock *getSuccessor(unsigned idx) const {
+ return getSuccessorV(idx);
+ }
+
+ /// Update the specified successor to point at the provided block.
+ void setSuccessor(unsigned idx, BasicBlock *B) {
+ setSuccessorV(idx, B);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->isTerminator();
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+  /// \brief Returns true if this terminator relates to exception handling.
+ bool isExceptional() const {
+ switch (getOpcode()) {
+ case Instruction::CatchSwitch:
+ case Instruction::CatchRet:
+ case Instruction::CleanupRet:
+ case Instruction::Invoke:
+ case Instruction::Resume:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ //===--------------------------------------------------------------------===//
+ // succ_iterator definition
+ //===--------------------------------------------------------------------===//
+
+ template <class Term, class BB> // Successor Iterator
+ class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
+ int, BB *, BB *> {
+ typedef std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>
+ super;
+
+ public:
+ typedef typename super::pointer pointer;
+ typedef typename super::reference reference;
+
+ private:
+ Term TermInst;
+ unsigned idx;
+ typedef SuccIterator<Term, BB> Self;
+
+ inline bool index_is_valid(unsigned idx) {
+ return idx < TermInst->getNumSuccessors();
+ }
+
+ /// \brief Proxy object to allow write access in operator[]
+ class SuccessorProxy {
+ Self it;
+
+ public:
+ explicit SuccessorProxy(const Self &it) : it(it) {}
+
+ SuccessorProxy(const SuccessorProxy &) = default;
+
+ SuccessorProxy &operator=(SuccessorProxy r) {
+ *this = reference(r);
+ return *this;
+ }
+
+ SuccessorProxy &operator=(reference r) {
+ it.TermInst->setSuccessor(it.idx, r);
+ return *this;
+ }
+
+ operator reference() const { return *it; }
+ };
+
+ public:
+ // begin iterator
+ explicit inline SuccIterator(Term T) : TermInst(T), idx(0) {}
+ // end iterator
+ inline SuccIterator(Term T, bool) : TermInst(T) {
+ if (TermInst)
+ idx = TermInst->getNumSuccessors();
+ else
+      // Term == NULL happens if a basic block is not fully constructed and
+      // getTerminator() consequently returns NULL. In this case we construct
+ // a SuccIterator which describes a basic block that has zero
+ // successors.
+ // Defining SuccIterator for incomplete and malformed CFGs is especially
+ // useful for debugging.
+ idx = 0;
+ }
+
+    /// Return the index of the successor this iterator points at, for code
+    /// that wants to operate on the terminator instruction directly.
+ unsigned getSuccessorIndex() const { return idx; }
+
+ inline bool operator==(const Self &x) const { return idx == x.idx; }
+ inline bool operator!=(const Self &x) const { return !operator==(x); }
+
+ inline reference operator*() const { return TermInst->getSuccessor(idx); }
+ inline pointer operator->() const { return operator*(); }
+
+ inline Self &operator++() {
+ ++idx;
+ return *this;
+ } // Preincrement
+
+ inline Self operator++(int) { // Postincrement
+ Self tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ inline Self &operator--() {
+ --idx;
+ return *this;
+ } // Predecrement
+ inline Self operator--(int) { // Postdecrement
+ Self tmp = *this;
+ --*this;
+ return tmp;
+ }
+
+ inline bool operator<(const Self &x) const {
+ assert(TermInst == x.TermInst &&
+ "Cannot compare iterators of different blocks!");
+ return idx < x.idx;
+ }
+
+ inline bool operator<=(const Self &x) const {
+ assert(TermInst == x.TermInst &&
+ "Cannot compare iterators of different blocks!");
+ return idx <= x.idx;
+ }
+ inline bool operator>=(const Self &x) const {
+ assert(TermInst == x.TermInst &&
+ "Cannot compare iterators of different blocks!");
+ return idx >= x.idx;
+ }
+
+ inline bool operator>(const Self &x) const {
+ assert(TermInst == x.TermInst &&
+ "Cannot compare iterators of different blocks!");
+ return idx > x.idx;
+ }
+
+ inline Self &operator+=(int Right) {
+ unsigned new_idx = idx + Right;
+ assert(index_is_valid(new_idx) && "Iterator index out of bound");
+ idx = new_idx;
+ return *this;
+ }
+
+ inline Self operator+(int Right) const {
+ Self tmp = *this;
+ tmp += Right;
+ return tmp;
+ }
+
+ inline Self &operator-=(int Right) { return operator+=(-Right); }
+
+ inline Self operator-(int Right) const { return operator+(-Right); }
+
+ inline int operator-(const Self &x) const {
+ assert(TermInst == x.TermInst &&
+ "Cannot work on iterators of different blocks!");
+ int distance = idx - x.idx;
+ return distance;
+ }
+
+ inline SuccessorProxy operator[](int offset) {
+ Self tmp = *this;
+ tmp += offset;
+ return SuccessorProxy(tmp);
+ }
+
+ /// Get the source BB of this iterator.
+ inline BB *getSource() {
+ assert(TermInst && "Source not available, if basic block was malformed");
+ return TermInst->getParent();
+ }
+ };
+
+ typedef SuccIterator<TerminatorInst *, BasicBlock> succ_iterator;
+ typedef SuccIterator<const TerminatorInst *, const BasicBlock>
+ succ_const_iterator;
+ typedef llvm::iterator_range<succ_iterator> succ_range;
+ typedef llvm::iterator_range<succ_const_iterator> succ_const_range;
+
+private:
+ inline succ_iterator succ_begin() { return succ_iterator(this); }
+ inline succ_const_iterator succ_begin() const {
+ return succ_const_iterator(this);
+ }
+ inline succ_iterator succ_end() { return succ_iterator(this, true); }
+ inline succ_const_iterator succ_end() const {
+ return succ_const_iterator(this, true);
+ }
+
+public:
+ inline succ_range successors() {
+ return succ_range(succ_begin(), succ_end());
+ }
+ inline succ_const_range successors() const {
+ return succ_const_range(succ_begin(), succ_end());
+ }
+};
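+
+// Usage sketch (illustrative): walk every successor block of a terminator TI:
+//
+//   for (BasicBlock *Succ : TI->successors())
+//     Worklist.push_back(Succ);  // Worklist is a hypothetical container.
+//
+// Assigning a block through succ_iterator's operator[] rewrites the
+// corresponding successor via setSuccessor.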
+
+//===----------------------------------------------------------------------===//
+// UnaryInstruction Class
+//===----------------------------------------------------------------------===//
+
+class UnaryInstruction : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+
+protected:
+ UnaryInstruction(Type *Ty, unsigned iType, Value *V,
+ Instruction *IB = nullptr)
+ : Instruction(Ty, iType, &Op<0>(), 1, IB) {
+ Op<0>() = V;
+ }
+ UnaryInstruction(Type *Ty, unsigned iType, Value *V, BasicBlock *IAE)
+ : Instruction(Ty, iType, &Op<0>(), 1, IAE) {
+ Op<0>() = V;
+ }
+
+public:
+ // allocate space for exactly one operand
+ void *operator new(size_t s) {
+ return User::operator new(s, 1);
+ }
+
+ // Out of line virtual method, so the vtable, etc has a home.
+ ~UnaryInstruction() override;
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Alloca ||
+ I->getOpcode() == Instruction::Load ||
+ I->getOpcode() == Instruction::VAArg ||
+ I->getOpcode() == Instruction::ExtractValue ||
+ (I->getOpcode() >= CastOpsBegin && I->getOpcode() < CastOpsEnd);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<UnaryInstruction> :
+ public FixedNumOperandTraits<UnaryInstruction, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
+
+//===----------------------------------------------------------------------===//
+// BinaryOperator Class
+//===----------------------------------------------------------------------===//
+
+class BinaryOperator : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+
+protected:
+ void init(BinaryOps iType);
+ BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
+ const Twine &Name, Instruction *InsertBefore);
+ BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ BinaryOperator *cloneImpl() const;
+
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Construct a binary instruction, given the opcode and the two
+  /// operands. Optionally (if InsertBefore is specified) insert the instruction
+ /// into a BasicBlock right before the specified instruction. The specified
+ /// Instruction is allowed to be a dereferenced end iterator.
+ ///
+ static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name = Twine(),
+ Instruction *InsertBefore = nullptr);
+
+ /// Construct a binary instruction, given the opcode and the two
+ /// operands. Also automatically insert this instruction to the end of the
+ /// BasicBlock specified.
+ ///
+ static BinaryOperator *Create(BinaryOps Op, Value *S1, Value *S2,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ /// These methods just forward to Create, and are useful when you
+ /// statically know what type of instruction you're going to create. These
+ /// helpers just save some typing.
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+ static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+ const Twine &Name = "") {\
+ return Create(Instruction::OPC, V1, V2, Name);\
+ }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+ static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+ const Twine &Name, BasicBlock *BB) {\
+ return Create(Instruction::OPC, V1, V2, Name, BB);\
+ }
+#include "llvm/IR/Instruction.def"
+#define HANDLE_BINARY_INST(N, OPC, CLASS) \
+ static BinaryOperator *Create##OPC(Value *V1, Value *V2, \
+ const Twine &Name, Instruction *I) {\
+ return Create(Instruction::OPC, V1, V2, Name, I);\
+ }
+#include "llvm/IR/Instruction.def"
+
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
+ BO->setHasNoSignedWrap(true);
+ return BO;
+ }
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+ BO->setHasNoSignedWrap(true);
+ return BO;
+ }
+ static BinaryOperator *CreateNSW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+ BO->setHasNoSignedWrap(true);
+ return BO;
+ }
+
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
+ BO->setHasNoUnsignedWrap(true);
+ return BO;
+ }
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+ BO->setHasNoUnsignedWrap(true);
+ return BO;
+ }
+ static BinaryOperator *CreateNUW(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+ BO->setHasNoUnsignedWrap(true);
+ return BO;
+ }
+
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name = "") {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name);
+ BO->setIsExact(true);
+ return BO;
+ }
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, BasicBlock *BB) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, BB);
+ BO->setIsExact(true);
+ return BO;
+ }
+ static BinaryOperator *CreateExact(BinaryOps Opc, Value *V1, Value *V2,
+ const Twine &Name, Instruction *I) {
+ BinaryOperator *BO = Create(Opc, V1, V2, Name, I);
+ BO->setIsExact(true);
+ return BO;
+ }
+
+#define DEFINE_HELPERS(OPC, NUWNSWEXACT) \
+ static BinaryOperator *Create##NUWNSWEXACT##OPC(Value *V1, Value *V2, \
+ const Twine &Name = "") { \
+ return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name); \
+ } \
+ static BinaryOperator *Create##NUWNSWEXACT##OPC( \
+ Value *V1, Value *V2, const Twine &Name, BasicBlock *BB) { \
+ return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, BB); \
+ } \
+ static BinaryOperator *Create##NUWNSWEXACT##OPC( \
+ Value *V1, Value *V2, const Twine &Name, Instruction *I) { \
+ return Create##NUWNSWEXACT(Instruction::OPC, V1, V2, Name, I); \
+ }
+
+ DEFINE_HELPERS(Add, NSW) // CreateNSWAdd
+ DEFINE_HELPERS(Add, NUW) // CreateNUWAdd
+ DEFINE_HELPERS(Sub, NSW) // CreateNSWSub
+ DEFINE_HELPERS(Sub, NUW) // CreateNUWSub
+ DEFINE_HELPERS(Mul, NSW) // CreateNSWMul
+ DEFINE_HELPERS(Mul, NUW) // CreateNUWMul
+ DEFINE_HELPERS(Shl, NSW) // CreateNSWShl
+ DEFINE_HELPERS(Shl, NUW) // CreateNUWShl
+
+ DEFINE_HELPERS(SDiv, Exact) // CreateExactSDiv
+ DEFINE_HELPERS(UDiv, Exact) // CreateExactUDiv
+ DEFINE_HELPERS(AShr, Exact) // CreateExactAShr
+ DEFINE_HELPERS(LShr, Exact) // CreateExactLShr
+
+#undef DEFINE_HELPERS
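+
+  // Usage sketch (illustrative): the generated helpers avoid spelling the
+  // opcode and flag setter separately. For example, inserting an nsw add
+  // before instruction InsertPt:
+  //
+  //   BinaryOperator *Sum = BinaryOperator::CreateNSWAdd(A, B, "sum", InsertPt);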
+
+ /// Helper functions to construct and inspect unary operations (NEG and NOT)
+ /// via binary operators SUB and XOR:
+ ///
+ /// Create the NEG and NOT instructions out of SUB and XOR instructions.
+ ///
+ static BinaryOperator *CreateNeg(Value *Op, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ static BinaryOperator *CreateNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+ static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ static BinaryOperator *CreateNSWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+ static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ static BinaryOperator *CreateNUWNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+ static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ static BinaryOperator *CreateFNeg(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+ static BinaryOperator *CreateNot(Value *Op, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ static BinaryOperator *CreateNot(Value *Op, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+
+ /// Check if the given Value is a NEG, FNeg, or NOT instruction.
+ ///
+ static bool isNeg(const Value *V);
+ static bool isFNeg(const Value *V, bool IgnoreZeroSign=false);
+ static bool isNot(const Value *V);
+
+ /// Helper functions to extract the unary argument of a NEG, FNEG or NOT
+ /// operation implemented via Sub, FSub, or Xor.
+ ///
+ static const Value *getNegArgument(const Value *BinOp);
+ static Value *getNegArgument( Value *BinOp);
+ static const Value *getFNegArgument(const Value *BinOp);
+ static Value *getFNegArgument( Value *BinOp);
+ static const Value *getNotArgument(const Value *BinOp);
+ static Value *getNotArgument( Value *BinOp);
+
+ BinaryOps getOpcode() const {
+ return static_cast<BinaryOps>(Instruction::getOpcode());
+ }
+
+ /// Exchange the two operands to this instruction.
+  /// This method is safe to use on any binary instruction and
+  /// does not modify the semantics of the instruction. If the instruction
+  /// cannot be reversed (i.e., it's a Div), then return true.
+ ///
+ bool swapOperands();
+
+  /// Set or clear the nuw flag on this instruction, which must be an operator
+ /// which supports this flag. See LangRef.html for the meaning of this flag.
+ void setHasNoUnsignedWrap(bool b = true);
+
+ /// Set or clear the nsw flag on this instruction, which must be an operator
+ /// which supports this flag. See LangRef.html for the meaning of this flag.
+ void setHasNoSignedWrap(bool b = true);
+
+ /// Set or clear the exact flag on this instruction, which must be an operator
+ /// which supports this flag. See LangRef.html for the meaning of this flag.
+ void setIsExact(bool b = true);
+
+ /// Determine whether the no unsigned wrap flag is set.
+ bool hasNoUnsignedWrap() const;
+
+ /// Determine whether the no signed wrap flag is set.
+ bool hasNoSignedWrap() const;
+
+ /// Determine whether the exact flag is set.
+ bool isExact() const;
+
+ /// Convenience method to copy supported wrapping, exact, and fast-math flags
+ /// from V to this instruction.
+ void copyIRFlags(const Value *V);
+
+ /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
+ /// V and this instruction.
+ void andIRFlags(const Value *V);
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->isBinaryOp();
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<BinaryOperator> :
+ public FixedNumOperandTraits<BinaryOperator, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value)
+
+//===----------------------------------------------------------------------===//
+// CastInst Class
+//===----------------------------------------------------------------------===//
+
+/// This is the base class for all instructions that perform data
+/// casts. It is simply provided so that instruction category testing
+/// can be performed with code like:
+///
+/// if (isa<CastInst>(Instr)) { ... }
+/// @brief Base class of casting instructions.
+class CastInst : public UnaryInstruction {
+ void anchor() override;
+
+protected:
+ /// @brief Constructor with insert-before-instruction semantics for subclasses
+ CastInst(Type *Ty, unsigned iType, Value *S,
+ const Twine &NameStr = "", Instruction *InsertBefore = nullptr)
+ : UnaryInstruction(Ty, iType, S, InsertBefore) {
+ setName(NameStr);
+ }
+ /// @brief Constructor with insert-at-end-of-block semantics for subclasses
+ CastInst(Type *Ty, unsigned iType, Value *S,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
+ setName(NameStr);
+ }
+
+public:
+ /// Provides a way to construct any of the CastInst subclasses using an
+ /// opcode instead of the subclass's constructor. The opcode must be in the
+ /// CastOps category (Instruction::isCast(opcode) returns true). This
+ /// constructor has insert-before-instruction semantics to automatically
+ /// insert the new CastInst before InsertBefore (if it is non-null).
+ /// @brief Construct any of the CastInst subclasses
+ static CastInst *Create(
+ Instruction::CastOps, ///< The opcode of the cast instruction
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+ /// Provides a way to construct any of the CastInst subclasses using an
+ /// opcode instead of the subclass's constructor. The opcode must be in the
+ /// CastOps category. This constructor has insert-at-end-of-block semantics
+ /// to automatically insert the new CastInst at the end of InsertAtEnd (if
+  /// it is non-null).
+ /// @brief Construct any of the CastInst subclasses
+ static CastInst *Create(
+ Instruction::CastOps, ///< The opcode for the cast instruction
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
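+
+  // A minimal usage sketch, assuming Value *V has an integer type narrower
+  // than Type *Int64Ty and Instruction *IP is an insertion point; the
+  // opcode must satisfy Instruction::isCast:
+  //   CastInst *Ext = CastInst::Create(Instruction::ZExt, V, Int64Ty,
+  //                                    "ext", IP);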
+
+ /// @brief Create a ZExt or BitCast cast instruction
+ static CastInst *CreateZExtOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create a ZExt or BitCast cast instruction
+ static CastInst *CreateZExtOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// @brief Create a SExt or BitCast cast instruction
+ static CastInst *CreateSExtOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create a SExt or BitCast cast instruction
+ static CastInst *CreateSExtOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+  /// @brief Create a BitCast, AddrSpaceCast, or PtrToInt cast instruction.
+ static CastInst *CreatePointerCast(
+ Value *S, ///< The pointer value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+  /// @brief Create a BitCast, AddrSpaceCast, or PtrToInt cast instruction.
+ static CastInst *CreatePointerCast(
+ Value *S, ///< The pointer value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create a BitCast or an AddrSpaceCast cast instruction.
+ static CastInst *CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, ///< The pointer value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// @brief Create a BitCast or an AddrSpaceCast cast instruction.
+ static CastInst *CreatePointerBitCastOrAddrSpaceCast(
+ Value *S, ///< The pointer value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+  /// @brief Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
+ ///
+ /// If the value is a pointer type and the destination an integer type,
+ /// creates a PtrToInt cast. If the value is an integer type and the
+ /// destination a pointer type, creates an IntToPtr cast. Otherwise, creates
+ /// a bitcast.
+ static CastInst *CreateBitOrPointerCast(
+ Value *S, ///< The pointer value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
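+
+  // A sketch of the dispatch described above, assuming Value *PtrVal has a
+  // pointer type and IntPtrTy is DL.getIntPtrType(PtrVal->getType()):
+  //   CastInst *AsInt = CastInst::CreateBitOrPointerCast(PtrVal, IntPtrTy);
+  // This yields a PtrToInt; two same-sized non-pointer types would yield a
+  // BitCast instead.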
+
+ /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
+ static CastInst *CreateIntegerCast(
+    Value *S,                ///< The integer value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ bool isSigned, ///< Whether to regard S as signed or not
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create a ZExt, BitCast, or Trunc for int -> int casts.
+ static CastInst *CreateIntegerCast(
+ Value *S, ///< The integer value to be casted (operand 0)
+ Type *Ty, ///< The integer type to which operand is casted
+ bool isSigned, ///< Whether to regard S as signed or not
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
+ static CastInst *CreateFPCast(
+ Value *S, ///< The floating point value to be casted
+ Type *Ty, ///< The floating point type to cast to
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create an FPExt, BitCast, or FPTrunc for fp -> fp casts
+ static CastInst *CreateFPCast(
+ Value *S, ///< The floating point value to be casted
+ Type *Ty, ///< The floating point type to cast to
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// @brief Create a Trunc or BitCast cast instruction
+ static CastInst *CreateTruncOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which cast should be made
+ const Twine &Name = "", ///< Name for the instruction
+ Instruction *InsertBefore = nullptr ///< Place to insert the instruction
+ );
+
+ /// @brief Create a Trunc or BitCast cast instruction
+ static CastInst *CreateTruncOrBitCast(
+ Value *S, ///< The value to be casted (operand 0)
+ Type *Ty, ///< The type to which operand is casted
+ const Twine &Name, ///< The name for the instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// @brief Check whether it is valid to call getCastOpcode for these types.
+ static bool isCastable(
+ Type *SrcTy, ///< The Type from which the value should be cast.
+ Type *DestTy ///< The Type to which the value should be cast.
+ );
+
+ /// @brief Check whether a bitcast between these types is valid
+ static bool isBitCastable(
+ Type *SrcTy, ///< The Type from which the value should be cast.
+ Type *DestTy ///< The Type to which the value should be cast.
+ );
+
+ /// @brief Check whether a bitcast, inttoptr, or ptrtoint cast between these
+ /// types is valid and a no-op.
+ ///
+ /// This ensures that any pointer<->integer cast has enough bits in the
+ /// integer and any other cast is a bitcast.
+ static bool isBitOrNoopPointerCastable(
+ Type *SrcTy, ///< The Type from which the value should be cast.
+ Type *DestTy, ///< The Type to which the value should be cast.
+ const DataLayout &DL);
+
+ /// Returns the opcode necessary to cast Val into Ty using usual casting
+ /// rules.
+ /// @brief Infer the opcode for cast operand and type
+ static Instruction::CastOps getCastOpcode(
+ const Value *Val, ///< The value to cast
+ bool SrcIsSigned, ///< Whether to treat the source as signed
+ Type *Ty, ///< The Type to which the value should be casted
+    bool DstIsSigned  ///< Whether to treat the destination as signed
+ );
+
+ /// There are several places where we need to know if a cast instruction
+ /// only deals with integer source and destination types. To simplify that
+ /// logic, this method is provided.
+  /// @returns true iff both the operand and destination types are integers.
+ /// @brief Determine if this is an integer-only cast.
+ bool isIntegerCast() const;
+
+ /// A lossless cast is one that does not alter the basic value. It implies
+ /// a no-op cast but is more stringent, preventing things like int->float,
+ /// long->double, or int->ptr.
+ /// @returns true iff the cast is lossless.
+ /// @brief Determine if this is a lossless cast.
+ bool isLosslessCast() const;
+
+ /// A no-op cast is one that can be effected without changing any bits.
+ /// It implies that the source and destination types are the same size. The
+ /// IntPtrTy argument is used to make accurate determinations for casts
+ /// involving Integer and Pointer types. They are no-op casts if the integer
+ /// is the same size as the pointer. However, pointer size varies with
+ /// platform. Generally, the result of DataLayout::getIntPtrType() should be
+  /// passed in. If that's not available, use an i64 type, which will make
+  /// the isNoopCast call conservative.
+ /// @brief Determine if the described cast is a no-op cast.
+ static bool isNoopCast(
+ Instruction::CastOps Opcode, ///< Opcode of cast
+ Type *SrcTy, ///< SrcTy of cast
+ Type *DstTy, ///< DstTy of cast
+ Type *IntPtrTy ///< Integer type corresponding to Ptr types
+ );
+
+ /// @brief Determine if this cast is a no-op cast.
+ bool isNoopCast(
+ Type *IntPtrTy ///< Integer type corresponding to pointer
+ ) const;
+
+ /// @brief Determine if this cast is a no-op cast.
+ ///
+ /// \param DL is the DataLayout to get the Int Ptr type from.
+ bool isNoopCast(const DataLayout &DL) const;
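+
+  // A minimal sketch, assuming CastInst *CI and the module's DataLayout DL
+  // are in scope; the DataLayout overload picks the right IntPtrTy itself:
+  //   if (CI->isNoopCast(DL))
+  //     /* CI changes no bits and can be folded away */;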
+
+ /// Determine how a pair of casts can be eliminated, if they can be at all.
+ /// This is a helper function for both CastInst and ConstantExpr.
+ /// @returns 0 if the CastInst pair can't be eliminated, otherwise
+ /// returns Instruction::CastOps value for a cast that can replace
+ /// the pair, casting SrcTy to DstTy.
+ /// @brief Determine if a cast pair is eliminable
+ static unsigned isEliminableCastPair(
+ Instruction::CastOps firstOpcode, ///< Opcode of first cast
+ Instruction::CastOps secondOpcode, ///< Opcode of second cast
+ Type *SrcTy, ///< SrcTy of 1st cast
+ Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast
+ Type *DstTy, ///< DstTy of 2nd cast
+ Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null
+ Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null
+ Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null
+ );
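+
+  // A worked sketch, assuming Int8Ty/Int16Ty/Int32Ty are in scope: a ZExt
+  // from i8 to i32 followed by a Trunc to i16 keeps only zero-extended low
+  // bits, so the pair folds to a single ZExt from i8 to i16:
+  //   unsigned Op = CastInst::isEliminableCastPair(
+  //       Instruction::ZExt, Instruction::Trunc, Int8Ty, Int32Ty, Int16Ty,
+  //       nullptr, nullptr, nullptr); // IntPtrTys unused for integer casts
+  //   // expected: Op == Instruction::ZExt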
+
+ /// @brief Return the opcode of this CastInst
+ Instruction::CastOps getOpcode() const {
+ return Instruction::CastOps(Instruction::getOpcode());
+ }
+
+ /// @brief Return the source type, as a convenience
+ Type* getSrcTy() const { return getOperand(0)->getType(); }
+ /// @brief Return the destination type, as a convenience
+ Type* getDestTy() const { return getType(); }
+
+ /// This method can be used to determine if a cast from S to DstTy using
+ /// Opcode op is valid or not.
+ /// @returns true iff the proposed cast is valid.
+ /// @brief Determine if a cast is valid without creating one.
+ static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy);
+
+ /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->isCast();
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// CmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This class is the base class for the comparison instructions.
+/// @brief Abstract base class of comparison instructions.
+class CmpInst : public Instruction {
+public:
+ /// This enumeration lists the possible predicates for CmpInst subclasses.
+ /// Values in the range 0-31 are reserved for FCmpInst, while values in the
+ /// range 32-64 are reserved for ICmpInst. This is necessary to ensure the
+ /// predicate values are not overlapping between the classes.
+ enum Predicate {
+ // Opcode U L G E Intuitive operation
+ FCMP_FALSE = 0, ///< 0 0 0 0 Always false (always folded)
+ FCMP_OEQ = 1, ///< 0 0 0 1 True if ordered and equal
+ FCMP_OGT = 2, ///< 0 0 1 0 True if ordered and greater than
+ FCMP_OGE = 3, ///< 0 0 1 1 True if ordered and greater than or equal
+ FCMP_OLT = 4, ///< 0 1 0 0 True if ordered and less than
+ FCMP_OLE = 5, ///< 0 1 0 1 True if ordered and less than or equal
+ FCMP_ONE = 6, ///< 0 1 1 0 True if ordered and operands are unequal
+ FCMP_ORD = 7, ///< 0 1 1 1 True if ordered (no nans)
+ FCMP_UNO = 8, ///< 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
+ FCMP_UEQ = 9, ///< 1 0 0 1 True if unordered or equal
+ FCMP_UGT = 10, ///< 1 0 1 0 True if unordered or greater than
+ FCMP_UGE = 11, ///< 1 0 1 1 True if unordered, greater than, or equal
+ FCMP_ULT = 12, ///< 1 1 0 0 True if unordered or less than
+ FCMP_ULE = 13, ///< 1 1 0 1 True if unordered, less than, or equal
+ FCMP_UNE = 14, ///< 1 1 1 0 True if unordered or not equal
+ FCMP_TRUE = 15, ///< 1 1 1 1 Always true (always folded)
+ FIRST_FCMP_PREDICATE = FCMP_FALSE,
+ LAST_FCMP_PREDICATE = FCMP_TRUE,
+ BAD_FCMP_PREDICATE = FCMP_TRUE + 1,
+ ICMP_EQ = 32, ///< equal
+ ICMP_NE = 33, ///< not equal
+ ICMP_UGT = 34, ///< unsigned greater than
+ ICMP_UGE = 35, ///< unsigned greater or equal
+ ICMP_ULT = 36, ///< unsigned less than
+ ICMP_ULE = 37, ///< unsigned less or equal
+ ICMP_SGT = 38, ///< signed greater than
+ ICMP_SGE = 39, ///< signed greater or equal
+ ICMP_SLT = 40, ///< signed less than
+ ICMP_SLE = 41, ///< signed less or equal
+ FIRST_ICMP_PREDICATE = ICMP_EQ,
+ LAST_ICMP_PREDICATE = ICMP_SLE,
+ BAD_ICMP_PREDICATE = ICMP_SLE + 1
+ };
+
+private:
+ void *operator new(size_t, unsigned) = delete;
+ CmpInst() = delete;
+
+protected:
+ CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
+ Value *LHS, Value *RHS, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+
+ CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred,
+ Value *LHS, Value *RHS, const Twine &Name,
+ BasicBlock *InsertAtEnd);
+
+ void anchor() override; // Out of line virtual method.
+
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ /// Construct a compare instruction, given the opcode, the predicate and
+ /// the two operands. Optionally (if InstBefore is specified) insert the
+ /// instruction into a BasicBlock right before the specified instruction.
+ /// The specified Instruction is allowed to be a dereferenced end iterator.
+ /// @brief Create a CmpInst
+ static CmpInst *Create(OtherOps Op,
+ Predicate predicate, Value *S1,
+ Value *S2, const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+
+ /// Construct a compare instruction, given the opcode, the predicate and the
+ /// two operands. Also automatically insert this instruction to the end of
+ /// the BasicBlock specified.
+ /// @brief Create a CmpInst
+ static CmpInst *Create(OtherOps Op, Predicate predicate, Value *S1,
+ Value *S2, const Twine &Name, BasicBlock *InsertAtEnd);
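+
+  // A minimal sketch, assuming Value *A and *B share an integer type and
+  // BasicBlock *BB is in scope; the predicate selects the comparison:
+  //   CmpInst *LT = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_SLT,
+  //                                 A, B, "cmp", BB);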
+
+ /// @brief Get the opcode casted to the right type
+ OtherOps getOpcode() const {
+ return static_cast<OtherOps>(Instruction::getOpcode());
+ }
+
+ /// @brief Return the predicate for this instruction.
+ Predicate getPredicate() const {
+ return Predicate(getSubclassDataFromInstruction());
+ }
+
+ /// @brief Set the predicate for this instruction to the specified value.
+ void setPredicate(Predicate P) { setInstructionSubclassData(P); }
+
+ static bool isFPPredicate(Predicate P) {
+ return P >= FIRST_FCMP_PREDICATE && P <= LAST_FCMP_PREDICATE;
+ }
+
+ static bool isIntPredicate(Predicate P) {
+ return P >= FIRST_ICMP_PREDICATE && P <= LAST_ICMP_PREDICATE;
+ }
+
+ bool isFPPredicate() const { return isFPPredicate(getPredicate()); }
+ bool isIntPredicate() const { return isIntPredicate(getPredicate()); }
+
+ /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
+ /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
+ /// @returns the inverse predicate for the instruction's current predicate.
+ /// @brief Return the inverse of the instruction's predicate.
+ Predicate getInversePredicate() const {
+ return getInversePredicate(getPredicate());
+ }
+
+ /// For example, EQ -> NE, UGT -> ULE, SLT -> SGE,
+ /// OEQ -> UNE, UGT -> OLE, OLT -> UGE, etc.
+ /// @returns the inverse predicate for predicate provided in \p pred.
+ /// @brief Return the inverse of a given predicate
+ static Predicate getInversePredicate(Predicate pred);
+
+ /// For example, EQ->EQ, SLE->SGE, ULT->UGT,
+ /// OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
+ /// @returns the predicate that would be the result of exchanging the two
+ /// operands of the CmpInst instruction without changing the result
+ /// produced.
+ /// @brief Return the predicate as if the operands were swapped
+ Predicate getSwappedPredicate() const {
+ return getSwappedPredicate(getPredicate());
+ }
+
+ /// This is a static version that you can use without an instruction
+ /// available.
+ /// @brief Return the predicate as if the operands were swapped.
+ static Predicate getSwappedPredicate(Predicate pred);
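+
+  // Worked examples of the two transforms above: for ICMP_SLT the inverse
+  // predicate (negated result) is ICMP_SGE, while the swapped predicate
+  // (operands exchanged, result preserved) is ICMP_SGT:
+  //   CmpInst::getInversePredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGE
+  //   CmpInst::getSwappedPredicate(CmpInst::ICMP_SLT) == CmpInst::ICMP_SGT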
+
+ /// @brief Provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// This is just a convenience that dispatches to the subclasses.
+ /// @brief Swap the operands and adjust predicate accordingly to retain
+ /// the same comparison.
+ void swapOperands();
+
+ /// This is just a convenience that dispatches to the subclasses.
+ /// @brief Determine if this CmpInst is commutative.
+ bool isCommutative() const;
+
+ /// This is just a convenience that dispatches to the subclasses.
+ /// @brief Determine if this is an equals/not equals predicate.
+ bool isEquality() const;
+
+ /// @returns true if the comparison is signed, false otherwise.
+ /// @brief Determine if this instruction is using a signed comparison.
+ bool isSigned() const {
+ return isSigned(getPredicate());
+ }
+
+ /// @returns true if the comparison is unsigned, false otherwise.
+ /// @brief Determine if this instruction is using an unsigned comparison.
+ bool isUnsigned() const {
+ return isUnsigned(getPredicate());
+ }
+
+ /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
+ /// @returns the signed version of the unsigned predicate pred.
+ /// @brief return the signed version of a predicate
+ static Predicate getSignedPredicate(Predicate pred);
+
+ /// For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert
+ /// @returns the signed version of the predicate for this instruction (which
+ /// has to be an unsigned predicate).
+ /// @brief return the signed version of a predicate
+ Predicate getSignedPredicate() {
+ return getSignedPredicate(getPredicate());
+ }
+
+ /// This is just a convenience.
+ /// @brief Determine if this is true when both operands are the same.
+ bool isTrueWhenEqual() const {
+ return isTrueWhenEqual(getPredicate());
+ }
+
+ /// This is just a convenience.
+ /// @brief Determine if this is false when both operands are the same.
+ bool isFalseWhenEqual() const {
+ return isFalseWhenEqual(getPredicate());
+ }
+
+ /// @returns true if the predicate is unsigned, false otherwise.
+ /// @brief Determine if the predicate is an unsigned operation.
+ static bool isUnsigned(Predicate predicate);
+
+ /// @returns true if the predicate is signed, false otherwise.
+  /// @brief Determine if the predicate is a signed operation.
+ static bool isSigned(Predicate predicate);
+
+ /// @brief Determine if the predicate is an ordered operation.
+ static bool isOrdered(Predicate predicate);
+
+ /// @brief Determine if the predicate is an unordered operation.
+ static bool isUnordered(Predicate predicate);
+
+ /// Determine if the predicate is true when comparing a value with itself.
+ static bool isTrueWhenEqual(Predicate predicate);
+
+ /// Determine if the predicate is false when comparing a value with itself.
+ static bool isFalseWhenEqual(Predicate predicate);
+
+ /// @brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::ICmp ||
+ I->getOpcode() == Instruction::FCmp;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+ /// @brief Create a result type for fcmp/icmp
+ static Type* makeCmpResultType(Type* opnd_type) {
+ if (VectorType* vt = dyn_cast<VectorType>(opnd_type)) {
+ return VectorType::get(Type::getInt1Ty(opnd_type->getContext()),
+ vt->getNumElements());
+ }
+ return Type::getInt1Ty(opnd_type->getContext());
+ }
+
+private:
+ // Shadow Value::setValueSubclassData with a private forwarding method so that
+ // subclasses cannot accidentally use it.
+ void setValueSubclassData(unsigned short D) {
+ Value::setValueSubclassData(D);
+ }
+};
+
+// FIXME: these are redundant if CmpInst < BinaryOperator
+template <>
+struct OperandTraits<CmpInst> : public FixedNumOperandTraits<CmpInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
+
+//===----------------------------------------------------------------------===//
+// FuncletPadInst Class
+//===----------------------------------------------------------------------===//
+class FuncletPadInst : public Instruction {
+private:
+ void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
+
+ FuncletPadInst(const FuncletPadInst &CPI);
+
+ explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore);
+ explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
+ ArrayRef<Value *> Args, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ friend class CatchPadInst;
+ friend class CleanupPadInst;
+ FuncletPadInst *cloneImpl() const;
+
+public:
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// getNumArgOperands - Return the number of funcletpad arguments.
+ ///
+ unsigned getNumArgOperands() const { return getNumOperands() - 1; }
+
+ /// Convenience accessors
+
+ /// \brief Return the outer EH-pad this funclet is nested within.
+ ///
+ /// Note: This returns the associated CatchSwitchInst if this FuncletPadInst
+ /// is a CatchPadInst.
+ Value *getParentPad() const { return Op<-1>(); }
+ void setParentPad(Value *ParentPad) {
+ assert(ParentPad);
+ Op<-1>() = ParentPad;
+ }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th funcletpad argument.
+ ///
+ Value *getArgOperand(unsigned i) const { return getOperand(i); }
+ void setArgOperand(unsigned i, Value *v) { setOperand(i, v); }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ op_range arg_operands() { return op_range(op_begin(), op_end() - 1); }
+
+ /// arg_operands - iteration adapter for range-for loops.
+ const_op_range arg_operands() const {
+ return const_op_range(op_begin(), op_end() - 1);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) { return I->isFuncletPad(); }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<FuncletPadInst>
+ : public VariadicOperandTraits<FuncletPadInst, /*MINARITY=*/1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(FuncletPadInst, Value)
+
+/// \brief A lightweight accessor for an operand bundle meant to be passed
+/// around by value.
+struct OperandBundleUse {
+ ArrayRef<Use> Inputs;
+
+ OperandBundleUse() {}
+ explicit OperandBundleUse(StringMapEntry<uint32_t> *Tag, ArrayRef<Use> Inputs)
+ : Inputs(Inputs), Tag(Tag) {}
+
+ /// \brief Return true if the operand at index \p Idx in this operand bundle
+ /// has the attribute A.
+ bool operandHasAttr(unsigned Idx, Attribute::AttrKind A) const {
+ if (isDeoptOperandBundle())
+ if (A == Attribute::ReadOnly || A == Attribute::NoCapture)
+ return Inputs[Idx]->getType()->isPointerTy();
+
+ // Conservative answer: no operands have any attributes.
+ return false;
+  }
+
+ /// \brief Return the tag of this operand bundle as a string.
+ StringRef getTagName() const {
+ return Tag->getKey();
+ }
+
+ /// \brief Return the tag of this operand bundle as an integer.
+ ///
+ /// Operand bundle tags are interned by LLVMContextImpl::getOrInsertBundleTag,
+  /// and this function returns the unique integer that getOrInsertBundleTag
+  /// associated with the tag of this operand bundle.
+ uint32_t getTagID() const {
+ return Tag->getValue();
+ }
+
+ /// \brief Return true if this is a "deopt" operand bundle.
+ bool isDeoptOperandBundle() const {
+ return getTagID() == LLVMContext::OB_deopt;
+ }
+
+ /// \brief Return true if this is a "funclet" operand bundle.
+ bool isFuncletOperandBundle() const {
+ return getTagID() == LLVMContext::OB_funclet;
+ }
+
+private:
+ /// \brief Pointer to an entry in LLVMContextImpl::getOrInsertBundleTag.
+ StringMapEntry<uint32_t> *Tag;
+};
+
+/// \brief A container for an operand bundle being viewed as a set of values
+/// rather than a set of uses.
+///
+/// Unlike OperandBundleUse, OperandBundleDefT owns the memory it carries, and
+/// so it is possible to create and pass around "self-contained" instances of
+/// OperandBundleDef and ConstOperandBundleDef.
+template <typename InputTy> class OperandBundleDefT {
+ std::string Tag;
+ std::vector<InputTy> Inputs;
+
+public:
+ explicit OperandBundleDefT(std::string Tag, std::vector<InputTy> Inputs)
+ : Tag(std::move(Tag)), Inputs(std::move(Inputs)) {}
+ explicit OperandBundleDefT(std::string Tag, ArrayRef<InputTy> Inputs)
+ : Tag(std::move(Tag)), Inputs(Inputs) {}
+
+ explicit OperandBundleDefT(const OperandBundleUse &OBU) {
+ Tag = OBU.getTagName();
+ Inputs.insert(Inputs.end(), OBU.Inputs.begin(), OBU.Inputs.end());
+ }
+
+ ArrayRef<InputTy> inputs() const { return Inputs; }
+
+ typedef typename std::vector<InputTy>::const_iterator input_iterator;
+ size_t input_size() const { return Inputs.size(); }
+ input_iterator input_begin() const { return Inputs.begin(); }
+ input_iterator input_end() const { return Inputs.end(); }
+
+ StringRef getTag() const { return Tag; }
+};
+
+typedef OperandBundleDefT<Value *> OperandBundleDef;
+typedef OperandBundleDefT<const Value *> ConstOperandBundleDef;
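+
+// A minimal construction sketch, assuming Value *V1 and *V2 are in scope;
+// a bundle is just a tag string plus a list of inputs:
+//   OperandBundleDef Deopt("deopt", std::vector<Value *>{V1, V2});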
+
+/// \brief A mixin to add operand bundle functionality to llvm instruction
+/// classes.
+///
+/// OperandBundleUser uses the descriptor area co-allocated with the host User
+/// to store some meta information about which operands are "normal" operands,
+/// and which ones belong to some operand bundle.
+///
+/// The layout of an operand bundle user is
+///
+/// +-----------uint32_t End-------------------------------------+
+/// | |
+/// | +--------uint32_t Begin--------------------+ |
+/// | | | |
+/// ^ ^ v v
+/// |------|------|----|----|----|----|----|---------|----|---------|----|-----
+/// | BOI0 | BOI1 | .. | DU | U0 | U1 | .. | BOI0_U0 | .. | BOI1_U0 | .. | Un
+/// |------|------|----|----|----|----|----|---------|----|---------|----|-----
+/// v v ^ ^
+/// | | | |
+/// | +--------uint32_t Begin------------+ |
+/// | |
+/// +-----------uint32_t End-----------------------------+
+///
+///
+/// BOI0, BOI1 ... are descriptions of operand bundles in this User's use list.
+/// These descriptions are installed and managed by this class, and they're all
+/// instances of OperandBundleUser<T>::BundleOpInfo.
+///
+/// DU is an additional descriptor installed by User's 'operator new' to keep
+/// track of the 'BOI0 ... BOIN' co-allocation. OperandBundleUser does not
+/// access or modify DU in any way, it's an implementation detail private to
+/// User.
+///
+/// The regular Use& vector for the User starts at U0. The operand bundle uses
+/// are part of the Use& vector, just like normal uses. In the diagram above,
+/// the operand bundle uses start at BOI0_U0. Each instance of BundleOpInfo has
+/// information about a contiguous set of uses constituting an operand bundle,
+/// and the total set of operand bundle uses themselves form a contiguous set of
+/// uses (i.e. there are no gaps between uses corresponding to individual
+/// operand bundles).
+///
+/// This class does not know the location of the set of operand bundle uses
+/// within the use list -- that is decided by the User using this class via the
+/// BeginIdx argument in populateBundleOperandInfos.
+///
+/// Currently operand bundle users with hung-off operands are not supported.
+template <typename InstrTy, typename OpIteratorTy> class OperandBundleUser {
+public:
+ /// \brief Return the number of operand bundles associated with this User.
+ unsigned getNumOperandBundles() const {
+ return std::distance(bundle_op_info_begin(), bundle_op_info_end());
+ }
+
+ /// \brief Return true if this User has any operand bundles.
+ bool hasOperandBundles() const { return getNumOperandBundles() != 0; }
+
+ /// \brief Return the index of the first bundle operand in the Use array.
+ unsigned getBundleOperandsStartIndex() const {
+ assert(hasOperandBundles() && "Don't call otherwise!");
+ return bundle_op_info_begin()->Begin;
+ }
+
+ /// \brief Return the index of the last bundle operand in the Use array.
+ unsigned getBundleOperandsEndIndex() const {
+ assert(hasOperandBundles() && "Don't call otherwise!");
+ return bundle_op_info_end()[-1].End;
+ }
+
+  /// \brief Return the total number of operands (not operand bundles) used by
+ /// every operand bundle in this OperandBundleUser.
+ unsigned getNumTotalBundleOperands() const {
+ if (!hasOperandBundles())
+ return 0;
+
+ unsigned Begin = getBundleOperandsStartIndex();
+ unsigned End = getBundleOperandsEndIndex();
+
+ assert(Begin <= End && "Should be!");
+ return End - Begin;
+ }
+
+ /// \brief Return the operand bundle at a specific index.
+ OperandBundleUse getOperandBundleAt(unsigned Index) const {
+ assert(Index < getNumOperandBundles() && "Index out of bounds!");
+ return operandBundleFromBundleOpInfo(*(bundle_op_info_begin() + Index));
+ }
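+
+  // An iteration sketch, assuming a CallInst &CS (a typical user of this
+  // mixin) is in scope:
+  //   for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
+  //     OperandBundleUse OBU = CS.getOperandBundleAt(i);
+  //     StringRef Tag = OBU.getTagName(); // e.g. "deopt"
+  //   }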
+
+ /// \brief Return the number of operand bundles with the tag Name attached to
+ /// this instruction.
+ unsigned countOperandBundlesOfType(StringRef Name) const {
+ unsigned Count = 0;
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+ if (getOperandBundleAt(i).getTagName() == Name)
+ Count++;
+
+ return Count;
+ }
+
+ /// \brief Return the number of operand bundles with the tag ID attached to
+ /// this instruction.
+ unsigned countOperandBundlesOfType(uint32_t ID) const {
+ unsigned Count = 0;
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+ if (getOperandBundleAt(i).getTagID() == ID)
+ Count++;
+
+ return Count;
+ }
+
+ /// \brief Return an operand bundle by name, if present.
+ ///
+ /// It is an error to call this for operand bundle types that may have
+ /// multiple instances of them on the same instruction.
+ Optional<OperandBundleUse> getOperandBundle(StringRef Name) const {
+ assert(countOperandBundlesOfType(Name) < 2 && "Precondition violated!");
+
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
+ OperandBundleUse U = getOperandBundleAt(i);
+ if (U.getTagName() == Name)
+ return U;
+ }
+
+ return None;
+ }
+
+ /// \brief Return an operand bundle by tag ID, if present.
+ ///
+ /// It is an error to call this for operand bundle types that may have
+ /// multiple instances of them on the same instruction.
+ Optional<OperandBundleUse> getOperandBundle(uint32_t ID) const {
+ assert(countOperandBundlesOfType(ID) < 2 && "Precondition violated!");
+
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i) {
+ OperandBundleUse U = getOperandBundleAt(i);
+ if (U.getTagID() == ID)
+ return U;
+ }
+
+ return None;
+ }
+
+ /// \brief Return the list of operand bundles attached to this instruction as
+ /// a vector of OperandBundleDefs.
+ ///
+  /// This function copies the OperandBundleUse instances associated with this
+  /// OperandBundleUser to a vector of OperandBundleDefs. Note:
+  /// OperandBundleUses and OperandBundleDefs are non-trivially *different*
+ /// representations of operand bundles (see documentation above).
+ void getOperandBundlesAsDefs(SmallVectorImpl<OperandBundleDef> &Defs) const {
+ for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
+ Defs.emplace_back(getOperandBundleAt(i));
+ }
+
+ /// \brief Return the operand bundle for the operand at index OpIdx.
+ ///
+  /// It is an error to call this with an OpIdx that does not correspond to a
+ /// bundle operand.
+ OperandBundleUse getOperandBundleForOperand(unsigned OpIdx) const {
+ return operandBundleFromBundleOpInfo(getBundleOpInfoForOperand(OpIdx));
+ }
+
+ /// \brief Return true if this operand bundle user has operand bundles that
+ /// may read from the heap.
+ bool hasReadingOperandBundles() const {
+ // Implementation note: this is a conservative implementation of operand
+ // bundle semantics, where *any* operand bundle forces a callsite to be at
+ // least readonly.
+ return hasOperandBundles();
+ }
+
+ /// \brief Return true if this operand bundle user has operand bundles that
+ /// may write to the heap.
+ bool hasClobberingOperandBundles() const {
+ for (auto &BOI : bundle_op_infos()) {
+ if (BOI.Tag->second == LLVMContext::OB_deopt ||
+ BOI.Tag->second == LLVMContext::OB_funclet)
+ continue;
+
+ // This instruction has an operand bundle that is not known to us.
+ // Assume the worst.
+ return true;
+ }
+
+ return false;
+ }
+
+ /// \brief Return true if the bundle operand at index \p OpIdx has the
+ /// attribute \p A.
+ bool bundleOperandHasAttr(unsigned OpIdx, Attribute::AttrKind A) const {
+ auto &BOI = getBundleOpInfoForOperand(OpIdx);
+ auto OBU = operandBundleFromBundleOpInfo(BOI);
+ return OBU.operandHasAttr(OpIdx - BOI.Begin, A);
+ }
+
+ /// \brief Return true if \p Other has the same sequence of operand bundle
+ /// tags with the same number of operands on each one of them as this
+ /// OperandBundleUser.
+ bool hasIdenticalOperandBundleSchema(
+ const OperandBundleUser<InstrTy, OpIteratorTy> &Other) const {
+ if (getNumOperandBundles() != Other.getNumOperandBundles())
+ return false;
+
+ return std::equal(bundle_op_info_begin(), bundle_op_info_end(),
+ Other.bundle_op_info_begin());
+  }
+
+protected:
+ /// \brief Is the function attribute S disallowed by some operand bundle on
+ /// this operand bundle user?
+ bool isFnAttrDisallowedByOpBundle(StringRef S) const {
+    // Operand bundles only possibly disallow the readnone, readonly and
+    // argmemonly attributes. All string attributes are fine.
+ return false;
+ }
+
+ /// \brief Is the function attribute A disallowed by some operand bundle on
+ /// this operand bundle user?
+ bool isFnAttrDisallowedByOpBundle(Attribute::AttrKind A) const {
+ switch (A) {
+ default:
+ return false;
+
+ case Attribute::ArgMemOnly:
+ return hasReadingOperandBundles();
+
+ case Attribute::ReadNone:
+ return hasReadingOperandBundles();
+
+ case Attribute::ReadOnly:
+ return hasClobberingOperandBundles();
+ }
+
+ llvm_unreachable("switch has a default case!");
+ }
+
+ /// \brief Used to keep track of an operand bundle. See the main comment on
+ /// OperandBundleUser above.
+ struct BundleOpInfo {
+ /// \brief The operand bundle tag, interned by
+ /// LLVMContextImpl::getOrInsertBundleTag.
+ StringMapEntry<uint32_t> *Tag;
+
+ /// \brief The index in the Use& vector where operands for this operand
+ /// bundle starts.
+ uint32_t Begin;
+
+ /// \brief The index in the Use& vector where operands for this operand
+ /// bundle ends.
+ uint32_t End;
+
+ bool operator==(const BundleOpInfo &Other) const {
+ return Tag == Other.Tag && Begin == Other.Begin && End == Other.End;
+ }
+ };
+
+ /// \brief Simple helper function to map a BundleOpInfo to an
+ /// OperandBundleUse.
+ OperandBundleUse
+ operandBundleFromBundleOpInfo(const BundleOpInfo &BOI) const {
+ auto op_begin = static_cast<const InstrTy *>(this)->op_begin();
+ ArrayRef<Use> Inputs(op_begin + BOI.Begin, op_begin + BOI.End);
+ return OperandBundleUse(BOI.Tag, Inputs);
+ }
+
+ typedef BundleOpInfo *bundle_op_iterator;
+ typedef const BundleOpInfo *const_bundle_op_iterator;
+
+ /// \brief Return the start of the list of BundleOpInfo instances associated
+ /// with this OperandBundleUser.
+ bundle_op_iterator bundle_op_info_begin() {
+ if (!static_cast<InstrTy *>(this)->hasDescriptor())
+ return nullptr;
+
+ uint8_t *BytesBegin = static_cast<InstrTy *>(this)->getDescriptor().begin();
+ return reinterpret_cast<bundle_op_iterator>(BytesBegin);
+ }
+
+ /// \brief Return the start of the list of BundleOpInfo instances associated
+ /// with this OperandBundleUser.
+ const_bundle_op_iterator bundle_op_info_begin() const {
+ auto *NonConstThis =
+ const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+ return NonConstThis->bundle_op_info_begin();
+ }
+
+ /// \brief Return the end of the list of BundleOpInfo instances associated
+ /// with this OperandBundleUser.
+ bundle_op_iterator bundle_op_info_end() {
+ if (!static_cast<InstrTy *>(this)->hasDescriptor())
+ return nullptr;
+
+ uint8_t *BytesEnd = static_cast<InstrTy *>(this)->getDescriptor().end();
+ return reinterpret_cast<bundle_op_iterator>(BytesEnd);
+ }
+
+ /// \brief Return the end of the list of BundleOpInfo instances associated
+ /// with this OperandBundleUser.
+ const_bundle_op_iterator bundle_op_info_end() const {
+ auto *NonConstThis =
+ const_cast<OperandBundleUser<InstrTy, OpIteratorTy> *>(this);
+ return NonConstThis->bundle_op_info_end();
+ }
+
+ /// \brief Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
+ iterator_range<bundle_op_iterator> bundle_op_infos() {
+ return make_range(bundle_op_info_begin(), bundle_op_info_end());
+ }
+
+ /// \brief Return the range [\p bundle_op_info_begin, \p bundle_op_info_end).
+ iterator_range<const_bundle_op_iterator> bundle_op_infos() const {
+ return make_range(bundle_op_info_begin(), bundle_op_info_end());
+ }
+
+ /// \brief Populate the BundleOpInfo instances and the Use& vector from \p
+  /// Bundles. Return the op_iterator pointing to the Use& one past the last
+  /// bundle operand use.
+  ///
+  /// Each \p OperandBundleDef instance is tracked by a BundleOpInfo
+ /// instance allocated in this User's descriptor.
+ OpIteratorTy populateBundleOperandInfos(ArrayRef<OperandBundleDef> Bundles,
+ const unsigned BeginIndex) {
+ auto It = static_cast<InstrTy *>(this)->op_begin() + BeginIndex;
+ for (auto &B : Bundles)
+ It = std::copy(B.input_begin(), B.input_end(), It);
+
+ auto *ContextImpl = static_cast<InstrTy *>(this)->getContext().pImpl;
+ auto BI = Bundles.begin();
+ unsigned CurrentIndex = BeginIndex;
+
+ for (auto &BOI : bundle_op_infos()) {
+ assert(BI != Bundles.end() && "Incorrect allocation?");
+
+ BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
+ BOI.Begin = CurrentIndex;
+ BOI.End = CurrentIndex + BI->input_size();
+ CurrentIndex = BOI.End;
+ BI++;
+ }
+
+ assert(BI == Bundles.end() && "Incorrect allocation?");
+
+ return It;
+ }
+
+ /// \brief Return the BundleOpInfo for the operand at index OpIdx.
+ ///
+  /// It is an error to call this with an OpIdx that does not correspond to a
+ /// bundle operand.
+ const BundleOpInfo &getBundleOpInfoForOperand(unsigned OpIdx) const {
+ for (auto &BOI : bundle_op_infos())
+ if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
+ return BOI;
+
+ llvm_unreachable("Did not find operand bundle for operand!");
+ }
+
+ /// \brief Return the total number of values used in \p Bundles.
+ static unsigned CountBundleInputs(ArrayRef<OperandBundleDef> Bundles) {
+ unsigned Total = 0;
+ for (auto &B : Bundles)
+ Total += B.input_size();
+ return Total;
+ }
+};
+
+} // end llvm namespace
+
+#endif // LLVM_IR_INSTRTYPES_H
diff --git a/contrib/llvm/include/llvm/IR/Instruction.def b/contrib/llvm/include/llvm/IR/Instruction.def
new file mode 100644
index 0000000..18711ab
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Instruction.def
@@ -0,0 +1,225 @@
+//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains descriptions of the various LLVM instructions. This is
+// used as a central place for enumerating the different instructions and
+// should eventually be the place to put comments about the instructions.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+//
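+// A typical usage sketch: define only the handler you need, include this
+// file, and every instruction expands through it (unhandled macros default
+// to no-ops below). For example, mapping opcode numbers to names:
+//
+//   #define HANDLE_INST(N, OPC, CLASS) case N: return #OPC;
+//   #include "llvm/IR/Instruction.def"
+//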
+#ifndef FIRST_TERM_INST
+#define FIRST_TERM_INST(num)
+#endif
+#ifndef HANDLE_TERM_INST
+#ifndef HANDLE_INST
+#define HANDLE_TERM_INST(num, opcode, Class)
+#else
+#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_TERM_INST
+#define LAST_TERM_INST(num)
+#endif
+
+#ifndef FIRST_BINARY_INST
+#define FIRST_BINARY_INST(num)
+#endif
+#ifndef HANDLE_BINARY_INST
+#ifndef HANDLE_INST
+#define HANDLE_BINARY_INST(num, opcode, instclass)
+#else
+#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_BINARY_INST
+#define LAST_BINARY_INST(num)
+#endif
+
+#ifndef FIRST_MEMORY_INST
+#define FIRST_MEMORY_INST(num)
+#endif
+#ifndef HANDLE_MEMORY_INST
+#ifndef HANDLE_INST
+#define HANDLE_MEMORY_INST(num, opcode, Class)
+#else
+#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_MEMORY_INST
+#define LAST_MEMORY_INST(num)
+#endif
+
+#ifndef FIRST_CAST_INST
+#define FIRST_CAST_INST(num)
+#endif
+#ifndef HANDLE_CAST_INST
+#ifndef HANDLE_INST
+#define HANDLE_CAST_INST(num, opcode, Class)
+#else
+#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_CAST_INST
+#define LAST_CAST_INST(num)
+#endif
+
+#ifndef FIRST_FUNCLETPAD_INST
+#define FIRST_FUNCLETPAD_INST(num)
+#endif
+#ifndef HANDLE_FUNCLETPAD_INST
+#ifndef HANDLE_INST
+#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
+#else
+#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_FUNCLETPAD_INST
+#define LAST_FUNCLETPAD_INST(num)
+#endif
+
+#ifndef FIRST_OTHER_INST
+#define FIRST_OTHER_INST(num)
+#endif
+#ifndef HANDLE_OTHER_INST
+#ifndef HANDLE_INST
+#define HANDLE_OTHER_INST(num, opcode, Class)
+#else
+#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
+#endif
+#endif
+#ifndef LAST_OTHER_INST
+#define LAST_OTHER_INST(num)
+#endif
+
+// Terminator Instructions - These instructions are used to terminate a basic
+// block of the program. Every basic block must end with one of these
+// instructions for it to be a well formed basic block.
+//
+ FIRST_TERM_INST ( 1)
+HANDLE_TERM_INST ( 1, Ret , ReturnInst)
+HANDLE_TERM_INST ( 2, Br , BranchInst)
+HANDLE_TERM_INST ( 3, Switch , SwitchInst)
+HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
+HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
+HANDLE_TERM_INST ( 6, Resume , ResumeInst)
+HANDLE_TERM_INST ( 7, Unreachable , UnreachableInst)
+HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst)
+HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
+HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
+ LAST_TERM_INST (10)
+
+// Standard binary operators...
+ FIRST_BINARY_INST(11)
+HANDLE_BINARY_INST(11, Add , BinaryOperator)
+HANDLE_BINARY_INST(12, FAdd , BinaryOperator)
+HANDLE_BINARY_INST(13, Sub , BinaryOperator)
+HANDLE_BINARY_INST(14, FSub , BinaryOperator)
+HANDLE_BINARY_INST(15, Mul , BinaryOperator)
+HANDLE_BINARY_INST(16, FMul , BinaryOperator)
+HANDLE_BINARY_INST(17, UDiv , BinaryOperator)
+HANDLE_BINARY_INST(18, SDiv , BinaryOperator)
+HANDLE_BINARY_INST(19, FDiv , BinaryOperator)
+HANDLE_BINARY_INST(20, URem , BinaryOperator)
+HANDLE_BINARY_INST(21, SRem , BinaryOperator)
+HANDLE_BINARY_INST(22, FRem , BinaryOperator)
+
+// Logical operators (integer operands)
+HANDLE_BINARY_INST(23, Shl , BinaryOperator) // Shift left (logical)
+HANDLE_BINARY_INST(24, LShr , BinaryOperator) // Shift right (logical)
+HANDLE_BINARY_INST(25, AShr , BinaryOperator) // Shift right (arithmetic)
+HANDLE_BINARY_INST(26, And , BinaryOperator)
+HANDLE_BINARY_INST(27, Or , BinaryOperator)
+HANDLE_BINARY_INST(28, Xor , BinaryOperator)
+ LAST_BINARY_INST(28)
+
+// Memory operators...
+ FIRST_MEMORY_INST(29)
+HANDLE_MEMORY_INST(29, Alloca, AllocaInst) // Stack management
+HANDLE_MEMORY_INST(30, Load , LoadInst ) // Memory manipulation instrs
+HANDLE_MEMORY_INST(31, Store , StoreInst )
+HANDLE_MEMORY_INST(32, GetElementPtr, GetElementPtrInst)
+HANDLE_MEMORY_INST(33, Fence , FenceInst )
+HANDLE_MEMORY_INST(34, AtomicCmpXchg , AtomicCmpXchgInst )
+HANDLE_MEMORY_INST(35, AtomicRMW , AtomicRMWInst )
+ LAST_MEMORY_INST(35)
+
+// Cast operators ...
+// NOTE: The order matters here because CastInst::isEliminableCastPair
+// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
+ FIRST_CAST_INST(36)
+HANDLE_CAST_INST(36, Trunc , TruncInst ) // Truncate integers
+HANDLE_CAST_INST(37, ZExt , ZExtInst ) // Zero extend integers
+HANDLE_CAST_INST(38, SExt , SExtInst ) // Sign extend integers
+HANDLE_CAST_INST(39, FPToUI , FPToUIInst ) // floating point -> UInt
+HANDLE_CAST_INST(40, FPToSI , FPToSIInst ) // floating point -> SInt
+HANDLE_CAST_INST(41, UIToFP , UIToFPInst ) // UInt -> floating point
+HANDLE_CAST_INST(42, SIToFP , SIToFPInst ) // SInt -> floating point
+HANDLE_CAST_INST(43, FPTrunc , FPTruncInst ) // Truncate floating point
+HANDLE_CAST_INST(44, FPExt , FPExtInst ) // Extend floating point
+HANDLE_CAST_INST(45, PtrToInt, PtrToIntInst) // Pointer -> Integer
+HANDLE_CAST_INST(46, IntToPtr, IntToPtrInst) // Integer -> Pointer
+HANDLE_CAST_INST(47, BitCast , BitCastInst ) // Type cast
+HANDLE_CAST_INST(48, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
+ LAST_CAST_INST(48)
+
+ FIRST_FUNCLETPAD_INST(49)
+HANDLE_FUNCLETPAD_INST(49, CleanupPad, CleanupPadInst)
+HANDLE_FUNCLETPAD_INST(50, CatchPad , CatchPadInst)
+ LAST_FUNCLETPAD_INST(50)
+
+// Other operators...
+ FIRST_OTHER_INST(51)
+HANDLE_OTHER_INST(51, ICmp , ICmpInst ) // Integer comparison instruction
+HANDLE_OTHER_INST(52, FCmp , FCmpInst ) // Floating point comparison instr.
+HANDLE_OTHER_INST(53, PHI , PHINode ) // PHI node instruction
+HANDLE_OTHER_INST(54, Call , CallInst ) // Call a function
+HANDLE_OTHER_INST(55, Select , SelectInst ) // select instruction
+HANDLE_OTHER_INST(56, UserOp1, Instruction) // May be used internally in a pass
+HANDLE_OTHER_INST(57, UserOp2, Instruction) // Internal to passes only
+HANDLE_OTHER_INST(58, VAArg , VAArgInst ) // vaarg instruction
+HANDLE_OTHER_INST(59, ExtractElement, ExtractElementInst)// extract from vector
+HANDLE_OTHER_INST(60, InsertElement, InsertElementInst) // insert into vector
+HANDLE_OTHER_INST(61, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
+HANDLE_OTHER_INST(62, ExtractValue, ExtractValueInst)// extract from aggregate
+HANDLE_OTHER_INST(63, InsertValue, InsertValueInst) // insert into aggregate
+HANDLE_OTHER_INST(64, LandingPad, LandingPadInst) // Landing pad instruction.
+ LAST_OTHER_INST(64)
+
+#undef FIRST_TERM_INST
+#undef HANDLE_TERM_INST
+#undef LAST_TERM_INST
+
+#undef FIRST_BINARY_INST
+#undef HANDLE_BINARY_INST
+#undef LAST_BINARY_INST
+
+#undef FIRST_MEMORY_INST
+#undef HANDLE_MEMORY_INST
+#undef LAST_MEMORY_INST
+
+#undef FIRST_CAST_INST
+#undef HANDLE_CAST_INST
+#undef LAST_CAST_INST
+
+#undef FIRST_FUNCLETPAD_INST
+#undef HANDLE_FUNCLETPAD_INST
+#undef LAST_FUNCLETPAD_INST
+
+#undef FIRST_OTHER_INST
+#undef HANDLE_OTHER_INST
+#undef LAST_OTHER_INST
+
+#ifdef HANDLE_INST
+#undef HANDLE_INST
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Instruction.h b/contrib/llvm/include/llvm/IR/Instruction.h
new file mode 100644
index 0000000..03c4549
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Instruction.h
@@ -0,0 +1,553 @@
+//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Instruction class, which is the
+// base class for all of the LLVM instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRUCTION_H
+#define LLVM_IR_INSTRUCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/SymbolTableListTraits.h"
+#include "llvm/IR/User.h"
+
+namespace llvm {
+
+class FastMathFlags;
+class LLVMContext;
+class MDNode;
+class BasicBlock;
+struct AAMDNodes;
+
+template <>
+struct SymbolTableListSentinelTraits<Instruction>
+ : public ilist_half_embedded_sentinel_traits<Instruction> {};
+
+class Instruction : public User,
+ public ilist_node_with_parent<Instruction, BasicBlock> {
+ void operator=(const Instruction &) = delete;
+ Instruction(const Instruction &) = delete;
+
+ BasicBlock *Parent;
+ DebugLoc DbgLoc; // 'dbg' Metadata cache.
+
+ enum {
+ /// HasMetadataBit - This is a bit stored in the SubClassData field which
+ /// indicates whether this instruction has metadata attached to it or not.
+ HasMetadataBit = 1 << 15
+ };
+public:
+ // Out of line virtual method, so the vtable, etc has a home.
+ ~Instruction() override;
+
+ /// user_back - Specialize the methods defined in Value, as we know that an
+ /// instruction can only be used by other instructions.
+ Instruction *user_back() { return cast<Instruction>(*user_begin());}
+ const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
+
+ inline const BasicBlock *getParent() const { return Parent; }
+ inline BasicBlock *getParent() { return Parent; }
+
+  /// \brief Return the module owning the function this instruction belongs to,
+  /// or nullptr if the function does not have a module.
+ ///
+ /// Note: this is undefined behavior if the instruction does not have a
+ /// parent, or the parent basic block does not have a parent function.
+ const Module *getModule() const;
+ Module *getModule();
+
+ /// \brief Return the function this instruction belongs to.
+ ///
+ /// Note: it is undefined behavior to call this on an instruction not
+ /// currently inserted into a function.
+ const Function *getFunction() const;
+ Function *getFunction();
+
+ /// removeFromParent - This method unlinks 'this' from the containing basic
+ /// block, but does not delete it.
+ ///
+ void removeFromParent();
+
+ /// eraseFromParent - This method unlinks 'this' from the containing basic
+ /// block and deletes it.
+ ///
+ /// \returns an iterator pointing to the element after the erased one
+ SymbolTableList<Instruction>::iterator eraseFromParent();
+
+ /// Insert an unlinked instruction into a basic block immediately before
+ /// the specified instruction.
+ void insertBefore(Instruction *InsertPos);
+
+ /// Insert an unlinked instruction into a basic block immediately after the
+ /// specified instruction.
+ void insertAfter(Instruction *InsertPos);
+
+ /// moveBefore - Unlink this instruction from its current basic block and
+ /// insert it into the basic block that MovePos lives in, right before
+ /// MovePos.
+ void moveBefore(Instruction *MovePos);
+
+ //===--------------------------------------------------------------------===//
+ // Subclass classification.
+ //===--------------------------------------------------------------------===//
+
+ /// getOpcode() returns a member of one of the enums like Instruction::Add.
+ unsigned getOpcode() const { return getValueID() - InstructionVal; }
+
+ const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
+ bool isTerminator() const { return isTerminator(getOpcode()); }
+ bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
+  bool isShift() const { return isShift(getOpcode()); }
+ bool isCast() const { return isCast(getOpcode()); }
+ bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
+
+ static const char* getOpcodeName(unsigned OpCode);
+
+ static inline bool isTerminator(unsigned OpCode) {
+ return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
+ }
+
+ static inline bool isBinaryOp(unsigned Opcode) {
+ return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
+ }
+
+ /// @brief Determine if the Opcode is one of the shift instructions.
+ static inline bool isShift(unsigned Opcode) {
+ return Opcode >= Shl && Opcode <= AShr;
+ }
+
+ /// isLogicalShift - Return true if this is a logical shift left or a logical
+ /// shift right.
+ inline bool isLogicalShift() const {
+ return getOpcode() == Shl || getOpcode() == LShr;
+ }
+
+ /// isArithmeticShift - Return true if this is an arithmetic shift right.
+ inline bool isArithmeticShift() const {
+ return getOpcode() == AShr;
+ }
+
+ /// @brief Determine if the OpCode is one of the CastInst instructions.
+ static inline bool isCast(unsigned OpCode) {
+ return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
+ }
+
+ /// @brief Determine if the OpCode is one of the FuncletPadInst instructions.
+ static inline bool isFuncletPad(unsigned OpCode) {
+ return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Metadata manipulation.
+ //===--------------------------------------------------------------------===//
+
+ /// hasMetadata() - Return true if this instruction has any metadata attached
+ /// to it.
+ bool hasMetadata() const { return DbgLoc || hasMetadataHashEntry(); }
+
+ /// hasMetadataOtherThanDebugLoc - Return true if this instruction has
+ /// metadata attached to it other than a debug location.
+ bool hasMetadataOtherThanDebugLoc() const {
+ return hasMetadataHashEntry();
+ }
+
+ /// getMetadata - Get the metadata of given kind attached to this Instruction.
+ /// If the metadata is not found then return null.
+ MDNode *getMetadata(unsigned KindID) const {
+ if (!hasMetadata()) return nullptr;
+ return getMetadataImpl(KindID);
+ }
+
+ /// getMetadata - Get the metadata of given kind attached to this Instruction.
+ /// If the metadata is not found then return null.
+ MDNode *getMetadata(StringRef Kind) const {
+ if (!hasMetadata()) return nullptr;
+ return getMetadataImpl(Kind);
+ }
+
+ /// getAllMetadata - Get all metadata attached to this Instruction. The first
+ /// element of each pair returned is the KindID, the second element is the
+ /// metadata value. This list is returned sorted by the KindID.
+ void
+ getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+ if (hasMetadata())
+ getAllMetadataImpl(MDs);
+ }
+
+ /// getAllMetadataOtherThanDebugLoc - This does the same thing as
+ /// getAllMetadata, except that it filters out the debug location.
+ void getAllMetadataOtherThanDebugLoc(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
+ if (hasMetadataOtherThanDebugLoc())
+ getAllMetadataOtherThanDebugLocImpl(MDs);
+ }
+
+ /// getAAMetadata - Fills the AAMDNodes structure with AA metadata from
+ /// this instruction. When Merge is true, the existing AA metadata is
+ /// merged with that from this instruction providing the most-general result.
+ void getAAMetadata(AAMDNodes &N, bool Merge = false) const;
+
+ /// setMetadata - Set the metadata of the specified kind to the specified
+ /// node. This updates/replaces metadata if already present, or removes it if
+ /// Node is null.
+ void setMetadata(unsigned KindID, MDNode *Node);
+ void setMetadata(StringRef Kind, MDNode *Node);
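+
+  // A small sketch of attaching and clearing metadata, assuming Instruction
+  // *I, LLVMContext &Ctx, and MDNode *MD are in scope; "my.kind" is a
+  // hypothetical metadata kind name:
+  //   unsigned KindID = Ctx.getMDKindID("my.kind");
+  //   I->setMetadata(KindID, MD);        // attach or replace
+  //   MDNode *Found = I->getMetadata(KindID);
+  //   I->setMetadata(KindID, nullptr);   // remove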
+
+ /// Drop all unknown metadata except for debug locations.
+ /// @{
+ /// Passes are required to drop metadata they don't understand. This is a
+ /// convenience method for passes to do so.
+ void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
+ void dropUnknownNonDebugMetadata() {
+ return dropUnknownNonDebugMetadata(None);
+ }
+ void dropUnknownNonDebugMetadata(unsigned ID1) {
+ return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
+ }
+ void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
+ unsigned IDs[] = {ID1, ID2};
+ return dropUnknownNonDebugMetadata(IDs);
+ }
+ /// @}
+
+ /// setAAMetadata - Sets the metadata on this instruction from the
+ /// AAMDNodes structure.
+ void setAAMetadata(const AAMDNodes &N);
+
+ /// setDebugLoc - Set the debug location information for this instruction.
+ void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
+
+ /// getDebugLoc - Return the debug location for this node as a DebugLoc.
+ const DebugLoc &getDebugLoc() const { return DbgLoc; }
+
+ /// Set or clear the unsafe-algebra flag on this instruction, which must be an
+ /// operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasUnsafeAlgebra(bool B);
+
+ /// Set or clear the no-nans flag on this instruction, which must be an
+ /// operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasNoNaNs(bool B);
+
+ /// Set or clear the no-infs flag on this instruction, which must be an
+ /// operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasNoInfs(bool B);
+
+ /// Set or clear the no-signed-zeros flag on this instruction, which must be
+ /// an operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasNoSignedZeros(bool B);
+
+ /// Set or clear the allow-reciprocal flag on this instruction, which must be
+ /// an operator which supports this flag. See LangRef.html for the meaning of
+ /// this flag.
+ void setHasAllowReciprocal(bool B);
+
+ /// Convenience function for setting multiple fast-math flags on this
+ /// instruction, which must be an operator which supports these flags. See
+ /// LangRef.html for the meaning of these flags.
+ void setFastMathFlags(FastMathFlags FMF);
+
+ /// Convenience function for transferring all fast-math flag values to this
+ /// instruction, which must be an operator which supports these flags. See
+ /// LangRef.html for the meaning of these flags.
+ void copyFastMathFlags(FastMathFlags FMF);
+
+ /// Determine whether the unsafe-algebra flag is set.
+ bool hasUnsafeAlgebra() const;
+
+ /// Determine whether the no-NaNs flag is set.
+ bool hasNoNaNs() const;
+
+ /// Determine whether the no-infs flag is set.
+ bool hasNoInfs() const;
+
+ /// Determine whether the no-signed-zeros flag is set.
+ bool hasNoSignedZeros() const;
+
+ /// Determine whether the allow-reciprocal flag is set.
+ bool hasAllowReciprocal() const;
+
+ /// Convenience function for getting all the fast-math flags, which must be an
+ /// operator which supports these flags. See LangRef.html for the meaning of
+ /// these flags.
+ FastMathFlags getFastMathFlags() const;
+
+ /// Copy I's fast-math flags
+ void copyFastMathFlags(const Instruction *I);
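+
+  // Sketch of setting several fast-math flags at once, assuming FPOp is an
+  // Instruction* that is a floating-point operator supporting these flags:
+  //   FastMathFlags FMF;
+  //   FMF.setNoNaNs();
+  //   FMF.setNoInfs();
+  //   FPOp->setFastMathFlags(FMF);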
+
+private:
+ /// hasMetadataHashEntry - Return true if we have an entry in the on-the-side
+ /// metadata hash.
+ bool hasMetadataHashEntry() const {
+ return (getSubclassDataFromValue() & HasMetadataBit) != 0;
+ }
+
+ // These are all implemented in Metadata.cpp.
+ MDNode *getMetadataImpl(unsigned KindID) const;
+ MDNode *getMetadataImpl(StringRef Kind) const;
+ void
+ getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
+ void getAllMetadataOtherThanDebugLocImpl(
+ SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
+ void clearMetadataHashEntries();
+public:
+ //===--------------------------------------------------------------------===//
+ // Predicates and helper methods.
+ //===--------------------------------------------------------------------===//
+
+ /// isAssociative - Return true if the instruction is associative:
+ ///
+ /// Associative operators satisfy: x op (y op z) === (x op y) op z
+ ///
+ /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
+ ///
+ bool isAssociative() const;
+ static bool isAssociative(unsigned op);
+
+ /// isCommutative - Return true if the instruction is commutative:
+ ///
+ /// Commutative operators satisfy: (x op y) === (y op x)
+ ///
+  /// In LLVM, these are the associative operators, plus the eq and ne
+  /// comparison predicates, when applied to any type.
+ ///
+ bool isCommutative() const { return isCommutative(getOpcode()); }
+ static bool isCommutative(unsigned op);
+
+ /// isIdempotent - Return true if the instruction is idempotent:
+ ///
+ /// Idempotent operators satisfy: x op x === x
+ ///
+ /// In LLVM, the And and Or operators are idempotent.
+ ///
+ bool isIdempotent() const { return isIdempotent(getOpcode()); }
+ static bool isIdempotent(unsigned op);
+
+ /// isNilpotent - Return true if the instruction is nilpotent:
+ ///
+ /// Nilpotent operators satisfy: x op x === Id,
+ ///
+ /// where Id is the identity for the operator, i.e. a constant such that
+ /// x op Id === x and Id op x === x for all x.
+ ///
+ /// In LLVM, the Xor operator is nilpotent.
+ ///
+ bool isNilpotent() const { return isNilpotent(getOpcode()); }
+ static bool isNilpotent(unsigned op);
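+
+  // Worked instances of the identities above:
+  //   Instruction::isAssociative(Instruction::Add); // true: (x+y)+z == x+(y+z)
+  //   Instruction::isIdempotent(Instruction::And);  // true: x & x == x
+  //   Instruction::isNilpotent(Instruction::Xor);   // true: x ^ x == 0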
+
+ /// mayWriteToMemory - Return true if this instruction may modify memory.
+ ///
+ bool mayWriteToMemory() const;
+
+ /// mayReadFromMemory - Return true if this instruction may read memory.
+ ///
+ bool mayReadFromMemory() const;
+
+ /// mayReadOrWriteMemory - Return true if this instruction may read or
+ /// write memory.
+ ///
+ bool mayReadOrWriteMemory() const {
+ return mayReadFromMemory() || mayWriteToMemory();
+ }
+
+ /// isAtomic - Return true if this instruction has an
+ /// AtomicOrdering of unordered or higher.
+ ///
+ bool isAtomic() const;
+
+ /// mayThrow - Return true if this instruction may throw an exception.
+ ///
+ bool mayThrow() const;
+
+  /// mayReturn - Return true if this instruction may return control to its
+  /// successor, i.e. execution may continue past it. This is true for all
+  /// normal instructions; the only exception is calls to functions marked
+  /// with the 'noreturn' attribute.
+ ///
+ bool mayReturn() const;
+
+ /// mayHaveSideEffects - Return true if the instruction may have side effects.
+ ///
+ /// Note that this does not consider malloc and alloca to have side
+ /// effects because the newly allocated memory is completely invisible to
+ /// instructions which don't use the returned value. For cases where this
+ /// matters, isSafeToSpeculativelyExecute may be more appropriate.
+ bool mayHaveSideEffects() const {
+ return mayWriteToMemory() || mayThrow() || !mayReturn();
+ }
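+
+  // A conservative "dead if unused" sketch built on these predicates,
+  // assuming a valid Instruction *I:
+  //   bool DeadIfUnused =
+  //       I->use_empty() && !I->mayHaveSideEffects() && !I->isTerminator();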
+
+ /// \brief Return true if the instruction is a variety of EH-block.
+ bool isEHPad() const {
+ switch (getOpcode()) {
+ case Instruction::CatchSwitch:
+ case Instruction::CatchPad:
+ case Instruction::CleanupPad:
+ case Instruction::LandingPad:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /// clone() - Create a copy of 'this' instruction that is identical in all
+ /// ways except the following:
+ /// * The instruction has no parent
+ /// * The instruction has no name
+ ///
+ Instruction *clone() const;
+
+  /// isIdenticalTo - Return true if the specified instruction is exactly
+  /// identical to the current one. This means that all operands match and any
+  /// extra information (e.g. whether a load is volatile) agrees.
+ bool isIdenticalTo(const Instruction *I) const;
+
+ /// isIdenticalToWhenDefined - This is like isIdenticalTo, except that it
+ /// ignores the SubclassOptionalData flags, which specify conditions
+ /// under which the instruction's result is undefined.
+ bool isIdenticalToWhenDefined(const Instruction *I) const;
+
+ /// When checking for operation equivalence (using isSameOperationAs) it is
+ /// sometimes useful to ignore certain attributes.
+ enum OperationEquivalenceFlags {
+ /// Check for equivalence ignoring load/store alignment.
+ CompareIgnoringAlignment = 1<<0,
+ /// Check for equivalence treating a type and a vector of that type
+ /// as equivalent.
+ CompareUsingScalarTypes = 1<<1
+ };
+
+ /// This function determines if the specified instruction executes the same
+ /// operation as the current one. This means that the opcodes, type, operand
+ /// types and any other factors affecting the operation must be the same. This
+ /// is similar to isIdenticalTo except the operands themselves don't have to
+ /// be identical.
+ /// @returns true if the specified instruction is the same operation as
+ /// the current one.
+ /// @brief Determine if one instruction is the same operation as another.
+ bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
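+
+  // For instance, two loads that differ only in alignment compare equal when
+  // alignment is ignored (L1 and L2 assumed to be LoadInst*):
+  //   bool Same =
+  //       L1->isSameOperationAs(L2, Instruction::CompareIgnoringAlignment);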
+
+ /// isUsedOutsideOfBlock - Return true if there are any uses of this
+ /// instruction in blocks other than the specified block. Note that PHI nodes
+ /// are considered to evaluate their operands in the corresponding predecessor
+ /// block.
+ bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
+
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return V->getValueID() >= Value::InstructionVal;
+ }
+
+ //----------------------------------------------------------------------
+ // Exported enumerations.
+ //
+ enum TermOps { // These terminate basic blocks
+#define FIRST_TERM_INST(N) TermOpsBegin = N,
+#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
+#define LAST_TERM_INST(N) TermOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
+ enum BinaryOps {
+#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
+#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
+#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
+ enum MemoryOps {
+#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N,
+#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
+#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
+ enum CastOps {
+#define FIRST_CAST_INST(N) CastOpsBegin = N,
+#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
+#define LAST_CAST_INST(N) CastOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
+ enum FuncletPadOps {
+#define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N,
+#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
+#define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+
+ enum OtherOps {
+#define FIRST_OTHER_INST(N) OtherOpsBegin = N,
+#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
+#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
+#include "llvm/IR/Instruction.def"
+ };
+private:
+ // Shadow Value::setValueSubclassData with a private forwarding method so that
+ // subclasses cannot accidentally use it.
+ void setValueSubclassData(unsigned short D) {
+ Value::setValueSubclassData(D);
+ }
+ unsigned short getSubclassDataFromValue() const {
+ return Value::getSubclassDataFromValue();
+ }
+
+ void setHasMetadataHashEntry(bool V) {
+ setValueSubclassData((getSubclassDataFromValue() & ~HasMetadataBit) |
+ (V ? HasMetadataBit : 0));
+ }
+
+ friend class SymbolTableListTraits<Instruction>;
+ void setParent(BasicBlock *P);
+protected:
+  // Instruction subclasses can stick up to 15 bits of stuff into the
+  // SubclassData field of the instruction with these members.
+
+ // Verify that only the low 15 bits are used.
+ void setInstructionSubclassData(unsigned short D) {
+ assert((D & HasMetadataBit) == 0 && "Out of range value put into field");
+ setValueSubclassData((getSubclassDataFromValue() & HasMetadataBit) | D);
+ }
+
+ unsigned getSubclassDataFromInstruction() const {
+ return getSubclassDataFromValue() & ~HasMetadataBit;
+ }
+
+ Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+ Instruction *InsertBefore = nullptr);
+ Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
+ BasicBlock *InsertAtEnd);
+
+private:
+ /// Create a copy of this instruction.
+ Instruction *cloneImpl() const;
+};
+
+// Instruction* is only 4-byte aligned.
+template<>
+class PointerLikeTypeTraits<Instruction*> {
+ typedef Instruction* PT;
+public:
+ static inline void *getAsVoidPointer(PT P) { return P; }
+ static inline PT getFromVoidPointer(void *P) {
+ return static_cast<PT>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Instructions.h b/contrib/llvm/include/llvm/IR/Instructions.h
new file mode 100644
index 0000000..d781c7a
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Instructions.h
@@ -0,0 +1,4806 @@
+//===-- llvm/Instructions.h - Instruction subclass definitions --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes the class definitions of all of the subclasses of the
+// Instruction class. This is meant to be an easy way to get access to all
+// instruction subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INSTRUCTIONS_H
+#define LLVM_IR_INSTRUCTIONS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <iterator>
+
+namespace llvm {
+
+class APInt;
+class ConstantInt;
+class ConstantRange;
+class DataLayout;
+class LLVMContext;
+
+enum AtomicOrdering {
+ NotAtomic = 0,
+ Unordered = 1,
+ Monotonic = 2,
+ // Consume = 3, // Not specified yet.
+ Acquire = 4,
+ Release = 5,
+ AcquireRelease = 6,
+ SequentiallyConsistent = 7
+};
+
+enum SynchronizationScope {
+ SingleThread = 0,
+ CrossThread = 1
+};
+
+/// Returns true if the ordering is at least as strong as acquire
+/// (i.e. acquire, acq_rel or seq_cst)
+inline bool isAtLeastAcquire(AtomicOrdering Ord) {
+ return (Ord == Acquire ||
+ Ord == AcquireRelease ||
+ Ord == SequentiallyConsistent);
+}
+
+/// Returns true if the ordering is at least as strong as release
+/// (i.e. release, acq_rel or seq_cst)
+inline bool isAtLeastRelease(AtomicOrdering Ord) {
+  return (Ord == Release ||
+          Ord == AcquireRelease ||
+          Ord == SequentiallyConsistent);
+}
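+
+// For example, isAtLeastAcquire(Acquire) and isAtLeastRelease(AcquireRelease)
+// hold, while isAtLeastAcquire(Release) and isAtLeastRelease(Acquire) do not.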
+
+//===----------------------------------------------------------------------===//
+// AllocaInst Class
+//===----------------------------------------------------------------------===//
+
+/// AllocaInst - an instruction to allocate memory on the stack
+///
+class AllocaInst : public UnaryInstruction {
+ Type *AllocatedType;
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ AllocaInst *cloneImpl() const;
+
+public:
+ explicit AllocaInst(Type *Ty, Value *ArraySize = nullptr,
+ const Twine &Name = "",
+ Instruction *InsertBefore = nullptr);
+ AllocaInst(Type *Ty, Value *ArraySize,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ AllocaInst(Type *Ty, const Twine &Name, Instruction *InsertBefore = nullptr);
+ AllocaInst(Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
+
+ AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
+ const Twine &Name = "", Instruction *InsertBefore = nullptr);
+ AllocaInst(Type *Ty, Value *ArraySize, unsigned Align,
+ const Twine &Name, BasicBlock *InsertAtEnd);
+
+ // Out of line virtual method, so the vtable, etc. has a home.
+ ~AllocaInst() override;
+
+ /// isArrayAllocation - Return true if there is an allocation size parameter
+ /// to the allocation instruction that is not 1.
+ ///
+ bool isArrayAllocation() const;
+
+ /// getArraySize - Get the number of elements allocated. For a simple
+ /// allocation of a single element, this will return a constant 1 value.
+ ///
+ const Value *getArraySize() const { return getOperand(0); }
+ Value *getArraySize() { return getOperand(0); }
+
+ /// getType - Overload to return most specific pointer type
+ ///
+ PointerType *getType() const {
+ return cast<PointerType>(Instruction::getType());
+ }
+
+ /// getAllocatedType - Return the type that is being allocated by the
+ /// instruction.
+ ///
+ Type *getAllocatedType() const { return AllocatedType; }
+  /// \brief For use only in special circumstances that need to generically
+  /// transform a whole instruction (e.g. IR linking and vectorization).
+ void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
+
+ /// getAlignment - Return the alignment of the memory that is being allocated
+ /// by the instruction.
+ ///
+ unsigned getAlignment() const {
+ return (1u << (getSubclassDataFromInstruction() & 31)) >> 1;
+ }
+ void setAlignment(unsigned Align);
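+  // The alignment is encoded as Log2(Align)+1 in five subclass-data bits, so
+  // a requested alignment of 8 is stored as 4 and decoded above as
+  // (1u << 4) >> 1 == 8; a stored 0 decodes to 0, meaning "unspecified".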
+
+ /// isStaticAlloca - Return true if this alloca is in the entry block of the
+ /// function and is a constant size. If so, the code generator will fold it
+ /// into the prolog/epilog code, so it is basically free.
+ bool isStaticAlloca() const;
+
+ /// \brief Return true if this alloca is used as an inalloca argument to a
+ /// call. Such allocas are never considered static even if they are in the
+ /// entry block.
+ bool isUsedWithInAlloca() const {
+ return getSubclassDataFromInstruction() & 32;
+ }
+
+ /// \brief Specify whether this alloca is used to represent the arguments to
+ /// a call.
+ void setUsedWithInAlloca(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~32) |
+ (V ? 32 : 0));
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::Alloca);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// LoadInst Class
+//===----------------------------------------------------------------------===//
+
+/// LoadInst - an instruction for reading from memory. This uses the
+/// SubclassData field in Value to store whether or not the load is volatile.
+///
+class LoadInst : public UnaryInstruction {
+ void AssertOK();
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ LoadInst *cloneImpl() const;
+
+public:
+ LoadInst(Value *Ptr, const Twine &NameStr, Instruction *InsertBefore);
+ LoadInst(Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+ Instruction *InsertBefore = nullptr);
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+ NameStr, isVolatile, InsertBefore) {}
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ BasicBlock *InsertAtEnd);
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+ NameStr, isVolatile, Align, InsertBefore) {}
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+ unsigned Align, Instruction *InsertBefore = nullptr);
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ unsigned Align, BasicBlock *InsertAtEnd);
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile, unsigned Align,
+ AtomicOrdering Order, SynchronizationScope SynchScope = CrossThread,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+ NameStr, isVolatile, Align, Order, SynchScope, InsertBefore) {}
+ LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope = CrossThread,
+ Instruction *InsertBefore = nullptr);
+ LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd);
+
+ LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
+ LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
+ LoadInst(Type *Ty, Value *Ptr, const char *NameStr = nullptr,
+ bool isVolatile = false, Instruction *InsertBefore = nullptr);
+ explicit LoadInst(Value *Ptr, const char *NameStr = nullptr,
+ bool isVolatile = false,
+ Instruction *InsertBefore = nullptr)
+ : LoadInst(cast<PointerType>(Ptr->getType())->getElementType(), Ptr,
+ NameStr, isVolatile, InsertBefore) {}
+ LoadInst(Value *Ptr, const char *NameStr, bool isVolatile,
+ BasicBlock *InsertAtEnd);
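+
+  // A minimal creation sketch, assuming Ptr is a pointer-typed Value* and
+  // InsertPt is an existing Instruction* before which the load should go:
+  //   LoadInst *LI = new LoadInst(Ptr, "val", /*isVolatile=*/false, InsertPt);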
+
+ /// isVolatile - Return true if this is a load from a volatile memory
+ /// location.
+ ///
+ bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+
+ /// setVolatile - Specify whether this is a volatile load or not.
+ ///
+ void setVolatile(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ (V ? 1 : 0));
+ }
+
+ /// getAlignment - Return the alignment of the access that is being performed
+ ///
+ unsigned getAlignment() const {
+ return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+ }
+
+ void setAlignment(unsigned Align);
+
+  /// Returns the ordering effect of this load.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+ }
+
+ /// Set the ordering constraint on this load. May not be Release or
+ /// AcquireRelease.
+ void setOrdering(AtomicOrdering Ordering) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
+ (Ordering << 7));
+ }
+
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ }
+
+ /// Specify whether this load is ordered with respect to all
+ /// concurrently executing threads, or only with respect to signal handlers
+ /// executing in the same thread.
+ void setSynchScope(SynchronizationScope xthread) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
+ (xthread << 6));
+ }
+
+ void setAtomic(AtomicOrdering Ordering,
+ SynchronizationScope SynchScope = CrossThread) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+ }
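+
+  // For example, turning an existing load into an acquire load (LI assumed
+  // to be a suitably aligned LoadInst*):
+  //   LI->setAtomic(Acquire);   // scope defaults to CrossThread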
+
+ bool isSimple() const { return !isAtomic() && !isVolatile(); }
+ bool isUnordered() const {
+ return getOrdering() <= Unordered && !isVolatile();
+ }
+
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Load;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// StoreInst Class
+//===----------------------------------------------------------------------===//
+
+/// StoreInst - an instruction for storing to memory
+///
+class StoreInst : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+ void AssertOK();
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ StoreInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
+ StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
+ Instruction *InsertBefore = nullptr);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+ unsigned Align, Instruction *InsertBefore = nullptr);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+ unsigned Align, BasicBlock *InsertAtEnd);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope = CrossThread,
+ Instruction *InsertBefore = nullptr);
+ StoreInst(Value *Val, Value *Ptr, bool isVolatile,
+ unsigned Align, AtomicOrdering Order,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd);
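+
+  // A minimal creation sketch, assuming Val is the Value* to store, Ptr is a
+  // pointer to Val's type, and InsertPt is an existing Instruction*:
+  //   StoreInst *SI = new StoreInst(Val, Ptr, /*isVolatile=*/false, InsertPt);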
+
+ /// isVolatile - Return true if this is a store to a volatile memory
+ /// location.
+ ///
+ bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
+
+ /// setVolatile - Specify whether this is a volatile store or not.
+ ///
+ void setVolatile(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ (V ? 1 : 0));
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// getAlignment - Return the alignment of the access that is being performed
+ ///
+ unsigned getAlignment() const {
+ return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
+ }
+
+ void setAlignment(unsigned Align);
+
+ /// Returns the ordering effect of this store.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
+ }
+
+ /// Set the ordering constraint on this store. May not be Acquire or
+ /// AcquireRelease.
+ void setOrdering(AtomicOrdering Ordering) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
+ (Ordering << 7));
+ }
+
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
+ }
+
+ /// Specify whether this store instruction is ordered with respect to all
+ /// concurrently executing threads, or only with respect to signal handlers
+ /// executing in the same thread.
+ void setSynchScope(SynchronizationScope xthread) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
+ (xthread << 6));
+ }
+
+ void setAtomic(AtomicOrdering Ordering,
+ SynchronizationScope SynchScope = CrossThread) {
+ setOrdering(Ordering);
+ setSynchScope(SynchScope);
+ }
+
+ bool isSimple() const { return !isAtomic() && !isVolatile(); }
+ bool isUnordered() const {
+ return getOrdering() <= Unordered && !isVolatile();
+ }
+
+ Value *getValueOperand() { return getOperand(0); }
+ const Value *getValueOperand() const { return getOperand(0); }
+
+ Value *getPointerOperand() { return getOperand(1); }
+ const Value *getPointerOperand() const { return getOperand(1); }
+ static unsigned getPointerOperandIndex() { return 1U; }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Store;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
+
+//===----------------------------------------------------------------------===//
+// FenceInst Class
+//===----------------------------------------------------------------------===//
+
+/// FenceInst - an instruction for ordering other memory operations
+///
+class FenceInst : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+ void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ FenceInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+
+ // Ordering may only be Acquire, Release, AcquireRelease, or
+ // SequentiallyConsistent.
+ FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope = CrossThread,
+ Instruction *InsertBefore = nullptr);
+ FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd);
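+
+  // For example, a full sequentially consistent barrier, assuming Ctx is the
+  // active LLVMContext and InsertPt is an existing Instruction*:
+  //   FenceInst *FI =
+  //       new FenceInst(Ctx, SequentiallyConsistent, CrossThread, InsertPt);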
+
+ /// Returns the ordering effect of this fence.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
+ }
+
+ /// Set the ordering constraint on this fence. May only be Acquire, Release,
+ /// AcquireRelease, or SequentiallyConsistent.
+ void setOrdering(AtomicOrdering Ordering) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
+ (Ordering << 1));
+ }
+
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope(getSubclassDataFromInstruction() & 1);
+ }
+
+ /// Specify whether this fence orders other operations with respect to all
+ /// concurrently executing threads, or only with respect to signal handlers
+ /// executing in the same thread.
+ void setSynchScope(SynchronizationScope xthread) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ xthread);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Fence;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgInst Class
+//===----------------------------------------------------------------------===//
+
+/// AtomicCmpXchgInst - an instruction that atomically checks whether a
+/// specified value is in a memory location, and, if it is, stores a new value
+/// there. Returns a pair of the value that was loaded and an i1 flag
+/// indicating whether the exchange succeeded.
+///
+class AtomicCmpXchgInst : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+ void Init(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ AtomicCmpXchgInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
+ Instruction *InsertBefore = nullptr);
+ AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
+ AtomicOrdering SuccessOrdering,
+ AtomicOrdering FailureOrdering,
+ SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd);
+
+ /// isVolatile - Return true if this is a cmpxchg from a volatile memory
+ /// location.
+ ///
+ bool isVolatile() const {
+ return getSubclassDataFromInstruction() & 1;
+ }
+
+ /// setVolatile - Specify whether this is a volatile cmpxchg.
+ ///
+ void setVolatile(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ (unsigned)V);
+ }
+
+ /// Return true if this cmpxchg may spuriously fail.
+ bool isWeak() const {
+ return getSubclassDataFromInstruction() & 0x100;
+ }
+
+ void setWeak(bool IsWeak) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x100) |
+ (IsWeak << 8));
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Set the ordering constraint on this cmpxchg.
+ void setSuccessOrdering(AtomicOrdering Ordering) {
+ assert(Ordering != NotAtomic &&
+ "CmpXchg instructions can only be atomic.");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
+ (Ordering << 2));
+ }
+
+ void setFailureOrdering(AtomicOrdering Ordering) {
+ assert(Ordering != NotAtomic &&
+ "CmpXchg instructions can only be atomic.");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
+ (Ordering << 5));
+ }
+
+ /// Specify whether this cmpxchg is atomic and orders other operations with
+ /// respect to all concurrently executing threads, or only with respect to
+ /// signal handlers executing in the same thread.
+ void setSynchScope(SynchronizationScope SynchScope) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
+ (SynchScope << 1));
+ }
+
+ /// Returns the ordering constraint on this cmpxchg.
+ AtomicOrdering getSuccessOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Returns the ordering constraint on this cmpxchg.
+ AtomicOrdering getFailureOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 5) & 7);
+ }
+
+ /// Returns whether this cmpxchg is atomic between threads or only within a
+ /// single thread.
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ }
+
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ Value *getCompareOperand() { return getOperand(1); }
+ const Value *getCompareOperand() const { return getOperand(1); }
+
+ Value *getNewValOperand() { return getOperand(2); }
+ const Value *getNewValOperand() const { return getOperand(2); }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ /// \brief Returns the strongest permitted ordering on failure, given the
+ /// desired ordering on success.
+ ///
+ /// If the comparison in a cmpxchg operation fails, there is no atomic store
+ /// so release semantics cannot be provided. So this function drops explicit
+ /// Release requests from the AtomicOrdering. A SequentiallyConsistent
+ /// operation would remain SequentiallyConsistent.
+ static AtomicOrdering
+ getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
+ switch (SuccessOrdering) {
+ default: llvm_unreachable("invalid cmpxchg success ordering");
+ case Release:
+ case Monotonic:
+ return Monotonic;
+ case AcquireRelease:
+ case Acquire:
+ return Acquire;
+ case SequentiallyConsistent:
+ return SequentiallyConsistent;
+ }
+ }
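+
+  // For example, getStrongestFailureOrdering(AcquireRelease) yields Acquire
+  // and getStrongestFailureOrdering(Release) yields Monotonic: the failure
+  // path performs no store, so it cannot carry release semantics.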
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::AtomicCmpXchg;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<AtomicCmpXchgInst> :
+ public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWInst Class
+//===----------------------------------------------------------------------===//
+
+/// AtomicRMWInst - an instruction that atomically reads a memory location,
+/// combines it with another value, and then stores the result back. Returns
+/// the old value.
+///
+class AtomicRMWInst : public Instruction {
+ void *operator new(size_t, unsigned) = delete;
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ AtomicRMWInst *cloneImpl() const;
+
+public:
+ /// This enumeration lists the possible modifications atomicrmw can make. In
+ /// the descriptions, 'p' is the pointer to the instruction's memory location,
+ /// 'old' is the initial value of *p, and 'v' is the other value passed to the
+ /// instruction. These instructions always return 'old'.
+ enum BinOp {
+ /// *p = v
+ Xchg,
+ /// *p = old + v
+ Add,
+ /// *p = old - v
+ Sub,
+ /// *p = old & v
+ And,
+ /// *p = ~(old & v)
+ Nand,
+ /// *p = old | v
+ Or,
+ /// *p = old ^ v
+ Xor,
+ /// *p = old >signed v ? old : v
+ Max,
+ /// *p = old <signed v ? old : v
+ Min,
+ /// *p = old >unsigned v ? old : v
+ UMax,
+ /// *p = old <unsigned v ? old : v
+ UMin,
+
+ FIRST_BINOP = Xchg,
+ LAST_BINOP = UMin,
+ BAD_BINOP
+ };
+
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ Instruction *InsertBefore = nullptr);
+ AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope,
+ BasicBlock *InsertAtEnd);
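+
+  // For example, an atomic fetch-and-add, assuming Ptr points at an integer,
+  // Val has that integer type, and InsertPt is an existing Instruction*:
+  //   AtomicRMWInst *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val,
+  //                                          SequentiallyConsistent,
+  //                                          CrossThread, InsertPt);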
+
+ BinOp getOperation() const {
+ return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
+ }
+
+ void setOperation(BinOp Operation) {
+ unsigned short SubclassData = getSubclassDataFromInstruction();
+ setInstructionSubclassData((SubclassData & 31) |
+ (Operation << 5));
+ }
+
+ /// isVolatile - Return true if this is a RMW on a volatile memory location.
+ ///
+ bool isVolatile() const {
+ return getSubclassDataFromInstruction() & 1;
+ }
+
+ /// setVolatile - Specify whether this is a volatile RMW or not.
+ ///
+ void setVolatile(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ (unsigned)V);
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Set the ordering constraint on this RMW.
+ void setOrdering(AtomicOrdering Ordering) {
+ assert(Ordering != NotAtomic &&
+ "atomicrmw instructions can only be atomic.");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
+ (Ordering << 2));
+ }
+
+ /// Specify whether this RMW orders other operations with respect to all
+ /// concurrently executing threads, or only with respect to signal handlers
+ /// executing in the same thread.
+ void setSynchScope(SynchronizationScope SynchScope) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~2) |
+ (SynchScope << 1));
+ }
+
+ /// Returns the ordering constraint on this RMW.
+ AtomicOrdering getOrdering() const {
+ return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
+ }
+
+ /// Returns whether this RMW is atomic between threads or only within a
+ /// single thread.
+ SynchronizationScope getSynchScope() const {
+ return SynchronizationScope((getSubclassDataFromInstruction() & 2) >> 1);
+ }
+
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ Value *getValOperand() { return getOperand(1); }
+ const Value *getValOperand() const { return getOperand(1); }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::AtomicRMW;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ void Init(BinOp Operation, Value *Ptr, Value *Val,
+ AtomicOrdering Ordering, SynchronizationScope SynchScope);
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<AtomicRMWInst>
+ : public FixedNumOperandTraits<AtomicRMWInst,2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
+
+//===----------------------------------------------------------------------===//
+// GetElementPtrInst Class
+//===----------------------------------------------------------------------===//
+
+// checkGEPType - Simple wrapper function to give a better assertion failure
+// message on bad indexes for a gep instruction.
+//
+inline Type *checkGEPType(Type *Ty) {
+ assert(Ty && "Invalid GetElementPtrInst indices for type!");
+ return Ty;
+}
+
+/// GetElementPtrInst - an instruction for type-safe pointer arithmetic to
+/// access elements of arrays and structs
+///
+class GetElementPtrInst : public Instruction {
+ Type *SourceElementType;
+ Type *ResultElementType;
+
+ void anchor() override;
+
+ GetElementPtrInst(const GetElementPtrInst &GEPI);
+ void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
+
+  /// Constructors - Create a getelementptr instruction with a base pointer
+  /// and a list of indices. The first ctor can optionally insert before an
+  /// existing instruction; the second appends the new instruction to the
+  /// specified BasicBlock.
+ inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore);
+ inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ GetElementPtrInst *cloneImpl() const;
+
+public:
+ static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ unsigned Values = 1 + unsigned(IdxList.size());
+ if (!PointeeType)
+ PointeeType =
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
+ else
+ assert(
+ PointeeType ==
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
+ return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
+ NameStr, InsertBefore);
+ }
+ static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ unsigned Values = 1 + unsigned(IdxList.size());
+ if (!PointeeType)
+ PointeeType =
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
+ else
+ assert(
+ PointeeType ==
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType());
+ return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
+ NameStr, InsertAtEnd);
+ }
+
+ /// Create an "inbounds" getelementptr. See the documentation for the
+ /// "inbounds" flag in LangRef.html for details.
+ static GetElementPtrInst *CreateInBounds(Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr){
+ return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
+ }
+ static GetElementPtrInst *
+ CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ GetElementPtrInst *GEP =
+ Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
+ GEP->setIsInBounds(true);
+ return GEP;
+ }
+ static GetElementPtrInst *CreateInBounds(Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
+ }
+ static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ GetElementPtrInst *GEP =
+ Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
+ GEP->setIsInBounds(true);
+ return GEP;
+ }
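+
+  // A minimal creation sketch, assuming Ptr points at an aggregate in memory,
+  // Zero and Idx are integer-typed index Values, and InsertPt is an existing
+  // Instruction*:
+  //   Value *Idxs[] = {Zero, Idx};
+  //   GetElementPtrInst *GEP =
+  //       GetElementPtrInst::CreateInBounds(Ptr, Idxs, "elt", InsertPt);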
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // getType - Overload to return most specific sequential type.
+ SequentialType *getType() const {
+ return cast<SequentialType>(Instruction::getType());
+ }
+
+ Type *getSourceElementType() const { return SourceElementType; }
+
+ void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
+ void setResultElementType(Type *Ty) { ResultElementType = Ty; }
+
+ Type *getResultElementType() const {
+ assert(ResultElementType ==
+ cast<PointerType>(getType()->getScalarType())->getElementType());
+ return ResultElementType;
+ }
+
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ // Note that this is always the same as the pointer operand's address space
+ // and that is cheaper to compute, so cheat here.
+ return getPointerAddressSpace();
+ }
+
+ /// getIndexedType - Returns the type of the element that would be loaded with
+ /// a load instruction with the specified parameters.
+ ///
+ /// Null is returned if the indices are invalid for the specified
+ /// pointer type.
+ ///
+ static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
+ static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
+ static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
+
+ inline op_iterator idx_begin() { return op_begin()+1; }
+ inline const_op_iterator idx_begin() const { return op_begin()+1; }
+ inline op_iterator idx_end() { return op_end(); }
+ inline const_op_iterator idx_end() const { return op_end(); }
+
+ Value *getPointerOperand() {
+ return getOperand(0);
+ }
+ const Value *getPointerOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getPointerOperandIndex() {
+ return 0U; // get index for modifying correct operand.
+ }
+
+  /// getPointerOperandType - Method to return the type of the pointer
+  /// operand, which may be a pointer or a vector of pointers.
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
+ }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperandType()->getPointerAddressSpace();
+ }
+
+ /// GetGEPReturnType - Returns the pointer type returned by the GEP
+ /// instruction, which may be a vector of pointers.
+ static Type *getGEPReturnType(Value *Ptr, ArrayRef<Value *> IdxList) {
+ return getGEPReturnType(
+ cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(),
+ Ptr, IdxList);
+ }
+ static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
+ ArrayRef<Value *> IdxList) {
+ Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
+ Ptr->getType()->getPointerAddressSpace());
+ // Vector GEP
+ if (Ptr->getType()->isVectorTy()) {
+ unsigned NumElem = Ptr->getType()->getVectorNumElements();
+ return VectorType::get(PtrTy, NumElem);
+ }
+ for (Value *Index : IdxList)
+ if (Index->getType()->isVectorTy()) {
+ unsigned NumElem = Index->getType()->getVectorNumElements();
+ return VectorType::get(PtrTy, NumElem);
+ }
+ // Scalar GEP
+ return PtrTy;
+ }
+
+ unsigned getNumIndices() const { // Note: always non-negative
+ return getNumOperands() - 1;
+ }
+
+ bool hasIndices() const {
+ return getNumOperands() > 1;
+ }
+
+ /// hasAllZeroIndices - Return true if all of the indices of this GEP are
+ /// zeros. If so, the result pointer and the first operand have the same
+ /// value, just potentially different types.
+ bool hasAllZeroIndices() const;
+
+ /// hasAllConstantIndices - Return true if all of the indices of this GEP are
+ /// constant integers. If so, the result pointer and the first operand have
+ /// a constant offset between them.
+ bool hasAllConstantIndices() const;
+
+ /// setIsInBounds - Set or clear the inbounds flag on this GEP instruction.
+ /// See LangRef.html for the meaning of inbounds on a getelementptr.
+ void setIsInBounds(bool b = true);
+
+ /// isInBounds - Determine whether the GEP has the inbounds flag.
+ bool isInBounds() const;
+
+ /// \brief Accumulate the constant address offset of this GEP if possible.
+ ///
+ /// This routine accepts an APInt into which it will accumulate the constant
+ /// offset of this GEP if the GEP is in fact constant. If the GEP is not
+ /// all-constant, it returns false and the value of the offset APInt is
+ /// undefined (it is *not* preserved!). The APInt passed into this routine
+ /// must be at least as wide as the IntPtr type for the address space of
+ /// the base GEP pointer.
+ bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
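+
+  // Usage sketch, assuming DL is the module's DataLayout and GEP is a
+  // GetElementPtrInst* whose indices are all constant:
+  //   APInt Offset(DL.getPointerSizeInBits(GEP->getPointerAddressSpace()), 0);
+  //   if (GEP->accumulateConstantOffset(DL, Offset))
+  //     ; // Offset now holds the byte offset from the base pointer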
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::GetElementPtr);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<GetElementPtrInst> :
+ public VariadicOperandTraits<GetElementPtrInst, 1> {
+};
+
+GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList, unsigned Values,
+ const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
+ OperandTraits<GetElementPtrInst>::op_end(this) - Values,
+ Values, InsertBefore),
+ SourceElementType(PointeeType),
+ ResultElementType(getIndexedType(PointeeType, IdxList)) {
+ assert(ResultElementType ==
+ cast<PointerType>(getType()->getScalarType())->getElementType());
+ init(Ptr, IdxList, NameStr);
+}
+GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
+ ArrayRef<Value *> IdxList, unsigned Values,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
+ OperandTraits<GetElementPtrInst>::op_end(this) - Values,
+ Values, InsertAtEnd),
+ SourceElementType(PointeeType),
+ ResultElementType(getIndexedType(PointeeType, IdxList)) {
+ assert(ResultElementType ==
+ cast<PointerType>(getType()->getScalarType())->getElementType());
+ init(Ptr, IdxList, NameStr);
+}
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
+
+//===----------------------------------------------------------------------===//
+// ICmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction compares its operands according to the predicate given
+/// to the constructor. It only operates on integers or pointers. The operands
+/// must have identical types.
+/// \brief Represent an integer comparison operator.
+class ICmpInst: public CmpInst {
+ void anchor() override;
+
+ void AssertOK() {
+ assert(getPredicate() >= CmpInst::FIRST_ICMP_PREDICATE &&
+ getPredicate() <= CmpInst::LAST_ICMP_PREDICATE &&
+ "Invalid ICmp predicate value");
+ assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+ "Both operands to ICmp instruction are not of the same type!");
+ // Check that the operands are the right type
+ assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
+ getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
+ "Invalid operand types for ICmp instruction");
+ }
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical ICmpInst
+ ICmpInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics.
+ ICmpInst(
+ Instruction *InsertBefore, ///< Where to insert
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::ICmp, pred, LHS, RHS, NameStr,
+ InsertBefore) {
+#ifndef NDEBUG
+ AssertOK();
+#endif
+ }
+
+ /// \brief Constructor with insert-at-end semantics.
+ ICmpInst(
+ BasicBlock &InsertAtEnd, ///< Block to insert into.
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::ICmp, pred, LHS, RHS, NameStr,
+ &InsertAtEnd) {
+#ifndef NDEBUG
+ AssertOK();
+#endif
+ }
+
+  /// \brief Constructor with no-insertion semantics.
+ ICmpInst(
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::ICmp, pred, LHS, RHS, NameStr) {
+#ifndef NDEBUG
+ AssertOK();
+#endif
+ }
+
+ /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
+ /// @returns the predicate that would be the result if the operand were
+ /// regarded as signed.
+ /// \brief Return the signed version of the predicate
+ Predicate getSignedPredicate() const {
+ return getSignedPredicate(getPredicate());
+ }
+
+ /// This is a static version that you can use without an instruction.
+ /// \brief Return the signed version of the predicate.
+ static Predicate getSignedPredicate(Predicate pred);
+
+ /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
+ /// @returns the predicate that would be the result if the operand were
+ /// regarded as unsigned.
+ /// \brief Return the unsigned version of the predicate
+ Predicate getUnsignedPredicate() const {
+ return getUnsignedPredicate(getPredicate());
+ }
+
+ /// This is a static version that you can use without an instruction.
+ /// \brief Return the unsigned version of the predicate.
+ static Predicate getUnsignedPredicate(Predicate pred);
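+
+  // For example:
+  //   ICmpInst::getSignedPredicate(ICmpInst::ICMP_ULT);   // yields ICMP_SLT
+  //   ICmpInst::getUnsignedPredicate(ICmpInst::ICMP_SGT); // yields ICMP_UGT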
+
+ /// isEquality - Return true if this predicate is either EQ or NE. This also
+ /// tests for commutativity.
+ static bool isEquality(Predicate P) {
+ return P == ICMP_EQ || P == ICMP_NE;
+ }
+
+ /// isEquality - Return true if this predicate is either EQ or NE. This also
+ /// tests for commutativity.
+ bool isEquality() const {
+ return isEquality(getPredicate());
+ }
+
+ /// @returns true if the predicate of this ICmpInst is commutative
+ /// \brief Determine if this relation is commutative.
+ bool isCommutative() const { return isEquality(); }
+
+ /// isRelational - Return true if the predicate is relational (not EQ or NE).
+ ///
+ bool isRelational() const {
+ return !isEquality();
+ }
+
+ /// isRelational - Return true if the predicate is relational (not EQ or NE).
+ ///
+ static bool isRelational(Predicate P) {
+ return !isEquality(P);
+ }
+
+ /// Initialize a set of values that all satisfy the predicate with C.
+ /// \brief Make a ConstantRange for a relation with a constant value.
+ static ConstantRange makeConstantRange(Predicate pred, const APInt &C);
+
+ /// Exchange the two operands to this instruction in such a way that it does
+ /// not modify the semantics of the instruction. The predicate value may be
+ /// changed to retain the same result if the predicate is order dependent
+ /// (e.g. ult).
+ /// \brief Swap operands and adjust predicate.
+ void swapOperands() {
+ setPredicate(getSwappedPredicate());
+ Op<0>().swap(Op<1>());
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::ICmp;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
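+
+// Illustrative usage sketch (not part of the original header): building an
+// "icmp slt" against zero before an existing instruction, then querying the
+// predicate. 'X', 'Zero', and 'InsertPt' are hypothetical values supplied by
+// the caller.
+//
+//   // %isneg = icmp slt i32 %X, 0
+//   ICmpInst *IsNeg =
+//       new ICmpInst(InsertPt, ICmpInst::ICMP_SLT, X, Zero, "isneg");
+//   CmpInst::Predicate P = IsNeg->getUnsignedPredicate(); // ICMP_ULT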
+
+//===----------------------------------------------------------------------===//
+// FCmpInst Class
+//===----------------------------------------------------------------------===//
+
+/// This instruction compares its operands according to the predicate given
+/// to the constructor. It only operates on floating point values or packed
+/// vectors of floating point values. The operands must be of identical types.
+/// \brief Represents a floating point comparison operator.
+class FCmpInst: public CmpInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical FCmpInst
+ FCmpInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics.
+ FCmpInst(
+ Instruction *InsertBefore, ///< Where to insert
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::FCmp, pred, LHS, RHS, NameStr,
+ InsertBefore) {
+ assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
+ "Invalid FCmp predicate value");
+ assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+ "Both operands to FCmp instruction are not of the same type!");
+ // Check that the operands are the right type
+ assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
+ "Invalid operand types for FCmp instruction");
+ }
+
+ /// \brief Constructor with insert-at-end semantics.
+ FCmpInst(
+ BasicBlock &InsertAtEnd, ///< Block to insert into.
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::FCmp, pred, LHS, RHS, NameStr,
+ &InsertAtEnd) {
+ assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
+ "Invalid FCmp predicate value");
+ assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+ "Both operands to FCmp instruction are not of the same type!");
+ // Check that the operands are the right type
+ assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
+ "Invalid operand types for FCmp instruction");
+ }
+
+  /// \brief Constructor with no-insertion semantics.
+ FCmpInst(
+ Predicate pred, ///< The predicate to use for the comparison
+ Value *LHS, ///< The left-hand-side of the expression
+ Value *RHS, ///< The right-hand-side of the expression
+ const Twine &NameStr = "" ///< Name of the instruction
+ ) : CmpInst(makeCmpResultType(LHS->getType()),
+ Instruction::FCmp, pred, LHS, RHS, NameStr) {
+ assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
+ "Invalid FCmp predicate value");
+ assert(getOperand(0)->getType() == getOperand(1)->getType() &&
+ "Both operands to FCmp instruction are not of the same type!");
+ // Check that the operands are the right type
+ assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
+ "Invalid operand types for FCmp instruction");
+ }
+
+ /// @returns true if the predicate of this instruction is EQ or NE.
+ /// \brief Determine if this is an equality predicate.
+ static bool isEquality(Predicate Pred) {
+ return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
+ Pred == FCMP_UNE;
+ }
+
+ /// @returns true if the predicate of this instruction is EQ or NE.
+ /// \brief Determine if this is an equality predicate.
+ bool isEquality() const { return isEquality(getPredicate()); }
+
+ /// @returns true if the predicate of this instruction is commutative.
+ /// \brief Determine if this is a commutative predicate.
+ bool isCommutative() const {
+ return isEquality() ||
+ getPredicate() == FCMP_FALSE ||
+ getPredicate() == FCMP_TRUE ||
+ getPredicate() == FCMP_ORD ||
+ getPredicate() == FCMP_UNO;
+ }
+
+ /// @returns true if the predicate is relational (not EQ or NE).
+  /// \brief Determine if this is a relational predicate.
+ bool isRelational() const { return !isEquality(); }
+
+ /// Exchange the two operands to this instruction in such a way that it does
+ /// not modify the semantics of the instruction. The predicate value may be
+ /// changed to retain the same result if the predicate is order dependent
+ /// (e.g. ult).
+ /// \brief Swap operands and adjust predicate.
+ void swapOperands() {
+ setPredicate(getSwappedPredicate());
+ Op<0>().swap(Op<1>());
+ }
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::FCmp;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
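+
+// Illustrative usage sketch (not part of the original header): an ordered
+// "fcmp olt" created with no insertion point, to be inserted later by the
+// caller. 'A' and 'B' are hypothetical float-typed values.
+//
+//   // %cmp = fcmp olt float %A, %B
+//   FCmpInst *Cmp = new FCmpInst(FCmpInst::FCMP_OLT, A, B, "cmp");
+//   bool Commutes = Cmp->isCommutative(); // false: OLT is order dependent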
+
+//===----------------------------------------------------------------------===//
+/// CallInst - This class represents a function call, abstracting a target
+/// machine's calling convention. This class uses the low two bits of the
+/// SubClassData field to encode the tail call kind, and the remaining bits
+/// hold the calling convention of the call.
+///
+class CallInst : public Instruction,
+ public OperandBundleUser<CallInst, User::op_iterator> {
+ AttributeSet AttributeList; ///< parameter attributes for call
+ FunctionType *FTy;
+ CallInst(const CallInst &CI);
+ void init(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
+ init(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr);
+ }
+ void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+ void init(Value *Func, const Twine &NameStr);
+
+ /// Construct a CallInst given a range of arguments.
+ /// \brief Construct a CallInst from a range of arguments
+ inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+ Instruction *InsertBefore);
+ inline CallInst(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : CallInst(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr, InsertBefore) {}
+
+ inline CallInst(Value *Func, ArrayRef<Value *> Args, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : CallInst(Func, Args, None, NameStr, InsertBefore) {}
+
+ /// Construct a CallInst given a range of arguments.
+ /// \brief Construct a CallInst from a range of arguments
+ inline CallInst(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+ explicit CallInst(Value *F, const Twine &NameStr,
+ Instruction *InsertBefore);
+ CallInst(Value *F, const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ friend class OperandBundleUser<CallInst, User::op_iterator>;
+ bool hasDescriptor() const { return HasDescriptor; }
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ CallInst *cloneImpl() const;
+
+public:
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, Bundles, NameStr, InsertBefore);
+ }
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr,
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, Args, None, NameStr, InsertBefore);
+ }
+ static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr,
+ Instruction *InsertBefore = nullptr) {
+ return new (unsigned(Args.size() + 1))
+ CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
+ }
+ static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ const unsigned TotalOps =
+ unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+ const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (TotalOps, DescriptorBytes)
+ CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
+ }
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ const unsigned TotalOps =
+ unsigned(Args.size()) + CountBundleInputs(Bundles) + 1;
+ const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (TotalOps, DescriptorBytes)
+ CallInst(Func, Args, Bundles, NameStr, InsertAtEnd);
+ }
+ static CallInst *Create(Value *Func, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return new (unsigned(Args.size() + 1))
+ CallInst(Func, Args, None, NameStr, InsertAtEnd);
+ }
+ static CallInst *Create(Value *F, const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new(1) CallInst(F, NameStr, InsertBefore);
+ }
+ static CallInst *Create(Value *F, const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new(1) CallInst(F, NameStr, InsertAtEnd);
+ }
+
+ /// \brief Create a clone of \p CI with a different set of operand bundles and
+ /// insert it before \p InsertPt.
+ ///
+  /// The returned call instruction is identical to \p CI in every way
+  /// except that the operand bundles for the new instruction are set to the
+  /// operand bundles in \p Bundles.
+ static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertPt = nullptr);
+
+ /// CreateMalloc - Generate the IR for a call to malloc:
+ /// 1. Compute the malloc call's argument as the specified type's size,
+ /// possibly multiplied by the array size if the array size is not
+ /// constant 1.
+ /// 2. Call malloc with that argument.
+ /// 3. Bitcast the result of the malloc call to the specified type.
+ static Instruction *CreateMalloc(Instruction *InsertBefore,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize = nullptr,
+ Function* MallocF = nullptr,
+ const Twine &Name = "");
+ static Instruction *CreateMalloc(BasicBlock *InsertAtEnd,
+ Type *IntPtrTy, Type *AllocTy,
+ Value *AllocSize, Value *ArraySize = nullptr,
+ Function* MallocF = nullptr,
+ const Twine &Name = "");
+ /// CreateFree - Generate the IR for a call to the builtin free function.
+ static Instruction* CreateFree(Value* Source, Instruction *InsertBefore);
+ static Instruction* CreateFree(Value* Source, BasicBlock *InsertAtEnd);
+
+ ~CallInst() override;
+
+ FunctionType *getFunctionType() const { return FTy; }
+
+ void mutateFunctionType(FunctionType *FTy) {
+ mutateType(FTy->getReturnType());
+ this->FTy = FTy;
+ }
+
+ // Note that 'musttail' implies 'tail'.
+ enum TailCallKind { TCK_None = 0, TCK_Tail = 1, TCK_MustTail = 2,
+ TCK_NoTail = 3 };
+ TailCallKind getTailCallKind() const {
+ return TailCallKind(getSubclassDataFromInstruction() & 3);
+ }
+ bool isTailCall() const {
+ unsigned Kind = getSubclassDataFromInstruction() & 3;
+ return Kind == TCK_Tail || Kind == TCK_MustTail;
+ }
+ bool isMustTailCall() const {
+ return (getSubclassDataFromInstruction() & 3) == TCK_MustTail;
+ }
+ bool isNoTailCall() const {
+ return (getSubclassDataFromInstruction() & 3) == TCK_NoTail;
+ }
+ void setTailCall(bool isTC = true) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+ unsigned(isTC ? TCK_Tail : TCK_None));
+ }
+ void setTailCallKind(TailCallKind TCK) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~3) |
+ unsigned(TCK));
+ }
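+
+  // Layout of the instruction subclass data used by the accessors above:
+  // bits 0-1 hold the TailCallKind and bits 2 and up hold the
+  // CallingConv::ID (see getCallingConv/setCallingConv below).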
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// getNumArgOperands - Return the number of call arguments.
+ ///
+ unsigned getNumArgOperands() const {
+ return getNumOperands() - getNumTotalBundleOperands() - 1;
+ }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th call argument.
+ ///
+ Value *getArgOperand(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperand(i);
+ }
+ void setArgOperand(unsigned i, Value *v) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ setOperand(i, v);
+ }
+
+ /// \brief Return the iterator pointing to the beginning of the argument list.
+ op_iterator arg_begin() { return op_begin(); }
+
+ /// \brief Return the iterator pointing to the end of the argument list.
+ op_iterator arg_end() {
+ // [ call args ], [ operand bundles ], callee
+ return op_end() - getNumTotalBundleOperands() - 1;
+  }
+
+ /// \brief Iteration adapter for range-for loops.
+ iterator_range<op_iterator> arg_operands() {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ /// \brief Return the iterator pointing to the beginning of the argument list.
+ const_op_iterator arg_begin() const { return op_begin(); }
+
+ /// \brief Return the iterator pointing to the end of the argument list.
+ const_op_iterator arg_end() const {
+ // [ call args ], [ operand bundles ], callee
+ return op_end() - getNumTotalBundleOperands() - 1;
+  }
+
+ /// \brief Iteration adapter for range-for loops.
+ iterator_range<const_op_iterator> arg_operands() const {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ /// \brief Wrappers for getting the \c Use of a call argument.
+ const Use &getArgOperandUse(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperandUse(i);
+ }
+ Use &getArgOperandUse(unsigned i) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperandUse(i);
+ }
+
+ /// getCallingConv/setCallingConv - Get or set the calling convention of this
+ /// function call.
+ CallingConv::ID getCallingConv() const {
+ return static_cast<CallingConv::ID>(getSubclassDataFromInstruction() >> 2);
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ auto ID = static_cast<unsigned>(CC);
+ assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+ setInstructionSubclassData((getSubclassDataFromInstruction() & 3) |
+ (ID << 2));
+ }
+
+ /// getAttributes - Return the parameter attributes for this call.
+ ///
+ const AttributeSet &getAttributes() const { return AttributeList; }
+
+ /// setAttributes - Set the parameter attributes for this call.
+ ///
+ void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }
+
+ /// addAttribute - adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, Attribute::AttrKind attr);
+
+ /// addAttribute - adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, StringRef Kind, StringRef Value);
+
+ /// removeAttribute - removes the attribute from the list of attributes.
+ void removeAttribute(unsigned i, Attribute attr);
+
+ /// \brief adds the dereferenceable attribute to the list of attributes.
+ void addDereferenceableAttr(unsigned i, uint64_t Bytes);
+
+ /// \brief adds the dereferenceable_or_null attribute to the list of
+ /// attributes.
+ void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
+
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attribute::AttrKind A) const {
+ assert(A != Attribute::NoBuiltin &&
+ "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
+ return hasFnAttrImpl(A);
+ }
+
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(StringRef A) const {
+ return hasFnAttrImpl(A);
+ }
+
+ /// \brief Determine whether the call or the callee has the given attributes.
+ bool paramHasAttr(unsigned i, Attribute::AttrKind A) const;
+
+ /// \brief Return true if the data operand at index \p i has the attribute \p
+ /// A.
+ ///
+ /// Data operands include call arguments and values used in operand bundles,
+ /// but does not include the callee operand. This routine dispatches to the
+ /// underlying AttributeList or the OperandBundleUser as appropriate.
+ ///
+ /// The index \p i is interpreted as
+ ///
+ /// \p i == Attribute::ReturnIndex -> the return value
+ /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
+ /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
+ /// (\p i - 1) in the operand list.
+ bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind A) const;
+
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
+ unsigned getParamAlignment(unsigned i) const {
+ return AttributeList.getParamAlignment(i);
+ }
+
+ /// \brief Extract the number of dereferenceable bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableBytes(unsigned i) const {
+ return AttributeList.getDereferenceableBytes(i);
+ }
+
+ /// \brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+ return AttributeList.getDereferenceableOrNullBytes(i);
+ }
+
+ /// @brief Determine if the parameter or return value is marked with NoAlias
+ /// attribute.
+  /// @param n The parameter to check. 1 is the first parameter, 0 is the
+  /// return value.
+ bool doesNotAlias(unsigned n) const {
+ return AttributeList.hasAttribute(n, Attribute::NoAlias);
+ }
+
+ /// \brief Return true if the call should not be treated as a call to a
+ /// builtin.
+ bool isNoBuiltin() const {
+ return hasFnAttrImpl(Attribute::NoBuiltin) &&
+ !hasFnAttrImpl(Attribute::Builtin);
+ }
+
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
+ }
+
+ /// \brief Return true if the call can return twice
+ bool canReturnTwice() const {
+ return hasFnAttr(Attribute::ReturnsTwice);
+ }
+ void setCanReturnTwice() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ReturnsTwice);
+ }
+
+ /// \brief Determine if the call does not access memory.
+ bool doesNotAccessMemory() const {
+ return hasFnAttr(Attribute::ReadNone);
+ }
+ void setDoesNotAccessMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
+ }
+
+ /// \brief Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ }
+ void setOnlyReadsMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
+ }
+
+  /// @brief Determine if the call can access memory only using pointers based
+ /// on its arguments.
+ bool onlyAccessesArgMemory() const {
+ return hasFnAttr(Attribute::ArgMemOnly);
+ }
+ void setOnlyAccessesArgMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
+ }
+
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
+ }
+
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
+ }
+
+ /// \brief Determine if the call cannot be duplicated.
+ bool cannotDuplicate() const {return hasFnAttr(Attribute::NoDuplicate); }
+ void setCannotDuplicate() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
+ }
+
+ /// \brief Determine if the call is convergent
+ bool isConvergent() const { return hasFnAttr(Attribute::Convergent); }
+ void setConvergent() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::Convergent);
+ }
+
+  /// \brief Determine if the call returns a structure through its first
+  /// pointer argument.
+ bool hasStructRetAttr() const {
+ if (getNumArgOperands() == 0)
+ return false;
+
+ // Be friendly and also check the callee.
+ return paramHasAttr(1, Attribute::StructRet);
+ }
+
+ /// \brief Determine if any call argument is an aggregate passed by value.
+ bool hasByValArgument() const {
+ return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ }
+
+ /// getCalledFunction - Return the function called, or null if this is an
+ /// indirect function invocation.
+ ///
+ Function *getCalledFunction() const {
+ return dyn_cast<Function>(Op<-1>());
+ }
+
+ /// getCalledValue - Get a pointer to the function that is invoked by this
+ /// instruction.
+ const Value *getCalledValue() const { return Op<-1>(); }
+ Value *getCalledValue() { return Op<-1>(); }
+
+ /// setCalledFunction - Set the function called.
+ void setCalledFunction(Value* Fn) {
+ setCalledFunction(
+ cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
+ Fn);
+ }
+ void setCalledFunction(FunctionType *FTy, Value *Fn) {
+ this->FTy = FTy;
+ assert(FTy == cast<FunctionType>(
+ cast<PointerType>(Fn->getType())->getElementType()));
+ Op<-1>() = Fn;
+ }
+
+ /// isInlineAsm - Check if this call is an inline asm statement.
+ bool isInlineAsm() const {
+ return isa<InlineAsm>(Op<-1>());
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Call;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ template <typename AttrKind> bool hasFnAttrImpl(AttrKind A) const {
+ if (AttributeList.hasAttribute(AttributeSet::FunctionIndex, A))
+ return true;
+
+ // Operand bundles override attributes on the called function, but don't
+ // override attributes directly present on the call instruction.
+ if (isFnAttrDisallowedByOpBundle(A))
+ return false;
+
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, A);
+ return false;
+ }
+
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<CallInst> : public VariadicOperandTraits<CallInst, 1> {
+};
+
+CallInst::CallInst(Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(
+ cast<FunctionType>(cast<PointerType>(Func->getType())
+ ->getElementType())->getReturnType(),
+ Instruction::Call, OperandTraits<CallInst>::op_end(this) -
+ (Args.size() + CountBundleInputs(Bundles) + 1),
+ unsigned(Args.size() + CountBundleInputs(Bundles) + 1), InsertAtEnd) {
+ init(Func, Args, Bundles, NameStr);
+}
+
+CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(Ty->getReturnType(), Instruction::Call,
+ OperandTraits<CallInst>::op_end(this) -
+ (Args.size() + CountBundleInputs(Bundles) + 1),
+ unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
+ InsertBefore) {
+ init(Ty, Func, Args, Bundles, NameStr);
+}
+
+// Note: if you get compile errors about private methods then
+// please update your code to use the high-level operand
+// interfaces. See line 943 above.
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CallInst, Value)
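+
+// Illustrative usage sketch (not part of the original header): creating a
+// call and walking its arguments. 'Callee', 'Arg0', 'Arg1', 'InsertPt', and
+// 'inspect' are hypothetical.
+//
+//   CallInst *CI = CallInst::Create(Callee, {Arg0, Arg1}, "res", InsertPt);
+//   CI->setTailCall();                  // marks the call TCK_Tail
+//   for (Value *V : CI->arg_operands()) // excludes bundles and the callee
+//     inspect(V);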
+
+//===----------------------------------------------------------------------===//
+// SelectInst Class
+//===----------------------------------------------------------------------===//
+
+/// SelectInst - This class represents the LLVM 'select' instruction.
+///
+class SelectInst : public Instruction {
+ void init(Value *C, Value *S1, Value *S2) {
+ assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
+ Op<0>() = C;
+ Op<1>() = S1;
+ Op<2>() = S2;
+ }
+
+ SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(S1->getType(), Instruction::Select,
+ &Op<0>(), 3, InsertBefore) {
+ init(C, S1, S2);
+ setName(NameStr);
+ }
+ SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(S1->getType(), Instruction::Select,
+ &Op<0>(), 3, InsertAtEnd) {
+ init(C, S1, S2);
+ setName(NameStr);
+ }
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ SelectInst *cloneImpl() const;
+
+public:
+ static SelectInst *Create(Value *C, Value *S1, Value *S2,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
+ }
+ static SelectInst *Create(Value *C, Value *S1, Value *S2,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
+ }
+
+ const Value *getCondition() const { return Op<0>(); }
+ const Value *getTrueValue() const { return Op<1>(); }
+ const Value *getFalseValue() const { return Op<2>(); }
+ Value *getCondition() { return Op<0>(); }
+ Value *getTrueValue() { return Op<1>(); }
+ Value *getFalseValue() { return Op<2>(); }
+
+ /// areInvalidOperands - Return a string if the specified operands are invalid
+ /// for a select operation, otherwise return null.
+ static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ OtherOps getOpcode() const {
+ return static_cast<OtherOps>(Instruction::getOpcode());
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Select;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
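+
+// Illustrative usage sketch (not part of the original header): selecting
+// between two values under a condition. 'Cond', 'A', 'B', and 'InsertPt' are
+// hypothetical; areInvalidOperands returns null when the operands are valid.
+//
+//   // %max = select i1 %Cond, i32 %A, i32 %B
+//   assert(!SelectInst::areInvalidOperands(Cond, A, B));
+//   SelectInst *Max = SelectInst::Create(Cond, A, B, "max", InsertPt);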
+
+//===----------------------------------------------------------------------===//
+// VAArgInst Class
+//===----------------------------------------------------------------------===//
+
+/// VAArgInst - This class represents the LLVM 'va_arg' instruction, which
+/// returns an argument of the specified type given a va_list and increments
+/// that list.
+class VAArgInst : public UnaryInstruction {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ VAArgInst *cloneImpl() const;
+
+public:
+ VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr)
+ : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
+ setName(NameStr);
+ }
+ VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
+ setName(NameStr);
+ }
+
+ Value *getPointerOperand() { return getOperand(0); }
+ const Value *getPointerOperand() const { return getOperand(0); }
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == VAArg;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
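+
+// Illustrative usage sketch (not part of the original header): reading the
+// next i32 from a va_list. 'VAListPtr', 'Ctx', and 'InsertPt' are
+// hypothetical.
+//
+//   VAArgInst *Next =
+//       new VAArgInst(VAListPtr, Type::getInt32Ty(Ctx), "next", InsertPt);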
+
+//===----------------------------------------------------------------------===//
+// ExtractElementInst Class
+//===----------------------------------------------------------------------===//
+
+/// ExtractElementInst - This instruction extracts a single (scalar)
+/// element from a VectorType value.
+///
+class ExtractElementInst : public Instruction {
+ ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr);
+ ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ ExtractElementInst *cloneImpl() const;
+
+public:
+ static ExtractElementInst *Create(Value *Vec, Value *Idx,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
+ }
+ static ExtractElementInst *Create(Value *Vec, Value *Idx,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
+ }
+
+ /// isValidOperands - Return true if an extractelement instruction can be
+ /// formed with the specified operands.
+ static bool isValidOperands(const Value *Vec, const Value *Idx);
+
+ Value *getVectorOperand() { return Op<0>(); }
+ Value *getIndexOperand() { return Op<1>(); }
+ const Value *getVectorOperand() const { return Op<0>(); }
+ const Value *getIndexOperand() const { return Op<1>(); }
+
+ VectorType *getVectorOperandType() const {
+ return cast<VectorType>(getVectorOperand()->getType());
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::ExtractElement;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<ExtractElementInst> :
+ public FixedNumOperandTraits<ExtractElementInst, 2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
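+
+// Illustrative usage sketch (not part of the original header): extracting one
+// lane of a vector. 'Vec', 'Idx', and 'InsertPt' are hypothetical.
+//
+//   assert(ExtractElementInst::isValidOperands(Vec, Idx));
+//   Value *Lane = ExtractElementInst::Create(Vec, Idx, "lane", InsertPt);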
+
+//===----------------------------------------------------------------------===//
+// InsertElementInst Class
+//===----------------------------------------------------------------------===//
+
+/// InsertElementInst - This instruction inserts a single (scalar)
+/// element into a VectorType value.
+///
+class InsertElementInst : public Instruction {
+ InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr);
+ InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ InsertElementInst *cloneImpl() const;
+
+public:
+ static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
+ }
+ static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
+ }
+
+ /// isValidOperands - Return true if an insertelement instruction can be
+ /// formed with the specified operands.
+ static bool isValidOperands(const Value *Vec, const Value *NewElt,
+ const Value *Idx);
+
+ /// getType - Overload to return most specific vector type.
+ ///
+ VectorType *getType() const {
+ return cast<VectorType>(Instruction::getType());
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::InsertElement;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<InsertElementInst> :
+ public FixedNumOperandTraits<InsertElementInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
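+
+// Illustrative usage sketch (not part of the original header): writing a
+// scalar into one lane of a vector. 'Vec', 'Elt', 'Idx', and 'InsertPt' are
+// hypothetical.
+//
+//   assert(InsertElementInst::isValidOperands(Vec, Elt, Idx));
+//   Value *NewVec = InsertElementInst::Create(Vec, Elt, Idx, "vec", InsertPt);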
+
+//===----------------------------------------------------------------------===//
+// ShuffleVectorInst Class
+//===----------------------------------------------------------------------===//
+
+/// ShuffleVectorInst - This instruction constructs a fixed permutation of two
+/// input vectors.
+///
+class ShuffleVectorInst : public Instruction {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ ShuffleVectorInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly three operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 3);
+ }
+ ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &NameStr = "",
+                    Instruction *InsertBefore = nullptr);
+ ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ /// isValidOperands - Return true if a shufflevector instruction can be
+ /// formed with the specified operands.
+ static bool isValidOperands(const Value *V1, const Value *V2,
+ const Value *Mask);
+
+ /// getType - Overload to return most specific vector type.
+ ///
+ VectorType *getType() const {
+ return cast<VectorType>(Instruction::getType());
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ Constant *getMask() const {
+ return cast<Constant>(getOperand(2));
+ }
+
+ /// getMaskValue - Return the index from the shuffle mask for the specified
+ /// output result. This is either -1 if the element is undef or a number less
+ /// than 2*numelements.
+ static int getMaskValue(Constant *Mask, unsigned i);
+
+ int getMaskValue(unsigned i) const {
+ return getMaskValue(getMask(), i);
+ }
+
+ /// getShuffleMask - Return the full mask for this instruction, where each
+ /// element is the element number and undef's are returned as -1.
+ static void getShuffleMask(Constant *Mask, SmallVectorImpl<int> &Result);
+
+ void getShuffleMask(SmallVectorImpl<int> &Result) const {
+ return getShuffleMask(getMask(), Result);
+ }
+
+ SmallVector<int, 16> getShuffleMask() const {
+ SmallVector<int, 16> Mask;
+ getShuffleMask(Mask);
+ return Mask;
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::ShuffleVector;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<ShuffleVectorInst> :
+ public FixedNumOperandTraits<ShuffleVectorInst, 3> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
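+
+// Illustrative usage sketch (not part of the original header): inspecting the
+// mask of an existing shuffle 'SVI' (hypothetical); undef lanes come back
+// as -1, other lanes index into the concatenation of the two input vectors.
+//
+//   for (int Elt : SVI->getShuffleMask())
+//     assert(Elt == -1 || Elt < 2 * NumElts); // 'NumElts' is hypothetical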
+
+//===----------------------------------------------------------------------===//
+// ExtractValueInst Class
+//===----------------------------------------------------------------------===//
+
+/// ExtractValueInst - This instruction extracts a struct member or array
+/// element value from an aggregate value.
+///
+class ExtractValueInst : public UnaryInstruction {
+ SmallVector<unsigned, 4> Indices;
+
+ ExtractValueInst(const ExtractValueInst &EVI);
+ void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
+
+  /// Constructors - Create an extractvalue instruction with a base aggregate
+ /// value and a list of indices. The first ctor can optionally insert before
+ /// an existing instruction, the second appends the new instruction to the
+ /// specified BasicBlock.
+ inline ExtractValueInst(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ Instruction *InsertBefore);
+ inline ExtractValueInst(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ // allocate space for exactly one operand
+ void *operator new(size_t s) { return User::operator new(s, 1); }
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ ExtractValueInst *cloneImpl() const;
+
+public:
+ static ExtractValueInst *Create(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new
+ ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
+ }
+ static ExtractValueInst *Create(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
+ }
+
+ /// getIndexedType - Returns the type of the element that would be extracted
+ /// with an extractvalue instruction with the specified parameters.
+ ///
+ /// Null is returned if the indices are invalid for the specified type.
+ static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
+
+ typedef const unsigned* idx_iterator;
+ inline idx_iterator idx_begin() const { return Indices.begin(); }
+ inline idx_iterator idx_end() const { return Indices.end(); }
+ inline iterator_range<idx_iterator> indices() const {
+ return make_range(idx_begin(), idx_end());
+ }
+
+ Value *getAggregateOperand() {
+ return getOperand(0);
+ }
+ const Value *getAggregateOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getAggregateOperandIndex() {
+ return 0U; // get index for modifying correct operand
+ }
+
+ ArrayRef<unsigned> getIndices() const {
+ return Indices;
+ }
+
+ unsigned getNumIndices() const {
+ return (unsigned)Indices.size();
+ }
+
+ bool hasIndices() const {
+ return true;
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::ExtractValue;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+ExtractValueInst::ExtractValueInst(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ Instruction *InsertBefore)
+ : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
+ ExtractValue, Agg, InsertBefore) {
+ init(Idxs, NameStr);
+}
+ExtractValueInst::ExtractValueInst(Value *Agg,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
+ ExtractValue, Agg, InsertAtEnd) {
+ init(Idxs, NameStr);
+}
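+
+// Illustrative usage sketch (not part of the original header): the IR
+// "extractvalue %Agg, 0, 1", pulling member 1 of struct member 0 out of an
+// aggregate. 'Agg' and 'InsertPt' are hypothetical.
+//
+//   Value *Field = ExtractValueInst::Create(Agg, {0, 1}, "field", InsertPt);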
+
+//===----------------------------------------------------------------------===//
+// InsertValueInst Class
+//===----------------------------------------------------------------------===//
+
+/// InsertValueInst - This instruction inserts a struct field or array element
+/// value into an aggregate value.
+///
+class InsertValueInst : public Instruction {
+ SmallVector<unsigned, 4> Indices;
+
+ void *operator new(size_t, unsigned) = delete;
+ InsertValueInst(const InsertValueInst &IVI);
+ void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+ const Twine &NameStr);
+
+  /// Constructors - Create an insertvalue instruction with a base aggregate
+ /// value, a value to insert, and a list of indices. The first ctor can
+ /// optionally insert before an existing instruction, the second appends
+ /// the new instruction to the specified BasicBlock.
+ inline InsertValueInst(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ Instruction *InsertBefore);
+ inline InsertValueInst(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ /// Constructors - These two constructors are convenience methods because one
+ /// and two index insertvalue instructions are so common.
+ InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr);
+ InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ InsertValueInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly two operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 2);
+ }
+
+ static InsertValueInst *Create(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
+ }
+ static InsertValueInst *Create(Value *Agg, Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ typedef const unsigned* idx_iterator;
+ inline idx_iterator idx_begin() const { return Indices.begin(); }
+ inline idx_iterator idx_end() const { return Indices.end(); }
+ inline iterator_range<idx_iterator> indices() const {
+ return make_range(idx_begin(), idx_end());
+ }
+
+ Value *getAggregateOperand() {
+ return getOperand(0);
+ }
+ const Value *getAggregateOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getAggregateOperandIndex() {
+ return 0U; // get index for modifying correct operand
+ }
+
+ Value *getInsertedValueOperand() {
+ return getOperand(1);
+ }
+ const Value *getInsertedValueOperand() const {
+ return getOperand(1);
+ }
+ static unsigned getInsertedValueOperandIndex() {
+ return 1U; // get index for modifying correct operand
+ }
+
+ ArrayRef<unsigned> getIndices() const {
+ return Indices;
+ }
+
+ unsigned getNumIndices() const {
+ return (unsigned)Indices.size();
+ }
+
+ bool hasIndices() const {
+ return true;
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::InsertValue;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<InsertValueInst> :
+ public FixedNumOperandTraits<InsertValueInst, 2> {
+};
+
+InsertValueInst::InsertValueInst(Value *Agg,
+ Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ Instruction *InsertBefore)
+ : Instruction(Agg->getType(), InsertValue,
+ OperandTraits<InsertValueInst>::op_begin(this),
+ 2, InsertBefore) {
+ init(Agg, Val, Idxs, NameStr);
+}
+InsertValueInst::InsertValueInst(Value *Agg,
+ Value *Val,
+ ArrayRef<unsigned> Idxs,
+ const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Agg->getType(), InsertValue,
+ OperandTraits<InsertValueInst>::op_begin(this),
+ 2, InsertAtEnd) {
+ init(Agg, Val, Idxs, NameStr);
+}
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
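+
+// Illustrative usage sketch (not part of the original header): the converse
+// of extractvalue, producing a copy of 'Agg' with 'Val' stored at index 0.
+// 'Agg', 'Val', and 'InsertPt' are hypothetical.
+//
+//   Value *NewAgg = InsertValueInst::Create(Agg, Val, {0}, "agg", InsertPt);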
+
+//===----------------------------------------------------------------------===//
+// PHINode Class
+//===----------------------------------------------------------------------===//
+
+// PHINode - The PHINode class is used to represent the magical mystical PHI
+// node, which cannot exist in nature, but can be synthesized in a computer
+// scientist's overactive imagination.
+//
+class PHINode : public Instruction {
+ void anchor() override;
+
+ void *operator new(size_t, unsigned) = delete;
+ /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// the number actually in use.
+ unsigned ReservedSpace;
+ PHINode(const PHINode &PN);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+ explicit PHINode(Type *Ty, unsigned NumReservedValues,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr)
+ : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
+ ReservedSpace(NumReservedValues) {
+ setName(NameStr);
+ allocHungoffUses(ReservedSpace);
+ }
+
+ PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
+ ReservedSpace(NumReservedValues) {
+ setName(NameStr);
+ allocHungoffUses(ReservedSpace);
+ }
+
+protected:
+ // allocHungoffUses - this is more complicated than the generic
+ // User::allocHungoffUses, because we have to allocate Uses for the incoming
+ // values and pointers to the incoming blocks, all in one allocation.
+ void allocHungoffUses(unsigned N) {
+ User::allocHungoffUses(N, /* IsPhi */ true);
+ }
+
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ PHINode *cloneImpl() const;
+
+public:
+ /// Constructors - NumReservedValues is a hint for the number of incoming
+ /// edges that this phi node will have (use 0 if you really have no idea).
+ static PHINode *Create(Type *Ty, unsigned NumReservedValues,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
+ }
+ static PHINode *Create(Type *Ty, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Block iterator interface. This provides access to the list of incoming
+ // basic blocks, which parallels the list of incoming values.
+
+ typedef BasicBlock **block_iterator;
+ typedef BasicBlock * const *const_block_iterator;
+
+ block_iterator block_begin() {
+ Use::UserRef *ref =
+ reinterpret_cast<Use::UserRef*>(op_begin() + ReservedSpace);
+ return reinterpret_cast<block_iterator>(ref + 1);
+ }
+
+ const_block_iterator block_begin() const {
+ const Use::UserRef *ref =
+ reinterpret_cast<const Use::UserRef*>(op_begin() + ReservedSpace);
+ return reinterpret_cast<const_block_iterator>(ref + 1);
+ }
+
+ block_iterator block_end() {
+ return block_begin() + getNumOperands();
+ }
+
+ const_block_iterator block_end() const {
+ return block_begin() + getNumOperands();
+ }
+
+ op_range incoming_values() { return operands(); }
+
+ const_op_range incoming_values() const { return operands(); }
+
+ /// getNumIncomingValues - Return the number of incoming edges
+ ///
+ unsigned getNumIncomingValues() const { return getNumOperands(); }
+
+ /// getIncomingValue - Return incoming value number x
+ ///
+ Value *getIncomingValue(unsigned i) const {
+ return getOperand(i);
+ }
+ void setIncomingValue(unsigned i, Value *V) {
+ assert(V && "PHI node got a null value!");
+ assert(getType() == V->getType() &&
+ "All operands to PHI node must be the same type as the PHI node!");
+ setOperand(i, V);
+ }
+ static unsigned getOperandNumForIncomingValue(unsigned i) {
+ return i;
+ }
+ static unsigned getIncomingValueNumForOperand(unsigned i) {
+ return i;
+ }
+
+ /// getIncomingBlock - Return incoming basic block number @p i.
+ ///
+ BasicBlock *getIncomingBlock(unsigned i) const {
+ return block_begin()[i];
+ }
+
+ /// getIncomingBlock - Return incoming basic block corresponding
+ /// to an operand of the PHI.
+ ///
+ BasicBlock *getIncomingBlock(const Use &U) const {
+ assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
+ return getIncomingBlock(unsigned(&U - op_begin()));
+ }
+
+ /// getIncomingBlock - Return incoming basic block corresponding
+ /// to value use iterator.
+ ///
+ BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
+ return getIncomingBlock(I.getUse());
+ }
+
+ void setIncomingBlock(unsigned i, BasicBlock *BB) {
+ assert(BB && "PHI node got a null basic block!");
+ block_begin()[i] = BB;
+ }
+
+ /// addIncoming - Add an incoming value to the end of the PHI list
+ ///
+ void addIncoming(Value *V, BasicBlock *BB) {
+ if (getNumOperands() == ReservedSpace)
+ growOperands(); // Get more space!
+ // Initialize some new operands.
+ setNumHungOffUseOperands(getNumOperands() + 1);
+ setIncomingValue(getNumOperands() - 1, V);
+ setIncomingBlock(getNumOperands() - 1, BB);
+ }
+
+ /// removeIncomingValue - Remove an incoming value. This is useful if a
+ /// predecessor basic block is deleted. The value removed is returned.
+ ///
+ /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
+ /// is true), the PHI node is destroyed and any uses of it are replaced with
+ /// dummy values. The only time there should be zero incoming values to a PHI
+ /// node is when the block is dead, so this strategy is sound.
+ ///
+ Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
+
+ Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
+ int Idx = getBasicBlockIndex(BB);
+ assert(Idx >= 0 && "Invalid basic block argument to remove!");
+ return removeIncomingValue(Idx, DeletePHIIfEmpty);
+ }
+
+ /// getBasicBlockIndex - Return the first index of the specified basic
+ /// block in the value list for this PHI. Returns -1 if no instance.
+ ///
+ int getBasicBlockIndex(const BasicBlock *BB) const {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (block_begin()[i] == BB)
+ return i;
+ return -1;
+ }
+
+ Value *getIncomingValueForBlock(const BasicBlock *BB) const {
+ int Idx = getBasicBlockIndex(BB);
+ assert(Idx >= 0 && "Invalid basic block argument!");
+ return getIncomingValue(Idx);
+ }
+
+ /// hasConstantValue - If the specified PHI node always merges together the
+ /// same value, return the value, otherwise return null.
+ Value *hasConstantValue() const;
+
+ /// Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::PHI;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ void growOperands();
+};
+
+template <>
+struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
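+
+// Illustrative usage sketch (not part of the original header): merging a
+// value from two predecessor blocks. 'Ty', 'V0', 'BB0', 'V1', 'BB1', and
+// 'InsertPt' are hypothetical.
+//
+//   PHINode *PN =
+//       PHINode::Create(Ty, /*NumReservedValues=*/2, "merge", InsertPt);
+//   PN->addIncoming(V0, BB0);
+//   PN->addIncoming(V1, BB1);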
+
+//===----------------------------------------------------------------------===//
+// LandingPadInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// LandingPadInst - The landingpad instruction holds all of the information
+/// necessary to generate correct exception handling. The landingpad instruction
+/// cannot be moved from the top of a landing pad block, which itself is
+/// accessible only from the 'unwind' edge of an invoke. This uses the
+/// SubclassData field in Value to store whether or not the landingpad is a
+/// cleanup.
+///
+class LandingPadInst : public Instruction {
+ /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// the number actually in use.
+ unsigned ReservedSpace;
+ LandingPadInst(const LandingPadInst &LP);
+
+public:
+ enum ClauseType { Catch, Filter };
+
+private:
+ void *operator new(size_t, unsigned) = delete;
+ // Allocate space for exactly zero operands.
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+ void growOperands(unsigned Size);
+ void init(unsigned NumReservedValues, const Twine &NameStr);
+
+ explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, Instruction *InsertBefore);
+ explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ LandingPadInst *cloneImpl() const;
+
+public:
+ /// Constructors - NumReservedClauses is a hint for the number of incoming
+ /// clauses that this landingpad will have (use 0 if you really have no idea).
+ static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr);
+ static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
+ const Twine &NameStr, BasicBlock *InsertAtEnd);
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// isCleanup - Return 'true' if this landingpad instruction is a
+ /// cleanup. I.e., it should be run when unwinding even if its landing pad
+ /// doesn't catch the exception.
+ bool isCleanup() const { return getSubclassDataFromInstruction() & 1; }
+
+ /// setCleanup - Indicate that this landingpad instruction is a cleanup.
+ void setCleanup(bool V) {
+ setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
+ (V ? 1 : 0));
+ }
+
+ /// Add a catch or filter clause to the landing pad.
+ void addClause(Constant *ClauseVal);
+
+ /// Get the value of the clause at index Idx. Use isCatch/isFilter to
+ /// determine what type of clause this is.
+ Constant *getClause(unsigned Idx) const {
+ return cast<Constant>(getOperandList()[Idx]);
+ }
+
+  /// isCatch - Return 'true' if the clause at index Idx is a catch clause.
+ bool isCatch(unsigned Idx) const {
+ return !isa<ArrayType>(getOperandList()[Idx]->getType());
+ }
+
+  /// isFilter - Return 'true' if the clause at index Idx is a filter clause.
+ bool isFilter(unsigned Idx) const {
+ return isa<ArrayType>(getOperandList()[Idx]->getType());
+ }
+
+ /// getNumClauses - Get the number of clauses for this landing pad.
+ unsigned getNumClauses() const { return getNumOperands(); }
+
+ /// reserveClauses - Grow the size of the operand list to accommodate the new
+ /// number of clauses.
+ void reserveClauses(unsigned Size) { growOperands(Size); }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::LandingPad;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+template <>
+struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
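+
+// Illustrative usage sketch (not part of the original header): a landingpad
+// with one catch clause that also runs as a cleanup. 'ExnTy', 'TypeInfo',
+// and 'PadBB' are hypothetical.
+//
+//   LandingPadInst *LP =
+//       LandingPadInst::Create(ExnTy, /*NumReservedClauses=*/1, "lp", PadBB);
+//   LP->addClause(TypeInfo); // non-array clause type => catch clause
+//   LP->setCleanup(true);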
+
+//===----------------------------------------------------------------------===//
+// ReturnInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// ReturnInst - Return a value (possibly void), from a function. Execution
+/// does not continue in this function any longer.
+///
+class ReturnInst : public TerminatorInst {
+ ReturnInst(const ReturnInst &RI);
+
+private:
+ // ReturnInst constructors:
+ // ReturnInst() - 'ret void' instruction
+ // ReturnInst( null) - 'ret void' instruction
+ // ReturnInst(Value* X) - 'ret X' instruction
+ // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
+ // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
+ // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
+ // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
+ //
+  // NOTE: If the Value* passed is of type void then the constructor behaves
+  // as if it were passed NULL.
+ explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
+ Instruction *InsertBefore = nullptr);
+ ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
+ explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ ReturnInst *cloneImpl() const;
+
+public:
+ static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
+ Instruction *InsertBefore = nullptr) {
+ return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
+ }
+ static ReturnInst* Create(LLVMContext &C, Value *retVal,
+ BasicBlock *InsertAtEnd) {
+ return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
+ }
+ static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
+ return new(0) ReturnInst(C, InsertAtEnd);
+ }
+ ~ReturnInst() override;
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Convenience accessor. Returns null if there is no return value.
+ Value *getReturnValue() const {
+ return getNumOperands() != 0 ? getOperand(0) : nullptr;
+ }
+
+ unsigned getNumSuccessors() const { return 0; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::Ret);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
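+
+// Example usage (illustrative sketch): emitting 'ret void' and 'ret <ty> X'.
+// Ctx, BB, and X are assumed, hypothetical names.
+//
+//   ReturnInst::Create(Ctx, /*retVal=*/nullptr, BB); // ret void
+//   ReturnInst *RI = ReturnInst::Create(Ctx, X, BB); // ret <ty> X
+//   Value *RV = RI->getReturnValue();                // X, or null for void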
+
+//===----------------------------------------------------------------------===//
+// BranchInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// BranchInst - Conditional or Unconditional Branch instruction.
+///
+class BranchInst : public TerminatorInst {
+ /// Ops list - Branches are strange. The operands are ordered:
+ /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
+ /// they don't have to check for cond/uncond branchness. These are mostly
+ /// accessed relative to op_end().
+ BranchInst(const BranchInst &BI);
+ void AssertOK();
+ // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
+ // BranchInst(BB *B) - 'br B'
+ // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
+ // BranchInst(BB* B, Inst *I) - 'br B' insert before I
+ // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
+ // BranchInst(BB* B, BB *I) - 'br B' insert at end
+ // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
+ explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
+ BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ Instruction *InsertBefore = nullptr);
+ BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
+ BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ BranchInst *cloneImpl() const;
+
+public:
+ static BranchInst *Create(BasicBlock *IfTrue,
+ Instruction *InsertBefore = nullptr) {
+ return new(1) BranchInst(IfTrue, InsertBefore);
+ }
+ static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
+ Value *Cond, Instruction *InsertBefore = nullptr) {
+ return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
+ }
+ static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
+ return new(1) BranchInst(IfTrue, InsertAtEnd);
+ }
+ static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
+ Value *Cond, BasicBlock *InsertAtEnd) {
+ return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
+ }
+
+ /// Transparently provide more efficient getOperand methods.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ bool isUnconditional() const { return getNumOperands() == 1; }
+ bool isConditional() const { return getNumOperands() == 3; }
+
+ Value *getCondition() const {
+ assert(isConditional() && "Cannot get condition of an uncond branch!");
+ return Op<-3>();
+ }
+
+ void setCondition(Value *V) {
+ assert(isConditional() && "Cannot set condition of unconditional branch!");
+ Op<-3>() = V;
+ }
+
+ unsigned getNumSuccessors() const { return 1+isConditional(); }
+
+ BasicBlock *getSuccessor(unsigned i) const {
+ assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
+ return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
+ }
+
+ void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+ assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
+ *(&Op<-1>() - idx) = NewSucc;
+ }
+
+ /// \brief Swap the successors of this branch instruction.
+ ///
+ /// Swaps the successors of the branch instruction. This also swaps any
+ /// branch weight metadata associated with the instruction so that it
+ /// continues to map correctly to each operand.
+ void swapSuccessors();
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::Br);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
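+
+// Example usage (illustrative sketch): emitting unconditional and conditional
+// branches. CurBB, TrueBB, FalseBB, and the i1 value Cond are assumed names.
+//
+//   BranchInst::Create(TrueBB, CurBB);                    // br %TrueBB
+//   BranchInst *BI =
+//       BranchInst::Create(TrueBB, FalseBB, Cond, CurBB); // br %Cond, ...
+//   if (BI->isConditional())
+//     BI->swapSuccessors(); // also keeps branch weight metadata in sync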
+
+//===----------------------------------------------------------------------===//
+// SwitchInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// SwitchInst - Multiway switch instruction.
+///
+class SwitchInst : public TerminatorInst {
+ void *operator new(size_t, unsigned) = delete;
+ unsigned ReservedSpace;
+ // Operand[0] = Value to switch on
+ // Operand[1] = Default basic block destination
+ // Operand[2n ] = Value to match
+ // Operand[2n+1] = BasicBlock to go to on match
+ SwitchInst(const SwitchInst &SI);
+ void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
+ void growOperands();
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+ /// SwitchInst ctor - Create a new switch instruction, specifying a value to
+ /// switch on and a default destination. The number of additional cases can
+ /// be specified here to make memory allocation more efficient. This
+ /// constructor can also autoinsert before another instruction.
+ SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ Instruction *InsertBefore);
+
+ /// SwitchInst ctor - Create a new switch instruction, specifying a value to
+ /// switch on and a default destination. The number of additional cases can
+ /// be specified here to make memory allocation more efficient. This
+ /// constructor also autoinserts at the end of the specified BasicBlock.
+ SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ SwitchInst *cloneImpl() const;
+
+public:
+ // DefaultPseudoIndex is -2 when interpreted as signed (i.e. ~0U - 1).
+ static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
+
+ template <class SwitchInstTy, class ConstantIntTy, class BasicBlockTy>
+ class CaseIteratorT {
+ protected:
+ SwitchInstTy *SI;
+ unsigned Index;
+
+ public:
+ typedef CaseIteratorT<SwitchInstTy, ConstantIntTy, BasicBlockTy> Self;
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// case number.
+ CaseIteratorT(SwitchInstTy *SI, unsigned CaseNum) {
+ this->SI = SI;
+ Index = CaseNum;
+ }
+
+ /// Initializes case iterator for given SwitchInst and for given
+ /// TerminatorInst's successor index.
+ static Self fromSuccessorIndex(SwitchInstTy *SI, unsigned SuccessorIndex) {
+ assert(SuccessorIndex < SI->getNumSuccessors() &&
+ "Successor index # out of range!");
+ return SuccessorIndex != 0 ?
+ Self(SI, SuccessorIndex - 1) :
+ Self(SI, DefaultPseudoIndex);
+ }
+
+ /// Resolves case value for current case.
+ ConstantIntTy *getCaseValue() {
+ assert(Index < SI->getNumCases() && "Case index out of range.");
+ return reinterpret_cast<ConstantIntTy*>(SI->getOperand(2 + Index*2));
+ }
+
+ /// Resolves successor for current case.
+ BasicBlockTy *getCaseSuccessor() {
+ assert((Index < SI->getNumCases() ||
+ Index == DefaultPseudoIndex) &&
+ "Index out the number of cases.");
+ return SI->getSuccessor(getSuccessorIndex());
+ }
+
+ /// Returns number of current case.
+ unsigned getCaseIndex() const { return Index; }
+
+ /// Returns TerminatorInst's successor index for current case successor.
+ unsigned getSuccessorIndex() const {
+ assert((Index == DefaultPseudoIndex || Index < SI->getNumCases()) &&
+ "Index out the number of cases.");
+ return Index != DefaultPseudoIndex ? Index + 1 : 0;
+ }
+
+ Self operator++() {
+ // Check index correctness after increment.
+ // Note: Index == getNumCases() means end().
+ assert(Index+1 <= SI->getNumCases() && "Case index out of range.");
+ ++Index;
+ return *this;
+ }
+ Self operator++(int) {
+ Self tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+ Self operator--() {
+ // Check index correctness after decrement.
+ // Note: Index == getNumCases() means end().
+ // Also allow the "-1" iterator here; it will become valid after ++.
+ assert((Index == 0 || Index-1 <= SI->getNumCases()) &&
+ "Index out the number of cases.");
+ --Index;
+ return *this;
+ }
+ Self operator--(int) {
+ Self tmp = *this;
+ --(*this);
+ return tmp;
+ }
+ bool operator==(const Self& RHS) const {
+ assert(RHS.SI == SI && "Incompatible iterators.");
+ return RHS.Index == Index;
+ }
+ bool operator!=(const Self& RHS) const {
+ assert(RHS.SI == SI && "Incompatible iterators.");
+ return RHS.Index != Index;
+ }
+ Self &operator*() {
+ return *this;
+ }
+ };
+
+ typedef CaseIteratorT<const SwitchInst, const ConstantInt, const BasicBlock>
+ ConstCaseIt;
+
+ class CaseIt : public CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> {
+
+ typedef CaseIteratorT<SwitchInst, ConstantInt, BasicBlock> ParentTy;
+
+ public:
+ CaseIt(const ParentTy &Src) : ParentTy(Src) {}
+ CaseIt(SwitchInst *SI, unsigned CaseNum) : ParentTy(SI, CaseNum) {}
+
+ /// Sets the new value for current case.
+ void setValue(ConstantInt *V) {
+ assert(Index < SI->getNumCases() && "Case index out of range.");
+ SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
+ }
+
+ /// Sets the new successor for current case.
+ void setSuccessor(BasicBlock *S) {
+ SI->setSuccessor(getSuccessorIndex(), S);
+ }
+ };
+
+ static SwitchInst *Create(Value *Value, BasicBlock *Default,
+ unsigned NumCases,
+ Instruction *InsertBefore = nullptr) {
+ return new SwitchInst(Value, Default, NumCases, InsertBefore);
+ }
+ static SwitchInst *Create(Value *Value, BasicBlock *Default,
+ unsigned NumCases, BasicBlock *InsertAtEnd) {
+ return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Accessor Methods for Switch stmt
+ Value *getCondition() const { return getOperand(0); }
+ void setCondition(Value *V) { setOperand(0, V); }
+
+ BasicBlock *getDefaultDest() const {
+ return cast<BasicBlock>(getOperand(1));
+ }
+
+ void setDefaultDest(BasicBlock *DefaultCase) {
+ setOperand(1, reinterpret_cast<Value*>(DefaultCase));
+ }
+
+ /// getNumCases - return the number of 'cases' in this switch instruction,
+ /// excluding the default case.
+ unsigned getNumCases() const {
+ return getNumOperands()/2 - 1;
+ }
+
+ /// Returns a read/write iterator that points to the first
+ /// case in SwitchInst.
+ CaseIt case_begin() {
+ return CaseIt(this, 0);
+ }
+ /// Returns a read-only iterator that points to the first
+ /// case in the SwitchInst.
+ ConstCaseIt case_begin() const {
+ return ConstCaseIt(this, 0);
+ }
+
+ /// Returns a read/write iterator that points one past the last case
+ /// in the SwitchInst.
+ CaseIt case_end() {
+ return CaseIt(this, getNumCases());
+ }
+ /// Returns a read-only iterator that points one past the last case
+ /// in the SwitchInst.
+ ConstCaseIt case_end() const {
+ return ConstCaseIt(this, getNumCases());
+ }
+
+ /// cases - iteration adapter for range-for loops.
+ iterator_range<CaseIt> cases() {
+ return make_range(case_begin(), case_end());
+ }
+
+ /// cases - iteration adapter for range-for loops.
+ iterator_range<ConstCaseIt> cases() const {
+ return make_range(case_begin(), case_end());
+ }
+
+ /// Returns an iterator that points to the default case.
+ /// Note: this iterator can only resolve the successor; attempting to
+ /// resolve the case value triggers an assertion.
+ /// Also note that incrementing or decrementing this iterator triggers an
+ /// assertion and leaves it invalid.
+ CaseIt case_default() {
+ return CaseIt(this, DefaultPseudoIndex);
+ }
+ ConstCaseIt case_default() const {
+ return ConstCaseIt(this, DefaultPseudoIndex);
+ }
+
+ /// findCaseValue - Search all of the case values for the specified constant.
+ /// If it is explicitly handled, return the case iterator for it; otherwise
+ /// return the default case iterator to indicate that it is handled by the
+ /// default handler.
+ CaseIt findCaseValue(const ConstantInt *C) {
+ for (CaseIt i = case_begin(), e = case_end(); i != e; ++i)
+ if (i.getCaseValue() == C)
+ return i;
+ return case_default();
+ }
+ ConstCaseIt findCaseValue(const ConstantInt *C) const {
+ for (ConstCaseIt i = case_begin(), e = case_end(); i != e; ++i)
+ if (i.getCaseValue() == C)
+ return i;
+ return case_default();
+ }
+
+ /// findCaseDest - Finds the unique case value for a given successor. Returns
+ /// null if the successor is not found, not unique, or is the default case.
+ ConstantInt *findCaseDest(BasicBlock *BB) {
+ if (BB == getDefaultDest()) return nullptr;
+
+ ConstantInt *CI = nullptr;
+ for (CaseIt i = case_begin(), e = case_end(); i != e; ++i) {
+ if (i.getCaseSuccessor() == BB) {
+ if (CI) return nullptr; // Multiple cases lead to BB.
+ else CI = i.getCaseValue();
+ }
+ }
+ return CI;
+ }
+
+ /// addCase - Add an entry to the switch instruction.
+ /// Note:
+ /// This action invalidates case_end(); the old case_end() iterator will
+ /// point to the added case.
+ void addCase(ConstantInt *OnVal, BasicBlock *Dest);
+
+ /// removeCase - This method removes the specified case and its successor
+ /// from the switch instruction. Note that this operation may reorder the
+ /// remaining cases at and above the removed index.
+ /// Note:
+ /// This action invalidates iterators for all cases following the one removed,
+ /// including the case_end() iterator.
+ void removeCase(CaseIt i);
+
+ unsigned getNumSuccessors() const { return getNumOperands()/2; }
+ BasicBlock *getSuccessor(unsigned idx) const {
+ assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
+ return cast<BasicBlock>(getOperand(idx*2+1));
+ }
+ void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+ assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
+ setOperand(idx * 2 + 1, NewSucc);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Switch;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
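+
+// Example usage (illustrative sketch): building a switch and walking its
+// cases. Ctx, V (an i32 value), CurBB, DefaultBB, OneBB, and TwoBB are
+// assumed, hypothetical names.
+//
+//   SwitchInst *SI = SwitchInst::Create(V, DefaultBB, /*NumCases=*/2, CurBB);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 1), OneBB);
+//   SI->addCase(ConstantInt::get(Type::getInt32Ty(Ctx), 2), TwoBB);
+//   for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end();
+//        I != E; ++I)
+//     (void)I.getCaseSuccessor(); // destination for I.getCaseValue()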
+
+//===----------------------------------------------------------------------===//
+// IndirectBrInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// IndirectBrInst - Indirect Branch Instruction.
+///
+class IndirectBrInst : public TerminatorInst {
+ void *operator new(size_t, unsigned) = delete;
+ unsigned ReservedSpace;
+ // Operand[0] = Address to jump to
+ // Operand[n+1] = n-th destination basic block
+ IndirectBrInst(const IndirectBrInst &IBI);
+ void init(Value *Address, unsigned NumDests);
+ void growOperands();
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s);
+ }
+ /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
+ /// Address to jump to. The number of expected destinations can be specified
+ /// here to make memory allocation more efficient. This constructor can also
+ /// autoinsert before another instruction.
+ IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
+
+ /// IndirectBrInst ctor - Create a new indirectbr instruction, specifying an
+ /// Address to jump to. The number of expected destinations can be specified
+ /// here to make memory allocation more efficient. This constructor also
+ /// autoinserts at the end of the specified BasicBlock.
+ IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ IndirectBrInst *cloneImpl() const;
+
+public:
+ static IndirectBrInst *Create(Value *Address, unsigned NumDests,
+ Instruction *InsertBefore = nullptr) {
+ return new IndirectBrInst(Address, NumDests, InsertBefore);
+ }
+ static IndirectBrInst *Create(Value *Address, unsigned NumDests,
+ BasicBlock *InsertAtEnd) {
+ return new IndirectBrInst(Address, NumDests, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors.
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Accessor Methods for IndirectBrInst instruction.
+ Value *getAddress() { return getOperand(0); }
+ const Value *getAddress() const { return getOperand(0); }
+ void setAddress(Value *V) { setOperand(0, V); }
+
+ /// getNumDestinations - return the number of possible destinations in this
+ /// indirectbr instruction.
+ unsigned getNumDestinations() const { return getNumOperands()-1; }
+
+ /// getDestination - Return the specified destination.
+ BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
+ const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
+
+ /// addDestination - Add a destination.
+ ///
+ void addDestination(BasicBlock *Dest);
+
+ /// removeDestination - This method removes the specified successor from the
+ /// indirectbr instruction.
+ void removeDestination(unsigned i);
+
+ unsigned getNumSuccessors() const { return getNumOperands()-1; }
+ BasicBlock *getSuccessor(unsigned i) const {
+ return cast<BasicBlock>(getOperand(i+1));
+ }
+ void setSuccessor(unsigned i, BasicBlock *NewSucc) {
+ setOperand(i + 1, NewSucc);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::IndirectBr;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
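+
+// Example usage (illustrative sketch): an indirectbr through a blockaddress.
+// CurBB, Dest1, and Dest2 are assumed, hypothetical names.
+//
+//   Value *Addr = BlockAddress::get(Dest1); // i8* address of a block
+//   IndirectBrInst *IBI =
+//       IndirectBrInst::Create(Addr, /*NumDests=*/2, CurBB);
+//   IBI->addDestination(Dest1); // every possible target must be listed
+//   IBI->addDestination(Dest2);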
+
+//===----------------------------------------------------------------------===//
+// InvokeInst Class
+//===----------------------------------------------------------------------===//
+
+/// InvokeInst - Invoke instruction. The SubclassData field is used to hold the
+/// calling convention of the call.
+///
+class InvokeInst : public TerminatorInst,
+ public OperandBundleUser<InvokeInst, User::op_iterator> {
+ AttributeSet AttributeList;
+ FunctionType *FTy;
+ InvokeInst(const InvokeInst &BI);
+ void init(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+ ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr) {
+ init(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, NameStr);
+ }
+ void init(FunctionType *FTy, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
+
+ /// \brief Construct an InvokeInst given a range of arguments.
+ inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+ ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+ unsigned Values, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : InvokeInst(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
+ InsertBefore) {}
+
+ inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore);
+ /// \brief Construct an InvokeInst given a range of arguments.
+ inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException,
+ ArrayRef<Value *> Args, ArrayRef<OperandBundleDef> Bundles,
+ unsigned Values, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+ friend class OperandBundleUser<InvokeInst, User::op_iterator>;
+ bool hasDescriptor() const { return HasDescriptor; }
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ InvokeInst *cloneImpl() const;
+
+public:
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ const Twine &NameStr,
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, None, NameStr,
+ InsertBefore);
+ }
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return Create(cast<FunctionType>(
+ cast<PointerType>(Func->getType())->getElementType()),
+ Func, IfNormal, IfException, Args, Bundles, NameStr,
+ InsertBefore);
+ }
+ static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ const Twine &NameStr,
+ Instruction *InsertBefore = nullptr) {
+ unsigned Values = unsigned(Args.size()) + 3;
+ return new (Values) InvokeInst(Ty, Func, IfNormal, IfException, Args, None,
+ Values, NameStr, InsertBefore);
+ }
+ static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
+ unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (Values, DescriptorBytes)
+ InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, Values,
+ NameStr, InsertBefore);
+ }
+ static InvokeInst *Create(Value *Func,
+ BasicBlock *IfNormal, BasicBlock *IfException,
+ ArrayRef<Value *> Args, const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ unsigned Values = unsigned(Args.size()) + 3;
+ return new (Values) InvokeInst(Func, IfNormal, IfException, Args, None,
+ Values, NameStr, InsertAtEnd);
+ }
+ static InvokeInst *Create(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ unsigned Values = unsigned(Args.size()) + CountBundleInputs(Bundles) + 3;
+ unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
+
+ return new (Values, DescriptorBytes)
+ InvokeInst(Func, IfNormal, IfException, Args, Bundles, Values, NameStr,
+ InsertAtEnd);
+ }
+
+ /// \brief Create a clone of \p II with a different set of operand bundles and
+ /// insert it before \p InsertPt.
+ ///
+ /// The returned invoke instruction is identical to \p II in every way except
+ /// that the operand bundles for the new instruction are set to the operand
+ /// bundles in \p Bundles.
+ static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
+ Instruction *InsertPt = nullptr);
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ FunctionType *getFunctionType() const { return FTy; }
+
+ void mutateFunctionType(FunctionType *FTy) {
+ mutateType(FTy->getReturnType());
+ this->FTy = FTy;
+ }
+
+ /// getNumArgOperands - Return the number of invoke arguments.
+ ///
+ unsigned getNumArgOperands() const {
+ return getNumOperands() - getNumTotalBundleOperands() - 3;
+ }
+
+ /// getArgOperand/setArgOperand - Return/set the i-th invoke argument.
+ ///
+ Value *getArgOperand(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperand(i);
+ }
+ void setArgOperand(unsigned i, Value *v) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ setOperand(i, v);
+ }
+
+ /// \brief Return the iterator pointing to the beginning of the argument list.
+ op_iterator arg_begin() { return op_begin(); }
+
+ /// \brief Return the iterator pointing to the end of the argument list.
+ op_iterator arg_end() {
+ // [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
+ return op_end() - getNumTotalBundleOperands() - 3;
+ }
+
+ /// \brief Iteration adapter for range-for loops.
+ iterator_range<op_iterator> arg_operands() {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ /// \brief Return the iterator pointing to the beginning of the argument list.
+ const_op_iterator arg_begin() const { return op_begin(); }
+
+ /// \brief Return the iterator pointing to the end of the argument list.
+ const_op_iterator arg_end() const {
+ // [ invoke args ], [ operand bundles ], normal dest, unwind dest, callee
+ return op_end() - getNumTotalBundleOperands() - 3;
+ }
+
+ /// \brief Iteration adapter for range-for loops.
+ iterator_range<const_op_iterator> arg_operands() const {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ /// \brief Wrappers for getting the \c Use of an invoke argument.
+ const Use &getArgOperandUse(unsigned i) const {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperandUse(i);
+ }
+ Use &getArgOperandUse(unsigned i) {
+ assert(i < getNumArgOperands() && "Out of bounds!");
+ return getOperandUse(i);
+ }
+
+ /// getCallingConv/setCallingConv - Get or set the calling convention of this
+ /// function call.
+ CallingConv::ID getCallingConv() const {
+ return static_cast<CallingConv::ID>(getSubclassDataFromInstruction());
+ }
+ void setCallingConv(CallingConv::ID CC) {
+ auto ID = static_cast<unsigned>(CC);
+ assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
+ setInstructionSubclassData(ID);
+ }
+
+ /// getAttributes - Return the parameter attributes for this invoke.
+ ///
+ const AttributeSet &getAttributes() const { return AttributeList; }
+
+ /// setAttributes - Set the parameter attributes for this invoke.
+ ///
+ void setAttributes(const AttributeSet &Attrs) { AttributeList = Attrs; }
+
+ /// addAttribute - adds the attribute to the list of attributes.
+ void addAttribute(unsigned i, Attribute::AttrKind attr);
+
+ /// removeAttribute - removes the attribute from the list of attributes.
+ void removeAttribute(unsigned i, Attribute attr);
+
+ /// \brief adds the dereferenceable attribute to the list of attributes.
+ void addDereferenceableAttr(unsigned i, uint64_t Bytes);
+
+ /// \brief adds the dereferenceable_or_null attribute to the list of
+ /// attributes.
+ void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
+
+ /// \brief Determine whether this call has the given attribute.
+ bool hasFnAttr(Attribute::AttrKind A) const {
+ assert(A != Attribute::NoBuiltin &&
+ "Use CallInst::isNoBuiltin() to check for Attribute::NoBuiltin");
+ return hasFnAttrImpl(A);
+ }
+
+ /// \brief Determine whether the call or the callee has the given attributes.
+ bool paramHasAttr(unsigned i, Attribute::AttrKind A) const;
+
+ /// \brief Return true if the data operand at index \p i has the attribute \p
+ /// A.
+ ///
+ /// Data operands include invoke arguments and values used in operand bundles,
+ /// but do not include the invokee operand or the two successor blocks.
+ /// This routine dispatches to the underlying AttributeList or the
+ /// OperandBundleUser as appropriate.
+ ///
+ /// The index \p i is interpreted as
+ ///
+ /// \p i == Attribute::ReturnIndex -> the return value
+ /// \p i in [1, arg_size + 1) -> argument number (\p i - 1)
+ /// \p i in [arg_size + 1, data_operand_size + 1) -> bundle operand at index
+ /// (\p i - 1) in the operand list.
+ bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind A) const;
+
+ /// \brief Extract the alignment for a call or parameter (0=unknown).
+ unsigned getParamAlignment(unsigned i) const {
+ return AttributeList.getParamAlignment(i);
+ }
+
+ /// \brief Extract the number of dereferenceable bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableBytes(unsigned i) const {
+ return AttributeList.getDereferenceableBytes(i);
+ }
+
+ /// \brief Extract the number of dereferenceable_or_null bytes for a call or
+ /// parameter (0=unknown).
+ uint64_t getDereferenceableOrNullBytes(unsigned i) const {
+ return AttributeList.getDereferenceableOrNullBytes(i);
+ }
+
+ /// @brief Determine if the parameter or return value is marked with NoAlias
+ /// attribute.
+ /// @param n The parameter to check. 1 is the first parameter, 0 is the
+ /// return value.
+ bool doesNotAlias(unsigned n) const {
+ return AttributeList.hasAttribute(n, Attribute::NoAlias);
+ }
+
+ /// \brief Return true if the call should not be treated as a call to a
+ /// builtin.
+ bool isNoBuiltin() const {
+ // We assert in hasFnAttr if one passes in Attribute::NoBuiltin, so we have
+ // to check it by hand.
+ return hasFnAttrImpl(Attribute::NoBuiltin) &&
+ !hasFnAttrImpl(Attribute::Builtin);
+ }
+
+ /// \brief Return true if the call should not be inlined.
+ bool isNoInline() const { return hasFnAttr(Attribute::NoInline); }
+ void setIsNoInline() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoInline);
+ }
+
+ /// \brief Determine if the call does not access memory.
+ bool doesNotAccessMemory() const {
+ return hasFnAttr(Attribute::ReadNone);
+ }
+ void setDoesNotAccessMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
+ }
+
+ /// \brief Determine if the call does not access or only reads memory.
+ bool onlyReadsMemory() const {
+ return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+ }
+ void setOnlyReadsMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
+ }
+
+ /// @brief Determine if the call accesses memory only through its pointer
+ /// arguments.
+ bool onlyAccessesArgMemory() const {
+ return hasFnAttr(Attribute::ArgMemOnly);
+ }
+ void setOnlyAccessesArgMemory() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::ArgMemOnly);
+ }
+
+ /// \brief Determine if the call cannot return.
+ bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); }
+ void setDoesNotReturn() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoReturn);
+ }
+
+ /// \brief Determine if the call cannot unwind.
+ bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); }
+ void setDoesNotThrow() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
+ }
+
+ /// \brief Determine if the invoke cannot be duplicated.
+ bool cannotDuplicate() const { return hasFnAttr(Attribute::NoDuplicate); }
+ void setCannotDuplicate() {
+ addAttribute(AttributeSet::FunctionIndex, Attribute::NoDuplicate);
+ }
+
+ /// \brief Determine if the call returns a structure through its first
+ /// pointer argument.
+ bool hasStructRetAttr() const {
+ if (getNumArgOperands() == 0)
+ return false;
+
+ // Be friendly and also check the callee.
+ return paramHasAttr(1, Attribute::StructRet);
+ }
+
+ /// \brief Determine if any call argument is an aggregate passed by value.
+ bool hasByValArgument() const {
+ return AttributeList.hasAttrSomewhere(Attribute::ByVal);
+ }
+
+ /// getCalledFunction - Return the function called, or null if this is an
+ /// indirect function invocation.
+ ///
+ Function *getCalledFunction() const {
+ return dyn_cast<Function>(Op<-3>());
+ }
+
+ /// getCalledValue - Get a pointer to the function that is invoked by this
+ /// instruction
+ const Value *getCalledValue() const { return Op<-3>(); }
+ Value *getCalledValue() { return Op<-3>(); }
+
+ /// setCalledFunction - Set the function called.
+ void setCalledFunction(Value* Fn) {
+ setCalledFunction(
+ cast<FunctionType>(cast<PointerType>(Fn->getType())->getElementType()),
+ Fn);
+ }
+ void setCalledFunction(FunctionType *FTy, Value *Fn) {
+ this->FTy = FTy;
+ assert(FTy == cast<FunctionType>(
+ cast<PointerType>(Fn->getType())->getElementType()));
+ Op<-3>() = Fn;
+ }
+
+ // get*Dest - Return the destination basic blocks...
+ BasicBlock *getNormalDest() const {
+ return cast<BasicBlock>(Op<-2>());
+ }
+ BasicBlock *getUnwindDest() const {
+ return cast<BasicBlock>(Op<-1>());
+ }
+ void setNormalDest(BasicBlock *B) {
+ Op<-2>() = reinterpret_cast<Value*>(B);
+ }
+ void setUnwindDest(BasicBlock *B) {
+ Op<-1>() = reinterpret_cast<Value*>(B);
+ }
+
+ /// getLandingPadInst - Get the landingpad instruction from the landing pad
+ /// block (the unwind destination).
+ LandingPadInst *getLandingPadInst() const;
+
+ BasicBlock *getSuccessor(unsigned i) const {
+ assert(i < 2 && "Successor # out of range for invoke!");
+ return i == 0 ? getNormalDest() : getUnwindDest();
+ }
+
+ void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
+ assert(idx < 2 && "Successor # out of range for invoke!");
+ *(&Op<-2>() + idx) = reinterpret_cast<Value*>(NewSucc);
+ }
+
+ unsigned getNumSuccessors() const { return 2; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::Invoke);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+
+ bool hasFnAttrImpl(Attribute::AttrKind A) const;
+
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<InvokeInst> : public VariadicOperandTraits<InvokeInst, 3> {
+};
+
+InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ const Twine &NameStr, Instruction *InsertBefore)
+ : TerminatorInst(Ty->getReturnType(), Instruction::Invoke,
+ OperandTraits<InvokeInst>::op_end(this) - Values, Values,
+ InsertBefore) {
+ init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
+}
+InvokeInst::InvokeInst(Value *Func, BasicBlock *IfNormal,
+ BasicBlock *IfException, ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> Bundles, unsigned Values,
+ const Twine &NameStr, BasicBlock *InsertAtEnd)
+ : TerminatorInst(
+ cast<FunctionType>(cast<PointerType>(Func->getType())
+ ->getElementType())->getReturnType(),
+ Instruction::Invoke, OperandTraits<InvokeInst>::op_end(this) - Values,
+ Values, InsertAtEnd) {
+ init(Func, IfNormal, IfException, Args, Bundles, NameStr);
+}
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InvokeInst, Value)
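+
+// Example usage (illustrative sketch): invoking a callee with two arguments.
+// Callee, A0, A1, NormalBB, UnwindBB, and CurBB are assumed names.
+//
+//   Value *Args[] = {A0, A1};
+//   InvokeInst *II =
+//       InvokeInst::Create(Callee, NormalBB, UnwindBB, Args, "call", CurBB);
+//   II->setCallingConv(CallingConv::Fast);
+//   if (Function *F = II->getCalledFunction())
+//     (void)F; // direct invocation; null for an indirect invoke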
+
+//===----------------------------------------------------------------------===//
+// ResumeInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// ResumeInst - Resume the propagation of an exception.
+///
+class ResumeInst : public TerminatorInst {
+ ResumeInst(const ResumeInst &RI);
+
+ explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
+ ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ ResumeInst *cloneImpl() const;
+
+public:
+ static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
+ return new(1) ResumeInst(Exn, InsertBefore);
+ }
+ static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
+ return new(1) ResumeInst(Exn, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Convenience accessor.
+ Value *getValue() const { return Op<0>(); }
+
+ unsigned getNumSuccessors() const { return 0; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Resume;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<ResumeInst> :
+ public FixedNumOperandTraits<ResumeInst, 1> {
+};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
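+
+// Example usage (illustrative sketch): re-raising an in-flight exception from
+// a landing pad. LP (a LandingPadInst*) and CurBB are assumed names.
+//
+//   ResumeInst::Create(LP, CurBB); // resume <exn struct> %LP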
+
+//===----------------------------------------------------------------------===//
+// CatchSwitchInst Class
+//===----------------------------------------------------------------------===//
+class CatchSwitchInst : public TerminatorInst {
+ void *operator new(size_t, unsigned) = delete;
+ /// ReservedSpace - The number of operands actually allocated. NumOperands is
+ /// the number actually in use.
+ unsigned ReservedSpace;
+ // Operand[0] = Outer scope
+ // Operand[1] = Unwind block destination
+ // Operand[n] = BasicBlock to go to on match
+ CatchSwitchInst(const CatchSwitchInst &CSI);
+ void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
+ void growOperands(unsigned Size);
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) { return User::operator new(s); }
+ /// CatchSwitchInst ctor - Create a new catchswitch instruction, specifying an
+ /// unwind destination. The number of handlers can be specified here to make
+ /// memory allocation more efficient.
+ /// This constructor can also autoinsert before another instruction.
+ CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumHandlers, const Twine &NameStr,
+ Instruction *InsertBefore);
+
+ /// CatchSwitchInst ctor - Create a new catchswitch instruction, specifying an
+ /// unwind destination. The number of handlers can be specified here to make
+ /// memory allocation more efficient.
+ /// This constructor also autoinserts at the end of the specified BasicBlock.
+ CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumHandlers, const Twine &NameStr,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ CatchSwitchInst *cloneImpl() const;
+
+public:
+ static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumHandlers,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
+ InsertBefore);
+ }
+ static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
+ unsigned NumHandlers, const Twine &NameStr,
+ BasicBlock *InsertAtEnd) {
+ return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
+ InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ // Accessor Methods for CatchSwitch stmt
+ Value *getParentPad() const { return getOperand(0); }
+ void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
+
+ // Accessor Methods for CatchSwitch stmt
+ bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+ bool unwindsToCaller() const { return !hasUnwindDest(); }
+ BasicBlock *getUnwindDest() const {
+ if (hasUnwindDest())
+ return cast<BasicBlock>(getOperand(1));
+ return nullptr;
+ }
+ void setUnwindDest(BasicBlock *UnwindDest) {
+ assert(UnwindDest);
+ assert(hasUnwindDest());
+ setOperand(1, UnwindDest);
+ }
+
+ /// getNumHandlers - return the number of 'handlers' in this catchswitch
+ /// instruction, excluding the unwind destination.
+ unsigned getNumHandlers() const {
+ if (hasUnwindDest())
+ return getNumOperands() - 2;
+ return getNumOperands() - 1;
+ }
+
+private:
+ static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
+ static const BasicBlock *handler_helper(const Value *V) {
+ return cast<BasicBlock>(V);
+ }
+
+public:
+ typedef std::pointer_to_unary_function<Value *, BasicBlock *> DerefFnTy;
+ typedef mapped_iterator<op_iterator, DerefFnTy> handler_iterator;
+ typedef iterator_range<handler_iterator> handler_range;
+
+ typedef std::pointer_to_unary_function<const Value *, const BasicBlock *>
+ ConstDerefFnTy;
+ typedef mapped_iterator<const_op_iterator, ConstDerefFnTy>
+ const_handler_iterator;
+ typedef iterator_range<const_handler_iterator> const_handler_range;
+
+ /// Returns an iterator that points to the first handler in CatchSwitchInst.
+ handler_iterator handler_begin() {
+ op_iterator It = op_begin() + 1;
+ if (hasUnwindDest())
+ ++It;
+ return handler_iterator(It, DerefFnTy(handler_helper));
+ }
+ /// Returns an iterator that points to the first handler in the
+ /// CatchSwitchInst.
+ const_handler_iterator handler_begin() const {
+ const_op_iterator It = op_begin() + 1;
+ if (hasUnwindDest())
+ ++It;
+ return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
+ }
+
+ /// Returns a read/write iterator that points one past the last
+ /// handler in the CatchSwitchInst.
+ handler_iterator handler_end() {
+ return handler_iterator(op_end(), DerefFnTy(handler_helper));
+ }
+ /// Returns a read-only iterator that points one past the last handler in
+ /// the CatchSwitchInst.
+ const_handler_iterator handler_end() const {
+ return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
+ }
+
+ /// handlers - iteration adapter for range-for loops.
+ handler_range handlers() {
+ return make_range(handler_begin(), handler_end());
+ }
+
+ /// handlers - iteration adapter for range-for loops.
+ const_handler_range handlers() const {
+ return make_range(handler_begin(), handler_end());
+ }
+
+ /// addHandler - Add an entry to the catchswitch instruction.
+ /// Note:
+ /// This action invalidates handler_end(); the old handler_end() iterator
+ /// will point to the added handler.
+ void addHandler(BasicBlock *Dest);
+
+ unsigned getNumSuccessors() const { return getNumOperands() - 1; }
+ BasicBlock *getSuccessor(unsigned Idx) const {
+ assert(Idx < getNumSuccessors() &&
+ "Successor # out of range for catchswitch!");
+ return cast<BasicBlock>(getOperand(Idx + 1));
+ }
+ void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
+ assert(Idx < getNumSuccessors() &&
+ "Successor # out of range for catchswitch!");
+ setOperand(Idx + 1, NewSucc);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::CatchSwitch;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned Idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned Idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
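+
+// Example usage (illustrative sketch): a function-level catchswitch with one
+// handler that unwinds to the caller. Ctx, HandlerBB, and CurBB are assumed
+// names.
+//
+//   CatchSwitchInst *CS = CatchSwitchInst::Create(
+//       ConstantTokenNone::get(Ctx), /*UnwindDest=*/nullptr,
+//       /*NumHandlers=*/1, "cs", CurBB);
+//   CS->addHandler(HandlerBB);
+//   for (BasicBlock *H : CS->handlers())
+//     (void)H; // each handler must begin with a catchpad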
+
+//===----------------------------------------------------------------------===//
+// CleanupPadInst Class
+//===----------------------------------------------------------------------===//
+class CleanupPadInst : public FuncletPadInst {
+private:
+ explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
+ unsigned Values, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
+ NameStr, InsertBefore) {}
+ explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
+ unsigned Values, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
+ NameStr, InsertAtEnd) {}
+
+public:
+ static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ unsigned Values = 1 + Args.size();
+ return new (Values)
+ CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
+ }
+ static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ unsigned Values = 1 + Args.size();
+ return new (Values)
+ CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
+ }
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::CleanupPad;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
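+
+// Example usage (illustrative sketch): opening a function-level cleanup scope
+// at the start of a cleanup block. Ctx and CleanupBB are assumed names.
+//
+//   CleanupPadInst *CP = CleanupPadInst::Create(
+//       ConstantTokenNone::get(Ctx), /*Args=*/None, "cleanup", CleanupBB);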
+
+//===----------------------------------------------------------------------===//
+// CatchPadInst Class
+//===----------------------------------------------------------------------===//
+class CatchPadInst : public FuncletPadInst {
+private:
+ explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
+ unsigned Values, const Twine &NameStr,
+ Instruction *InsertBefore)
+ : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
+ NameStr, InsertBefore) {}
+ explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
+ unsigned Values, const Twine &NameStr,
+ BasicBlock *InsertAtEnd)
+ : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
+ NameStr, InsertAtEnd) {}
+
+public:
+ static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
+ const Twine &NameStr = "",
+ Instruction *InsertBefore = nullptr) {
+ unsigned Values = 1 + Args.size();
+ return new (Values)
+ CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
+ }
+ static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
+ const Twine &NameStr, BasicBlock *InsertAtEnd) {
+ unsigned Values = 1 + Args.size();
+ return new (Values)
+ CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
+ }
+
+ /// Convenience accessors
+ CatchSwitchInst *getCatchSwitch() const {
+ return cast<CatchSwitchInst>(Op<-1>());
+ }
+ void setCatchSwitch(Value *CatchSwitch) {
+ assert(CatchSwitch);
+ Op<-1>() = CatchSwitch;
+ }
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::CatchPad;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
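+
+// Example usage (illustrative sketch): a catchpad tied to a catchswitch. CS
+// (a CatchSwitchInst*), NullPtr, and HandlerBB are assumed names; the
+// argument list is personality-specific.
+//
+//   Value *Args[] = {NullPtr, NullPtr, NullPtr};
+//   CatchPadInst *CPI = CatchPadInst::Create(CS, Args, "catch", HandlerBB);
+//   (void)CPI->getCatchSwitch(); // CS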
+
+//===----------------------------------------------------------------------===//
+// CatchReturnInst Class
+//===----------------------------------------------------------------------===//
+
+class CatchReturnInst : public TerminatorInst {
+ CatchReturnInst(const CatchReturnInst &RI);
+
+ void init(Value *CatchPad, BasicBlock *BB);
+ CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
+ CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ CatchReturnInst *cloneImpl() const;
+
+public:
+ static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
+ Instruction *InsertBefore = nullptr) {
+ assert(CatchPad);
+ assert(BB);
+ return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
+ }
+ static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
+ BasicBlock *InsertAtEnd) {
+ assert(CatchPad);
+ assert(BB);
+ return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ /// Convenience accessors.
+ CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
+ void setCatchPad(CatchPadInst *CatchPad) {
+ assert(CatchPad);
+ Op<0>() = CatchPad;
+ }
+
+ BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
+ void setSuccessor(BasicBlock *NewSucc) {
+ assert(NewSucc);
+ Op<1>() = NewSucc;
+ }
+ unsigned getNumSuccessors() const { return 1; }
+
+ Value *getParentPad() const {
+ return getCatchPad()->getCatchSwitch()->getParentPad();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::CatchRet);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned Idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned Idx, BasicBlock *B) override;
+};
+
+template <>
+struct OperandTraits<CatchReturnInst>
+ : public FixedNumOperandTraits<CatchReturnInst, 2> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
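+
+// Example usage (illustrative sketch): leaving a catch handler. CPI (a
+// CatchPadInst*), ContBB, and HandlerBB are assumed names.
+//
+//   CatchReturnInst::Create(CPI, /*BB=*/ContBB, HandlerBB); // catchret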
+
+//===----------------------------------------------------------------------===//
+// CleanupReturnInst Class
+//===----------------------------------------------------------------------===//
+
+class CleanupReturnInst : public TerminatorInst {
+private:
+ CleanupReturnInst(const CleanupReturnInst &RI);
+
+ void init(Value *CleanupPad, BasicBlock *UnwindBB);
+ CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
+ Instruction *InsertBefore = nullptr);
+ CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
+ BasicBlock *InsertAtEnd);
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ CleanupReturnInst *cloneImpl() const;
+
+public:
+ static CleanupReturnInst *Create(Value *CleanupPad,
+ BasicBlock *UnwindBB = nullptr,
+ Instruction *InsertBefore = nullptr) {
+ assert(CleanupPad);
+ unsigned Values = 1;
+ if (UnwindBB)
+ ++Values;
+ return new (Values)
+ CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
+ }
+ static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
+ BasicBlock *InsertAtEnd) {
+ assert(CleanupPad);
+ unsigned Values = 1;
+ if (UnwindBB)
+ ++Values;
+ return new (Values)
+ CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
+ }
+
+ /// Provide fast operand accessors
+ DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+
+ bool hasUnwindDest() const { return getSubclassDataFromInstruction() & 1; }
+ bool unwindsToCaller() const { return !hasUnwindDest(); }
+
+ /// Convenience accessor.
+ CleanupPadInst *getCleanupPad() const {
+ return cast<CleanupPadInst>(Op<0>());
+ }
+ void setCleanupPad(CleanupPadInst *CleanupPad) {
+ assert(CleanupPad);
+ Op<0>() = CleanupPad;
+ }
+
+ unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
+
+ BasicBlock *getUnwindDest() const {
+ return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
+ }
+ void setUnwindDest(BasicBlock *NewDest) {
+ assert(NewDest);
+ assert(hasUnwindDest());
+ Op<1>() = NewDest;
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return (I->getOpcode() == Instruction::CleanupRet);
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned Idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned Idx, BasicBlock *B) override;
+
+ // Shadow Instruction::setInstructionSubclassData with a private forwarding
+ // method so that subclasses cannot accidentally use it.
+ void setInstructionSubclassData(unsigned short D) {
+ Instruction::setInstructionSubclassData(D);
+ }
+};
+
+template <>
+struct OperandTraits<CleanupReturnInst>
+ : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
+
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
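+
+// Example usage (illustrative sketch): closing a cleanup scope and unwinding
+// to the caller. CP (a CleanupPadInst*) and CleanupBB are assumed names; pass
+// a block instead of nullptr to continue unwinding there.
+//
+//   CleanupReturnInst::Create(CP, /*UnwindBB=*/nullptr, CleanupBB);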
+
+//===----------------------------------------------------------------------===//
+// UnreachableInst Class
+//===----------------------------------------------------------------------===//
+
+//===---------------------------------------------------------------------------
+/// UnreachableInst - This instruction has undefined behavior. In particular,
+/// the
+/// presence of this instruction indicates some higher level knowledge that the
+/// end of the block cannot be reached.
+///
+class UnreachableInst : public TerminatorInst {
+ void *operator new(size_t, unsigned) = delete;
+
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ UnreachableInst *cloneImpl() const;
+
+public:
+ // allocate space for exactly zero operands
+ void *operator new(size_t s) {
+ return User::operator new(s, 0);
+ }
+ explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
+ explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
+
+ unsigned getNumSuccessors() const { return 0; }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Unreachable;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+
+private:
+ BasicBlock *getSuccessorV(unsigned idx) const override;
+ unsigned getNumSuccessorsV() const override;
+ void setSuccessorV(unsigned idx, BasicBlock *B) override;
+};
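+
+// Example usage (illustrative sketch): terminating a block that control can
+// never fall off the end of, e.g. after a noreturn call. Ctx and CurBB are
+// assumed names.
+//
+//   new UnreachableInst(Ctx, CurBB); // appended at the end of CurBB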
+
+//===----------------------------------------------------------------------===//
+// TruncInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a truncation of integer types.
+class TruncInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical TruncInst
+ TruncInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ TruncInst(
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The (smaller) type to truncate to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ TruncInst(
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The (smaller) type to truncate to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Trunc;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
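+
+// Example usage (illustrative sketch; the pattern applies equally to the
+// extension and FP cast classes that follow): truncating an i64 to i32.
+// Ctx, V (an i64 value), and CurBB are assumed names.
+//
+//   Value *Lo = new TruncInst(V, Type::getInt32Ty(Ctx), "lo", CurBB);
+//   assert(isa<TruncInst>(Lo) && "classof dispatches on the opcode");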
+
+//===----------------------------------------------------------------------===//
+// ZExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents zero extension of integer types.
+class ZExtInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical ZExtInst
+ ZExtInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ ZExtInst(
+ Value *S, ///< The value to be zero extended
+ Type *Ty, ///< The type to zero extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end semantics.
+ ZExtInst(
+ Value *S, ///< The value to be zero extended
+ Type *Ty, ///< The type to zero extend to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == ZExt;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// SExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a sign extension of integer types.
+class SExtInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical SExtInst
+ SExtInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ SExtInst(
+ Value *S, ///< The value to be sign extended
+ Type *Ty, ///< The type to sign extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ SExtInst(
+ Value *S, ///< The value to be sign extended
+ Type *Ty, ///< The type to sign extend to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == SExt;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// FPTruncInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a truncation of floating point types.
+class FPTruncInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical FPTruncInst
+ FPTruncInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ FPTruncInst(
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The type to truncate to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ FPTruncInst(
+ Value *S, ///< The value to be truncated
+ Type *Ty, ///< The type to truncate to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == FPTrunc;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// FPExtInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents an extension of floating point types.
+class FPExtInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical FPExtInst
+ FPExtInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ FPExtInst(
+ Value *S, ///< The value to be extended
+ Type *Ty, ///< The type to extend to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ FPExtInst(
+ Value *S, ///< The value to be extended
+ Type *Ty, ///< The type to extend to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == FPExt;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// UIToFPInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from unsigned integer to floating
+/// point.
+class UIToFPInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical UIToFPInst
+ UIToFPInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ UIToFPInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ UIToFPInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == UIToFP;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// SIToFPInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from signed integer to floating point.
+class SIToFPInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical SIToFPInst
+ SIToFPInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ SIToFPInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ SIToFPInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == SIToFP;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// FPToUIInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from floating point to unsigned
+/// integer.
+class FPToUIInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical FPToUIInst
+ FPToUIInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ FPToUIInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ FPToUIInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == FPToUI;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// FPToSIInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from floating point to signed integer.
+class FPToSIInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical FPToSIInst
+ FPToSIInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ FPToSIInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ FPToSIInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == FPToSI;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// IntToPtrInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from an integer to a pointer.
+class IntToPtrInst : public CastInst {
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ IntToPtrInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ IntToPtrInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical IntToPtrInst
+ IntToPtrInst *cloneImpl() const;
+
+ /// \brief Returns the address space of this instruction's pointer type.
+ unsigned getAddressSpace() const {
+ return getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == IntToPtr;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// PtrToIntInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a cast from a pointer to an integer.
+class PtrToIntInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical PtrToIntInst
+ PtrToIntInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ PtrToIntInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ PtrToIntInst(
+ Value *S, ///< The value to be converted
+ Type *Ty, ///< The type to convert to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ /// \brief Gets the pointer operand.
+ Value *getPointerOperand() { return getOperand(0); }
+ /// \brief Gets the pointer operand.
+ const Value *getPointerOperand() const { return getOperand(0); }
+ /// \brief Gets the operand index of the pointer operand.
+ static unsigned getPointerOperandIndex() { return 0U; }
+
+ /// \brief Returns the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperand()->getType()->getPointerAddressSpace();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == PtrToInt;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
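+
+// Sketch, assuming a PtrToIntInst *PTI: the helpers above save re-deriving
+// the operand's pointer type by hand.
+//
+//   Value *Ptr = PTI->getPointerOperand();
+//   unsigned AS = PTI->getPointerAddressSpace(); // Ptr's address space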
+
+//===----------------------------------------------------------------------===//
+// BitCastInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a no-op cast from one type to another.
+class BitCastInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical BitCastInst
+ BitCastInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ BitCastInst(
+ Value *S, ///< The value to be cast
+ Type *Ty, ///< The type to cast to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ BitCastInst(
+ Value *S, ///< The value to be cast
+ Type *Ty, ///< The type to cast to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == BitCast;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// AddrSpaceCastInst Class
+//===----------------------------------------------------------------------===//
+
+/// \brief This class represents a conversion between pointers from
+/// one address space to another.
+class AddrSpaceCastInst : public CastInst {
+protected:
+ // Note: Instruction needs to be a friend here to call cloneImpl.
+ friend class Instruction;
+ /// \brief Clone an identical AddrSpaceCastInst
+ AddrSpaceCastInst *cloneImpl() const;
+
+public:
+ /// \brief Constructor with insert-before-instruction semantics
+ AddrSpaceCastInst(
+ Value *S, ///< The value to be cast
+ Type *Ty, ///< The type to cast to
+ const Twine &NameStr = "", ///< A name for the new instruction
+ Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
+ );
+
+ /// \brief Constructor with insert-at-end-of-block semantics
+ AddrSpaceCastInst(
+ Value *S, ///< The value to be cast
+ Type *Ty, ///< The type to cast to
+ const Twine &NameStr, ///< A name for the new instruction
+ BasicBlock *InsertAtEnd ///< The block to insert the instruction into
+ );
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == AddrSpaceCast;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicInst.h b/contrib/llvm/include/llvm/IR/IntrinsicInst.h
new file mode 100644
index 0000000..169bcc0
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicInst.h
@@ -0,0 +1,410 @@
+//===-- llvm/IntrinsicInst.h - Intrinsic Instruction Wrappers ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes that make it really easy to deal with intrinsic
+// functions with the isa/dyn_cast family of functions. In particular, this
+// allows you to do things like:
+//
+// if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(Inst))
+// ... MCI->getDest() ... MCI->getSource() ...
+//
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class. Note that none of these classes
+// has state or virtual methods, which is an important part of making this
+// gross (but neat) hack work.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INTRINSICINST_H
+#define LLVM_IR_INTRINSICINST_H
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Metadata.h"
+
+namespace llvm {
+ /// IntrinsicInst - A useful wrapper class for inspecting calls to intrinsic
+ /// functions. This allows the standard isa/dyn_cast/cast functionality to
+ /// work with calls to intrinsic functions.
+ class IntrinsicInst : public CallInst {
+ IntrinsicInst() = delete;
+ IntrinsicInst(const IntrinsicInst&) = delete;
+ void operator=(const IntrinsicInst&) = delete;
+ public:
+ /// getIntrinsicID - Return the intrinsic ID of this intrinsic.
+ ///
+ Intrinsic::ID getIntrinsicID() const {
+ return getCalledFunction()->getIntrinsicID();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const CallInst *I) {
+ if (const Function *CF = I->getCalledFunction())
+ return CF->isIntrinsic();
+ return false;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<CallInst>(V) && classof(cast<CallInst>(V));
+ }
+ };
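+
+ // A minimal sketch of the intended pattern, assuming an Instruction *I from
+ // surrounding code:
+ //
+ //   if (auto *II = dyn_cast<IntrinsicInst>(I))
+ //     switch (II->getIntrinsicID()) {
+ //     case Intrinsic::memset: /* ... */ break;
+ //     default: break;
+ //     }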
+
+ /// DbgInfoIntrinsic - This is the common base class for debug info intrinsics
+ ///
+ class DbgInfoIntrinsic : public IntrinsicInst {
+ public:
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::dbg_declare:
+ case Intrinsic::dbg_value:
+ return true;
+ default: return false;
+ }
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ static Value *StripCast(Value *C);
+ };
+
+ /// DbgDeclareInst - This represents the llvm.dbg.declare instruction.
+ ///
+ class DbgDeclareInst : public DbgInfoIntrinsic {
+ public:
+ Value *getAddress() const;
+ DILocalVariable *getVariable() const {
+ return cast<DILocalVariable>(getRawVariable());
+ }
+ DIExpression *getExpression() const {
+ return cast<DIExpression>(getRawExpression());
+ }
+
+ Metadata *getRawVariable() const {
+ return cast<MetadataAsValue>(getArgOperand(1))->getMetadata();
+ }
+ Metadata *getRawExpression() const {
+ return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_declare;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
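+
+ // Sketch, assuming a DbgDeclareInst *DDI: the typed accessors unwrap the
+ // raw MetadataAsValue operands shown above.
+ //
+ //   DILocalVariable *Var = DDI->getVariable();
+ //   StringRef Name = Var->getName(); // source-level variable name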
+
+ /// DbgValueInst - This represents the llvm.dbg.value instruction.
+ ///
+ class DbgValueInst : public DbgInfoIntrinsic {
+ public:
+ const Value *getValue() const;
+ Value *getValue();
+ uint64_t getOffset() const {
+ return cast<ConstantInt>(
+ const_cast<Value*>(getArgOperand(1)))->getZExtValue();
+ }
+ DILocalVariable *getVariable() const {
+ return cast<DILocalVariable>(getRawVariable());
+ }
+ DIExpression *getExpression() const {
+ return cast<DIExpression>(getRawExpression());
+ }
+
+ Metadata *getRawVariable() const {
+ return cast<MetadataAsValue>(getArgOperand(2))->getMetadata();
+ }
+ Metadata *getRawExpression() const {
+ return cast<MetadataAsValue>(getArgOperand(3))->getMetadata();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::dbg_value;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+ /// MemIntrinsic - This is the common base class for memset/memcpy/memmove.
+ ///
+ class MemIntrinsic : public IntrinsicInst {
+ public:
+ Value *getRawDest() const { return const_cast<Value*>(getArgOperand(0)); }
+ const Use &getRawDestUse() const { return getArgOperandUse(0); }
+ Use &getRawDestUse() { return getArgOperandUse(0); }
+
+ Value *getLength() const { return const_cast<Value*>(getArgOperand(2)); }
+ const Use &getLengthUse() const { return getArgOperandUse(2); }
+ Use &getLengthUse() { return getArgOperandUse(2); }
+
+ ConstantInt *getAlignmentCst() const {
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(3)));
+ }
+
+ unsigned getAlignment() const {
+ return getAlignmentCst()->getZExtValue();
+ }
+
+ ConstantInt *getVolatileCst() const {
+ return cast<ConstantInt>(const_cast<Value*>(getArgOperand(4)));
+ }
+ bool isVolatile() const {
+ return !getVolatileCst()->isZero();
+ }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
+
+ /// getDest - This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ /// set* - Set the specified arguments of the instruction.
+ ///
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(0, Ptr);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(2, L);
+ }
+
+ void setAlignment(Constant* A) {
+ setArgOperand(3, A);
+ }
+
+ void setVolatile(Constant* V) {
+ setArgOperand(4, V);
+ }
+
+ Type *getAlignmentType() const {
+ return getArgOperand(3)->getType();
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ switch (I->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memmove:
+ case Intrinsic::memset:
+ return true;
+ default: return false;
+ }
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
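+
+ // Sketch, assuming a MemIntrinsic *MI: the accessors map directly onto the
+ // argument list (dest = 0, length = 2, alignment = 3, volatile = 4 in this
+ // revision).
+ //
+ //   Value *Dst = MI->getDest(); // dest with pointer casts stripped
+ //   if (auto *Len = dyn_cast<ConstantInt>(MI->getLength())) {
+ //     uint64_t Bytes = Len->getZExtValue();
+ //     (void)Bytes; // a constant-length fast path could go here
+ //   }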
+
+ /// MemSetInst - This class wraps the llvm.memset intrinsic.
+ ///
+ class MemSetInst : public MemIntrinsic {
+ public:
+ /// get* - Return the arguments to the instruction.
+ ///
+ Value *getValue() const { return const_cast<Value*>(getArgOperand(1)); }
+ const Use &getValueUse() const { return getArgOperandUse(1); }
+ Use &getValueUse() { return getArgOperandUse(1); }
+
+ void setValue(Value *Val) {
+ assert(getValue()->getType() == Val->getType() &&
+ "setValue called with value of wrong type!");
+ setArgOperand(1, Val);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memset;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+ /// MemTransferInst - This class wraps the llvm.memcpy/memmove intrinsics.
+ ///
+ class MemTransferInst : public MemIntrinsic {
+ public:
+ /// get* - Return the arguments to the instruction.
+ ///
+ Value *getRawSource() const { return const_cast<Value*>(getArgOperand(1)); }
+ const Use &getRawSourceUse() const { return getArgOperandUse(1); }
+ Use &getRawSourceUse() { return getArgOperandUse(1); }
+
+ /// getSource - This is just like getRawSource, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+ unsigned getSourceAddressSpace() const {
+ return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+ }
+
+ void setSource(Value *Ptr) {
+ assert(getRawSource()->getType() == Ptr->getType() &&
+ "setSource called with pointer of wrong type!");
+ setArgOperand(1, Ptr);
+ }
+
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy ||
+ I->getIntrinsicID() == Intrinsic::memmove;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+
+ /// MemCpyInst - This class wraps the llvm.memcpy intrinsic.
+ ///
+ class MemCpyInst : public MemTransferInst {
+ public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memcpy;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+ /// MemMoveInst - This class wraps the llvm.memmove intrinsic.
+ ///
+ class MemMoveInst : public MemTransferInst {
+ public:
+ // Methods for support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::memmove;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+ };
+
+ /// VAStartInst - This represents the llvm.va_start intrinsic.
+ ///
+ class VAStartInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vastart;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VAEndInst - This represents the llvm.va_end intrinsic.
+ ///
+ class VAEndInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vaend;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getArgList() const { return const_cast<Value*>(getArgOperand(0)); }
+ };
+
+ /// VACopyInst - This represents the llvm.va_copy intrinsic.
+ ///
+ class VACopyInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::vacopy;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ Value *getDest() const { return const_cast<Value*>(getArgOperand(0)); }
+ Value *getSrc() const { return const_cast<Value*>(getArgOperand(1)); }
+ };
+
+ /// This represents the llvm.instrprof_increment intrinsic.
+ class InstrProfIncrementInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_increment;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ GlobalVariable *getName() const {
+ return cast<GlobalVariable>(
+ const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+ }
+
+ ConstantInt *getHash() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+ }
+
+ ConstantInt *getNumCounters() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(2)));
+ }
+
+ ConstantInt *getIndex() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ }
+ };
+
+ /// This represents the llvm.instrprof_value_profile intrinsic.
+ class InstrProfValueProfileInst : public IntrinsicInst {
+ public:
+ static inline bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::instrprof_value_profile;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+
+ GlobalVariable *getName() const {
+ return cast<GlobalVariable>(
+ const_cast<Value *>(getArgOperand(0))->stripPointerCasts());
+ }
+
+ ConstantInt *getHash() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(1)));
+ }
+
+ Value *getTargetValue() const {
+ return cast<Value>(const_cast<Value *>(getArgOperand(2)));
+ }
+
+ ConstantInt *getValueKind() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(3)));
+ }
+
+ // Returns the value site index.
+ ConstantInt *getIndex() const {
+ return cast<ConstantInt>(const_cast<Value *>(getArgOperand(4)));
+ }
+ };
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Intrinsics.h b/contrib/llvm/include/llvm/IR/Intrinsics.h
new file mode 100644
index 0000000..314e2aa
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Intrinsics.h
@@ -0,0 +1,133 @@
+//===-- llvm/Intrinsics.h - LLVM Intrinsic Function Handling ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of enums which allow processing of intrinsic
+// functions. Values of these enum types are returned by
+// Function::getIntrinsicID.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_INTRINSICS_H
+#define LLVM_IR_INTRINSICS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include <string>
+
+namespace llvm {
+
+class Type;
+class FunctionType;
+class Function;
+class LLVMContext;
+class Module;
+class AttributeSet;
+
+/// This namespace contains an enum with a value for every intrinsic/builtin
+/// function known by LLVM. The enum values are returned by
+/// Function::getIntrinsicID().
+namespace Intrinsic {
+ enum ID : unsigned {
+ not_intrinsic = 0, // Must be zero
+
+ // Get the intrinsic enums generated from Intrinsics.td
+#define GET_INTRINSIC_ENUM_VALUES
+#include "llvm/IR/Intrinsics.gen"
+#undef GET_INTRINSIC_ENUM_VALUES
+ , num_intrinsics
+ };
+
+ /// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
+ std::string getName(ID id, ArrayRef<Type*> Tys = None);
+
+ /// Return the function type for an intrinsic.
+ FunctionType *getType(LLVMContext &Context, ID id,
+ ArrayRef<Type*> Tys = None);
+
+ /// Returns true if the intrinsic can be overloaded.
+ bool isOverloaded(ID id);
+
+ /// Returns true if the intrinsic is a leaf, i.e. it does not make any calls
+ /// itself. Most intrinsics are leaves, the exceptions being the patchpoint
+ /// and statepoint intrinsics. These call (or invoke) their "target" argument.
+ bool isLeaf(ID id);
+
+ /// Return the attributes for an intrinsic.
+ AttributeSet getAttributes(LLVMContext &C, ID id);
+
+ /// Create or insert an LLVM Function declaration for an intrinsic, and return
+ /// it.
+ ///
+ /// The Tys parameter is for intrinsics with overloaded types (e.g., those
+ /// using iAny, fAny, vAny, or iPTRAny). For a declaration of an overloaded
+ /// intrinsic, Tys must provide exactly one type for each overloaded type in
+ /// the intrinsic.
+ Function *getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys = None);
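+
+ // A minimal sketch, assuming a Module *M and LLVMContext &Ctx: for an
+ // overloaded intrinsic such as llvm.ctpop, Tys supplies the one concrete
+ // type that resolves the overload.
+ //
+ //   Function *F =
+ //       getDeclaration(M, Intrinsic::ctpop, {Type::getInt32Ty(Ctx)});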
+
+ /// Map a GCC builtin name to an intrinsic ID.
+ ID getIntrinsicForGCCBuiltin(const char *Prefix, const char *BuiltinName);
+
+ /// Map a MS builtin name to an intrinsic ID.
+ ID getIntrinsicForMSBuiltin(const char *Prefix, const char *BuiltinName);
+
+ /// This is a type descriptor which explains the type requirements of an
+ /// intrinsic. This is returned by getIntrinsicInfoTableEntries.
+ struct IITDescriptor {
+ enum IITDescriptorKind {
+ Void, VarArg, MMX, Token, Metadata, Half, Float, Double,
+ Integer, Vector, Pointer, Struct,
+ Argument, ExtendArgument, TruncArgument, HalfVecArgument,
+ SameVecWidthArgument, PtrToArgument, VecOfPtrsToElt
+ } Kind;
+
+ union {
+ unsigned Integer_Width;
+ unsigned Float_Width;
+ unsigned Vector_Width;
+ unsigned Pointer_AddressSpace;
+ unsigned Struct_NumElements;
+ unsigned Argument_Info;
+ };
+
+ enum ArgKind {
+ AK_Any,
+ AK_AnyInteger,
+ AK_AnyFloat,
+ AK_AnyVector,
+ AK_AnyPointer
+ };
+ unsigned getArgumentNumber() const {
+ assert(Kind == Argument || Kind == ExtendArgument ||
+ Kind == TruncArgument || Kind == HalfVecArgument ||
+ Kind == SameVecWidthArgument || Kind == PtrToArgument ||
+ Kind == VecOfPtrsToElt);
+ return Argument_Info >> 3;
+ }
+ ArgKind getArgumentKind() const {
+ assert(Kind == Argument || Kind == ExtendArgument ||
+ Kind == TruncArgument || Kind == HalfVecArgument ||
+ Kind == SameVecWidthArgument || Kind == PtrToArgument ||
+ Kind == VecOfPtrsToElt);
+ return (ArgKind)(Argument_Info & 7);
+ }
+
+ static IITDescriptor get(IITDescriptorKind K, unsigned Field) {
+ IITDescriptor Result = { K, { Field } };
+ return Result;
+ }
+ };
+
+ /// Decode the IIT table entries for the specified intrinsic into the given
+ /// array of IITDescriptors.
+ void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T);
+
+} // End Intrinsic namespace
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Intrinsics.td b/contrib/llvm/include/llvm/IR/Intrinsics.td
new file mode 100644
index 0000000..5a95ddc
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Intrinsics.td
@@ -0,0 +1,671 @@
+//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of all LLVM intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/CodeGen/ValueTypes.td"
+
+//===----------------------------------------------------------------------===//
+// Properties we keep track of for intrinsics.
+//===----------------------------------------------------------------------===//
+
+class IntrinsicProperty;
+
+// Intr*Mem - Memory properties. An intrinsic is allowed to have at most one of
+// these properties set. They are listed from the most aggressive (best to use
+// if correct) to the least aggressive. If no property is set, the worst case
+// is assumed (it may read and write any memory it can get access to and it may
+// have other side effects).
+
+// IntrNoMem - The intrinsic does not access memory or have any other side
+// effects. It may be CSE'd, deleted if dead, etc.
+def IntrNoMem : IntrinsicProperty;
+
+// IntrReadArgMem - This intrinsic reads only from memory that one of its
+// pointer-typed arguments points to, but may read an unspecified amount.
+def IntrReadArgMem : IntrinsicProperty;
+
+// IntrReadMem - This intrinsic reads from unspecified memory, so it cannot be
+// moved across stores. However, it can be reordered otherwise and can be
+// deleted if dead.
+def IntrReadMem : IntrinsicProperty;
+
+// IntrReadWriteArgMem - This intrinsic reads and writes only from memory that
+// one of its arguments points to, but may access an unspecified amount. The
+// reads and writes may be volatile, but except for this it has no other side
+// effects.
+def IntrReadWriteArgMem : IntrinsicProperty;
+
+// Commutative - This intrinsic is commutative: X op Y == Y op X.
+def Commutative : IntrinsicProperty;
+
+// Throws - This intrinsic can throw.
+def Throws : IntrinsicProperty;
+
+// NoCapture - The specified argument pointer is not captured by the intrinsic.
+class NoCapture<int argNo> : IntrinsicProperty {
+ int ArgNo = argNo;
+}
+
+// ReadOnly - The specified argument pointer is not written to through the
+// pointer by the intrinsic.
+class ReadOnly<int argNo> : IntrinsicProperty {
+ int ArgNo = argNo;
+}
+
+// ReadNone - The specified argument pointer is not dereferenced by the
+// intrinsic.
+class ReadNone<int argNo> : IntrinsicProperty {
+ int ArgNo = argNo;
+}
+
+def IntrNoReturn : IntrinsicProperty;
+
+// IntrNoDuplicate - Calls to this intrinsic cannot be duplicated.
+// Parallels the noduplicate attribute on LLVM IR functions.
+def IntrNoDuplicate : IntrinsicProperty;
+
+// IntrConvergent - Calls to this intrinsic are convergent and may not be made
+// control-dependent on any additional values.
+// Parallels the convergent attribute on LLVM IR functions.
+def IntrConvergent : IntrinsicProperty;
+
+//===----------------------------------------------------------------------===//
+// Types used by intrinsics.
+//===----------------------------------------------------------------------===//
+
+class LLVMType<ValueType vt> {
+ ValueType VT = vt;
+}
+
+class LLVMQualPointerType<LLVMType elty, int addrspace>
+ : LLVMType<iPTR>{
+ LLVMType ElTy = elty;
+ int AddrSpace = addrspace;
+}
+
+class LLVMPointerType<LLVMType elty>
+ : LLVMQualPointerType<elty, 0>;
+
+class LLVMAnyPointerType<LLVMType elty>
+ : LLVMType<iPTRAny>{
+ LLVMType ElTy = elty;
+}
+
+// Match the type of another intrinsic parameter. Number is an index into the
+// list of overloaded types for the intrinsic, excluding all the fixed types.
+// The Number value must refer to a previously listed type. For example:
+// Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]>
+// has two overloaded types, the 2nd and 3rd arguments. LLVMMatchType<0>
+// refers to the first overloaded type, which is the 2nd argument.
+class LLVMMatchType<int num>
+ : LLVMType<OtherVT>{
+ int Number = num;
+}
+
+// Match the type of another intrinsic parameter that is expected to be based on
+// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
+// be twice as wide or half as wide as the other type. This is only useful when
+// the intrinsic is overloaded, so the matched type should be declared as iAny.
+class LLVMExtendedType<int num> : LLVMMatchType<num>;
+class LLVMTruncatedType<int num> : LLVMMatchType<num>;
+class LLVMVectorSameWidth<int num, LLVMType elty>
+ : LLVMMatchType<num> {
+ ValueType ElTy = elty.VT;
+}
+class LLVMPointerTo<int num> : LLVMMatchType<num>;
+class LLVMVectorOfPointersToElt<int num> : LLVMMatchType<num>;
+
+// Match the type of another intrinsic parameter that is expected to be a
+// vector type, but change the element count to be half as many
+class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;
+
+def llvm_void_ty : LLVMType<isVoid>;
+def llvm_any_ty : LLVMType<Any>;
+def llvm_anyint_ty : LLVMType<iAny>;
+def llvm_anyfloat_ty : LLVMType<fAny>;
+def llvm_anyvector_ty : LLVMType<vAny>;
+def llvm_i1_ty : LLVMType<i1>;
+def llvm_i8_ty : LLVMType<i8>;
+def llvm_i16_ty : LLVMType<i16>;
+def llvm_i32_ty : LLVMType<i32>;
+def llvm_i64_ty : LLVMType<i64>;
+def llvm_half_ty : LLVMType<f16>;
+def llvm_float_ty : LLVMType<f32>;
+def llvm_double_ty : LLVMType<f64>;
+def llvm_f80_ty : LLVMType<f80>;
+def llvm_f128_ty : LLVMType<f128>;
+def llvm_ppcf128_ty : LLVMType<ppcf128>;
+def llvm_ptr_ty : LLVMPointerType<llvm_i8_ty>; // i8*
+def llvm_ptrptr_ty : LLVMPointerType<llvm_ptr_ty>; // i8**
+def llvm_anyptr_ty : LLVMAnyPointerType<llvm_i8_ty>; // (space)i8*
+def llvm_empty_ty : LLVMType<OtherVT>; // { }
+def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>; // { }*
+def llvm_metadata_ty : LLVMType<MetadataVT>; // !{...}
+def llvm_token_ty : LLVMType<token>; // token
+
+def llvm_x86mmx_ty : LLVMType<x86mmx>;
+def llvm_ptrx86mmx_ty : LLVMPointerType<llvm_x86mmx_ty>; // <1 x i64>*
+
+def llvm_v2i1_ty : LLVMType<v2i1>; // 2 x i1
+def llvm_v4i1_ty : LLVMType<v4i1>; // 4 x i1
+def llvm_v8i1_ty : LLVMType<v8i1>; // 8 x i1
+def llvm_v16i1_ty : LLVMType<v16i1>; // 16 x i1
+def llvm_v32i1_ty : LLVMType<v32i1>; // 32 x i1
+def llvm_v64i1_ty : LLVMType<v64i1>; // 64 x i1
+def llvm_v512i1_ty : LLVMType<v512i1>; // 512 x i1
+def llvm_v1024i1_ty : LLVMType<v1024i1>; // 1024 x i1
+
+def llvm_v1i8_ty : LLVMType<v1i8>; // 1 x i8
+def llvm_v2i8_ty : LLVMType<v2i8>; // 2 x i8
+def llvm_v4i8_ty : LLVMType<v4i8>; // 4 x i8
+def llvm_v8i8_ty : LLVMType<v8i8>; // 8 x i8
+def llvm_v16i8_ty : LLVMType<v16i8>; // 16 x i8
+def llvm_v32i8_ty : LLVMType<v32i8>; // 32 x i8
+def llvm_v64i8_ty : LLVMType<v64i8>; // 64 x i8
+def llvm_v128i8_ty : LLVMType<v128i8>; // 128 x i8
+def llvm_v256i8_ty : LLVMType<v256i8>; // 256 x i8
+
+def llvm_v1i16_ty : LLVMType<v1i16>; // 1 x i16
+def llvm_v2i16_ty : LLVMType<v2i16>; // 2 x i16
+def llvm_v4i16_ty : LLVMType<v4i16>; // 4 x i16
+def llvm_v8i16_ty : LLVMType<v8i16>; // 8 x i16
+def llvm_v16i16_ty : LLVMType<v16i16>; // 16 x i16
+def llvm_v32i16_ty : LLVMType<v32i16>; // 32 x i16
+def llvm_v64i16_ty : LLVMType<v64i16>; // 64 x i16
+def llvm_v128i16_ty : LLVMType<v128i16>; // 128 x i16
+
+def llvm_v1i32_ty : LLVMType<v1i32>; // 1 x i32
+def llvm_v2i32_ty : LLVMType<v2i32>; // 2 x i32
+def llvm_v4i32_ty : LLVMType<v4i32>; // 4 x i32
+def llvm_v8i32_ty : LLVMType<v8i32>; // 8 x i32
+def llvm_v16i32_ty : LLVMType<v16i32>; // 16 x i32
+def llvm_v32i32_ty : LLVMType<v32i32>; // 32 x i32
+def llvm_v64i32_ty : LLVMType<v64i32>; // 64 x i32
+
+def llvm_v1i64_ty : LLVMType<v1i64>; // 1 x i64
+def llvm_v2i64_ty : LLVMType<v2i64>; // 2 x i64
+def llvm_v4i64_ty : LLVMType<v4i64>; // 4 x i64
+def llvm_v8i64_ty : LLVMType<v8i64>; // 8 x i64
+def llvm_v16i64_ty : LLVMType<v16i64>; // 16 x i64
+def llvm_v32i64_ty : LLVMType<v32i64>; // 32 x i64
+
+def llvm_v1i128_ty : LLVMType<v1i128>; // 1 x i128
+
+def llvm_v2f16_ty : LLVMType<v2f16>; // 2 x half (__fp16)
+def llvm_v4f16_ty : LLVMType<v4f16>; // 4 x half (__fp16)
+def llvm_v8f16_ty : LLVMType<v8f16>; // 8 x half (__fp16)
+def llvm_v1f32_ty : LLVMType<v1f32>; // 1 x float
+def llvm_v2f32_ty : LLVMType<v2f32>; // 2 x float
+def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float
+def llvm_v8f32_ty : LLVMType<v8f32>; // 8 x float
+def llvm_v16f32_ty : LLVMType<v16f32>; // 16 x float
+def llvm_v1f64_ty : LLVMType<v1f64>; // 1 x double
+def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double
+def llvm_v4f64_ty : LLVMType<v4f64>; // 4 x double
+def llvm_v8f64_ty : LLVMType<v8f64>; // 8 x double
+
+def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here
+
+
+//===----------------------------------------------------------------------===//
+// Intrinsic Definitions.
+//===----------------------------------------------------------------------===//
+
+// Intrinsic class - This is used to define one LLVM intrinsic. The name of the
+// intrinsic definition should start with "int_", then match the LLVM intrinsic
+// name with the "llvm." prefix removed, and all "."s turned into "_"s. For
+// example, llvm.bswap.i16 -> int_bswap_i16.
+//
+// * RetTypes is a list containing the return types expected for the
+// intrinsic.
+// * ParamTypes is a list containing the parameter types expected for the
+// intrinsic.
+// * Properties can be set to describe the behavior of the intrinsic.
+//
+class SDPatternOperator;
+class Intrinsic<list<LLVMType> ret_types,
+ list<LLVMType> param_types = [],
+ list<IntrinsicProperty> properties = [],
+ string name = ""> : SDPatternOperator {
+ string LLVMName = name;
+ string TargetPrefix = ""; // Set to a prefix for target-specific intrinsics.
+ list<LLVMType> RetTypes = ret_types;
+ list<LLVMType> ParamTypes = param_types;
+ list<IntrinsicProperty> Properties = properties;
+
+ bit isTarget = 0;
+}
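+
+// Illustrative only: a hypothetical intrinsic llvm.example.op following the
+// naming rule above would be declared as
+//
+//   def int_example_op : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;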
+
+/// GCCBuiltin - If this intrinsic exactly corresponds to a GCC builtin, this
+/// specifies the name of the builtin. This provides automatic CBE and CFE
+/// support.
+class GCCBuiltin<string name> {
+ string GCCBuiltinName = name;
+}
+
+class MSBuiltin<string name> {
+ string MSBuiltinName = name;
+}
+
+
+//===--------------- Variable Argument Handling Intrinsics ----------------===//
+//
+
+def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
+def int_vacopy : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
+ "llvm.va_copy">;
+def int_vaend : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
+
+//===------------------- Garbage Collection Intrinsics --------------------===//
+//
+def int_gcroot : Intrinsic<[],
+ [llvm_ptrptr_ty, llvm_ptr_ty]>;
+def int_gcread : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_ptrptr_ty],
+ [IntrReadArgMem]>;
+def int_gcwrite : Intrinsic<[],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+ [IntrReadWriteArgMem, NoCapture<1>, NoCapture<2>]>;
+
+//===--------------------- Code Generator Intrinsics ----------------------===//
+//
+def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_read_register : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
+ [IntrReadMem], "llvm.read_register">;
+def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
+ [], "llvm.write_register">;
+
+// Gets the address of the local variable area. This is typically a copy of the
+// stack, frame, or base pointer depending on the type of prologue.
+def int_localaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+// Escapes local variables to allow access from other functions.
+def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;
+
+// Given a function and the localaddress of a parent frame, returns a pointer
+// to an escaped allocation indicated by the index.
+def int_localrecover : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
+// model their dependencies on allocas.
+def int_stacksave : Intrinsic<[llvm_ptr_ty]>,
+ GCCBuiltin<"__builtin_stack_save">;
+def int_stackrestore : Intrinsic<[], [llvm_ptr_ty]>,
+ GCCBuiltin<"__builtin_stack_restore">;
+
+def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;
+
+// IntrReadWriteArgMem is more pessimistic than strictly necessary for
+// prefetch; however, it does conveniently prevent the prefetch from being
+// reordered with respect to nearby accesses to the same memory.
+def int_prefetch : Intrinsic<[],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;
+
+def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
+
+// The assume intrinsic is marked as arbitrarily writing so that proper
+// control dependencies will be maintained.
+def int_assume : Intrinsic<[], [llvm_i1_ty], []>;
+
+// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
+// guard to the correct place on the stack frame.
+def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackprotectorcheck : Intrinsic<[], [llvm_ptrptr_ty],
+ [IntrReadWriteArgMem]>;
+
+// A counter increment for instrumentation based profiling.
+def int_instrprof_increment : Intrinsic<[],
+ [llvm_ptr_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ []>;
+
+// A call to profile runtime for value profiling of target expressions
+// through instrumentation based profiling.
+def int_instrprof_value_profile : Intrinsic<[],
+ [llvm_ptr_ty, llvm_i64_ty,
+ llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ []>;
+
+//===------------------- Standard C Library Intrinsics --------------------===//
+//
+
+def int_memcpy : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>, NoCapture<1>,
+ ReadOnly<1>]>;
+def int_memmove : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>, NoCapture<1>,
+ ReadOnly<1>]>;
+def int_memset : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
+ llvm_i32_ty, llvm_i1_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
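+
+// For reference, calls to these take roughly the following IR shape in this
+// revision (dest, src, length, alignment, volatile flag); the exact name
+// mangling depends on the overloaded pointer and length types:
+//
+//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n,
+//                                        i32 1, i1 false)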
+
+let Properties = [IntrNoMem] in {
+ def int_fma : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>]>;
+ def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>]>;
+
+ // These functions do not read memory, but are sensitive to the
+ // rounding mode. LLVM purposely does not model changes to the FP
+ // environment so they can be treated as readnone.
+ def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>;
+ def int_sin : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_cos : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_pow : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_log : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_log10: Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_exp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_fabs : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_minnum : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_maxnum : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_copysign : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>]>;
+ def int_floor : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_ceil : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_trunc : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_rint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+ def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
+ [IntrNoMem]>;
+}
+
+// NOTE: these are internal interfaces.
+def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+
+// Internal interface for object size checking
+def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, llvm_i1_ty],
+ [IntrNoMem]>,
+ GCCBuiltin<"__builtin_object_size">;
+
+//===------------------------- Expect Intrinsics --------------------------===//
+//
+def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+
+//===-------------------- Bit Manipulation Intrinsics ---------------------===//
+//
+
+// None of these intrinsics accesses memory at all.
+let Properties = [IntrNoMem] in {
+ def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+ def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+ def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+ def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+ def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+}
+
+//===------------------------ Debugger Intrinsics -------------------------===//
+//
+
+// None of these intrinsics accesses memory at all, but that doesn't mean the
+// optimizers can transform them aggressively; special handling is needed in a
+// few places.
+let Properties = [IntrNoMem] in {
+ def int_dbg_declare : Intrinsic<[],
+ [llvm_metadata_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty]>;
+ def int_dbg_value : Intrinsic<[],
+ [llvm_metadata_ty, llvm_i64_ty,
+ llvm_metadata_ty,
+ llvm_metadata_ty]>;
+}
+
+//===------------------ Exception Handling Intrinsics----------------------===//
+//
+
+// The result of eh.typeid.for depends on the enclosing function, but inside a
+// given function it is 'const' and may be CSE'd, etc.
+def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
+def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;
+
+// eh.exceptionpointer returns the pointer to the exception caught by
+// the given `catchpad`.
+def int_eh_exceptionpointer : Intrinsic<[llvm_anyptr_ty], [llvm_token_ty],
+ [IntrNoMem]>;
+
+// Gets the exception code from a catchpad token. Only used on some platforms.
+def int_eh_exceptioncode : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrNoMem]>;
+
+// __builtin_unwind_init is an undocumented GCC intrinsic that causes all
+// callee-saved registers to be saved and restored (regardless of whether they
+// are used) in the calling function. It is used by libgcc_eh.
+def int_eh_unwind_init: Intrinsic<[]>,
+ GCCBuiltin<"__builtin_unwind_init">;
+
+def int_eh_dwarf_cfa : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;
+
+let Properties = [IntrNoMem] in {
+ def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
+ def int_eh_sjlj_callsite : Intrinsic<[], [llvm_i32_ty]>;
+}
+def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
+def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
+def int_eh_sjlj_setup_dispatch : Intrinsic<[], []>;
+
+//===---------------- Generic Variable Attribute Intrinsics----------------===//
+//
+def int_var_annotation : Intrinsic<[],
+ [llvm_ptr_ty, llvm_ptr_ty,
+ llvm_ptr_ty, llvm_i32_ty],
+ [], "llvm.var.annotation">;
+def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>],
+ [LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty,
+ llvm_i32_ty],
+ [], "llvm.ptr.annotation">;
+def int_annotation : Intrinsic<[llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_ptr_ty,
+ llvm_ptr_ty, llvm_i32_ty],
+ [], "llvm.annotation">;
+
+//===------------------------ Trampoline Intrinsics -----------------------===//
+//
+def int_init_trampoline : Intrinsic<[],
+ [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>,
+ GCCBuiltin<"__builtin_init_trampoline">;
+
+def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
+ [IntrReadArgMem]>,
+ GCCBuiltin<"__builtin_adjust_trampoline">;
+
+//===------------------------ Overflow Intrinsics -------------------------===//
+//
+
+// Expose the carry flag from add operations on two integrals.
+def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
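+
+// Each of these returns the arithmetic result together with the overflow bit
+// as a two-element struct, extracted in IR with extractvalue, e.g.:
+//
+//   %r   = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
+//   %sum = extractvalue {i32, i1} %r, 0
+//   %ovf = extractvalue {i32, i1} %r, 1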
+
+//===------------------------- Memory Use Markers -------------------------===//
+//
+def int_lifetime_start : Intrinsic<[],
+ [llvm_i64_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<1>]>;
+def int_lifetime_end : Intrinsic<[],
+ [llvm_i64_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<1>]>;
+def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
+ [llvm_i64_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<1>]>;
+def int_invariant_end : Intrinsic<[],
+ [llvm_descriptor_ty, llvm_i64_ty,
+ llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+
+def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty],
+ [IntrNoMem]>;
+
+//===------------------------ Stackmap Intrinsics -------------------------===//
+//
+def int_experimental_stackmap : Intrinsic<[],
+ [llvm_i64_ty, llvm_i32_ty, llvm_vararg_ty],
+ [Throws]>;
+def int_experimental_patchpoint_void : Intrinsic<[],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_ptr_ty, llvm_i32_ty,
+ llvm_vararg_ty],
+ [Throws]>;
+def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_ptr_ty, llvm_i32_ty,
+ llvm_vararg_ty],
+ [Throws]>;
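+
+// A minimal stackmap call in IR (the ID 42, the request for 0 shadow bytes,
+// and the live value %x are placeholders):
+//
+//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 42, i32 0,
+//                                                         i32 %x)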
+
+
+//===------------------------ Garbage Collection Intrinsics ---------------===//
+// These are documented in docs/Statepoint.rst
+
+def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_anyptr_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_vararg_ty],
+ [Throws]>;
+
+def int_experimental_gc_result : Intrinsic<[llvm_any_ty], [llvm_token_ty],
+ [IntrReadMem]>;
+def int_experimental_gc_relocate : Intrinsic<[llvm_anyptr_ty],
+ [llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrReadMem]>;
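+
+// A sketch of the statepoint/relocate pattern, following the example in
+// docs/Statepoint.rst (the callee @foo, the GC pointer %obj, and the operand
+// indices 7/7 are illustrative only):
+//
+//   %tok = call token (i64, i32, void ()*, i32, i32, ...)
+//       @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0,
+//           void ()* @foo, i32 0, i32 0, i32 0, i32 0,
+//           i8 addrspace(1)* %obj)
+//   %obj.rel = call i8 addrspace(1)*
+//       @llvm.experimental.gc.relocate.p1i8(token %tok, i32 7, i32 7)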
+
+//===-------------------------- Other Intrinsics --------------------------===//
+//
+def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
+ GCCBuiltin<"__builtin_flt_rounds">;
+def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
+ GCCBuiltin<"__builtin_trap">;
+def int_debugtrap : Intrinsic<[]>,
+ GCCBuiltin<"__builtin_debugtrap">;
+
+// NOP: calls/invokes to this intrinsic are removed by codegen
+def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
+
+// Intrinsics to support the half-precision floating-point format
+let Properties = [IntrNoMem] in {
+def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
+def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
+}
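+
+// Round-trip sketch in IR (assuming a float operand; the .f32 suffix names
+// the overloaded source/destination type):
+//
+//   %h = call i16 @llvm.convert.to.fp16.f32(float %x)
+//   %f = call float @llvm.convert.from.fp16.f32(i16 %h)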
+
+// These convert intrinsics support conversions between various types with
+// rounding and saturation. NOTE: avoid using these intrinsics; they may be
+// removed in the future, and most targets don't support them.
+def int_convertff : Intrinsic<[llvm_anyfloat_ty],
+ [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertfsi : Intrinsic<[llvm_anyfloat_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertfui : Intrinsic<[llvm_anyfloat_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertsif : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertuif : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertss : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertsu : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertus : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+def int_convertuu : Intrinsic<[llvm_anyint_ty],
+ [llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
+
+// Clear cache intrinsic; the default lowering is to ignore it (i.e. emit
+// nothing). It maps to void __clear_cache() on platforms that support it.
+def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
+ [], "llvm.clear_cache">;
+
+//===-------------------------- Masked Intrinsics -------------------------===//
+//
+def int_masked_store : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerTo<0>,
+ llvm_i32_ty,
+ LLVMVectorSameWidth<0, llvm_i1_ty>],
+ [IntrReadWriteArgMem]>;
+
+def int_masked_load : Intrinsic<[llvm_anyvector_ty],
+ [LLVMPointerTo<0>, llvm_i32_ty,
+ LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
+ [IntrReadArgMem]>;
+
+def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
+ [LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
+ LLVMVectorSameWidth<0, llvm_i1_ty>,
+ LLVMMatchType<0>],
+ [IntrReadArgMem]>;
+
+def int_masked_scatter: Intrinsic<[],
+ [llvm_anyvector_ty,
+ LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
+ LLVMVectorSameWidth<0, llvm_i1_ty>],
+ [IntrReadWriteArgMem]>;
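+
+// A masked load/store pair for <4 x i32>, sketched in IR (alignment 4;
+// %p, %q, %m, and %pass are placeholders):
+//
+//   %v = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %p, i32 4,
+//                                               <4 x i1> %m, <4 x i32> %pass)
+//   call void @llvm.masked.store.v4i32(<4 x i32> %v, <4 x i32>* %q, i32 4,
+//                                      <4 x i1> %m)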
+
+// Intrinsics to support bit sets.
+def int_bitset_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Target-specific intrinsics
+//===----------------------------------------------------------------------===//
+
+include "llvm/IR/IntrinsicsPowerPC.td"
+include "llvm/IR/IntrinsicsX86.td"
+include "llvm/IR/IntrinsicsARM.td"
+include "llvm/IR/IntrinsicsAArch64.td"
+include "llvm/IR/IntrinsicsXCore.td"
+include "llvm/IR/IntrinsicsHexagon.td"
+include "llvm/IR/IntrinsicsNVVM.td"
+include "llvm/IR/IntrinsicsMips.td"
+include "llvm/IR/IntrinsicsAMDGPU.td"
+include "llvm/IR/IntrinsicsBPF.td"
+include "llvm/IR/IntrinsicsSystemZ.td"
+include "llvm/IR/IntrinsicsWebAssembly.td"
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td b/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td
new file mode 100644
index 0000000..578f259
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -0,0 +1,658 @@
+//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the AArch64-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
+ Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
+def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
+
+def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
+def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
+
+def int_aarch64_clrex : Intrinsic<[]>;
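+
+// The exclusive operations above form read-modify-write loops such as this
+// IR sketch (assuming an i32* location; the pointer type is mangled into the
+// name, and values travel in the low bits of the i64):
+//
+//   %old  = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+//   ; ... compute %new from %old ...
+//   %fail = call i32 @llvm.aarch64.stxr.p0i32(i64 %new, i32* %addr)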
+
+def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+ LLVMMatchType<0>], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// HINT
+
+def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// RBIT
+
+def int_aarch64_rbit : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Data Barrier Instructions
+
+def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, Intrinsic<[], [llvm_i32_ty]>;
+def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, Intrinsic<[], [llvm_i32_ty]>;
+def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, Intrinsic<[], [llvm_i32_ty]>;
+
+}
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON)
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_2Scalar_Float_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_FPToIntRounding_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;
+
+ class AdvSIMD_1IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Expand_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1IntArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Int_Across_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+ class AdvSIMD_1VectorArg_Float_Across_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+ class AdvSIMD_2IntArg_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2FloatArg_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Compare_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_FloatCompare_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMExtendedType<0>, LLVMExtendedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyint_ty],
+ [LLVMExtendedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMTruncatedType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+ class AdvSIMD_3VectorArg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
+ LLVMMatchType<1>], [IntrNoMem]>;
+ class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ class AdvSIMD_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Arithmetic ops
+
+let Properties = [IntrNoMem] in {
+ // Vector Add Across Lanes
+ def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Long Add Across Lanes
+ def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+
+ // Vector Halving Add
+ def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Rounding Halving Add
+ def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Add
+ def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Add High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Add High-Half
+ def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Doubling Multiply High
+ def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Doubling Multiply High
+ def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
+
+  // Vector Polynomial Multiply
+ def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Long Multiply
+ def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+
+ // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
+ // it with a v16i8.
+ def int_aarch64_neon_pmull64 :
+ Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+
+ // Vector Extending Multiply
+ def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
+ let Properties = [IntrNoMem, Commutative];
+ }
+
+ // Vector Saturating Doubling Long Multiply
+ def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
+ def int_aarch64_neon_sqdmulls_scalar
+ : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ // Vector Halving Subtract
+ def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Saturating Subtract
+ def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Subtract High-Half
+ // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
+ // header is no longer supported.
+ def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Rounding Subtract High-Half
+ def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;
+
+ // Vector Compare Absolute Greater-than-or-equal
+ def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Compare Absolute Greater-than
+ def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;
+
+ // Vector Absolute Difference
+ def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Scalar Absolute Difference
+ def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;
+
+ // Vector Max
+ def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Max Across Lanes
+ def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Vector Min
+ def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Vector Min/Max Number
+ def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
+ def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Vector Min Across Lanes
+ def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
+ def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+ def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
+
+ // Pairwise Add
+ def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Long Pairwise Add
+ // FIXME: In theory, we shouldn't need intrinsics for saddlp or
+ // uaddlp, but tblgen's type inference currently can't handle the
+ // pattern fragments this ends up generating.
+ def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+ def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+ // Folding Maximum
+ def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Folding Minimum
+ def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
+ def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;
+
+ // Reciprocal Estimate/Step
+ def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
+ def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;
+
+ // Reciprocal Exponent
+ def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Saturating Shift Left
+ def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Rounding Shift Left
+ def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Saturating Rounding Shift Left
+ def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Shift Left by Constant
+ def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
+ def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
+ def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Narrowing Shift Right by Constant
+ def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Shift Right by Constant
+ def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Rounding Narrowing Saturating Shift Right by Constant
+ def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+ def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
+
+ // Vector Shift Left
+ def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
+ def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;
+
+ // Vector Widening Shift Left by Constant
+ def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
+ def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+ def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
+
+ // Vector Shift Right by Constant and Insert
+ def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Shift Left by Constant and Insert
+ def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;
+
+ // Vector Saturating Narrow
+ def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+ def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Saturating Extract and Unsigned Narrow
+ def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
+ def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;
+
+ // Vector Absolute Value
+ def int_aarch64_neon_abs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Absolute Value
+ def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Saturating Negation
+ def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;
+
+ // Vector Count Leading Sign Bits
+ def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Reciprocal Estimate
+ def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
+ def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Square Root Estimate
+ def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
+ def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Vector Bitwise Reverse
+ def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;
+
+ // Vector Conversions Between Half-Precision and Single-Precision.
+ def int_aarch64_neon_vcvtfp2hf
+ : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_aarch64_neon_vcvthf2fp
+ : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
+ // Vector Conversions Between Floating-point and Fixed-point.
+ def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
+ def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+ def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;
+
+ // Vector FP->Int Conversions
+ def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
+ def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
+
+  // Vector FP Rounding: round-to-nearest with ties to even is the only
+  // rounding mode not covered by a generic LLVM intrinsic.
+ def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;
+
+ // Scalar FP->Int conversions
+
+ // Vector FP Inexact Narrowing
+ def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;
+
+ // Scalar FP Inexact Narrowing
+ def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+}
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_2Vector2Index_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
+ [IntrNoMem]>;
+}
+
+// Vector element to element moves
+def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_1Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_1Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+
+ class AdvSIMD_2Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_2Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<2>]>;
+ class AdvSIMD_2Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+
+ class AdvSIMD_3Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_3Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<3>]>;
+ class AdvSIMD_3Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+
+ class AdvSIMD_4Vec_Load_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Load_Lane_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadArgMem]>;
+ class AdvSIMD_4Vec_Store_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMAnyPointerType<LLVMMatchType<0>>],
+ [IntrReadWriteArgMem, NoCapture<4>]>;
+ class AdvSIMD_4Vec_Store_Lane_Intrinsic
+ : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i64_ty, llvm_anyptr_ty],
+ [IntrReadWriteArgMem, NoCapture<5>]>;
+}
+
+// Memory ops
+
+def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
+def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;
+
+def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
+def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
+def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;
+
+def int_aarch64_neon_st2 : AdvSIMD_2Vec_Store_Intrinsic;
+def int_aarch64_neon_st3 : AdvSIMD_3Vec_Store_Intrinsic;
+def int_aarch64_neon_st4 : AdvSIMD_4Vec_Store_Intrinsic;
+
+def int_aarch64_neon_st2lane : AdvSIMD_2Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st3lane : AdvSIMD_3Vec_Store_Lane_Intrinsic;
+def int_aarch64_neon_st4lane : AdvSIMD_4Vec_Store_Lane_Intrinsic;
+
+let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.".
+ class AdvSIMD_Tbl1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
+ class AdvSIMD_Tbl3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbl4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+ class AdvSIMD_Tbx1_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx2_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx3_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+ class AdvSIMD_Tbx4_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
+ [IntrNoMem]>;
+}
+def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
+def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
+def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
+def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;
+
+def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
+def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
+def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
+def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
+
+let TargetPrefix = "aarch64" in {
+ class Crypto_AES_DataKey_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ class Crypto_AES_Data_Intrinsic
+ : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
+ // (v4i32).
+ class Crypto_SHA_5Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+  // SHA intrinsic taking 1 word of the hash (i32).
+ class Crypto_SHA_1Hash_Intrinsic
+ : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the schedule
+ class Crypto_SHA_8Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+ // SHA intrinsic taking 12 words of the schedule
+ class Crypto_SHA_12Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
+ class Crypto_SHA_8Hash4Schedule_Intrinsic
+ : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+}
+
+// AES
+def int_aarch64_crypto_aese : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesd : Crypto_AES_DataKey_Intrinsic;
+def int_aarch64_crypto_aesmc : Crypto_AES_Data_Intrinsic;
+def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;
+
+// SHA1
+def int_aarch64_crypto_sha1c : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1p : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1m : Crypto_SHA_5Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha1h : Crypto_SHA_1Hash_Intrinsic;
+
+def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
+def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;
+
+// SHA256
+def int_aarch64_crypto_sha256h : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256h2 : Crypto_SHA_8Hash4Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
+def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+let TargetPrefix = "aarch64" in {
+
+def int_aarch64_crc32b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32x : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
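+
+// Accumulating one byte into a running CRC, sketched in IR (%crc and %byte
+// are placeholders; the byte travels in the low bits of the second i32):
+//
+//   %crc.next = call i32 @llvm.aarch64.crc32b(i32 %crc, i32 %byte)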
+}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
new file mode 100644
index 0000000..84582e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -0,0 +1,157 @@
+//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the AMDGPU-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "r600" in {
+
+class R600ReadPreloadRegisterIntrinsic<string name>
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+multiclass R600ReadPreloadRegisterIntrinsic_xyz<string prefix> {
+ def _x : R600ReadPreloadRegisterIntrinsic<!strconcat(prefix, "_x")>;
+ def _y : R600ReadPreloadRegisterIntrinsic<!strconcat(prefix, "_y")>;
+ def _z : R600ReadPreloadRegisterIntrinsic<!strconcat(prefix, "_z")>;
+}
+
+defm int_r600_read_global_size : R600ReadPreloadRegisterIntrinsic_xyz <
+ "__builtin_r600_read_global_size">;
+defm int_r600_read_local_size : R600ReadPreloadRegisterIntrinsic_xyz <
+ "__builtin_r600_read_local_size">;
+defm int_r600_read_ngroups : R600ReadPreloadRegisterIntrinsic_xyz <
+ "__builtin_r600_read_ngroups">;
+defm int_r600_read_tgid : R600ReadPreloadRegisterIntrinsic_xyz <
+ "__builtin_r600_read_tgid">;
+defm int_r600_read_tidig : R600ReadPreloadRegisterIntrinsic_xyz <
+ "__builtin_r600_read_tidig">;
+
+def int_r600_rat_store_typed :
+ // 1st parameter: Data
+ // 2nd parameter: Index
+ // 3rd parameter: Constant RAT ID
+ Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
+ GCCBuiltin<"__builtin_r600_rat_store_typed">;
+
+} // End TargetPrefix = "r600"
+
+let TargetPrefix = "AMDGPU" in {
+
+class AMDGPUReadPreloadRegisterIntrinsic<string name>
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+def int_AMDGPU_div_scale : GCCBuiltin<"__builtin_amdgpu_div_scale">,
+ // 1st parameter: Numerator
+ // 2nd parameter: Denominator
+  // 3rd parameter: Constant selecting between the first and second
+  // operand (0 = first, 1 = second).
+ Intrinsic<[llvm_anyfloat_ty, llvm_i1_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
+ [IntrNoMem]>;
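+
+// A hypothetical div_scale call on f32 (operand names are placeholders; the
+// trailing i1 selects which operand is scaled):
+//
+//   %r = call { float, i1 } @llvm.AMDGPU.div.scale.f32(float %num,
+//                                                      float %den, i1 false)
+//   %scaled = extractvalue { float, i1 } %r, 0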
+
+def int_AMDGPU_div_fmas : GCCBuiltin<"__builtin_amdgpu_div_fmas">,
+ Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
+ [IntrNoMem]>;
+
+def int_AMDGPU_div_fixup : GCCBuiltin<"__builtin_amdgpu_div_fixup">,
+ Intrinsic<[llvm_anyfloat_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+def int_AMDGPU_trig_preop : GCCBuiltin<"__builtin_amdgpu_trig_preop">,
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_AMDGPU_rcp : GCCBuiltin<"__builtin_amdgpu_rcp">,
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_AMDGPU_rsq : GCCBuiltin<"__builtin_amdgpu_rsq">,
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_AMDGPU_rsq_clamped : GCCBuiltin<"__builtin_amdgpu_rsq_clamped">,
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+
+def int_AMDGPU_ldexp : GCCBuiltin<"__builtin_amdgpu_ldexp">,
+ Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+
+def int_AMDGPU_class : GCCBuiltin<"__builtin_amdgpu_class">,
+ Intrinsic<[llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_AMDGPU_read_workdim : AMDGPUReadPreloadRegisterIntrinsic <
+ "__builtin_amdgpu_read_workdim">;
+
+} // End TargetPrefix = "AMDGPU"
+
+let TargetPrefix = "amdgcn" in {
+
+// SI only
+def int_amdgcn_buffer_wbinvl1_sc :
+ GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
+ Intrinsic<[], [], []>;
+
+// On CI+
+def int_amdgcn_buffer_wbinvl1_vol :
+ GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
+ Intrinsic<[], [], []>;
+
+def int_amdgcn_buffer_wbinvl1 :
+ GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
+ Intrinsic<[], [], []>;
+
+def int_amdgcn_s_dcache_inv :
+ GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
+ Intrinsic<[], [], []>;
+
+// CI+
+def int_amdgcn_s_dcache_inv_vol :
+ GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
+ Intrinsic<[], [], []>;
+
+// VI
+def int_amdgcn_s_dcache_wb :
+ GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
+ Intrinsic<[], [], []>;
+
+// VI
+def int_amdgcn_s_dcache_wb_vol :
+ GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
+ Intrinsic<[], [], []>;
+
+def int_amdgcn_dispatch_ptr :
+ GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+
+// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
+def int_amdgcn_interp_p1 :
+ GCCBuiltin<"__builtin_amdgcn_interp_p1">,
+ Intrinsic<[llvm_float_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>; // This intrinsic reads from LDS, but the memory
+                // values are constant, so it behaves like IntrNoMem.
+
+// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
+def int_amdgcn_interp_p2 :
+ GCCBuiltin<"__builtin_amdgcn_interp_p2">,
+ Intrinsic<[llvm_float_ty],
+ [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+  [IntrNoMem]>; // See int_amdgcn_interp_p1 for why this is
+                // IntrNoMem.
+
+def int_amdgcn_mbcnt_lo :
+ GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_amdgcn_mbcnt_hi :
+ GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsARM.td b/contrib/llvm/include/llvm/IR/IntrinsicsARM.td
new file mode 100644
index 0000000..c1d911c
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -0,0 +1,519 @@
+//===- IntrinsicsARM.td - Defines ARM intrinsics -----------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the ARM-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// TLS
+
+let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
+
+def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
+ Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+// A space-consuming intrinsic primarily for testing ARMConstantIslands. The
+// first argument is the number of bytes this "instruction" takes up; the
+// second argument and the return value are essentially chains, used to force
+// ordering during ISel.
+def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Saturating Arithmetic
+
+def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Load, Store and Clear exclusive
+
+def int_arm_ldrex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
+def int_arm_strex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
+def int_arm_ldaex : Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty]>;
+def int_arm_stlex : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyptr_ty]>;
+
+def int_arm_clrex : Intrinsic<[]>;
+
+def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty]>;
+def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
+
+def int_arm_stlexd : Intrinsic<[llvm_i32_ty],
+ [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty]>;
+def int_arm_ldaexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty]>;
+
+//===----------------------------------------------------------------------===//
+// Data barrier instructions
+def int_arm_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">,
+ Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">,
+ Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">,
+ Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// VFP
+
+def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Coprocessor
+
+// Move to coprocessor
+def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from coprocessor
+def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
+ MSBuiltin<"_MoveFromCoprocessor">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
+ MSBuiltin<"_MoveFromCoprocessor2">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+
+// Coprocessor data processing
+def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+// Move from two registers to coprocessor
+def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// CRC32
+
+def int_arm_crc32b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_crc32h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_crc32w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_arm_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// HINT
+
+def int_arm_hint : Intrinsic<[], [llvm_i32_ty]>;
+def int_arm_dbg : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// RBIT
+
+def int_arm_rbit : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// UND (reserved undefined sequence)
+
+def int_arm_undefined : Intrinsic<[], [llvm_i32_ty]>;
+
+//===----------------------------------------------------------------------===//
+// Advanced SIMD (NEON)
+
+// The following classes do not correspond directly to GCC builtins.
+class Neon_1Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+class Neon_1Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
+class Neon_2Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Narrow_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMExtendedType<0>, LLVMExtendedType<0>],
+ [IntrNoMem]>;
+class Neon_2Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+class Neon_3Arg_Long_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMTruncatedType<0>, LLVMTruncatedType<0>],
+ [IntrNoMem]>;
+class Neon_CvtFxToFP_Intrinsic
+ : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPToFx_Intrinsic
+ : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>;
+class Neon_CvtFPtoInt_1Arg_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+class Neon_Compare_Intrinsic
+ : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
+ [IntrNoMem]>;
+
+// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors.
+// Besides the table, VTBL has one other v8i8 argument and VTBX has two.
+// Overall, the classes range from 2 to 6 v8i8 arguments.
+class Neon_Tbl2Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl3Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl4Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty],
+ [IntrNoMem]>;
+class Neon_Tbl5Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty], [IntrNoMem]>;
+class Neon_Tbl6Arg_Intrinsic
+ : Intrinsic<[llvm_v8i8_ty],
+ [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty,
+ llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>;
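+
+// For example, the one-table forms defined below are used like this in IR
+// (fixed v8i8 types, so the names carry no overload suffix; operands are
+// placeholders):
+//
+//   %r1 = call <8 x i8> @llvm.arm.neon.vtbl1(<8 x i8> %table, <8 x i8> %idx)
+//   %r2 = call <8 x i8> @llvm.arm.neon.vtbx1(<8 x i8> %orig, <8 x i8> %table,
+//                                            <8 x i8> %idx)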
+
+// Arithmetic ops
+
+let Properties = [IntrNoMem, Commutative] in {
+
+ // Vector Add.
+ def int_arm_neon_vhadds : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vhaddu : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vrhadds : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vrhaddu : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vqadds : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vqaddu : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vraddhn : Neon_2Arg_Narrow_Intrinsic;
+
+ // Vector Multiply.
+ def int_arm_neon_vmulp : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vqdmulh : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vqrdmulh : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vmulls : Neon_2Arg_Long_Intrinsic;
+ def int_arm_neon_vmullu : Neon_2Arg_Long_Intrinsic;
+ def int_arm_neon_vmullp : Neon_2Arg_Long_Intrinsic;
+ def int_arm_neon_vqdmull : Neon_2Arg_Long_Intrinsic;
+
+ // Vector Maximum.
+ def int_arm_neon_vmaxs : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vmaxu : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vmaxnm : Neon_2Arg_Intrinsic;
+
+ // Vector Minimum.
+ def int_arm_neon_vmins : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vminu : Neon_2Arg_Intrinsic;
+ def int_arm_neon_vminnm : Neon_2Arg_Intrinsic;
+
+ // Vector Reciprocal Step.
+ def int_arm_neon_vrecps : Neon_2Arg_Intrinsic;
+
+ // Vector Reciprocal Square Root Step.
+ def int_arm_neon_vrsqrts : Neon_2Arg_Intrinsic;
+}
+
+// Vector Subtract.
+def int_arm_neon_vhsubs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vhsubu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqsubs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqsubu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Absolute Compare.
+def int_arm_neon_vacge : Neon_Compare_Intrinsic;
+def int_arm_neon_vacgt : Neon_Compare_Intrinsic;
+
+// Vector Absolute Differences.
+def int_arm_neon_vabds : Neon_2Arg_Intrinsic;
+def int_arm_neon_vabdu : Neon_2Arg_Intrinsic;
+
+// Vector Pairwise Add.
+def int_arm_neon_vpadd : Neon_2Arg_Intrinsic;
+
+// Vector Pairwise Add Long.
+// Note: This is different from the other "long" NEON intrinsics because
+// the result vector has half as many elements as the source vector.
+// The source and destination vector types must be specified separately.
+def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+// Vector Pairwise Add and Accumulate Long.
+// Note: This is similar to vpaddl but the destination vector also appears
+// as the first argument.
+def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty],
+ [IntrNoMem]>;
+
+// Vector Pairwise Maximum and Minimum.
+def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpmaxu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpmins : Neon_2Arg_Intrinsic;
+def int_arm_neon_vpminu : Neon_2Arg_Intrinsic;
+
+// Vector Shifts:
+//
+// The various saturating and rounding vector shift operations need to be
+// represented by intrinsics in LLVM, and even the basic VSHL variable shift
+// operation cannot be safely translated to LLVM's shift operators. VSHL can
+// be used for both left and right shifts, or even combinations of the two,
+// depending on the signs of the shift amounts. It also has well-defined
+// behavior for shift amounts that LLVM leaves undefined. Only basic shifts
+// by constants can be represented with LLVM's shift operators.
+//
+// The shift counts for these intrinsics are always vectors, even for constant
+// shifts, where the constant is replicated. For consistency with VSHL (and
+// other variable shift instructions), left shifts have positive shift counts
+// and right shifts have negative shift counts. This convention is also used
+// for constant right shift intrinsics, and to help preserve sanity, the
+// intrinsic names use "shift" instead of either "shl" or "shr". Where
+// applicable, signed and unsigned versions of the intrinsics are
+// distinguished with "s" and "u" suffixes. A few NEON shift instructions,
+// such as VQSHLU, take signed operands but produce unsigned results; these
+// use a "su" suffix.
+
+// Vector Shift.
+def int_arm_neon_vshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vshiftu : Neon_2Arg_Intrinsic;
+
+// Vector Rounding Shift.
+def int_arm_neon_vrshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vrshiftn : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Saturating Shift.
+def int_arm_neon_vqshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftsu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqshiftns : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqshiftnu : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqshiftnsu : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Saturating Rounding Shift.
+def int_arm_neon_vqrshifts : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqrshiftu : Neon_2Arg_Intrinsic;
+def int_arm_neon_vqrshiftns : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqrshiftnu : Neon_2Arg_Narrow_Intrinsic;
+def int_arm_neon_vqrshiftnsu : Neon_2Arg_Narrow_Intrinsic;
+
+// Vector Shift and Insert.
+def int_arm_neon_vshiftins : Neon_3Arg_Intrinsic;
+
+// Vector Absolute Value and Saturating Absolute Value.
+def int_arm_neon_vabs : Neon_1Arg_Intrinsic;
+def int_arm_neon_vqabs : Neon_1Arg_Intrinsic;
+
+// Vector Saturating Negate.
+def int_arm_neon_vqneg : Neon_1Arg_Intrinsic;
+
+// Vector Count Leading Sign/Zero Bits.
+def int_arm_neon_vcls : Neon_1Arg_Intrinsic;
+
+// Vector Reciprocal Estimate.
+def int_arm_neon_vrecpe : Neon_1Arg_Intrinsic;
+
+// Vector Reciprocal Square Root Estimate.
+def int_arm_neon_vrsqrte : Neon_1Arg_Intrinsic;
+
+// Vector Conversions Between Floating-point and Integer
+def int_arm_neon_vcvtau : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtas : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtnu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtns : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtpu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtps : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtmu : Neon_CvtFPtoInt_1Arg_Intrinsic;
+def int_arm_neon_vcvtms : Neon_CvtFPtoInt_1Arg_Intrinsic;
+
+// Vector Conversions Between Floating-point and Fixed-point.
+def int_arm_neon_vcvtfp2fxs : Neon_CvtFPToFx_Intrinsic;
+def int_arm_neon_vcvtfp2fxu : Neon_CvtFPToFx_Intrinsic;
+def int_arm_neon_vcvtfxs2fp : Neon_CvtFxToFP_Intrinsic;
+def int_arm_neon_vcvtfxu2fp : Neon_CvtFxToFP_Intrinsic;
+
+// Vector Conversions Between Half-Precision and Single-Precision.
+def int_arm_neon_vcvtfp2hf
+ : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_arm_neon_vcvthf2fp
+ : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;
+
+// Narrowing Saturating Vector Moves.
+def int_arm_neon_vqmovns : Neon_1Arg_Narrow_Intrinsic;
+def int_arm_neon_vqmovnu : Neon_1Arg_Narrow_Intrinsic;
+def int_arm_neon_vqmovnsu : Neon_1Arg_Narrow_Intrinsic;
+
+// Vector Table Lookup.
+// The first 1-4 arguments are the table.
+def int_arm_neon_vtbl1 : Neon_Tbl2Arg_Intrinsic;
+def int_arm_neon_vtbl2 : Neon_Tbl3Arg_Intrinsic;
+def int_arm_neon_vtbl3 : Neon_Tbl4Arg_Intrinsic;
+def int_arm_neon_vtbl4 : Neon_Tbl5Arg_Intrinsic;
+
+// Vector Table Extension.
+// Some elements of the destination vector may not be updated, so the original
+// value of that vector is passed as the first argument. The next 1-4
+// arguments after that are the table.
+def int_arm_neon_vtbx1 : Neon_Tbl3Arg_Intrinsic;
+def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic;
+def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic;
+def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
+
+// Vector Rounding
+def int_arm_neon_vrintn : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintx : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrinta : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintz : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintm : Neon_1Arg_Intrinsic;
+def int_arm_neon_vrintp : Neon_1Arg_Intrinsic;
+
+// De-interleaving vector loads from N-element structures.
+// Source operands are the address and alignment.
+def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty],
+ [llvm_anyptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_anyptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_anyptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_anyptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
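+
+// De-interleaving two <8 x i8> vectors from memory, sketched in IR (assuming
+// the .p0i8 pointer-type suffix used by this anyptr overload; alignment 8):
+//
+//   %pair = call { <8 x i8>, <8 x i8> }
+//       @llvm.arm.neon.vld2.v8i8.p0i8(i8* %ptr, i32 8)
+//   %even = extractvalue { <8 x i8>, <8 x i8> } %pair, 0
+//   %odd  = extractvalue { <8 x i8>, <8 x i8> } %pair, 1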
+
+// Vector load N-element structure to one lane.
+// Source operands are: the address, the N input vectors (since only one
+// lane is assigned), the lane number, and the alignment.
+def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
+ [llvm_anyptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>],
+ [llvm_anyptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>],
+ [llvm_anyptr_ty, LLVMMatchType<0>,
+ LLVMMatchType<0>, LLVMMatchType<0>,
+ LLVMMatchType<0>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadArgMem]>;
+
+// Interleaving vector stores from N-element structures.
+// Source operands are: the address, the N vectors, and the alignment.
+def int_arm_neon_vst1 : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst2 : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3 : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, LLVMMatchType<1>,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4 : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, LLVMMatchType<1>,
+ LLVMMatchType<1>, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+// Vector store N-element structure from one lane.
+// Source operands are: the address, the N vectors, the lane number, and
+// the alignment.
+def int_arm_neon_vst2lane : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+def int_arm_neon_vst3lane : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, LLVMMatchType<1>,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_arm_neon_vst4lane : Intrinsic<[],
+ [llvm_anyptr_ty, llvm_anyvector_ty,
+ LLVMMatchType<1>, LLVMMatchType<1>,
+ LLVMMatchType<1>, llvm_i32_ty,
+ llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+// Vector bitwise select.
+def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem]>;
+
+
+// Crypto instructions
+class AES_1Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty], [IntrNoMem]>;
+class AES_2Arg_Intrinsic : Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+class SHA_1Arg_Intrinsic : Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+class SHA_2Arg_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+class SHA_3Arg_i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+class SHA_3Arg_v4i32_Intrinsic : Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_arm_neon_aesd : AES_2Arg_Intrinsic;
+def int_arm_neon_aese : AES_2Arg_Intrinsic;
+def int_arm_neon_aesimc : AES_1Arg_Intrinsic;
+def int_arm_neon_aesmc : AES_1Arg_Intrinsic;
+def int_arm_neon_sha1h : SHA_1Arg_Intrinsic;
+def int_arm_neon_sha1su1 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha256su0 : SHA_2Arg_Intrinsic;
+def int_arm_neon_sha1c : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1m : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1p : SHA_3Arg_i32_Intrinsic;
+def int_arm_neon_sha1su0 : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256h2 : SHA_3Arg_v4i32_Intrinsic;
+def int_arm_neon_sha256su1 : SHA_3Arg_v4i32_Intrinsic;
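+// These correspond to the ARMv8 Crypto Extension instructions (AESE/AESD,
+// AESMC/AESIMC, SHA1*, SHA256*). For example (illustrative only), one AES
+// encryption round step:
+//   %r = call <16 x i8> @llvm.arm.neon.aese(<16 x i8> %state, <16 x i8> %key)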
+
+} // end TargetPrefix
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsBPF.td b/contrib/llvm/include/llvm/IR/IntrinsicsBPF.td
new file mode 100644
index 0000000..94eca8e
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsBPF.td
@@ -0,0 +1,24 @@
+//===- IntrinsicsBPF.td - Defines BPF intrinsics -----------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the BPF-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+// Specialized loads from packet
+let TargetPrefix = "bpf" in { // All intrinsics start with "llvm.bpf."
+ def int_bpf_load_byte : GCCBuiltin<"__builtin_bpf_load_byte">,
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+ def int_bpf_load_half : GCCBuiltin<"__builtin_bpf_load_half">,
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+ def int_bpf_load_word : GCCBuiltin<"__builtin_bpf_load_word">,
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
+ def int_bpf_pseudo : GCCBuiltin<"__builtin_bpf_pseudo">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
+}
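+// For example (illustrative only; %pkt and %off are placeholders), loading
+// one byte from a packet:
+//   %b = call i64 @llvm.bpf.load.byte(i8* %pkt, i64 %off)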
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td b/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td
new file mode 100644
index 0000000..ca6fcbd
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsHexagon.td
@@ -0,0 +1,9373 @@
+//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the Hexagon-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all Hexagon intrinsics.
+//
+// All Hexagon intrinsics start with "llvm.hexagon.".
+let TargetPrefix = "hexagon" in {
+ /// Hexagon_Intrinsic - Base class for all Hexagon intrinsics.
+ class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+}
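+// For example (illustrative only), a def such as
+//   def int_hexagon_A2_add : Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
+// produces the intrinsic llvm.hexagon.A2.add and maps it to the GCC-style
+// builtin __builtin_HEXAGON_A2_add.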
+
+//===----------------------------------------------------------------------===//
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_MEM,BT_BOOL,BT_PTR) ->
+// Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_ptr_ty],
+ [IntrNoMem]>;
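+// Abbreviations in the class names below follow the GCC machine modes used
+// by DEF_FUNCTION_TYPE_*: qi = predicate/bool (lowered as i1 or i32
+// depending on the class), hi = 16-bit int, si = 32-bit int, di = 64-bit
+// int, sf = float, df = double, mem = pointer; a leading "u" marks the
+// unsigned variant (same LLVM type).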
+
+//
+// DEF_FUNCTION_TYPE_1(void_ftype_SI,BT_VOID,BT_INT) ->
+// Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [], [llvm_ptr_ty],
+ []>;
+
+//
+// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
+// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i16_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_SI,BT_INT,BT_INT) ->
+// Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_SI,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_DI,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_DI,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_QI,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(QI_ftype_SI,BT_BOOL,BT_INT) ->
+// Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(DI_ftype_QI,BT_LONGLONG,BT_BOOL) ->
+// Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_1(SI_ftype_QI,BT_INT,BT_BOOL) ->
+// Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SISI,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(void_ftype_SISI,BT_VOID,BT_INT,BT_INT) ->
+// Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SISI,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(USI_ftype_SISI,BT_UINT,BT_INT,BT_INT) ->
+// Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_usi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SISI,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_SISI,BT_ULONGLONG,BT_INT,BT_INT) ->
+// Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_sisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_SIDI,BT_LONGLONG,BT_INT,BT_LONGLONG) ->
+// Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DISI,BT_LONGLONG,BT_LONGLONG,BT_INT) ->
+// Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_SIDI,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DIDI,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(DI_ftype_DIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(UDI_ftype_DIDI,BT_ULONGLONG,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_udi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_DISI,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DIDI,BT_BOOL,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_didi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_SIDI,BT_BOOL,BT_INT,BT_LONGLONG) ->
+// Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_DISI,BT_BOOL,BT_LONGLONG,BT_INT) ->
+// Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_disi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(QI_ftype_QIQI,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(QI_ftype_QIQIQI,BT_BOOL,BT_BOOL,BT_BOOL,BT_BOOL) ->
+// Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_qiqiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_i1_ty, llvm_i1_ty, llvm_i1_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QIQI,BT_INT,BT_BOOL,BT_BOOL) ->
+// Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_2(SI_ftype_QISI,BT_INT,BT_BOOL,BT_INT) ->
+// Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i1_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(void_ftype_SISISI,BT_VOID,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_void_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_void_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISISI,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_SISISI,BT_LONGLONG,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_DISISI,BT_INT,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DISISI,BT_LONGLONG,BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_disisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDISI,BT_INT,BT_INT,BT_LONGLONG,BT_INT) ->
+// Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sidisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT) ->
+// Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SIDIDI,BT_INT,BT_INT,BT_LONGLONG,BT_LONGLONG) ->
+// Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIDI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_SISIDI,BT_INT,BT_INT,BT_INT,BT_LONGLONG) ->
+// Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisidi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(SI_ftype_QISISI,BT_INT,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QISISI,BT_LONGLONG,BT_BOOL,BT_INT,BT_INT) ->
+// Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i1_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_QIDIDI,BT_LONGLONG,BT_BOOL,BT_LONGLONG,
+// BT_LONGLONG) ->
+// Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_qididi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty,
+ llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_3(DI_ftype_DIDIQI,BT_LONGLONG,BT_LONGLONG,BT_LONGLONG,
+// BT_BOOL) ->
+// Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didiqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(SI_ftype_SISISISI,BT_INT,BT_INT,BT_INT,BT_INT,BT_INT) ->
+// Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sisisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// DEF_FUNCTION_TYPE_4(DI_ftype_DIDISISI,BT_LONGLONG,BT_LONGLONG,
+// BT_LONGLONG,BT_INT,BT_INT) ->
+// Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_didisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+class Hexagon_mem_memmemsi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+ llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_mem_memsisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
+ llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_mem_memdisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
+ llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_mem_memsisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
+ llvm_i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+class Hexagon_v256_v256v256_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrReadWriteArgMem]>;
+
+//
+// Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_sfsi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_float_ty, llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_qi_sfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i1_ty], [llvm_float_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+ llvm_float_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_sf_sfsfsfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty,
+ llvm_float_ty,
+ llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_dididisi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty,
+ llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_si_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_di_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_i64_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_di_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_df_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty],
+ [IntrNoMem]>;
+//
+// Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_dfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_si_dfsi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_double_ty, llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdf_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+ llvm_double_ty],
+ [IntrNoMem, Throws]>;
+//
+// Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+//
+class Hexagon_df_dfdfdfqi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty,
+ llvm_double_ty,
+ llvm_i32_ty],
+ [IntrNoMem, Throws]>;
+
+
+// This one below will not be auto-generated,
+// so make sure you don't overwrite it.
+//
+// BUILTIN_INFO(SI_to_SXTHI_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_SI_to_SXTHI_asrh :
+Hexagon_si_si_Intrinsic<"SI_to_SXTHI_asrh">;
+//
+// BUILTIN_INFO_NONCONST(brev_ldd,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_ldd :
+Hexagon_mem_memmemsi_Intrinsic<"brev_ldd">;
+//
+// BUILTIN_INFO_NONCONST(brev_ldw,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_ldw :
+Hexagon_mem_memmemsi_Intrinsic<"brev_ldw">;
+//
+// BUILTIN_INFO_NONCONST(brev_ldh,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_ldh :
+Hexagon_mem_memmemsi_Intrinsic<"brev_ldh">;
+//
+// BUILTIN_INFO_NONCONST(brev_lduh,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_lduh :
+Hexagon_mem_memmemsi_Intrinsic<"brev_lduh">;
+//
+// BUILTIN_INFO_NONCONST(brev_ldb,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_ldb :
+Hexagon_mem_memmemsi_Intrinsic<"brev_ldb">;
+//
+// BUILTIN_INFO_NONCONST(brev_ldub,PTR_ftype_PTRPTRSI,3)
+//
+def int_hexagon_brev_ldub :
+Hexagon_mem_memmemsi_Intrinsic<"brev_ldub">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldd :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldd">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldw,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldw :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldw">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldh,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldh :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldh">;
+//
+// BUILTIN_INFO_NONCONST(circ_lduh,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_lduh :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_lduh">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldb,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldb :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldb">;
+//
+// BUILTIN_INFO_NONCONST(circ_ldub,PTR_ftype_PTRPTRSISI,4)
+//
+def int_hexagon_circ_ldub :
+Hexagon_mem_memmemsisi_Intrinsic<"circ_ldub">;
+
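+// The brev_* and circ_* load/store intrinsics model Hexagon's bit-reversed
+// and circular (modulo) addressing modes; the pointer result appears to be
+// the post-updated address, hence IntrReadWriteArgMem rather than IntrNoMem.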
+//
+// BUILTIN_INFO_NONCONST(brev_stb,PTR_ftype_PTRSISI,3)
+//
+def int_hexagon_brev_stb :
+Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
+//
+// BUILTIN_INFO_NONCONST(brev_sthhi,PTR_ftype_PTRSISI,3)
+//
+def int_hexagon_brev_sthhi :
+Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
+//
+// BUILTIN_INFO_NONCONST(brev_sth,PTR_ftype_PTRSISI,3)
+//
+def int_hexagon_brev_sth :
+Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
+//
+// BUILTIN_INFO_NONCONST(brev_stw,PTR_ftype_PTRSISI,3)
+//
+def int_hexagon_brev_stw :
+Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
+//
+// BUILTIN_INFO_NONCONST(brev_std,PTR_ftype_PTRSISI,3)
+//
+def int_hexagon_brev_std :
+Hexagon_mem_memdisi_Intrinsic<"brev_std">;
+//
+// BUILTIN_INFO_NONCONST(circ_std,PTR_ftype_PTRDISISI,4)
+//
+def int_hexagon_circ_std :
+Hexagon_mem_memdisisi_Intrinsic<"circ_std">;
+//
+// BUILTIN_INFO_NONCONST(circ_stw,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_stw :
+Hexagon_mem_memsisisi_Intrinsic<"circ_stw">;
+//
+// BUILTIN_INFO_NONCONST(circ_sth,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_sth :
+Hexagon_mem_memsisisi_Intrinsic<"circ_sth">;
+//
+// BUILTIN_INFO_NONCONST(circ_sthhi,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_sthhi :
+Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
+//
+// BUILTIN_INFO_NONCONST(circ_stb,PTR_ftype_PTRSISISI,4)
+//
+def int_hexagon_circ_stb :
+Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
+
+
+def int_hexagon_mm256i_vaddw :
+Hexagon_v256_v256v256_Intrinsic<"_mm256i_vaddw">;
+
+
+// This one above will not be auto-generated,
+// so make sure you don't overwrite it.
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeq,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpeqp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpeqp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtp,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtp">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtup,QI_ftype_DIDI,2)
+//
+def int_hexagon_C2_cmpgtup :
+Hexagon_si_didi_Intrinsic<"HEXAGON_C2_cmpgtup">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneqi,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpeq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_rcmpneq,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_rcmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_rcmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsset :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsset,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsset :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsset">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclr,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpeqi">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgti,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgti">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgtui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgei,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgei :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgei">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpgeui,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpgeui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpgeui">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmplt,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmplt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmplt">;
+//
+// BUILTIN_INFO(HEXAGON.C2_cmpltu,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_cmpltu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_cmpltu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_bitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C2_bitsclri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_bitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_nbitsclri,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_nbitsclri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_nbitsclri">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneqi,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneqi">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpltei,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpltei :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpltei">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteui,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteui">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmpneq,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmpneq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmpneq">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplte,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplte :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplte">;
+//
+// BUILTIN_INFO(HEXAGON.C4_cmplteu,QI_ftype_SISI,2)
+//
+def int_hexagon_C4_cmplteu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C4_cmplteu">;
+//
+// BUILTIN_INFO(HEXAGON.C2_and,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_and">;
+//
+// BUILTIN_INFO(HEXAGON.C2_or,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_or">;
+//
+// BUILTIN_INFO(HEXAGON.C2_xor,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_xor">;
+//
+// BUILTIN_INFO(HEXAGON.C2_andn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_not,QI_ftype_QI,1)
+//
+def int_hexagon_C2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON_C2_not">;
+//
+// BUILTIN_INFO(HEXAGON.C2_orn,QI_ftype_QIQI,2)
+//
+def int_hexagon_C2_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_C2_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_and,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_or,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_and_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_and_orn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_and_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_andn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.C4_or_orn,QI_ftype_QIQIQI,3)
+//
+def int_hexagon_C4_or_orn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_C4_or_orn">;
+//
+// BUILTIN_INFO(HEXAGON.C2_pxfer_map,QI_ftype_QI,1)
+//
+def int_hexagon_C2_pxfer_map :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_pxfer_map">;
+//
+// BUILTIN_INFO(HEXAGON.C2_any8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_any8 :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_any8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_all8,QI_ftype_QI,1)
+//
+def int_hexagon_C2_all8 :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_all8">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vitpack,SI_ftype_QIQI,2)
+//
+def int_hexagon_C2_vitpack :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C2_vitpack">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mux,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_mux :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_mux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxii,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxii :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxii">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxir,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxir :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxir">;
+//
+// BUILTIN_INFO(HEXAGON.C2_muxri,SI_ftype_QISISI,3)
+//
+def int_hexagon_C2_muxri :
+Hexagon_si_qisisi_Intrinsic<"HEXAGON_C2_muxri">;
+//
+// BUILTIN_INFO(HEXAGON.C2_vmux,DI_ftype_QIDIDI,3)
+//
+def int_hexagon_C2_vmux :
+Hexagon_di_qididi_Intrinsic<"HEXAGON_C2_vmux">;
+//
+// BUILTIN_INFO(HEXAGON.C2_mask,DI_ftype_QI,1)
+//
+def int_hexagon_C2_mask :
+Hexagon_di_qi_Intrinsic<"HEXAGON_C2_mask">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbeq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbeq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbeqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbeq_any,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbeq_any :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbeq_any">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpbgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpbgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A4_vcmpbgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A4_vcmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpbgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpbgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpbgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbeqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbeqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbeqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpbgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpbgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpbgti">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpheq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpheq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmphgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmphgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpheqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpheqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmphgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmphgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmphgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheq,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheq :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheq">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgt,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgt :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgt">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtu,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmpheqi,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmpheqi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmpheqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgti,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgti :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cmphgtui,QI_ftype_SISI,2)
+//
+def int_hexagon_A4_cmphgtui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cmphgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpweq,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpweq :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpweq">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgt,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgt :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgt">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vcmpwgtu,QI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vcmpwgtu :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A2_vcmpwgtu">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpweqi,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpweqi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpweqi">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgti,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgti :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vcmpwgtui,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_vcmpwgtui :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_vcmpwgtui">;
+//
+// BUILTIN_INFO(HEXAGON.A4_boundscheck,QI_ftype_SIDI,2)
+//
+def int_hexagon_A4_boundscheck :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_A4_boundscheck">;
+//
+// BUILTIN_INFO(HEXAGON.A4_tlbmatch,QI_ftype_DISI,2)
+//
+def int_hexagon_A4_tlbmatch :
+Hexagon_si_disi_Intrinsic<"HEXAGON_A4_tlbmatch">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrpr,SI_ftype_QI,1)
+//
+def int_hexagon_C2_tfrpr :
+Hexagon_si_qi_Intrinsic<"HEXAGON_C2_tfrpr">;
+//
+// BUILTIN_INFO(HEXAGON.C2_tfrrp,QI_ftype_SI,1)
+//
+def int_hexagon_C2_tfrrp :
+Hexagon_si_si_Intrinsic<"HEXAGON_C2_tfrrp">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9 :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9">;
+//
+// BUILTIN_INFO(HEXAGON.C4_fastcorner9_not,QI_ftype_QIQI,2)
+//
+def int_hexagon_C4_fastcorner9_not :
+Hexagon_si_qiqi_Intrinsic<"HEXAGON_C4_fastcorner9_not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_acc_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_acc_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_acc_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_nac_sat_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpy_nac_sat_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpy_nac_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_hl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_lh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_sat_rnd_ll_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_sat_rnd_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_sat_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyd_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyd_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_hl_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_lh_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyd_rnd_ll_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyd_rnd_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyd_rnd_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_acc_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_acc_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_hl_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_hl_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_lh_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_lh_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s0,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s0 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_nac_ll_s1,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_mpyu_nac_ll_s1 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_mpyu_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_hl_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_hl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_lh_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_lh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s0,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_ll_s1,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_ll_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_acc_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_acc_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_acc_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_hl_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_hl_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_lh_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_lh_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_nac_ll_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_mpyud_nac_ll_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_mpyud_nac_ll_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_hl_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_hl_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_hl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_lh_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_lh_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_lh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyud_ll_s1,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyud_ll_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_mpyud_ll_s1">;
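+//
+// Editorial note: in BUILTIN_INFO(name,RET_ftype_ARGS,n) the mnemonics are
+// GCC-style machine modes: SI = 32-bit integer, DI = 64-bit register pair,
+// SF = single-precision float, QI = byte-sized predicate, with a "U"
+// prefix for unsigned. The TableGen classes encode only operand widths, so
+// the unsigned signatures (USI/UDI) reuse the same si/di intrinsic
+// classes; signedness is carried by the operation name itself.
+//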
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysmi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysmi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysmi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsip,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsip :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsip">;
+//
+// BUILTIN_INFO(HEXAGON.M2_macsin,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_macsin :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_macsin">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyss_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyss_nac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_s0,UDI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyuu_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_dpmpyuu_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_acc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_acc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyuu_nac_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_dpmpyuu_nac_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_dpmpyuu_nac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpy_up_s1_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpy_up_s1_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpy_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyu_up,USI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyu_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyu_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpysu_up,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpysu_up :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpysu_up">;
+//
+// BUILTIN_INFO(HEXAGON.M2_dpmpyss_rnd_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_dpmpyss_rnd_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_dpmpyss_rnd_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mac_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M4_nac_up_s1_sat,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_nac_up_s1_sat :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_nac_up_s1_sat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyi,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mpyui,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_mpyui :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_mpyui">;
+//
+// BUILTIN_INFO(HEXAGON.M2_maci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_maci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_maci">;
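+//
+// Editorial note: a minimal C usage sketch, assuming the usual Clang
+// builtin naming (__builtin_HEXAGON_<name>, available when targeting
+// Hexagon). M2_mpyi is the plain 32x32 multiply returning the low 32 bits,
+// and M2_maci is its accumulating form:
+//
+//   int prod = __builtin_HEXAGON_M2_mpyi(a, b);    // prod = a * b
+//   acc = __builtin_HEXAGON_M2_maci(acc, a, b);    // acc += a * b
+//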
+//
+// BUILTIN_INFO(HEXAGON.M2_acci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_acci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_acci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_accii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_accii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_accii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_nacci,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_nacci :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_nacci">;
+//
+// BUILTIN_INFO(HEXAGON.M2_naccii,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_naccii :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_naccii">;
+//
+// BUILTIN_INFO(HEXAGON.M2_subacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_subacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_subacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr_u2,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr_u2 :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr_u2">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addr,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addr :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addr">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyri_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyri_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyri_addi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_mpyrr_addi,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_mpyrr_addi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_mpyrr_addi">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2s_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2s_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2s_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2su_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2su_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_vmpy2su_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2su_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2su_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2su_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s0pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s0pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s0pack">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2s_s1pack,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_vmpy2s_s1pack :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_vmpy2s_s1pack">;
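+//
+// Editorial note: the M2_vmpy2s family performs two 16x16 signed
+// multiplies in parallel, one per halfword lane, producing two saturated
+// 32-bit results in a 64-bit register pair. As the SI return type
+// suggests, the "pack" variants instead round the two products and pack
+// their high halfwords back into a single 32-bit result.
+//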
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_vmac2 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_vmac2">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmpy2es_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vmpy2es_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vmpy2es_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vmac2es,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vmac2es :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vmac2es">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmac_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrmac_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrmac_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrmpy_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrmpy_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrmpy_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s0,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s0 :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpyrs_s1,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpyrs_s1 :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vdmpyrs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybuu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybuu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbuu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbuu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vrmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vrmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vrmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vrmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vrmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybuu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybuu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmpybsu,DI_ftype_SISI,2)
+//
+def int_hexagon_M5_vmpybsu :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M5_vmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbuu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbuu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbuu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vmacbsu,DI_ftype_DISISI,3)
+//
+def int_hexagon_M5_vmacbsu :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M5_vmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmpybsu,DI_ftype_DIDI,2)
+//
+def int_hexagon_M5_vdmpybsu :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M5_vdmpybsu">;
+//
+// BUILTIN_INFO(HEXAGON.M5_vdmacbsu,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M5_vdmacbsu :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M5_vdmacbsu">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmacs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vdmacs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vdmacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vdmpys_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vdmpys_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vdmpys_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrs_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrs_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s0,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s0 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyrsc_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyrsc_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_cmpyrsc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacsc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpys_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpys_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpys_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpysc_s1,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpysc_s1 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpysc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacs_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacs_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cnacsc_s1,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cnacsc_s1 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cnacsc_s1">;
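+//
+// Editorial note: the cmpy/cmac/cnac group treats each 32-bit source as a
+// complex value, real part in the low halfword and imaginary part in the
+// high halfword. A trailing "c" (cmpysc, cmacsc, cnacsc) conjugates the
+// second operand; "cmac" accumulates the complex product and "cnac"
+// subtracts it.
+//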
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1,DI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1 :
+Hexagon_di_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_acc_s1,DI_ftype_DIDISI,3)
+//
+def int_hexagon_M2_vrcmpys_acc_s1 :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_M2_vrcmpys_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpys_s1rp,SI_ftype_DISI,2)
+//
+def int_hexagon_M2_vrcmpys_s1rp :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M2_vrcmpys_s1rp">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacls_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacls_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacls_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmachs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmachs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmachs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyl_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyl_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyl_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyeh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyeh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyeh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyeh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyeh_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M4_vrmpyoh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M4_vrmpyoh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vrmpyoh_acc_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_vrmpyoh_acc_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_vrmpyoh_acc_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_rs1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_rs1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyl_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyl_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyl_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_hmmpyh_s1,SI_ftype_SISI,2)
+//
+def int_hexagon_M2_hmmpyh_s1 :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_M2_hmmpyh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_s1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_s1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_s1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_s1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_s1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmaculs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmaculs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmaculs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmacuhs_rs1,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_mmacuhs_rs1 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_mmacuhs_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyul_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyul_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyul_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_mmpyuh_rs1,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_mmpyuh_rs1 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_mmpyuh_rs1">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0 :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmaci_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmaci_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmaci_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmacr_s0c,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vrcmacr_s0c :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vrcmacr_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmaci_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmaci_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmaci_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmacr_s0,DI_ftype_DISISI,3)
+//
+def int_hexagon_M2_cmacr_s0 :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M2_cmacr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0 :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyi_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyi_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyi_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vrcmpyr_s0c,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vrcmpyr_s0c :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vrcmpyr_s0c">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyi_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyi_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyi_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M2_cmpyr_s0,DI_ftype_SISI,2)
+//
+def int_hexagon_M2_cmpyr_s0 :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M2_cmpyr_s0">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_wh,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_wh :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_wh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyi_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyi_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyi_whc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_cmpyr_whc,SI_ftype_DISI,2)
+//
+def int_hexagon_M4_cmpyr_whc :
+Hexagon_si_disi_Intrinsic<"HEXAGON_M4_cmpyr_whc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s0_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s0_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s0_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_i,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_i :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmpy_s1_sat_r,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vcmpy_s1_sat_r :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vcmpy_s1_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_i,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_i :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_i">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vcmac_s0_sat_r,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M2_vcmac_s0_sat_r :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M2_vcmac_s0_sat_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcrotate,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcrotate :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate_acc,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S4_vrcrotate_acc :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S4_vrcrotate_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vrcrotate,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_vrcrotate :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_vrcrotate">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vcnegh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_vcnegh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_vcnegh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrcnegh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vrcnegh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vrcnegh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_pmpyw :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_pmpyw">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh,DI_ftype_SISI,2)
+//
+def int_hexagon_M4_vpmpyh :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_M4_vpmpyh">;
+//
+// BUILTIN_INFO(HEXAGON.M4_pmpyw_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_pmpyw_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_pmpyw_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_vpmpyh_acc,DI_ftype_DISISI,3)
+//
+def int_hexagon_M4_vpmpyh_acc :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_M4_vpmpyh_acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_add,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_add :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_add">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sub,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_sub :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_sub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subsat,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subsat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subsat">;
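+//
+// Editorial note: a short worked example of the saturating forms, assuming
+// the Clang builtin naming used above. A2_addsat clamps the 32-bit sum to
+// [INT32_MIN, INT32_MAX] instead of wrapping:
+//
+//   __builtin_HEXAGON_A2_addsat(0x7fffffff, 1);  // 0x7fffffff, not INT_MIN
+//   __builtin_HEXAGON_A2_add(0x7fffffff, 1);     // wraps to 0x80000000
+//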
+//
+// BUILTIN_INFO(HEXAGON.A2_addi,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_l16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_l16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_l16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_l16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_addh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_addh_h16_sat_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subh_h16_sat_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subh_h16_sat_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subh_h16_sat_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_aslh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_aslh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_aslh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_asrh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_asrh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_asrh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addpsat,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_addpsat :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_addpsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_addsp,DI_ftype_SIDI,2)
+//
+def int_hexagon_A2_addsp :
+Hexagon_di_sidi_Intrinsic<"HEXAGON_A2_addsp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_subp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_subp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_neg,SI_ftype_SI,1)
+//
+def int_hexagon_A2_neg :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_neg">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negsat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_negsat :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_negsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abs,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abs :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_abssat,SI_ftype_SI,1)
+//
+def int_hexagon_A2_abssat :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_abssat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vconj,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vconj :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vconj">;
+//
+// BUILTIN_INFO(HEXAGON.A2_negp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_negp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_negp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_absp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_absp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_absp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_max,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_max :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_max">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_maxu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_maxu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_min,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_min :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_min">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minu,USI_ftype_SISI,2)
+//
+def int_hexagon_A2_minu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_minu">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_maxup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_maxup :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_maxup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_minup,UDI_ftype_DIDI,2)
+//
+def int_hexagon_A2_minup :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_minup">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfr,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfr :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrsi,SI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrsi :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_tfrsi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_tfrp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_tfrp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrpi,DI_ftype_SI,1)
+//
+def int_hexagon_A2_tfrpi :
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_tfrpi">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxtb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_zxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_zxth :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_zxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxth,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sxth :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sxth">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combinew,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combinew :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combinew">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineri,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineri :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_combineir,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_combineir :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_combineir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combineii,DI_ftype_SISI,2)
+//
+def int_hexagon_A2_combineii :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A2_combineii">;
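+//
+// Editorial note: A2_combinew packs its two 32-bit operands into a 64-bit
+// pair, first operand in the high word and second in the low word. The
+// ri/ir/ii variants take one or both operands as immediates, which is why
+// their BUILTIN_INFO signatures still read SISI: immediates are passed as
+// plain SI values.
+//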
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_hl,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_hl :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_hl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_lh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_lh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_lh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_combine_ll,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_combine_ll :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_combine_ll">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfril,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfril :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfril">;
+//
+// BUILTIN_INFO(HEXAGON.A2_tfrih,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_tfrih :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_tfrih">;
+//
+// BUILTIN_INFO(HEXAGON.A2_and,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_and :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_and">;
+//
+// BUILTIN_INFO(HEXAGON.A2_or,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_or :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_or">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xor,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_xor :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_xor">;
+//
+// BUILTIN_INFO(HEXAGON.A2_not,SI_ftype_SI,1)
+//
+def int_hexagon_A2_not :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_not">;
+//
+// BUILTIN_INFO(HEXAGON.M2_xor_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_M2_xor_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M2_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_xacc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_M4_xor_xacc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_M4_xor_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_andn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_andn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_orn,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_orn :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_orn">;
+//
+// BUILTIN_INFO(HEXAGON.A4_andnp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_andnp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_andnp">;
+//
+// BUILTIN_INFO(HEXAGON.A4_ornp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A4_ornp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A4_ornp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subaddi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subaddi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subaddi">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_and_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_and_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_and_xor">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_andn">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_or_xor,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_or_xor :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_or_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andix,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andix :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andix">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_andi,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_andi :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_andi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_or_ori,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_or_ori :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_or_ori">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_and">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_or">;
+//
+// BUILTIN_INFO(HEXAGON.M4_xor_andn,SI_ftype_SISISI,3)
+//
+def int_hexagon_M4_xor_andn :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_M4_xor_andn">;
+//
+// BUILTIN_INFO(HEXAGON.A2_subri,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_subri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_subri">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_andir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_andir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orir,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_orir :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_orir">;
+//
+// BUILTIN_INFO(HEXAGON.A2_andp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_andp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_andp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_orp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_orp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_orp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_xorp,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_xorp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_xorp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_notp,DI_ftype_DI,1)
+//
+def int_hexagon_A2_notp :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_notp">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sxtw,DI_ftype_SI,1)
+//
+def int_hexagon_A2_sxtw :
+Hexagon_di_si_Intrinsic<"HEXAGON_A2_sxtw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_sat :
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_roundsat,SI_ftype_DI,1)
+//
+def int_hexagon_A2_roundsat :
+Hexagon_si_di_Intrinsic<"HEXAGON_A2_roundsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_sath,SI_ftype_SI,1)
+//
+def int_hexagon_A2_sath :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_sath">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satuh,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satuh :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satub,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satub :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_satb,SI_ftype_SI,1)
+//
+def int_hexagon_A2_satb :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_satb">;
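+//
+// Editorial note: the scalar saturation group clamps a 32-bit value to the
+// range of a narrower type: sath to [-32768, 32767], satuh to [0, 65535],
+// satb to [-128, 127], and satub to [0, 255]. A2_sat performs the
+// analogous 64-bit to 32-bit clamp.
+//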
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddb_map">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddubs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddubs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vadduhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vadduhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A5_vaddhubs,SI_ftype_DIDI,2)
+//
+def int_hexagon_A5_vaddhubs :
+Hexagon_si_didi_Intrinsic<"HEXAGON_A5_vaddhubs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vaddws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vaddws :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vaddws">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddw,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddw">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddh">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxaddsubhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxaddsubhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxaddsubhr">;
+//
+// BUILTIN_INFO(HEXAGON.S4_vxsubaddhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_vxsubaddhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_vxsubaddhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svavghs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svavghs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svavghs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svnavgh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svnavgh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svaddhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svaddhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svaddhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svadduhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svadduhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svadduhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubh,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubh :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_svsubuhs,SI_ftype_SISI,2)
+//
+def int_hexagon_A2_svsubuhs :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A2_svsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vraddub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vraddub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vraddub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vraddub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vraddub_acc">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vraddh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vraddh :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vraddh">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vradduh,SI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vradduh :
+Hexagon_si_didi_Intrinsic<"HEXAGON_M2_vradduh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubb_map,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubb_map :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubb_map">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsububs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsububs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsububs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubuhs,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubuhs :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubuhs">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vsubws,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vsubws :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vsubws">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsh,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsh :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabshsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabshsat :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabshsat">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabsw,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabsw :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabsw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vabswsat,DI_ftype_DI,1)
+//
+def int_hexagon_A2_vabswsat :
+Hexagon_di_di_Intrinsic<"HEXAGON_A2_vabswsat">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffw,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffw">;
+//
+// BUILTIN_INFO(HEXAGON.M2_vabsdiffh,DI_ftype_DIDI,2)
+//
+def int_hexagon_M2_vabsdiffh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_M2_vabsdiffh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vrsadub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vrsadub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vrsadub_acc,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_A2_vrsadub_acc :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_A2_vrsadub_acc">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavgwcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavgwcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavgwcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghcr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghcr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghcr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguwr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguwr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguwr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavgubr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavgubr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavgubr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavguhr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavguhr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavguhr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vavghr">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vnavghr,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vnavghr :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vnavghr">;
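+//
+// Editorial note: the vavg family averages corresponding byte, halfword,
+// or word lanes of the two 64-bit operands; vnavg halves the lane-wise
+// difference instead. A trailing "r" selects conventional rounding and
+// "cr" selects convergent (round-to-even) rounding.
+//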
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_ri_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_ri_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_ri_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_round_rr_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_round_rr_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_round_rr_sat">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_ri,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_ri :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_ri">;
+//
+// BUILTIN_INFO(HEXAGON.A4_cround_rr,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_cround_rr :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_cround_rr">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuh,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuh :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrminuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrminuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_vrmaxuw,DI_ftype_DIDISI,3)
+//
+def int_hexagon_A4_vrmaxuw :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_A4_vrmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxb,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxb">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxub,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxub :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxub">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuh,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuh">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vminuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vminuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vminuw">;
+//
+// BUILTIN_INFO(HEXAGON.A2_vmaxuw,DI_ftype_DIDI,2)
+//
+def int_hexagon_A2_vmaxuw :
+Hexagon_di_didi_Intrinsic<"HEXAGON_A2_vmaxuw">;
+//
+// BUILTIN_INFO(HEXAGON.A4_modwrapu,SI_ftype_SISI,2)
+//
+def int_hexagon_A4_modwrapu :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_A4_modwrapu">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfadd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfadd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfadd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfsub,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfsub :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfsub">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmpy,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmpy :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmpy">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_sc,SF_ftype_SFSFSFQI,4)
+//
+def int_hexagon_F2_sffma_sc :
+Hexagon_sf_sfsfsfqi_Intrinsic<"HEXAGON_F2_sffma_sc">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffma_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffma_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffma_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffms_lib,SF_ftype_SFSFSF,3)
+//
+def int_hexagon_F2_sffms_lib :
+Hexagon_sf_sfsfsf_Intrinsic<"HEXAGON_F2_sffms_lib">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpeq,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpeq :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpgt,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpgt :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpge,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpge :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfcmpuo,QI_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfcmpuo :
+Hexagon_si_sfsf_Intrinsic<"HEXAGON_F2_sfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmax,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmax :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmax">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfmin,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sfmin :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sfmin">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfclass,QI_ftype_SFSI,2)
+//
+def int_hexagon_F2_sfclass :
+Hexagon_si_sfsi_Intrinsic<"HEXAGON_F2_sfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_p,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_p :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sfimm_n,SF_ftype_SI,1)
+//
+def int_hexagon_F2_sfimm_n :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_sfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupn,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupn :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupn">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupd,SF_ftype_SFSF,2)
+//
+def int_hexagon_F2_sffixupd :
+Hexagon_sf_sfsf_Intrinsic<"HEXAGON_F2_sffixupd">;
+//
+// BUILTIN_INFO(HEXAGON.F2_sffixupr,SF_ftype_SF,1)
+//
+def int_hexagon_F2_sffixupr :
+Hexagon_sf_sf_Intrinsic<"HEXAGON_F2_sffixupr">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpeq,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpeq :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpeq">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpgt,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpgt :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpgt">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpge,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpge :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpge">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfcmpuo,QI_ftype_DFDF,2)
+//
+def int_hexagon_F2_dfcmpuo :
+Hexagon_si_dfdf_Intrinsic<"HEXAGON_F2_dfcmpuo">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfclass,QI_ftype_DFSI,2)
+//
+def int_hexagon_F2_dfclass :
+Hexagon_si_dfsi_Intrinsic<"HEXAGON_F2_dfclass">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_p,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_p :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_p">;
+//
+// BUILTIN_INFO(HEXAGON.F2_dfimm_n,DF_ftype_SI,1)
+//
+def int_hexagon_F2_dfimm_n :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_dfimm_n">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2df,DF_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2df :
+Hexagon_df_sf_Intrinsic<"HEXAGON_F2_conv_sf2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2sf,SF_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2sf :
+Hexagon_sf_df_Intrinsic<"HEXAGON_F2_conv_df2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_uw2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_uw2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_uw2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_uw2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2sf,SF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2sf :
+Hexagon_sf_si_Intrinsic<"HEXAGON_F2_conv_w2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_w2df,DF_ftype_SI,1)
+//
+def int_hexagon_F2_conv_w2df :
+Hexagon_df_si_Intrinsic<"HEXAGON_F2_conv_w2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_ud2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_ud2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_ud2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_ud2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2sf,SF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2sf :
+Hexagon_sf_di_Intrinsic<"HEXAGON_F2_conv_d2sf">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_d2df,DF_ftype_DI,1)
+//
+def int_hexagon_F2_conv_d2df :
+Hexagon_df_di_Intrinsic<"HEXAGON_F2_conv_d2df">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2uw_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2uw_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2w_chop,SI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2w_chop :
+Hexagon_si_sf_Intrinsic<"HEXAGON_F2_conv_sf2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2ud_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2ud_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_sf2d_chop,DI_ftype_SF,1)
+//
+def int_hexagon_F2_conv_sf2d_chop :
+Hexagon_di_sf_Intrinsic<"HEXAGON_F2_conv_sf2d_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2uw_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2uw_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2uw_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2w_chop,SI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2w_chop :
+Hexagon_si_df_Intrinsic<"HEXAGON_F2_conv_df2w_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2ud_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2ud_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2ud_chop">;
+//
+// BUILTIN_INFO(HEXAGON.F2_conv_df2d_chop,DI_ftype_DF,1)
+//
+def int_hexagon_F2_conv_df2d_chop :
+Hexagon_di_df_Intrinsic<"HEXAGON_F2_conv_df2d_chop">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsl_r_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsl_r_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsl_r_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsl_r_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_p_xor,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsl_r_p_xor :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsl_r_p_xor">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_r_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_r_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_r_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_lsr_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_lsr_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_p :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_acc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_acc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_acc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_acc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_acc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_nac,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_nac :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_nac,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_nac :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_nac">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_xacc,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_xacc :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_xacc,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_xacc :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_xacc">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_and,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_and :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asr_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_lsr_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_lsr_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_or,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_asl_i_r_or :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_asl_i_r_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_and,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_and :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_and">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asr_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_lsr_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_lsr_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_p_or,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_asl_i_p_or :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_asl_i_p_or">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_r_sat,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asl_i_r_sat :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asl_i_r_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_r_rnd_goodsyntax,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_asr_i_r_rnd_goodsyntax :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_asr_i_r_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_p_rnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_p_rnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_p_rnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S4_lsli,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_lsli :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_lsli">;
+//
+// BUILTIN_INFO(HEXAGON.S2_addasl_rrri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_addasl_rrri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_addasl_rrri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_asl_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_asl_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_asl_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_andi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_andi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_andi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ori_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_ori_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_ori_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_addi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_addi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_addi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S4_subi_lsr_ri,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_subi_lsr_ri :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_subi_lsr_ri">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_valignib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_valignib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_valignrb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_valignrb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_valignrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vspliceib,DI_ftype_DIDISI,3)
+//
+def int_hexagon_S2_vspliceib :
+Hexagon_di_didisi_Intrinsic<"HEXAGON_S2_vspliceib">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplicerb,DI_ftype_DIDIQI,3)
+//
+def int_hexagon_S2_vsplicerb :
+Hexagon_di_didiqi_Intrinsic<"HEXAGON_S2_vsplicerb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsplatrh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsplatrb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_vsplatrb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_vsplatrb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_insert :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_insert">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxb_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxb_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxb_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxh_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxh_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxh_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxw_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxw_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxw_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tableidxd_goodsyntax,SI_ftype_SISISISI,4)
+//
+def int_hexagon_S2_tableidxd_goodsyntax :
+Hexagon_si_sisisisi_Intrinsic<"HEXAGON_S2_tableidxd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitspliti,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitspliti :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitspliti">;
+//
+// BUILTIN_INFO(HEXAGON.A4_bitsplit,DI_ftype_SISI,2)
+//
+def int_hexagon_A4_bitsplit :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_A4_bitsplit">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract,SI_ftype_SISISI,3)
+//
+def int_hexagon_S4_extract :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S4_extract">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu,SI_ftype_SISISI,3)
+//
+def int_hexagon_S2_extractu :
+Hexagon_si_sisisi_Intrinsic<"HEXAGON_S2_extractu">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp,DI_ftype_DIDISISI,4)
+//
+def int_hexagon_S2_insertp :
+Hexagon_di_didisisi_Intrinsic<"HEXAGON_S2_insertp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp,DI_ftype_DISISI,3)
+//
+def int_hexagon_S4_extractp :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S4_extractp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup,DI_ftype_DISISI,3)
+//
+def int_hexagon_S2_extractup :
+Hexagon_di_disisi_Intrinsic<"HEXAGON_S2_extractup">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insert_rp,SI_ftype_SISIDI,3)
+//
+def int_hexagon_S2_insert_rp :
+Hexagon_si_sisidi_Intrinsic<"HEXAGON_S2_insert_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extract_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S4_extract_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S4_extract_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractu_rp,SI_ftype_SIDI,2)
+//
+def int_hexagon_S2_extractu_rp :
+Hexagon_si_sidi_Intrinsic<"HEXAGON_S2_extractu_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_insertp_rp,DI_ftype_DIDIDI,3)
+//
+def int_hexagon_S2_insertp_rp :
+Hexagon_di_dididi_Intrinsic<"HEXAGON_S2_insertp_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_extractp_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S4_extractp_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S4_extractp_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_extractup_rp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_extractup_rp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_extractup_rp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_i,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_i,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_i :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_i">;
+//
+// BUILTIN_INFO(HEXAGON.S2_tstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S2_tstbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_tstbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S4_ntstbit_r,QI_ftype_SISI,2)
+//
+def int_hexagon_S4_ntstbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_ntstbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_setbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_setbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_setbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_togglebit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_togglebit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_togglebit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clrbit_r,SI_ftype_SISI,2)
+//
+def int_hexagon_S2_clrbit_r :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S2_clrbit_r">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_rnd_sat_goodsyntax,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_rnd_sat_goodsyntax :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_rnd_sat_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S5_asrhub_sat,SI_ftype_DISI,2)
+//
+def int_hexagon_S5_asrhub_sat :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S5_asrhub_sat">;
+//
+// BUILTIN_INFO(HEXAGON.S5_vasrhrnd_goodsyntax,DI_ftype_DISI,2)
+//
+def int_hexagon_S5_vasrhrnd_goodsyntax :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S5_vasrhrnd_goodsyntax">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vh,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vh :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_i_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_i_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_i_svw_trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_svw_trun,SI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_svw_trun :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S2_asr_r_svw_trun">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_i_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_i_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_i_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asr_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_asl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_asl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_asl_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsr_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsr_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsr_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lsl_r_vw,DI_ftype_DISI,2)
+//
+def int_hexagon_S2_lsl_r_vw :
+Hexagon_di_disi_Intrinsic<"HEXAGON_S2_lsl_r_vw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vrndpackwhs,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vrndpackwhs :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vrndpackwhs">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxtbh,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxtbh :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxtbh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathub,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathub :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathub">;
+//
+// BUILTIN_INFO(HEXAGON.S2_svsathb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_svsathb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_svsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsathb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunohb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunohb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunohb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunewh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunewh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunewh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunowh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_vtrunowh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_vtrunowh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vtrunehb,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vtrunehb :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vtrunehb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vsxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vsxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vzxthw,DI_ftype_SI,1)
+//
+def int_hexagon_S2_vzxthw :
+Hexagon_di_si_Intrinsic<"HEXAGON_S2_vzxthw">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh,SI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_vsatwuh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_packhl,DI_ftype_SISI,2)
+//
+def int_hexagon_S2_packhl :
+Hexagon_di_sisi_Intrinsic<"HEXAGON_S2_packhl">;
+//
+// BUILTIN_INFO(HEXAGON.A2_swiz,SI_ftype_SI,1)
+//
+def int_hexagon_A2_swiz :
+Hexagon_si_si_Intrinsic<"HEXAGON_A2_swiz">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathub_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathub_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathub_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsathb_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsathb_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsathb_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwh_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_vsatwuh_nopack,DI_ftype_DI,1)
+//
+def int_hexagon_S2_vsatwuh_nopack :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_vsatwuh_nopack">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffob,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffob :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffob">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeb,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeb :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffoh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffoh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffoh">;
+//
+// BUILTIN_INFO(HEXAGON.S2_shuffeh,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_shuffeh :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_shuffeh">;
+//
+// BUILTIN_INFO(HEXAGON.S5_popcountp,SI_ftype_DI,1)
+//
+def int_hexagon_S5_popcountp :
+Hexagon_si_di_Intrinsic<"HEXAGON_S5_popcountp">;
+//
+// BUILTIN_INFO(HEXAGON.S4_parity,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_parity :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_parity">;
+//
+// BUILTIN_INFO(HEXAGON.S2_parityp,SI_ftype_DIDI,2)
+//
+def int_hexagon_S2_parityp :
+Hexagon_si_didi_Intrinsic<"HEXAGON_S2_parityp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_lfsp,DI_ftype_DIDI,2)
+//
+def int_hexagon_S2_lfsp :
+Hexagon_di_didi_Intrinsic<"HEXAGON_S2_lfsp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbnorm,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clbnorm :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clbnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbaddi,SI_ftype_SISI,2)
+//
+def int_hexagon_S4_clbaddi :
+Hexagon_si_sisi_Intrinsic<"HEXAGON_S4_clbaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpnorm,SI_ftype_DI,1)
+//
+def int_hexagon_S4_clbpnorm :
+Hexagon_si_di_Intrinsic<"HEXAGON_S4_clbpnorm">;
+//
+// BUILTIN_INFO(HEXAGON.S4_clbpaddi,SI_ftype_DISI,2)
+//
+def int_hexagon_S4_clbpaddi :
+Hexagon_si_disi_Intrinsic<"HEXAGON_S4_clbpaddi">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clb,SI_ftype_SI,1)
+//
+def int_hexagon_S2_clb :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_clb">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl0 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_cl1 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_cl1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_clbp,SI_ftype_DI,1)
+//
+def int_hexagon_S2_clbp :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_clbp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl0p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_cl1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_cl1p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_cl1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brev,SI_ftype_SI,1)
+//
+def int_hexagon_S2_brev :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_brev">;
+//
+// BUILTIN_INFO(HEXAGON.S2_brevp,DI_ftype_DI,1)
+//
+def int_hexagon_S2_brevp :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_brevp">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct0 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct0">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1,SI_ftype_SI,1)
+//
+def int_hexagon_S2_ct1 :
+Hexagon_si_si_Intrinsic<"HEXAGON_S2_ct1">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct0p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_ct0p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct0p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_ct1p,SI_ftype_DI,1)
+//
+def int_hexagon_S2_ct1p :
+Hexagon_si_di_Intrinsic<"HEXAGON_S2_ct1p">;
+//
+// BUILTIN_INFO(HEXAGON.S2_interleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_interleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_interleave">;
+//
+// BUILTIN_INFO(HEXAGON.S2_deinterleave,DI_ftype_DI,1)
+//
+def int_hexagon_S2_deinterleave :
+Hexagon_di_di_Intrinsic<"HEXAGON_S2_deinterleave">;
+//
+// BUILTIN_INFO(HEXAGON.dcfetch_A,v_ftype_DI*,1)
+//
+def int_hexagon_prefetch :
+Hexagon_void_si_Intrinsic<"HEXAGON_prefetch">;
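+//
+// For orientation (editorial note): each def above maps to an IR intrinsic by
+// replacing '_' with '.' (int_hexagon_A2_vavguw becomes @llvm.hexagon.A2.vavguw)
+// and, via its GCCIntSuffix, to a clang builtin such as
+// __builtin_HEXAGON_A2_vavguw. An illustrative call, with hypothetical names:
+//   %r = call i64 @llvm.hexagon.A2.vavguw(i64 %a, i64 %b)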
+
+def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
+def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
+
+// Mark locked loads and stores as read/write to prevent any accidental reordering.
+def int_hexagon_L2_loadw_locked :
+Hexagon_Intrinsic<"HEXAGON_L2_loadw_locked", [llvm_i32_ty], [llvm_ptr32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+def int_hexagon_L4_loadd_locked :
+Hexagon_Intrinsic<"HEXAGON_L4_loadd_locked", [llvm_i64_ty], [llvm_ptr64_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+
+def int_hexagon_S2_storew_locked :
+Hexagon_Intrinsic<"HEXAGON_S2_storew_locked", [llvm_i32_ty],
+ [llvm_ptr32_ty, llvm_i32_ty], [IntrReadWriteArgMem, NoCapture<0>]>;
+def int_hexagon_S4_stored_locked :
+Hexagon_Intrinsic<"HEXAGON_S4_stored_locked", [llvm_i32_ty],
+ [llvm_ptr64_ty, llvm_i64_ty], [IntrReadWriteArgMem, NoCapture<0>]>;
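+
+// An illustrative IR sketch (editorial; %old, %new and %ok are hypothetical
+// names) of one load-locked/store-conditional step built on the defs above:
+//   %old = call i32 @llvm.hexagon.L2.loadw.locked(i32* %p)
+//   %new = add i32 %old, 1
+//   %ok  = call i32 @llvm.hexagon.S2.storew.locked(i32* %p, i32 %new)
+// The returned status is expected to be nonzero when the store succeeds.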
+
+// V60
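+//
+// Naming note (editorial): in the class names below, v512/v1024/v2048 denote
+// the HVX vector width in bits, modeled as <16 x i32>, <32 x i32> and
+// <64 x i32> respectively; the _128B suffix marks the 128-byte vector-length
+// variants, and v64i/v128i denote the predicate types llvm_v512i1_ty and
+// llvm_v1024i1_ty.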
+
+class Hexagon_v2048v2048_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_hi_W
+// tag : V6_lo_W
+class Hexagon_v512v1024_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// tag : V6_hi_W_128B
+// tag : V6_lo_W_128B
+class Hexagon_v1024v2048_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty],
+ [IntrNoMem]>;
+
+class Hexagon_v1024v1024_Intrinsic_T<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
+// tag : V6_hi
+def int_hexagon_V6_hi :
+Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_hi">;
+
+// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
+// tag : V6_lo
+def int_hexagon_V6_lo :
+Hexagon_v512v1024_Intrinsic_T<"HEXAGON_V6_lo">;
+
+// BUILTIN_INFO(HEXAGON.V6_hi_W,VI_ftype_VI,1)
+// tag : V6_hi_128B
+def int_hexagon_V6_hi_128B :
+Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_hi_128B">;
+
+// BUILTIN_INFO(HEXAGON.V6_lo_W,VI_ftype_VI,1)
+// tag : V6_lo_128B
+def int_hexagon_V6_lo_128B :
+Hexagon_v1024v2048_Intrinsic_T<"HEXAGON_V6_lo_128B">;
+
+// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
+// tag : V6_vassignp
+def int_hexagon_V6_vassignp :
+Hexagon_v1024v1024_Intrinsic_T<"HEXAGON_V6_vassignp">;
+
+// BUILTIN_INFO(HEXAGON.V6_vassignp,VI_ftype_VI,1)
+// tag : V6_vassignp_128B
+def int_hexagon_V6_vassignp_128B :
+Hexagon_v2048v2048_Intrinsic_T<"HEXAGON_V6_vassignp_128B">;
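+
+// An illustrative use of the hi/lo defs (editorial; %pair, %hi and %lo are
+// hypothetical names), splitting a vector pair into its halves:
+//   %hi = call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %pair)
+//   %lo = call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %pair)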
+
+//
+// Hexagon_iii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_r
+class Hexagon_iii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_p
+class Hexagon_LLiLLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_iiii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_r_acc
+class Hexagon_iiii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_rol_i_p_acc
+class Hexagon_LLiLLiLLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_valignb
+class Hexagon_v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_valignb_128B
+class Hexagon_v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vror
+class Hexagon_v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vror_128B
+class Hexagon_v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackub
+class Hexagon_v1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackub_128B
+class Hexagon_v2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackob
+class Hexagon_v1024v1024v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vunpackob_128B
+class Hexagon_v2048v2048v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vpackeb
+class Hexagon_v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vpackeb_128B
+class Hexagon_v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpybus_dv_128B
+class Hexagon_v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpybus_dv_acc_128B
+class Hexagon_v2048v2048v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhvsat_acc
+class Hexagon_v512v512v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhvsat_acc_128B
+class Hexagon_v1024v1024v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat
+class Hexagon_v512v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_128B
+class Hexagon_v1024v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_acc
+class Hexagon_v512v512v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vdmpyhisat_acc_128B
+class Hexagon_v1024v1024v2048i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v64i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi
+class Hexagon_v1024v1024ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_128B
+class Hexagon_v2048v2048ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_acc
+class Hexagon_v1024v1024v1024ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vrmpyubi_acc_128B
+class Hexagon_v2048v2048v2048ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddb_dv_128B
+class Hexagon_v2048v2048v2048_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddubh
+class Hexagon_v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddubh_128B
+class Hexagon_v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vd0
+class Hexagon_v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vd0_128B
+class Hexagon_v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddbq
+class Hexagon_v512v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vaddbq_128B
+class Hexagon_v1024v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsh
+class Hexagon_v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vabsh_128B
+class Hexagon_v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpybv_acc
+class Hexagon_v1024v1024v512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpybv_acc_128B
+class Hexagon_v2048v2048v1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub
+class Hexagon_v1024v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_128B
+class Hexagon_v2048v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_acc
+class Hexagon_v1024v1024v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vmpyub_acc_128B
+class Hexagon_v2048v2048v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt
+class Hexagon_v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v512i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_128B
+class Hexagon_v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v1024i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_acc
+class Hexagon_v512v512v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v512i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandqrt_acc_128B
+class Hexagon_v1024v1024v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v1024i1_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt
+class Hexagon_v64iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_128B
+class Hexagon_v128iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_acc
+class Hexagon_v64iv64iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vandvrt_acc_128B
+class Hexagon_v128iv128iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw
+class Hexagon_v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_128B
+class Hexagon_v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_and
+class Hexagon_v64iv64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vgtw_and_128B
+class Hexagon_v128iv128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_or
+class Hexagon_v64iv64iv64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty,llvm_v512i1_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_or_128B
+class Hexagon_v128iv128iv128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty,llvm_v1024i1_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_not
+class Hexagon_v64iv64i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_v512i1_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_not_128B
+class Hexagon_v128iv128i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_v1024i1_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2
+class Hexagon_v64ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v512i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
+// tag : V6_pred_scalar2_128B
+class Hexagon_v128ii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v1024i1_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
+// tag : V6_vswap
+class Hexagon_v1024v64iv512v512_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v512i1_ty,llvm_v16i32_ty,llvm_v16i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+// tag : V6_vswap_128B
+class Hexagon_v2048v128iv1024v1024_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v1024i1_ty,llvm_v32i32_ty,llvm_v32i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vshuffvdd
+class Hexagon_v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vshuffvdd_128B
+class Hexagon_v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_extractw
+class Hexagon_iv512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_extractw_128B
+class Hexagon_iv1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i32_ty], [llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplatw
+class Hexagon_v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_lvsplatw_128B
+class Hexagon_v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb
+class Hexagon_v512v512LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb_128B
+class Hexagon_v1024v1024LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb_acc
+class Hexagon_v512v512v512LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb_acc_128B
+class Hexagon_v1024v1024v1024LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb_dv_128B
+class Hexagon_v2048v2048LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v2048LLii_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutb_dv_acc_128B
+class Hexagon_v2048v2048v2048LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v64i32_ty,llvm_i64_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracc
+class Hexagon_v512v512v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v16i32_ty], [llvm_v16i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvvb_oracc_128B
+class Hexagon_v1024v1024v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracc
+class Hexagon_v1024v1024v512v512i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_v16i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+// tag : V6_vlutvwh_oracc_128B
+class Hexagon_v2048v2048v1024v1024i_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_v32i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
+// tag : M6_vabsdiffb
+class Hexagon_LLiLLiLLi_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i64_ty,llvm_i64_ty],
+ [IntrNoMem]>;
+
+//
+// Hexagon_LLii_Intrinsic<string GCCIntSuffix>
+// tag : S6_vsplatrbp
+class Hexagon_LLii_Intrinsic<string GCCIntSuffix>
+ : Hexagon_Intrinsic<GCCIntSuffix,
+ [llvm_i64_ty], [llvm_i32_ty],
+ [IntrNoMem]>;
+
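+//
+// A note on the signature classes above: in a class name such as
+// Hexagon_v1024v512v512i_Intrinsic, the first width mnemonic is the return
+// type and the remaining ones are the operand types. As the class bodies
+// show, "v512"/"v1024"/"v2048" denote 512-, 1024- and 2048-bit HVX vector
+// values (llvm_v16i32_ty, llvm_v32i32_ty, llvm_v64i32_ty), "v64i"/"v128i"
+// denote the 64- and 128-byte vector predicate types (llvm_v512i1_ty,
+// llvm_v1024i1_ty), "i" a 32-bit scalar (llvm_i32_ty), and "LLi" a 64-bit
+// scalar (llvm_i64_ty). The "_128B" tags name the 128-byte vector-length
+// variants, which double every vector width.
+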
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r,SI_ftype_SISI,2)
+// tag : S6_rol_i_r
+def int_hexagon_S6_rol_i_r :
+Hexagon_iii_Intrinsic<"HEXAGON_S6_rol_i_r">;
+
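+//
+// A def such as the one above follows the usual TableGen intrinsic naming:
+// the record int_hexagon_S6_rol_i_r becomes the IR intrinsic
+// llvm.hexagon.S6.rol.i.r, and its GCCIntSuffix string
+// ("HEXAGON_S6_rol_i_r") ties it to the matching frontend builtin,
+// presumably __builtin_HEXAGON_S6_rol_i_r, assuming the Hexagon_Intrinsic
+// base class defined earlier in this file wraps GCCBuiltin in the usual
+// way. A hedged C-level sketch of what that surfaces as:
+//
+//   int rol5(int x) {
+//     return __builtin_HEXAGON_S6_rol_i_r(x, 5);  // rotate left by imm 5
+//   }
+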
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p,DI_ftype_DISI,2)
+// tag : S6_rol_i_p
+def int_hexagon_S6_rol_i_p :
+Hexagon_LLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_acc,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_acc
+def int_hexagon_S6_rol_i_r_acc :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_acc,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_acc
+def int_hexagon_S6_rol_i_p_acc :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_nac,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_nac
+def int_hexagon_S6_rol_i_r_nac :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_nac">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_nac,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_nac
+def int_hexagon_S6_rol_i_p_nac :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_nac">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_xacc,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_xacc
+def int_hexagon_S6_rol_i_r_xacc :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_xacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_xacc,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_xacc
+def int_hexagon_S6_rol_i_p_xacc :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_xacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_and,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_and
+def int_hexagon_S6_rol_i_r_and :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_r_or,SI_ftype_SISISI,3)
+// tag : S6_rol_i_r_or
+def int_hexagon_S6_rol_i_r_or :
+Hexagon_iiii_Intrinsic<"HEXAGON_S6_rol_i_r_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_and,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_and
+def int_hexagon_S6_rol_i_p_and :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_rol_i_p_or,DI_ftype_DIDISI,3)
+// tag : S6_rol_i_p_or
+def int_hexagon_S6_rol_i_p_or :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S6_rol_i_p_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.S2_cabacencbin,DI_ftype_DIDIQI,3)
+// tag : S2_cabacencbin
+def int_hexagon_S2_cabacencbin :
+Hexagon_LLiLLiLLii_Intrinsic<"HEXAGON_S2_cabacencbin">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignb,VI_ftype_VIVISI,3)
+// tag : V6_valignb
+def int_hexagon_V6_valignb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignb_128B,VI_ftype_VIVISI,3)
+// tag : V6_valignb_128B
+def int_hexagon_V6_valignb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignb,VI_ftype_VIVISI,3)
+// tag : V6_vlalignb
+def int_hexagon_V6_vlalignb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignb_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlalignb_128B
+def int_hexagon_V6_vlalignb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignbi,VI_ftype_VIVISI,3)
+// tag : V6_valignbi
+def int_hexagon_V6_valignbi :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_valignbi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_valignbi_128B,VI_ftype_VIVISI,3)
+// tag : V6_valignbi_128B
+def int_hexagon_V6_valignbi_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_valignbi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignbi,VI_ftype_VIVISI,3)
+// tag : V6_vlalignbi
+def int_hexagon_V6_vlalignbi :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlalignbi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlalignbi_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlalignbi_128B
+def int_hexagon_V6_vlalignbi_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlalignbi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vror,VI_ftype_VISI,2)
+// tag : V6_vror
+def int_hexagon_V6_vror :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vror">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vror_128B,VI_ftype_VISI,2)
+// tag : V6_vror_128B
+def int_hexagon_V6_vror_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vror_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackub,VD_ftype_VI,1)
+// tag : V6_vunpackub
+def int_hexagon_V6_vunpackub :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackub_128B,VD_ftype_VI,1)
+// tag : V6_vunpackub_128B
+def int_hexagon_V6_vunpackub_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackb,VD_ftype_VI,1)
+// tag : V6_vunpackb
+def int_hexagon_V6_vunpackb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackb_128B,VD_ftype_VI,1)
+// tag : V6_vunpackb_128B
+def int_hexagon_V6_vunpackb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackuh,VD_ftype_VI,1)
+// tag : V6_vunpackuh
+def int_hexagon_V6_vunpackuh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackuh_128B,VD_ftype_VI,1)
+// tag : V6_vunpackuh_128B
+def int_hexagon_V6_vunpackuh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackh,VD_ftype_VI,1)
+// tag : V6_vunpackh
+def int_hexagon_V6_vunpackh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vunpackh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackh_128B,VD_ftype_VI,1)
+// tag : V6_vunpackh_128B
+def int_hexagon_V6_vunpackh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vunpackh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackob,VD_ftype_VDVI,2)
+// tag : V6_vunpackob
+def int_hexagon_V6_vunpackob :
+Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackob_128B,VD_ftype_VDVI,2)
+// tag : V6_vunpackob_128B
+def int_hexagon_V6_vunpackob_128B :
+Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackoh,VD_ftype_VDVI,2)
+// tag : V6_vunpackoh
+def int_hexagon_V6_vunpackoh :
+Hexagon_v1024v1024v512_Intrinsic<"HEXAGON_V6_vunpackoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vunpackoh_128B,VD_ftype_VDVI,2)
+// tag : V6_vunpackoh_128B
+def int_hexagon_V6_vunpackoh_128B :
+Hexagon_v2048v2048v1024_Intrinsic<"HEXAGON_V6_vunpackoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeb,VI_ftype_VIVI,2)
+// tag : V6_vpackeb
+def int_hexagon_V6_vpackeb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeb_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackeb_128B
+def int_hexagon_V6_vpackeb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeh,VI_ftype_VIVI,2)
+// tag : V6_vpackeh
+def int_hexagon_V6_vpackeh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackeh_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackeh_128B
+def int_hexagon_V6_vpackeh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackob,VI_ftype_VIVI,2)
+// tag : V6_vpackob
+def int_hexagon_V6_vpackob :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackob_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackob_128B
+def int_hexagon_V6_vpackob_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackoh,VI_ftype_VIVI,2)
+// tag : V6_vpackoh
+def int_hexagon_V6_vpackoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackoh_128B
+def int_hexagon_V6_vpackoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackhub_sat
+def int_hexagon_V6_vpackhub_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhub_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhub_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackhub_sat_128B
+def int_hexagon_V6_vpackhub_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhub_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackhb_sat
+def int_hexagon_V6_vpackhb_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackhb_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackhb_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackhb_sat_128B
+def int_hexagon_V6_vpackhb_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackhb_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackwuh_sat
+def int_hexagon_V6_vpackwuh_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwuh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwuh_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackwuh_sat_128B
+def int_hexagon_V6_vpackwuh_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwuh_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat,VI_ftype_VIVI,2)
+// tag : V6_vpackwh_sat
+def int_hexagon_V6_vpackwh_sat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vpackwh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpackwh_sat_128B,VI_ftype_VIVI,2)
+// tag : V6_vpackwh_sat_128B
+def int_hexagon_V6_vpackwh_sat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vpackwh_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzb,VD_ftype_VI,1)
+// tag : V6_vzb
+def int_hexagon_V6_vzb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzb_128B,VD_ftype_VI,1)
+// tag : V6_vzb_128B
+def int_hexagon_V6_vzb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsb,VD_ftype_VI,1)
+// tag : V6_vsb
+def int_hexagon_V6_vsb :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsb_128B,VD_ftype_VI,1)
+// tag : V6_vsb_128B
+def int_hexagon_V6_vsb_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzh,VD_ftype_VI,1)
+// tag : V6_vzh
+def int_hexagon_V6_vzh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vzh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vzh_128B,VD_ftype_VI,1)
+// tag : V6_vzh_128B
+def int_hexagon_V6_vzh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vzh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsh,VD_ftype_VI,1)
+// tag : V6_vsh
+def int_hexagon_V6_vsh :
+Hexagon_v1024v512_Intrinsic<"HEXAGON_V6_vsh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsh_128B,VD_ftype_VI,1)
+// tag : V6_vsh_128B
+def int_hexagon_V6_vsh_128B :
+Hexagon_v2048v1024_Intrinsic<"HEXAGON_V6_vsh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus,VI_ftype_VISI,2)
+// tag : V6_vdmpybus
+def int_hexagon_V6_vdmpybus :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpybus_128B
+def int_hexagon_V6_vdmpybus_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpybus_acc
+def int_hexagon_V6_vdmpybus_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpybus_acc_128B
+def int_hexagon_V6_vdmpybus_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv,VD_ftype_VDSI,2)
+// tag : V6_vdmpybus_dv
+def int_hexagon_V6_vdmpybus_dv :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_128B,VD_ftype_VDSI,2)
+// tag : V6_vdmpybus_dv_128B
+def int_hexagon_V6_vdmpybus_dv_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpybus_dv_acc
+def int_hexagon_V6_vdmpybus_dv_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpybus_dv_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpybus_dv_acc_128B
+def int_hexagon_V6_vdmpybus_dv_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpybus_dv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb,VI_ftype_VISI,2)
+// tag : V6_vdmpyhb
+def int_hexagon_V6_vdmpyhb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhb_128B
+def int_hexagon_V6_vdmpyhb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhb_acc
+def int_hexagon_V6_vdmpyhb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhb_acc_128B
+def int_hexagon_V6_vdmpyhb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv,VD_ftype_VDSI,2)
+// tag : V6_vdmpyhb_dv
+def int_hexagon_V6_vdmpyhb_dv :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_128B,VD_ftype_VDSI,2)
+// tag : V6_vdmpyhb_dv_128B
+def int_hexagon_V6_vdmpyhb_dv_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpyhb_dv_acc
+def int_hexagon_V6_vdmpyhb_dv_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhb_dv_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdmpyhb_dv_acc_128B
+def int_hexagon_V6_vdmpyhb_dv_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdmpyhb_dv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat,VI_ftype_VIVI,2)
+// tag : V6_vdmpyhvsat
+def int_hexagon_V6_vdmpyhvsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vdmpyhvsat_128B
+def int_hexagon_V6_vdmpyhvsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vdmpyhvsat_acc
+def int_hexagon_V6_vdmpyhvsat_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhvsat_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vdmpyhvsat_acc_128B
+def int_hexagon_V6_vdmpyhvsat_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdmpyhvsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsat
+def int_hexagon_V6_vdmpyhsat :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsat_128B
+def int_hexagon_V6_vdmpyhsat_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsat_acc
+def int_hexagon_V6_vdmpyhsat_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsat_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsat_acc_128B
+def int_hexagon_V6_vdmpyhsat_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhisat
+def int_hexagon_V6_vdmpyhisat :
+Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_128B,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhisat_128B
+def int_hexagon_V6_vdmpyhisat_128B :
+Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhisat_acc
+def int_hexagon_V6_vdmpyhisat_acc :
+Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhisat_acc_128B,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhisat_acc_128B
+def int_hexagon_V6_vdmpyhisat_acc_128B :
+Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhisat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsusat
+def int_hexagon_V6_vdmpyhsusat :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_128B,VI_ftype_VISI,2)
+// tag : V6_vdmpyhsusat_128B
+def int_hexagon_V6_vdmpyhsusat_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsusat_acc
+def int_hexagon_V6_vdmpyhsusat_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsusat_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vdmpyhsusat_acc_128B
+def int_hexagon_V6_vdmpyhsusat_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsusat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhsuisat
+def int_hexagon_V6_vdmpyhsuisat :
+Hexagon_v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_128B,VI_ftype_VDSI,2)
+// tag : V6_vdmpyhsuisat_128B
+def int_hexagon_V6_vdmpyhsuisat_128B :
+Hexagon_v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhsuisat_acc
+def int_hexagon_V6_vdmpyhsuisat_acc :
+Hexagon_v512v512v1024i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdmpyhsuisat_acc_128B,VI_ftype_VIVDSI,3)
+// tag : V6_vdmpyhsuisat_acc_128B
+def int_hexagon_V6_vdmpyhsuisat_acc_128B :
+Hexagon_v1024v1024v2048i_Intrinsic<"HEXAGON_V6_vdmpyhsuisat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb,VD_ftype_VDSI,2)
+// tag : V6_vtmpyb
+def int_hexagon_V6_vtmpyb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpyb_128B
+def int_hexagon_V6_vtmpyb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyb_acc
+def int_hexagon_V6_vtmpyb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyb_acc_128B
+def int_hexagon_V6_vtmpyb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus,VD_ftype_VDSI,2)
+// tag : V6_vtmpybus
+def int_hexagon_V6_vtmpybus :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpybus_128B
+def int_hexagon_V6_vtmpybus_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpybus_acc
+def int_hexagon_V6_vtmpybus_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpybus_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpybus_acc_128B
+def int_hexagon_V6_vtmpybus_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb,VD_ftype_VDSI,2)
+// tag : V6_vtmpyhb
+def int_hexagon_V6_vtmpyhb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_128B,VD_ftype_VDSI,2)
+// tag : V6_vtmpyhb_128B
+def int_hexagon_V6_vtmpyhb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyhb_acc
+def int_hexagon_V6_vtmpyhb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vtmpyhb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vtmpyhb_acc_128B
+def int_hexagon_V6_vtmpyhb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vtmpyhb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub,VI_ftype_VISI,2)
+// tag : V6_vrmpyub
+def int_hexagon_V6_vrmpyub :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_128B,VI_ftype_VISI,2)
+// tag : V6_vrmpyub_128B
+def int_hexagon_V6_vrmpyub_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc,VI_ftype_VIVISI,3)
+// tag : V6_vrmpyub_acc
+def int_hexagon_V6_vrmpyub_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpyub_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyub_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vrmpyub_acc_128B
+def int_hexagon_V6_vrmpyub_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpyub_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv,VI_ftype_VIVI,2)
+// tag : V6_vrmpyubv
+def int_hexagon_V6_vrmpyubv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpyubv_128B
+def int_hexagon_V6_vrmpyubv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpyubv_acc
+def int_hexagon_V6_vrmpyubv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpyubv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpyubv_acc_128B
+def int_hexagon_V6_vrmpyubv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpyubv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv,VI_ftype_VIVI,2)
+// tag : V6_vrmpybv
+def int_hexagon_V6_vrmpybv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpybv_128B
+def int_hexagon_V6_vrmpybv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybv_acc
+def int_hexagon_V6_vrmpybv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybv_acc_128B
+def int_hexagon_V6_vrmpybv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi,VD_ftype_VDSISI,3)
+// tag : V6_vrmpyubi
+def int_hexagon_V6_vrmpyubi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrmpyubi_128B
+def int_hexagon_V6_vrmpyubi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpyubi_acc
+def int_hexagon_V6_vrmpyubi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpyubi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpyubi_acc_128B
+def int_hexagon_V6_vrmpyubi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpyubi_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus,VI_ftype_VISI,2)
+// tag : V6_vrmpybus
+def int_hexagon_V6_vrmpybus :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_128B,VI_ftype_VISI,2)
+// tag : V6_vrmpybus_128B
+def int_hexagon_V6_vrmpybus_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc,VI_ftype_VIVISI,3)
+// tag : V6_vrmpybus_acc
+def int_hexagon_V6_vrmpybus_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vrmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybus_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vrmpybus_acc_128B
+def int_hexagon_V6_vrmpybus_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vrmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi,VD_ftype_VDSISI,3)
+// tag : V6_vrmpybusi
+def int_hexagon_V6_vrmpybusi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrmpybusi_128B
+def int_hexagon_V6_vrmpybusi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpybusi_acc
+def int_hexagon_V6_vrmpybusi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrmpybusi_acc_128B
+def int_hexagon_V6_vrmpybusi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrmpybusi_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv,VI_ftype_VIVI,2)
+// tag : V6_vrmpybusv
+def int_hexagon_V6_vrmpybusv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_128B,VI_ftype_VIVI,2)
+// tag : V6_vrmpybusv_128B
+def int_hexagon_V6_vrmpybusv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybusv_acc
+def int_hexagon_V6_vrmpybusv_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vrmpybusv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrmpybusv_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vrmpybusv_acc_128B
+def int_hexagon_V6_vrmpybusv_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrmpybusv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh,VD_ftype_VDSI,2)
+// tag : V6_vdsaduh
+def int_hexagon_V6_vdsaduh :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_128B,VD_ftype_VDSI,2)
+// tag : V6_vdsaduh_128B
+def int_hexagon_V6_vdsaduh_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vdsaduh_acc
+def int_hexagon_V6_vdsaduh_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vdsaduh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdsaduh_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vdsaduh_acc_128B
+def int_hexagon_V6_vdsaduh_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vdsaduh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi,VD_ftype_VDSISI,3)
+// tag : V6_vrsadubi
+def int_hexagon_V6_vrsadubi :
+Hexagon_v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_128B,VD_ftype_VDSISI,3)
+// tag : V6_vrsadubi_128B
+def int_hexagon_V6_vrsadubi_128B :
+Hexagon_v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc,VD_ftype_VDVDSISI,4)
+// tag : V6_vrsadubi_acc
+def int_hexagon_V6_vrsadubi_acc :
+Hexagon_v1024v1024v1024ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrsadubi_acc_128B,VD_ftype_VDVDSISI,4)
+// tag : V6_vrsadubi_acc_128B
+def int_hexagon_V6_vrsadubi_acc_128B :
+Hexagon_v2048v2048v2048ii_Intrinsic<"HEXAGON_V6_vrsadubi_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw,VI_ftype_VISI,2)
+// tag : V6_vasrw
+def int_hexagon_V6_vasrw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_128B,VI_ftype_VISI,2)
+// tag : V6_vasrw_128B
+def int_hexagon_V6_vasrw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw,VI_ftype_VISI,2)
+// tag : V6_vaslw
+def int_hexagon_V6_vaslw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_128B,VI_ftype_VISI,2)
+// tag : V6_vaslw_128B
+def int_hexagon_V6_vaslw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrw,VI_ftype_VISI,2)
+// tag : V6_vlsrw
+def int_hexagon_V6_vlsrw :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrw_128B,VI_ftype_VISI,2)
+// tag : V6_vlsrw_128B
+def int_hexagon_V6_vlsrw_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwv,VI_ftype_VIVI,2)
+// tag : V6_vasrwv
+def int_hexagon_V6_vasrwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vasrwv_128B
+def int_hexagon_V6_vasrwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslwv,VI_ftype_VIVI,2)
+// tag : V6_vaslwv
+def int_hexagon_V6_vaslwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vaslwv_128B
+def int_hexagon_V6_vaslwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrwv,VI_ftype_VIVI,2)
+// tag : V6_vlsrwv
+def int_hexagon_V6_vlsrwv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrwv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrwv_128B,VI_ftype_VIVI,2)
+// tag : V6_vlsrwv_128B
+def int_hexagon_V6_vlsrwv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrwv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh,VI_ftype_VISI,2)
+// tag : V6_vasrh
+def int_hexagon_V6_vasrh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vasrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrh_128B,VI_ftype_VISI,2)
+// tag : V6_vasrh_128B
+def int_hexagon_V6_vasrh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vasrh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh,VI_ftype_VISI,2)
+// tag : V6_vaslh
+def int_hexagon_V6_vaslh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vaslh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslh_128B,VI_ftype_VISI,2)
+// tag : V6_vaslh_128B
+def int_hexagon_V6_vaslh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vaslh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrh,VI_ftype_VISI,2)
+// tag : V6_vlsrh
+def int_hexagon_V6_vlsrh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vlsrh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrh_128B,VI_ftype_VISI,2)
+// tag : V6_vlsrh_128B
+def int_hexagon_V6_vlsrh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhv,VI_ftype_VIVI,2)
+// tag : V6_vasrhv
+def int_hexagon_V6_vasrhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vasrhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vasrhv_128B
+def int_hexagon_V6_vasrhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vasrhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslhv,VI_ftype_VIVI,2)
+// tag : V6_vaslhv
+def int_hexagon_V6_vaslhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaslhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vaslhv_128B
+def int_hexagon_V6_vaslhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaslhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrhv,VI_ftype_VIVI,2)
+// tag : V6_vlsrhv
+def int_hexagon_V6_vlsrhv :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vlsrhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlsrhv_128B,VI_ftype_VIVI,2)
+// tag : V6_vlsrhv_128B
+def int_hexagon_V6_vlsrhv_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vlsrhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwh,VI_ftype_VIVISI,3)
+// tag : V6_vasrwh
+def int_hexagon_V6_vasrwh :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwh_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwh_128B
+def int_hexagon_V6_vasrwh_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhsat
+def int_hexagon_V6_vasrwhsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhsat_128B
+def int_hexagon_V6_vasrwhsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhrndsat
+def int_hexagon_V6_vasrwhrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwhrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwhrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwhrndsat_128B
+def int_hexagon_V6_vasrwhrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwhrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhsat
+def int_hexagon_V6_vasrwuhsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrwuhsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrwuhsat_128B
+def int_hexagon_V6_vasrwuhsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwh,VI_ftype_VIVI,2)
+// tag : V6_vroundwh
+def int_hexagon_V6_vroundwh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwh_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundwh_128B
+def int_hexagon_V6_vroundwh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwuh,VI_ftype_VIVI,2)
+// tag : V6_vroundwuh
+def int_hexagon_V6_vroundwuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundwuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundwuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundwuh_128B
+def int_hexagon_V6_vroundwuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundwuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubsat
+def int_hexagon_V6_vasrhubsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubsat_128B
+def int_hexagon_V6_vasrhubsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubrndsat
+def int_hexagon_V6_vasrhubrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhubrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhubrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhubrndsat_128B
+def int_hexagon_V6_vasrhubrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhubrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbrndsat
+def int_hexagon_V6_vasrhbrndsat :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbrndsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrhbrndsat_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrhbrndsat_128B
+def int_hexagon_V6_vasrhbrndsat_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbrndsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhb,VI_ftype_VIVI,2)
+// tag : V6_vroundhb
+def int_hexagon_V6_vroundhb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhb_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundhb_128B
+def int_hexagon_V6_vroundhb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhub,VI_ftype_VIVI,2)
+// tag : V6_vroundhub
+def int_hexagon_V6_vroundhub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vroundhub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vroundhub_128B,VI_ftype_VIVI,2)
+// tag : V6_vroundhub_128B
+def int_hexagon_V6_vroundhub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vroundhub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_acc,VI_ftype_VIVISI,3)
+// tag : V6_vaslw_acc
+def int_hexagon_V6_vaslw_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vaslw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaslw_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vaslw_acc_128B
+def int_hexagon_V6_vaslw_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vaslw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_acc,VI_ftype_VIVISI,3)
+// tag : V6_vasrw_acc
+def int_hexagon_V6_vasrw_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrw_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vasrw_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vasrw_acc_128B
+def int_hexagon_V6_vasrw_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrw_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb,VI_ftype_VIVI,2)
+// tag : V6_vaddb
+def int_hexagon_V6_vaddb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddb_128B
+def int_hexagon_V6_vaddb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb,VI_ftype_VIVI,2)
+// tag : V6_vsubb
+def int_hexagon_V6_vsubb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubb_128B
+def int_hexagon_V6_vsubb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddb_dv
+def int_hexagon_V6_vaddb_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddb_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddb_dv_128B
+def int_hexagon_V6_vaddb_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubb_dv
+def int_hexagon_V6_vsubb_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubb_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubb_dv_128B
+def int_hexagon_V6_vsubb_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh,VI_ftype_VIVI,2)
+// tag : V6_vaddh
+def int_hexagon_V6_vaddh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddh_128B
+def int_hexagon_V6_vaddh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh,VI_ftype_VIVI,2)
+// tag : V6_vsubh
+def int_hexagon_V6_vsubh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubh_128B
+def int_hexagon_V6_vsubh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddh_dv
+def int_hexagon_V6_vaddh_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddh_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddh_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddh_dv_128B
+def int_hexagon_V6_vaddh_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddh_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubh_dv
+def int_hexagon_V6_vsubh_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubh_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubh_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubh_dv_128B
+def int_hexagon_V6_vsubh_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubh_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw,VI_ftype_VIVI,2)
+// tag : V6_vaddw
+def int_hexagon_V6_vaddw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddw_128B
+def int_hexagon_V6_vaddw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw,VI_ftype_VIVI,2)
+// tag : V6_vsubw
+def int_hexagon_V6_vsubw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubw_128B
+def int_hexagon_V6_vsubw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddw_dv
+def int_hexagon_V6_vaddw_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddw_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddw_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddw_dv_128B
+def int_hexagon_V6_vaddw_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddw_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubw_dv
+def int_hexagon_V6_vsubw_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubw_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubw_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubw_dv_128B
+def int_hexagon_V6_vsubw_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubw_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat,VI_ftype_VIVI,2)
+// tag : V6_vaddubsat
+def int_hexagon_V6_vaddubsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddubsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddubsat_128B
+def int_hexagon_V6_vaddubsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddubsat_dv
+def int_hexagon_V6_vaddubsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddubsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddubsat_dv_128B
+def int_hexagon_V6_vaddubsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddubsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat,VI_ftype_VIVI,2)
+// tag : V6_vsububsat
+def int_hexagon_V6_vsububsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsububsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsububsat_128B
+def int_hexagon_V6_vsububsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsububsat_dv
+def int_hexagon_V6_vsububsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsububsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsububsat_dv_128B
+def int_hexagon_V6_vsububsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsububsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat,VI_ftype_VIVI,2)
+// tag : V6_vadduhsat
+def int_hexagon_V6_vadduhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vadduhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vadduhsat_128B
+def int_hexagon_V6_vadduhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vadduhsat_dv
+def int_hexagon_V6_vadduhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vadduhsat_dv_128B
+def int_hexagon_V6_vadduhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat,VI_ftype_VIVI,2)
+// tag : V6_vsubuhsat
+def int_hexagon_V6_vsubuhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubuhsat_128B
+def int_hexagon_V6_vsubuhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubuhsat_dv
+def int_hexagon_V6_vsubuhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubuhsat_dv_128B
+def int_hexagon_V6_vsubuhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat,VI_ftype_VIVI,2)
+// tag : V6_vaddhsat
+def int_hexagon_V6_vaddhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddhsat_128B
+def int_hexagon_V6_vaddhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddhsat_dv
+def int_hexagon_V6_vaddhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddhsat_dv_128B
+def int_hexagon_V6_vaddhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat,VI_ftype_VIVI,2)
+// tag : V6_vsubhsat
+def int_hexagon_V6_vsubhsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubhsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubhsat_128B
+def int_hexagon_V6_vsubhsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubhsat_dv
+def int_hexagon_V6_vsubhsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubhsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubhsat_dv_128B
+def int_hexagon_V6_vsubhsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubhsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat,VI_ftype_VIVI,2)
+// tag : V6_vaddwsat
+def int_hexagon_V6_vaddwsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vaddwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vaddwsat_128B
+def int_hexagon_V6_vaddwsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vaddwsat_dv
+def int_hexagon_V6_vaddwsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vaddwsat_dv_128B
+def int_hexagon_V6_vaddwsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddwsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat,VI_ftype_VIVI,2)
+// tag : V6_vsubwsat
+def int_hexagon_V6_vsubwsat :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsubwsat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_128B,VI_ftype_VIVI,2)
+// tag : V6_vsubwsat_128B
+def int_hexagon_V6_vsubwsat_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv,VD_ftype_VDVD,2)
+// tag : V6_vsubwsat_dv
+def int_hexagon_V6_vsubwsat_dv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubwsat_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwsat_dv_128B,VD_ftype_VDVD,2)
+// tag : V6_vsubwsat_dv_128B
+def int_hexagon_V6_vsubwsat_dv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubwsat_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgub,VI_ftype_VIVI,2)
+// tag : V6_vavgub
+def int_hexagon_V6_vavgub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgub_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgub_128B
+def int_hexagon_V6_vavgub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgubrnd,VI_ftype_VIVI,2)
+// tag : V6_vavgubrnd
+def int_hexagon_V6_vavgubrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgubrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgubrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgubrnd_128B
+def int_hexagon_V6_vavgubrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgubrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguh,VI_ftype_VIVI,2)
+// tag : V6_vavguh
+def int_hexagon_V6_vavguh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguh_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguh_128B
+def int_hexagon_V6_vavguh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguhrnd,VI_ftype_VIVI,2)
+// tag : V6_vavguhrnd
+def int_hexagon_V6_vavguhrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavguhrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavguhrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavguhrnd_128B
+def int_hexagon_V6_vavguhrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavguhrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgh,VI_ftype_VIVI,2)
+// tag : V6_vavgh
+def int_hexagon_V6_vavgh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgh_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgh_128B
+def int_hexagon_V6_vavgh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavghrnd,VI_ftype_VIVI,2)
+// tag : V6_vavghrnd
+def int_hexagon_V6_vavghrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavghrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavghrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavghrnd_128B
+def int_hexagon_V6_vavghrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavghrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgh,VI_ftype_VIVI,2)
+// tag : V6_vnavgh
+def int_hexagon_V6_vnavgh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgh_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgh_128B
+def int_hexagon_V6_vnavgh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgw,VI_ftype_VIVI,2)
+// tag : V6_vavgw
+def int_hexagon_V6_vavgw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgw_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgw_128B
+def int_hexagon_V6_vavgw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgwrnd,VI_ftype_VIVI,2)
+// tag : V6_vavgwrnd
+def int_hexagon_V6_vavgwrnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vavgwrnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vavgwrnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vavgwrnd_128B
+def int_hexagon_V6_vavgwrnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vavgwrnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgw,VI_ftype_VIVI,2)
+// tag : V6_vnavgw
+def int_hexagon_V6_vnavgw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgw_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgw_128B
+def int_hexagon_V6_vnavgw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffub,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffub
+def int_hexagon_V6_vabsdiffub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffub_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffub_128B
+def int_hexagon_V6_vabsdiffub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffuh
+def int_hexagon_V6_vabsdiffuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffuh_128B
+def int_hexagon_V6_vabsdiffuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffh,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffh
+def int_hexagon_V6_vabsdiffh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffh_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffh_128B
+def int_hexagon_V6_vabsdiffh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffw,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffw
+def int_hexagon_V6_vabsdiffw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vabsdiffw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsdiffw_128B,VI_ftype_VIVI,2)
+// tag : V6_vabsdiffw_128B
+def int_hexagon_V6_vabsdiffw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vabsdiffw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgub,VI_ftype_VIVI,2)
+// tag : V6_vnavgub
+def int_hexagon_V6_vnavgub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vnavgub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnavgub_128B,VI_ftype_VIVI,2)
+// tag : V6_vnavgub_128B
+def int_hexagon_V6_vnavgub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vnavgub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh,VD_ftype_VIVI,2)
+// tag : V6_vaddubh
+def int_hexagon_V6_vaddubh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddubh_128B,VD_ftype_VIVI,2)
+// tag : V6_vaddubh_128B
+def int_hexagon_V6_vaddubh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububh,VD_ftype_VIVI,2)
+// tag : V6_vsububh
+def int_hexagon_V6_vsububh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsububh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsububh_128B,VD_ftype_VIVI,2)
+// tag : V6_vsububh_128B
+def int_hexagon_V6_vsububh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsububh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw,VD_ftype_VIVI,2)
+// tag : V6_vaddhw
+def int_hexagon_V6_vaddhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vaddhw_128B
+def int_hexagon_V6_vaddhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhw,VD_ftype_VIVI,2)
+// tag : V6_vsubhw
+def int_hexagon_V6_vsubhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vsubhw_128B
+def int_hexagon_V6_vsubhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw,VD_ftype_VIVI,2)
+// tag : V6_vadduhw
+def int_hexagon_V6_vadduhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vadduhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vadduhw_128B
+def int_hexagon_V6_vadduhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhw,VD_ftype_VIVI,2)
+// tag : V6_vsubuhw
+def int_hexagon_V6_vsubuhw :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vsubuhw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubuhw_128B,VD_ftype_VIVI,2)
+// tag : V6_vsubuhw_128B
+def int_hexagon_V6_vsubuhw_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vsubuhw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vd0,VI_ftype_,0)
+// tag : V6_vd0
+def int_hexagon_V6_vd0 :
+Hexagon_v512_Intrinsic<"HEXAGON_V6_vd0">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vd0_128B,VI_ftype_,0)
+// tag : V6_vd0_128B
+def int_hexagon_V6_vd0_128B :
+Hexagon_v1024_Intrinsic<"HEXAGON_V6_vd0_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbq
+def int_hexagon_V6_vaddbq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbq_128B
+def int_hexagon_V6_vaddbq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbq
+def int_hexagon_V6_vsubbq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbq_128B
+def int_hexagon_V6_vsubbq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbnq
+def int_hexagon_V6_vaddbnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddbnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddbnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddbnq_128B
+def int_hexagon_V6_vaddbnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddbnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbnq
+def int_hexagon_V6_vsubbnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubbnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubbnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubbnq_128B
+def int_hexagon_V6_vsubbnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubbnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhq
+def int_hexagon_V6_vaddhq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhq_128B
+def int_hexagon_V6_vaddhq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhq
+def int_hexagon_V6_vsubhq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhq_128B
+def int_hexagon_V6_vsubhq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhnq
+def int_hexagon_V6_vaddhnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddhnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddhnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddhnq_128B
+def int_hexagon_V6_vaddhnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddhnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhnq
+def int_hexagon_V6_vsubhnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubhnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubhnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubhnq_128B
+def int_hexagon_V6_vsubhnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubhnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwq
+def int_hexagon_V6_vaddwq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwq_128B
+def int_hexagon_V6_vaddwq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwq
+def int_hexagon_V6_vsubwq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwq_128B
+def int_hexagon_V6_vsubwq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwnq,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwnq
+def int_hexagon_V6_vaddwnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vaddwnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vaddwnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vaddwnq_128B
+def int_hexagon_V6_vaddwnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vaddwnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwnq,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwnq
+def int_hexagon_V6_vsubwnq :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vsubwnq">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsubwnq_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vsubwnq_128B
+def int_hexagon_V6_vsubwnq_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vsubwnq_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh,VI_ftype_VI,1)
+// tag : V6_vabsh
+def int_hexagon_V6_vabsh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_128B,VI_ftype_VI,1)
+// tag : V6_vabsh_128B
+def int_hexagon_V6_vabsh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_sat,VI_ftype_VI,1)
+// tag : V6_vabsh_sat
+def int_hexagon_V6_vabsh_sat :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsh_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsh_sat_128B,VI_ftype_VI,1)
+// tag : V6_vabsh_sat_128B
+def int_hexagon_V6_vabsh_sat_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsh_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw,VI_ftype_VI,1)
+// tag : V6_vabsw
+def int_hexagon_V6_vabsw :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_128B,VI_ftype_VI,1)
+// tag : V6_vabsw_128B
+def int_hexagon_V6_vabsw_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_sat,VI_ftype_VI,1)
+// tag : V6_vabsw_sat
+def int_hexagon_V6_vabsw_sat :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vabsw_sat">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vabsw_sat_128B,VI_ftype_VI,1)
+// tag : V6_vabsw_sat_128B
+def int_hexagon_V6_vabsw_sat_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vabsw_sat_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv,VD_ftype_VIVI,2)
+// tag : V6_vmpybv
+def int_hexagon_V6_vmpybv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpybv_128B
+def int_hexagon_V6_vmpybv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybv_acc
+def int_hexagon_V6_vmpybv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybv_acc_128B
+def int_hexagon_V6_vmpybv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv,VD_ftype_VIVI,2)
+// tag : V6_vmpyubv
+def int_hexagon_V6_vmpyubv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyubv_128B
+def int_hexagon_V6_vmpyubv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyubv_acc
+def int_hexagon_V6_vmpyubv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyubv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyubv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyubv_acc_128B
+def int_hexagon_V6_vmpyubv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyubv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv,VD_ftype_VIVI,2)
+// tag : V6_vmpybusv
+def int_hexagon_V6_vmpybusv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpybusv_128B
+def int_hexagon_V6_vmpybusv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybusv_acc
+def int_hexagon_V6_vmpybusv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpybusv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybusv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpybusv_acc_128B
+def int_hexagon_V6_vmpybusv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpybusv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabusv,VD_ftype_VDVD,2)
+// tag : V6_vmpabusv
+def int_hexagon_V6_vmpabusv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabusv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabusv_128B,VD_ftype_VDVD,2)
+// tag : V6_vmpabusv_128B
+def int_hexagon_V6_vmpabusv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabusv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuuv,VD_ftype_VDVD,2)
+// tag : V6_vmpabuuv
+def int_hexagon_V6_vmpabuuv :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpabuuv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabuuv_128B,VD_ftype_VDVD,2)
+// tag : V6_vmpabuuv_128B
+def int_hexagon_V6_vmpabuuv_128B :
+Hexagon_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vmpabuuv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv,VD_ftype_VIVI,2)
+// tag : V6_vmpyhv
+def int_hexagon_V6_vmpyhv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyhv_128B
+def int_hexagon_V6_vmpyhv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhv_acc
+def int_hexagon_V6_vmpyhv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhv_acc_128B
+def int_hexagon_V6_vmpyhv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv,VD_ftype_VIVI,2)
+// tag : V6_vmpyuhv
+def int_hexagon_V6_vmpyuhv :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyuhv_128B
+def int_hexagon_V6_vmpyuhv_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyuhv_acc
+def int_hexagon_V6_vmpyuhv_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyuhv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuhv_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyuhv_acc_128B
+def int_hexagon_V6_vmpyuhv_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyuhv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs,VI_ftype_VIVI,2)
+// tag : V6_vmpyhvsrs
+def int_hexagon_V6_vmpyhvsrs :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyhvsrs">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhvsrs_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyhvsrs_128B
+def int_hexagon_V6_vmpyhvsrs_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhvsrs_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus,VD_ftype_VIVI,2)
+// tag : V6_vmpyhus
+def int_hexagon_V6_vmpyhus :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_128B,VD_ftype_VIVI,2)
+// tag : V6_vmpyhus_128B
+def int_hexagon_V6_vmpyhus_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhus_acc
+def int_hexagon_V6_vmpyhus_acc :
+Hexagon_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyhus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhus_acc_128B,VD_ftype_VDVIVI,3)
+// tag : V6_vmpyhus_acc_128B
+def int_hexagon_V6_vmpyhus_acc_128B :
+Hexagon_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyhus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih,VI_ftype_VIVI,2)
+// tag : V6_vmpyih
+def int_hexagon_V6_vmpyih :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyih_128B
+def int_hexagon_V6_vmpyih_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyih_acc
+def int_hexagon_V6_vmpyih_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyih_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyih_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyih_acc_128B
+def int_hexagon_V6_vmpyih_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyih_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh,VI_ftype_VIVI,2)
+// tag : V6_vmpyewuh
+def int_hexagon_V6_vmpyewuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyewuh_128B
+def int_hexagon_V6_vmpyewuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh
+def int_hexagon_V6_vmpyowh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_128B
+def int_hexagon_V6_vmpyowh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_rnd
+def int_hexagon_V6_vmpyowh_rnd :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyowh_rnd_128B
+def int_hexagon_V6_vmpyowh_rnd_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_sacc
+def int_hexagon_V6_vmpyowh_sacc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_sacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_sacc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_sacc_128B
+def int_hexagon_V6_vmpyowh_sacc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_sacc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_rnd_sacc
+def int_hexagon_V6_vmpyowh_rnd_sacc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyowh_rnd_sacc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyowh_rnd_sacc_128B
+def int_hexagon_V6_vmpyowh_rnd_sacc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_rnd_sacc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyieoh,VI_ftype_VIVI,2)
+// tag : V6_vmpyieoh
+def int_hexagon_V6_vmpyieoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyieoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyieoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyieoh_128B
+def int_hexagon_V6_vmpyieoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyieoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh,VI_ftype_VIVI,2)
+// tag : V6_vmpyiewuh
+def int_hexagon_V6_vmpyiewuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyiewuh_128B
+def int_hexagon_V6_vmpyiewuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiowh,VI_ftype_VIVI,2)
+// tag : V6_vmpyiowh
+def int_hexagon_V6_vmpyiowh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiowh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiowh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmpyiowh_128B
+def int_hexagon_V6_vmpyiowh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiowh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewh_acc
+def int_hexagon_V6_vmpyiewh_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewh_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewh_acc_128B
+def int_hexagon_V6_vmpyiewh_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewuh_acc
+def int_hexagon_V6_vmpyiewuh_acc :
+Hexagon_v512v512v512v512_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiewuh_acc_128B,VI_ftype_VIVIVI,3)
+// tag : V6_vmpyiewuh_acc_128B
+def int_hexagon_V6_vmpyiewuh_acc_128B :
+Hexagon_v1024v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmpyiewuh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub,VD_ftype_VISI,2)
+// tag : V6_vmpyub
+def int_hexagon_V6_vmpyub :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyub_128B
+def int_hexagon_V6_vmpyub_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyub_acc
+def int_hexagon_V6_vmpyub_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyub_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyub_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyub_acc_128B
+def int_hexagon_V6_vmpyub_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyub_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus,VD_ftype_VISI,2)
+// tag : V6_vmpybus
+def int_hexagon_V6_vmpybus :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_128B,VD_ftype_VISI,2)
+// tag : V6_vmpybus_128B
+def int_hexagon_V6_vmpybus_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpybus_acc
+def int_hexagon_V6_vmpybus_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpybus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpybus_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpybus_acc_128B
+def int_hexagon_V6_vmpybus_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpybus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus,VD_ftype_VDSI,2)
+// tag : V6_vmpabus
+def int_hexagon_V6_vmpabus :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpabus_128B
+def int_hexagon_V6_vmpabus_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabus_acc
+def int_hexagon_V6_vmpabus_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpabus_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpabus_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpabus_acc_128B
+def int_hexagon_V6_vmpabus_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpabus_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb,VD_ftype_VDSI,2)
+// tag : V6_vmpahb
+def int_hexagon_V6_vmpahb :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_128B,VD_ftype_VDSI,2)
+// tag : V6_vmpahb_128B
+def int_hexagon_V6_vmpahb_128B :
+Hexagon_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc,VD_ftype_VDVDSI,3)
+// tag : V6_vmpahb_acc
+def int_hexagon_V6_vmpahb_acc :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpahb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpahb_acc_128B,VD_ftype_VDVDSI,3)
+// tag : V6_vmpahb_acc_128B
+def int_hexagon_V6_vmpahb_acc_128B :
+Hexagon_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpahb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh,VD_ftype_VISI,2)
+// tag : V6_vmpyh
+def int_hexagon_V6_vmpyh :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyh_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyh_128B
+def int_hexagon_V6_vmpyh_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyhsat_acc
+def int_hexagon_V6_vmpyhsat_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsat_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyhsat_acc_128B
+def int_hexagon_V6_vmpyhsat_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyhsat_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhss,VI_ftype_VISI,2)
+// tag : V6_vmpyhss
+def int_hexagon_V6_vmpyhss :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhss">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhss_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyhss_128B
+def int_hexagon_V6_vmpyhss_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhss_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs,VI_ftype_VISI,2)
+// tag : V6_vmpyhsrs
+def int_hexagon_V6_vmpyhsrs :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyhsrs">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyhsrs_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyhsrs_128B
+def int_hexagon_V6_vmpyhsrs_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyhsrs_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh,VD_ftype_VISI,2)
+// tag : V6_vmpyuh
+def int_hexagon_V6_vmpyuh :
+Hexagon_v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_128B,VD_ftype_VISI,2)
+// tag : V6_vmpyuh_128B
+def int_hexagon_V6_vmpyuh_128B :
+Hexagon_v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc,VD_ftype_VDVISI,3)
+// tag : V6_vmpyuh_acc
+def int_hexagon_V6_vmpyuh_acc :
+Hexagon_v1024v1024v512i_Intrinsic<"HEXAGON_V6_vmpyuh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyuh_acc_128B,VD_ftype_VDVISI,3)
+// tag : V6_vmpyuh_acc_128B
+def int_hexagon_V6_vmpyuh_acc_128B :
+Hexagon_v2048v2048v1024i_Intrinsic<"HEXAGON_V6_vmpyuh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb,VI_ftype_VISI,2)
+// tag : V6_vmpyihb
+def int_hexagon_V6_vmpyihb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyihb_128B
+def int_hexagon_V6_vmpyihb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyihb_acc
+def int_hexagon_V6_vmpyihb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyihb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyihb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyihb_acc_128B
+def int_hexagon_V6_vmpyihb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyihb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb,VI_ftype_VISI,2)
+// tag : V6_vmpyiwb
+def int_hexagon_V6_vmpyiwb :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyiwb_128B
+def int_hexagon_V6_vmpyiwb_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwb_acc
+def int_hexagon_V6_vmpyiwb_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwb_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwb_acc_128B
+def int_hexagon_V6_vmpyiwb_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh,VI_ftype_VISI,2)
+// tag : V6_vmpyiwh
+def int_hexagon_V6_vmpyiwh :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_128B,VI_ftype_VISI,2)
+// tag : V6_vmpyiwh_128B
+def int_hexagon_V6_vmpyiwh_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwh_acc
+def int_hexagon_V6_vmpyiwh_acc :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmpyiwh_acc_128B,VI_ftype_VIVISI,3)
+// tag : V6_vmpyiwh_acc_128B
+def int_hexagon_V6_vmpyiwh_acc_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwh_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vand,VI_ftype_VIVI,2)
+// tag : V6_vand
+def int_hexagon_V6_vand :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vand">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vand_128B,VI_ftype_VIVI,2)
+// tag : V6_vand_128B
+def int_hexagon_V6_vand_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vand_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vor,VI_ftype_VIVI,2)
+// tag : V6_vor
+def int_hexagon_V6_vor :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vor_128B,VI_ftype_VIVI,2)
+// tag : V6_vor_128B
+def int_hexagon_V6_vor_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vxor,VI_ftype_VIVI,2)
+// tag : V6_vxor
+def int_hexagon_V6_vxor :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vxor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vxor_128B,VI_ftype_VIVI,2)
+// tag : V6_vxor_128B
+def int_hexagon_V6_vxor_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vxor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnot,VI_ftype_VI,1)
+// tag : V6_vnot
+def int_hexagon_V6_vnot :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnot">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnot_128B,VI_ftype_VI,1)
+// tag : V6_vnot_128B
+def int_hexagon_V6_vnot_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnot_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt,VI_ftype_QVSI,2)
+// tag : V6_vandqrt
+def int_hexagon_V6_vandqrt :
+Hexagon_v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_128B,VI_ftype_QVSI,2)
+// tag : V6_vandqrt_128B
+def int_hexagon_V6_vandqrt_128B :
+Hexagon_v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc,VI_ftype_VIQVSI,3)
+// tag : V6_vandqrt_acc
+def int_hexagon_V6_vandqrt_acc :
+Hexagon_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandqrt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandqrt_acc_128B,VI_ftype_VIQVSI,3)
+// tag : V6_vandqrt_acc_128B
+def int_hexagon_V6_vandqrt_acc_128B :
+Hexagon_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandqrt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt,QV_ftype_VISI,2)
+// tag : V6_vandvrt
+def int_hexagon_V6_vandvrt :
+Hexagon_v64iv512i_Intrinsic<"HEXAGON_V6_vandvrt">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_128B,QV_ftype_VISI,2)
+// tag : V6_vandvrt_128B
+def int_hexagon_V6_vandvrt_128B :
+Hexagon_v128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc,QV_ftype_QVVISI,3)
+// tag : V6_vandvrt_acc
+def int_hexagon_V6_vandvrt_acc :
+Hexagon_v64iv64iv512i_Intrinsic<"HEXAGON_V6_vandvrt_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vandvrt_acc_128B,QV_ftype_QVVISI,3)
+// tag : V6_vandvrt_acc_128B
+def int_hexagon_V6_vandvrt_acc_128B :
+Hexagon_v128iv128iv1024i_Intrinsic<"HEXAGON_V6_vandvrt_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw,QV_ftype_VIVI,2)
+// tag : V6_vgtw
+def int_hexagon_V6_vgtw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtw_128B
+def int_hexagon_V6_vgtw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_and
+def int_hexagon_V6_vgtw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_and_128B
+def int_hexagon_V6_vgtw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_or
+def int_hexagon_V6_vgtw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_or_128B
+def int_hexagon_V6_vgtw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_xor
+def int_hexagon_V6_vgtw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtw_xor_128B
+def int_hexagon_V6_vgtw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtw_xor_128B">;
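+
+// In this and the following HVX compare families, the _and/_or/_xor
+// variants take an existing predicate as their first operand and fold the
+// new comparison result into it with the named boolean operation, as the
+// QV_ftype_QVVIVI signatures above indicate.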
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw,QV_ftype_VIVI,2)
+// tag : V6_veqw
+def int_hexagon_V6_veqw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_128B,QV_ftype_VIVI,2)
+// tag : V6_veqw_128B
+def int_hexagon_V6_veqw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_and
+def int_hexagon_V6_veqw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_and_128B
+def int_hexagon_V6_veqw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_or
+def int_hexagon_V6_veqw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_or_128B
+def int_hexagon_V6_veqw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_xor
+def int_hexagon_V6_veqw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqw_xor_128B
+def int_hexagon_V6_veqw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqw_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth,QV_ftype_VIVI,2)
+// tag : V6_vgth
+def int_hexagon_V6_vgth :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_128B,QV_ftype_VIVI,2)
+// tag : V6_vgth_128B
+def int_hexagon_V6_vgth_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_and
+def int_hexagon_V6_vgth_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_and_128B
+def int_hexagon_V6_vgth_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_or
+def int_hexagon_V6_vgth_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_or_128B
+def int_hexagon_V6_vgth_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_xor
+def int_hexagon_V6_vgth_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgth_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgth_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgth_xor_128B
+def int_hexagon_V6_vgth_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgth_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh,QV_ftype_VIVI,2)
+// tag : V6_veqh
+def int_hexagon_V6_veqh :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_128B,QV_ftype_VIVI,2)
+// tag : V6_veqh_128B
+def int_hexagon_V6_veqh_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_and
+def int_hexagon_V6_veqh_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_and_128B
+def int_hexagon_V6_veqh_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_or
+def int_hexagon_V6_veqh_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_or_128B
+def int_hexagon_V6_veqh_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_xor
+def int_hexagon_V6_veqh_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqh_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqh_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqh_xor_128B
+def int_hexagon_V6_veqh_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqh_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb,QV_ftype_VIVI,2)
+// tag : V6_vgtb
+def int_hexagon_V6_vgtb :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtb_128B
+def int_hexagon_V6_vgtb_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_and
+def int_hexagon_V6_vgtb_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_and_128B
+def int_hexagon_V6_vgtb_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_or
+def int_hexagon_V6_vgtb_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_or_128B
+def int_hexagon_V6_vgtb_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_xor
+def int_hexagon_V6_vgtb_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtb_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtb_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtb_xor_128B
+def int_hexagon_V6_vgtb_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtb_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb,QV_ftype_VIVI,2)
+// tag : V6_veqb
+def int_hexagon_V6_veqb :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_veqb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_128B,QV_ftype_VIVI,2)
+// tag : V6_veqb_128B
+def int_hexagon_V6_veqb_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_and,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_and
+def int_hexagon_V6_veqb_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_and_128B
+def int_hexagon_V6_veqb_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_or,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_or
+def int_hexagon_V6_veqb_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_or_128B
+def int_hexagon_V6_veqb_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_xor,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_xor
+def int_hexagon_V6_veqb_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_veqb_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_veqb_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_veqb_xor_128B
+def int_hexagon_V6_veqb_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_veqb_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw,QV_ftype_VIVI,2)
+// tag : V6_vgtuw
+def int_hexagon_V6_vgtuw :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtuw_128B
+def int_hexagon_V6_vgtuw_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_and
+def int_hexagon_V6_vgtuw_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_and_128B
+def int_hexagon_V6_vgtuw_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_or
+def int_hexagon_V6_vgtuw_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_or_128B
+def int_hexagon_V6_vgtuw_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_xor
+def int_hexagon_V6_vgtuw_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuw_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuw_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuw_xor_128B
+def int_hexagon_V6_vgtuw_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuw_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh,QV_ftype_VIVI,2)
+// tag : V6_vgtuh
+def int_hexagon_V6_vgtuh :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtuh_128B
+def int_hexagon_V6_vgtuh_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_and
+def int_hexagon_V6_vgtuh_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_and_128B
+def int_hexagon_V6_vgtuh_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_or
+def int_hexagon_V6_vgtuh_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_or_128B
+def int_hexagon_V6_vgtuh_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_xor
+def int_hexagon_V6_vgtuh_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtuh_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtuh_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtuh_xor_128B
+def int_hexagon_V6_vgtuh_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtuh_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub,QV_ftype_VIVI,2)
+// tag : V6_vgtub
+def int_hexagon_V6_vgtub :
+Hexagon_v64iv512v512_Intrinsic<"HEXAGON_V6_vgtub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_128B,QV_ftype_VIVI,2)
+// tag : V6_vgtub_128B
+def int_hexagon_V6_vgtub_128B :
+Hexagon_v128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_and,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_and
+def int_hexagon_V6_vgtub_and :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_and_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_and_128B
+def int_hexagon_V6_vgtub_and_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_or,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_or
+def int_hexagon_V6_vgtub_or :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_or_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_or_128B
+def int_hexagon_V6_vgtub_or_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_xor,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_xor
+def int_hexagon_V6_vgtub_xor :
+Hexagon_v64iv64iv512v512_Intrinsic<"HEXAGON_V6_vgtub_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vgtub_xor_128B,QV_ftype_QVVIVI,3)
+// tag : V6_vgtub_xor_128B
+def int_hexagon_V6_vgtub_xor_128B :
+Hexagon_v128iv128iv1024v1024_Intrinsic<"HEXAGON_V6_vgtub_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or,QV_ftype_QVQV,2)
+// tag : V6_pred_or
+def int_hexagon_V6_pred_or :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_or_128B
+def int_hexagon_V6_pred_or_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and,QV_ftype_QVQV,2)
+// tag : V6_pred_and
+def int_hexagon_V6_pred_and :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_and_128B
+def int_hexagon_V6_pred_and_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_not,QV_ftype_QV,1)
+// tag : V6_pred_not
+def int_hexagon_V6_pred_not :
+Hexagon_v64iv64i_Intrinsic<"HEXAGON_V6_pred_not">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_not_128B,QV_ftype_QV,1)
+// tag : V6_pred_not_128B
+def int_hexagon_V6_pred_not_128B :
+Hexagon_v128iv128i_Intrinsic<"HEXAGON_V6_pred_not_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_xor,QV_ftype_QVQV,2)
+// tag : V6_pred_xor
+def int_hexagon_V6_pred_xor :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_xor">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_xor_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_xor_128B
+def int_hexagon_V6_pred_xor_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_xor_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_n,QV_ftype_QVQV,2)
+// tag : V6_pred_and_n
+def int_hexagon_V6_pred_and_n :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_and_n">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_and_n_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_and_n_128B
+def int_hexagon_V6_pred_and_n_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_and_n_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_n,QV_ftype_QVQV,2)
+// tag : V6_pred_or_n
+def int_hexagon_V6_pred_or_n :
+Hexagon_v64iv64iv64i_Intrinsic<"HEXAGON_V6_pred_or_n">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_or_n_128B,QV_ftype_QVQV,2)
+// tag : V6_pred_or_n_128B
+def int_hexagon_V6_pred_or_n_128B :
+Hexagon_v128iv128iv128i_Intrinsic<"HEXAGON_V6_pred_or_n_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2,QV_ftype_SI,1)
+// tag : V6_pred_scalar2
+def int_hexagon_V6_pred_scalar2 :
+Hexagon_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_pred_scalar2_128B,QV_ftype_SI,1)
+// tag : V6_pred_scalar2_128B
+def int_hexagon_V6_pred_scalar2_128B :
+Hexagon_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmux,VI_ftype_QVVIVI,3)
+// tag : V6_vmux
+def int_hexagon_V6_vmux :
+Hexagon_v512v64iv512v512_Intrinsic<"HEXAGON_V6_vmux">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmux_128B,VI_ftype_QVVIVI,3)
+// tag : V6_vmux_128B
+def int_hexagon_V6_vmux_128B :
+Hexagon_v1024v128iv1024v1024_Intrinsic<"HEXAGON_V6_vmux_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vswap,VD_ftype_QVVIVI,3)
+// tag : V6_vswap
+def int_hexagon_V6_vswap :
+Hexagon_v1024v64iv512v512_Intrinsic<"HEXAGON_V6_vswap">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vswap_128B,VD_ftype_QVVIVI,3)
+// tag : V6_vswap_128B
+def int_hexagon_V6_vswap_128B :
+Hexagon_v2048v128iv1024v1024_Intrinsic<"HEXAGON_V6_vswap_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxub,VI_ftype_VIVI,2)
+// tag : V6_vmaxub
+def int_hexagon_V6_vmaxub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxub_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxub_128B
+def int_hexagon_V6_vmaxub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminub,VI_ftype_VIVI,2)
+// tag : V6_vminub
+def int_hexagon_V6_vminub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminub_128B,VI_ftype_VIVI,2)
+// tag : V6_vminub_128B
+def int_hexagon_V6_vminub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxuh,VI_ftype_VIVI,2)
+// tag : V6_vmaxuh
+def int_hexagon_V6_vmaxuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxuh_128B
+def int_hexagon_V6_vmaxuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminuh,VI_ftype_VIVI,2)
+// tag : V6_vminuh
+def int_hexagon_V6_vminuh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminuh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminuh_128B,VI_ftype_VIVI,2)
+// tag : V6_vminuh_128B
+def int_hexagon_V6_vminuh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminuh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxh,VI_ftype_VIVI,2)
+// tag : V6_vmaxh
+def int_hexagon_V6_vmaxh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxh_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxh_128B
+def int_hexagon_V6_vmaxh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminh,VI_ftype_VIVI,2)
+// tag : V6_vminh
+def int_hexagon_V6_vminh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminh_128B,VI_ftype_VIVI,2)
+// tag : V6_vminh_128B
+def int_hexagon_V6_vminh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxw,VI_ftype_VIVI,2)
+// tag : V6_vmaxw
+def int_hexagon_V6_vmaxw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vmaxw_128B,VI_ftype_VIVI,2)
+// tag : V6_vmaxw_128B
+def int_hexagon_V6_vmaxw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminw,VI_ftype_VIVI,2)
+// tag : V6_vminw
+def int_hexagon_V6_vminw :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vminw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vminw_128B,VI_ftype_VIVI,2)
+// tag : V6_vminw_128B
+def int_hexagon_V6_vminw_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsathub,VI_ftype_VIVI,2)
+// tag : V6_vsathub
+def int_hexagon_V6_vsathub :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsathub">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsathub_128B,VI_ftype_VIVI,2)
+// tag : V6_vsathub_128B
+def int_hexagon_V6_vsathub_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsathub_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatwh,VI_ftype_VIVI,2)
+// tag : V6_vsatwh
+def int_hexagon_V6_vsatwh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vsatwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vsatwh_128B,VI_ftype_VIVI,2)
+// tag : V6_vsatwh_128B
+def int_hexagon_V6_vsatwh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffeb,VI_ftype_VIVI,2)
+// tag : V6_vshuffeb
+def int_hexagon_V6_vshuffeb :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffeb_128B,VI_ftype_VIVI,2)
+// tag : V6_vshuffeb_128B
+def int_hexagon_V6_vshuffeb_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffob,VI_ftype_VIVI,2)
+// tag : V6_vshuffob
+def int_hexagon_V6_vshuffob :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshuffob">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffob_128B,VI_ftype_VIVI,2)
+// tag : V6_vshuffob_128B
+def int_hexagon_V6_vshuffob_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshuffob_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufeh,VI_ftype_VIVI,2)
+// tag : V6_vshufeh
+def int_hexagon_V6_vshufeh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufeh_128B,VI_ftype_VIVI,2)
+// tag : V6_vshufeh_128B
+def int_hexagon_V6_vshufeh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoh,VI_ftype_VIVI,2)
+// tag : V6_vshufoh
+def int_hexagon_V6_vshufoh :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vshufoh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoh_128B,VI_ftype_VIVI,2)
+// tag : V6_vshufoh_128B
+def int_hexagon_V6_vshufoh_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vshufoh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffvdd,VD_ftype_VIVISI,3)
+// tag : V6_vshuffvdd
+def int_hexagon_V6_vshuffvdd :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vshuffvdd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffvdd_128B,VD_ftype_VIVISI,3)
+// tag : V6_vshuffvdd_128B
+def int_hexagon_V6_vshuffvdd_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vshuffvdd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealvdd,VD_ftype_VIVISI,3)
+// tag : V6_vdealvdd
+def int_hexagon_V6_vdealvdd :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vdealvdd">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealvdd_128B,VD_ftype_VIVISI,3)
+// tag : V6_vdealvdd_128B
+def int_hexagon_V6_vdealvdd_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vdealvdd_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeh,VD_ftype_VIVI,2)
+// tag : V6_vshufoeh
+def int_hexagon_V6_vshufoeh :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeh_128B,VD_ftype_VIVI,2)
+// tag : V6_vshufoeh_128B
+def int_hexagon_V6_vshufoeh_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeb,VD_ftype_VIVI,2)
+// tag : V6_vshufoeb
+def int_hexagon_V6_vshufoeb :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vshufoeb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshufoeb_128B,VD_ftype_VIVI,2)
+// tag : V6_vshufoeb_128B
+def int_hexagon_V6_vshufoeb_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vshufoeb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealh,VI_ftype_VI,1)
+// tag : V6_vdealh
+def int_hexagon_V6_vdealh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealh_128B,VI_ftype_VI,1)
+// tag : V6_vdealh_128B
+def int_hexagon_V6_vdealh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb,VI_ftype_VI,1)
+// tag : V6_vdealb
+def int_hexagon_V6_vdealb :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vdealb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb_128B,VI_ftype_VI,1)
+// tag : V6_vdealb_128B
+def int_hexagon_V6_vdealb_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vdealb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb4w,VI_ftype_VIVI,2)
+// tag : V6_vdealb4w
+def int_hexagon_V6_vdealb4w :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdealb4w">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdealb4w_128B,VI_ftype_VIVI,2)
+// tag : V6_vdealb4w_128B
+def int_hexagon_V6_vdealb4w_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdealb4w_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffh,VI_ftype_VI,1)
+// tag : V6_vshuffh
+def int_hexagon_V6_vshuffh :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffh_128B,VI_ftype_VI,1)
+// tag : V6_vshuffh_128B
+def int_hexagon_V6_vshuffh_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffb,VI_ftype_VI,1)
+// tag : V6_vshuffb
+def int_hexagon_V6_vshuffb :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vshuffb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vshuffb_128B,VI_ftype_VI,1)
+// tag : V6_vshuffb_128B
+def int_hexagon_V6_vshuffb_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vshuffb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_extractw,SI_ftype_VISI,2)
+// tag : V6_extractw
+def int_hexagon_V6_extractw :
+Hexagon_iv512i_Intrinsic<"HEXAGON_V6_extractw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_extractw_128B,SI_ftype_VISI,2)
+// tag : V6_extractw_128B
+def int_hexagon_V6_extractw_128B :
+Hexagon_iv1024i_Intrinsic<"HEXAGON_V6_extractw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vinsertwr,VI_ftype_VISI,2)
+// tag : V6_vinsertwr
+def int_hexagon_V6_vinsertwr :
+Hexagon_v512v512i_Intrinsic<"HEXAGON_V6_vinsertwr">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vinsertwr_128B,VI_ftype_VISI,2)
+// tag : V6_vinsertwr_128B
+def int_hexagon_V6_vinsertwr_128B :
+Hexagon_v1024v1024i_Intrinsic<"HEXAGON_V6_vinsertwr_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatw,VI_ftype_SI,1)
+// tag : V6_lvsplatw
+def int_hexagon_V6_lvsplatw :
+Hexagon_v512i_Intrinsic<"HEXAGON_V6_lvsplatw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_lvsplatw_128B,VI_ftype_SI,1)
+// tag : V6_lvsplatw_128B
+def int_hexagon_V6_lvsplatw_128B :
+Hexagon_v1024i_Intrinsic<"HEXAGON_V6_lvsplatw_128B">;
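+
+// lvsplatw broadcasts its 32-bit scalar operand across every word lane of
+// the destination vector, as the vector-from-scalar VI_ftype_SI signature
+// above reflects.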
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vassign,VI_ftype_VI,1)
+// tag : V6_vassign
+def int_hexagon_V6_vassign :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vassign">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vassign_128B,VI_ftype_VI,1)
+// tag : V6_vassign_128B
+def int_hexagon_V6_vassign_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vassign_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcombine,VD_ftype_VIVI,2)
+// tag : V6_vcombine
+def int_hexagon_V6_vcombine :
+Hexagon_v1024v512v512_Intrinsic<"HEXAGON_V6_vcombine">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcombine_128B,VD_ftype_VIVI,2)
+// tag : V6_vcombine_128B
+def int_hexagon_V6_vcombine_128B :
+Hexagon_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vcombine_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb,VI_ftype_VIDISI,3)
+// tag : V6_vlutb
+def int_hexagon_V6_vlutb :
+Hexagon_v512v512LLii_Intrinsic<"HEXAGON_V6_vlutb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_128B,VI_ftype_VIDISI,3)
+// tag : V6_vlutb_128B
+def int_hexagon_V6_vlutb_128B :
+Hexagon_v1024v1024LLii_Intrinsic<"HEXAGON_V6_vlutb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_acc,VI_ftype_VIVIDISI,4)
+// tag : V6_vlutb_acc
+def int_hexagon_V6_vlutb_acc :
+Hexagon_v512v512v512LLii_Intrinsic<"HEXAGON_V6_vlutb_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_acc_128B,VI_ftype_VIVIDISI,4)
+// tag : V6_vlutb_acc_128B
+def int_hexagon_V6_vlutb_acc_128B :
+Hexagon_v1024v1024v1024LLii_Intrinsic<"HEXAGON_V6_vlutb_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_dv,VD_ftype_VDDISI,3)
+// tag : V6_vlutb_dv
+def int_hexagon_V6_vlutb_dv :
+Hexagon_v1024v1024LLii_Intrinsic<"HEXAGON_V6_vlutb_dv">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_dv_128B,VD_ftype_VDDISI,3)
+// tag : V6_vlutb_dv_128B
+def int_hexagon_V6_vlutb_dv_128B :
+Hexagon_v2048v2048LLii_Intrinsic<"HEXAGON_V6_vlutb_dv_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_dv_acc,VD_ftype_VDVDDISI,4)
+// tag : V6_vlutb_dv_acc
+def int_hexagon_V6_vlutb_dv_acc :
+Hexagon_v1024v1024v1024LLii_Intrinsic<"HEXAGON_V6_vlutb_dv_acc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutb_dv_acc_128B,VD_ftype_VDVDDISI,4)
+// tag : V6_vlutb_dv_acc_128B
+def int_hexagon_V6_vlutb_dv_acc_128B :
+Hexagon_v2048v2048v2048LLii_Intrinsic<"HEXAGON_V6_vlutb_dv_acc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdelta,VI_ftype_VIVI,2)
+// tag : V6_vdelta
+def int_hexagon_V6_vdelta :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vdelta">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vdelta_128B,VI_ftype_VIVI,2)
+// tag : V6_vdelta_128B
+def int_hexagon_V6_vdelta_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vdelta_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrdelta,VI_ftype_VIVI,2)
+// tag : V6_vrdelta
+def int_hexagon_V6_vrdelta :
+Hexagon_v512v512v512_Intrinsic<"HEXAGON_V6_vrdelta">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vrdelta_128B,VI_ftype_VIVI,2)
+// tag : V6_vrdelta_128B
+def int_hexagon_V6_vrdelta_128B :
+Hexagon_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrdelta_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0w,VI_ftype_VI,1)
+// tag : V6_vcl0w
+def int_hexagon_V6_vcl0w :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0w">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0w_128B,VI_ftype_VI,1)
+// tag : V6_vcl0w_128B
+def int_hexagon_V6_vcl0w_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0w_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0h,VI_ftype_VI,1)
+// tag : V6_vcl0h
+def int_hexagon_V6_vcl0h :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vcl0h">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vcl0h_128B,VI_ftype_VI,1)
+// tag : V6_vcl0h_128B
+def int_hexagon_V6_vcl0h_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vcl0h_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamtw,VI_ftype_VI,1)
+// tag : V6_vnormamtw
+def int_hexagon_V6_vnormamtw :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamtw">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamtw_128B,VI_ftype_VI,1)
+// tag : V6_vnormamtw_128B
+def int_hexagon_V6_vnormamtw_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamtw_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamth,VI_ftype_VI,1)
+// tag : V6_vnormamth
+def int_hexagon_V6_vnormamth :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vnormamth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vnormamth_128B,VI_ftype_VI,1)
+// tag : V6_vnormamth_128B
+def int_hexagon_V6_vnormamth_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vnormamth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpopcounth,VI_ftype_VI,1)
+// tag : V6_vpopcounth
+def int_hexagon_V6_vpopcounth :
+Hexagon_v512v512_Intrinsic<"HEXAGON_V6_vpopcounth">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vpopcounth_128B,VI_ftype_VI,1)
+// tag : V6_vpopcounth_128B
+def int_hexagon_V6_vpopcounth_128B :
+Hexagon_v1024v1024_Intrinsic<"HEXAGON_V6_vpopcounth_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb
+def int_hexagon_V6_vlutvvb :
+Hexagon_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_128B,VI_ftype_VIVISI,3)
+// tag : V6_vlutvvb_128B
+def int_hexagon_V6_vlutvvb_128B :
+Hexagon_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracc
+def int_hexagon_V6_vlutvvb_oracc :
+Hexagon_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracc_128B,VI_ftype_VIVIVISI,4)
+// tag : V6_vlutvvb_oracc_128B
+def int_hexagon_V6_vlutvvb_oracc_128B :
+Hexagon_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh
+def int_hexagon_V6_vlutvwh :
+Hexagon_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_128B,VD_ftype_VIVISI,3)
+// tag : V6_vlutvwh_128B
+def int_hexagon_V6_vlutvwh_128B :
+Hexagon_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracc
+def int_hexagon_V6_vlutvwh_oracc :
+Hexagon_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">;
+
+//
+// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracc_128B,VD_ftype_VDVIVISI,4)
+// tag : V6_vlutvwh_oracc_128B
+def int_hexagon_V6_vlutvwh_oracc_128B :
+Hexagon_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">;
+
+//
+// BUILTIN_INFO(HEXAGON.M6_vabsdiffb,DI_ftype_DIDI,2)
+// tag : M6_vabsdiffb
+def int_hexagon_M6_vabsdiffb :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffb">;
+
+//
+// BUILTIN_INFO(HEXAGON.M6_vabsdiffub,DI_ftype_DIDI,2)
+// tag : M6_vabsdiffub
+def int_hexagon_M6_vabsdiffub :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffub">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vsplatrbp,DI_ftype_SI,1)
+// tag : S6_vsplatrbp
+def int_hexagon_S6_vsplatrbp :
+Hexagon_LLii_Intrinsic<"HEXAGON_S6_vsplatrbp">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vtrunehb_ppp,DI_ftype_DIDI,2)
+// tag : S6_vtrunehb_ppp
+def int_hexagon_S6_vtrunehb_ppp :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">;
+
+//
+// BUILTIN_INFO(HEXAGON.S6_vtrunohb_ppp,DI_ftype_DIDI,2)
+// tag : S6_vtrunohb_ppp
+def int_hexagon_S6_vtrunohb_ppp :
+Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">;
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsMips.td b/contrib/llvm/include/llvm/IR/IntrinsicsMips.td
new file mode 100644
index 0000000..3455761
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsMips.td
@@ -0,0 +1,1771 @@
+//===- IntrinsicsMips.td - Defines Mips intrinsics ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the MIPS-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP data types
+def mips_v2q15_ty: LLVMType<v2i16>;
+def mips_v4q7_ty: LLVMType<v4i8>;
+def mips_q31_ty: LLVMType<i32>;
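+// These wrap the DSP fixed-point formats in plain integer vectors: v2q15 is
+// a pair of Q15 values in a v2i16, v4q7 is four Q7 values in a v4i8, and q31
+// is a single Q31 value in an i32 (a Qn bit pattern x denotes x / 2^n).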
+
+let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.".
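+// For example, the record int_mips_addu_qb defined below becomes the IR
+// intrinsic @llvm.mips.addu.qb (TableGen drops the "int_" prefix and turns
+// underscores into dots), so a use in IR looks roughly like:
+//   %r = call <4 x i8> @llvm.mips.addu.qb(<4 x i8> %a, <4 x i8> %b)
+// and the paired GCCBuiltin record exposes it to C as
+// __builtin_mips_addu_qb.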
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 1
+
+//===----------------------------------------------------------------------===//
+// Addition/subtraction
+
+def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addu_s_qb : GCCBuiltin<"__builtin_mips_addu_s_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_subu_qb : GCCBuiltin<"__builtin_mips_subu_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subu_s_qb : GCCBuiltin<"__builtin_mips_subu_s_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+
+def int_mips_addq_ph : GCCBuiltin<"__builtin_mips_addq_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addq_s_ph : GCCBuiltin<"__builtin_mips_addq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_subq_ph : GCCBuiltin<"__builtin_mips_subq_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subq_s_ph : GCCBuiltin<"__builtin_mips_subq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+
+def int_mips_madd: GCCBuiltin<"__builtin_mips_madd">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_maddu: GCCBuiltin<"__builtin_mips_maddu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_msub: GCCBuiltin<"__builtin_mips_msub">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_msubu: GCCBuiltin<"__builtin_mips_msubu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_addq_s_w: GCCBuiltin<"__builtin_mips_addq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_subq_s_w: GCCBuiltin<"__builtin_mips_subq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], []>;
+
+def int_mips_addsc: GCCBuiltin<"__builtin_mips_addsc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+def int_mips_addwc: GCCBuiltin<"__builtin_mips_addwc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [Commutative]>;
+
+def int_mips_modsub: GCCBuiltin<"__builtin_mips_modsub">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_raddu_w_qb: GCCBuiltin<"__builtin_mips_raddu_w_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Absolute value
+
+def int_mips_absq_s_ph: GCCBuiltin<"__builtin_mips_absq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty], []>;
+def int_mips_absq_s_w: GCCBuiltin<"__builtin_mips_absq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Precision reduce/expand
+
+def int_mips_precrq_qb_ph: GCCBuiltin<"__builtin_mips_precrq_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precrqu_s_qb_ph: GCCBuiltin<"__builtin_mips_precrqu_s_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_precrq_ph_w: GCCBuiltin<"__builtin_mips_precrq_ph_w">,
+ Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_precrq_rs_ph_w: GCCBuiltin<"__builtin_mips_precrq_rs_ph_w">,
+ Intrinsic<[mips_v2q15_ty], [mips_q31_ty, mips_q31_ty], []>;
+def int_mips_preceq_w_phl: GCCBuiltin<"__builtin_mips_preceq_w_phl">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_preceq_w_phr: GCCBuiltin<"__builtin_mips_preceq_w_phr">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbl: GCCBuiltin<"__builtin_mips_precequ_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbr: GCCBuiltin<"__builtin_mips_precequ_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbla: GCCBuiltin<"__builtin_mips_precequ_ph_qbla">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_precequ_ph_qbra: GCCBuiltin<"__builtin_mips_precequ_ph_qbra">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbl: GCCBuiltin<"__builtin_mips_preceu_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbr: GCCBuiltin<"__builtin_mips_preceu_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbla: GCCBuiltin<"__builtin_mips_preceu_ph_qbla">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_preceu_ph_qbra: GCCBuiltin<"__builtin_mips_preceu_ph_qbra">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Shift
+
+def int_mips_shll_qb: GCCBuiltin<"__builtin_mips_shll_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], []>;
+def int_mips_shrl_qb: GCCBuiltin<"__builtin_mips_shrl_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_ph: GCCBuiltin<"__builtin_mips_shll_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shll_s_ph: GCCBuiltin<"__builtin_mips_shll_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], []>;
+def int_mips_shra_ph: GCCBuiltin<"__builtin_mips_shra_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_ph: GCCBuiltin<"__builtin_mips_shra_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shll_s_w: GCCBuiltin<"__builtin_mips_shll_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], []>;
+def int_mips_shra_r_w: GCCBuiltin<"__builtin_mips_shra_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shilo: GCCBuiltin<"__builtin_mips_shilo">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// Multiplication
+
+def int_mips_muleu_s_ph_qbl: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbl">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_muleu_s_ph_qbr: GCCBuiltin<"__builtin_mips_muleu_s_ph_qbr">,
+ Intrinsic<[mips_v2q15_ty], [llvm_v4i8_ty, mips_v2q15_ty], []>;
+def int_mips_mulq_rs_ph: GCCBuiltin<"__builtin_mips_mulq_rs_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phl: GCCBuiltin<"__builtin_mips_muleq_s_w_phl">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_muleq_s_w_phr: GCCBuiltin<"__builtin_mips_muleq_s_w_phr">,
+ Intrinsic<[mips_q31_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulsaq_s_w_ph: GCCBuiltin<"__builtin_mips_mulsaq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phl: GCCBuiltin<"__builtin_mips_maq_s_w_phl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_s_w_phr: GCCBuiltin<"__builtin_mips_maq_s_w_phr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phl: GCCBuiltin<"__builtin_mips_maq_sa_w_phl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_maq_sa_w_phr: GCCBuiltin<"__builtin_mips_maq_sa_w_phr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_mult: GCCBuiltin<"__builtin_mips_mult">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_multu: GCCBuiltin<"__builtin_mips_multu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//===----------------------------------------------------------------------===//
+// Dot product with accumulate/subtract
+
+def int_mips_dpau_h_qbl: GCCBuiltin<"__builtin_mips_dpau_h_qbl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpau_h_qbr: GCCBuiltin<"__builtin_mips_dpau_h_qbr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsu_h_qbl: GCCBuiltin<"__builtin_mips_dpsu_h_qbl">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsu_h_qbr: GCCBuiltin<"__builtin_mips_dpsu_h_qbr">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpaq_s_w_ph: GCCBuiltin<"__builtin_mips_dpaq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsq_s_w_ph: GCCBuiltin<"__builtin_mips_dpsq_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaq_sa_l_w: GCCBuiltin<"__builtin_mips_dpaq_sa_l_w">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
+def int_mips_dpsq_sa_l_w: GCCBuiltin<"__builtin_mips_dpsq_sa_l_w">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_q31_ty, mips_q31_ty], []>;
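+// In the dot-product records above, the leading i64 operand and i64 result
+// model the 64-bit HI/LO accumulator: each intrinsic takes the current
+// accumulator value and returns the updated one.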
+
+//===----------------------------------------------------------------------===//
+// Comparison
+
+def int_mips_cmpu_eq_qb: GCCBuiltin<"__builtin_mips_cmpu_eq_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpu_lt_qb: GCCBuiltin<"__builtin_mips_cmpu_lt_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpu_le_qb: GCCBuiltin<"__builtin_mips_cmpu_le_qb">,
+ Intrinsic<[], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgu_eq_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgu_lt_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgu_le_qb: GCCBuiltin<"__builtin_mips_cmpgu_le_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmp_eq_ph: GCCBuiltin<"__builtin_mips_cmp_eq_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_cmp_lt_ph: GCCBuiltin<"__builtin_mips_cmp_lt_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_cmp_le_ph: GCCBuiltin<"__builtin_mips_cmp_le_ph">,
+ Intrinsic<[], [mips_v2q15_ty, mips_v2q15_ty], []>;
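+// The cmp.* records return no value and carry no IntrNoMem flag because the
+// underlying instructions write their result into the ccond field of the
+// DSPControl register, i.e. they have side effects; the cmpgu.* forms
+// instead return the comparison bitmask in a GPR.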
+
+//===----------------------------------------------------------------------===//
+// Extracting
+
+def int_mips_extr_s_h: GCCBuiltin<"__builtin_mips_extr_s_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_w: GCCBuiltin<"__builtin_mips_extr_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_rs_w: GCCBuiltin<"__builtin_mips_extr_rs_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extr_r_w: GCCBuiltin<"__builtin_mips_extr_r_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extp: GCCBuiltin<"__builtin_mips_extp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+def int_mips_extpdp: GCCBuiltin<"__builtin_mips_extpdp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+
+//===----------------------------------------------------------------------===//
+// Misc
+
+def int_mips_wrdsp: GCCBuiltin<"__builtin_mips_wrdsp">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
+def int_mips_rddsp: GCCBuiltin<"__builtin_mips_rddsp">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
+
+def int_mips_insv: GCCBuiltin<"__builtin_mips_insv">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
+def int_mips_bitrev: GCCBuiltin<"__builtin_mips_bitrev">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_packrl_ph: GCCBuiltin<"__builtin_mips_packrl_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+
+def int_mips_repl_qb: GCCBuiltin<"__builtin_mips_repl_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_repl_ph: GCCBuiltin<"__builtin_mips_repl_ph">,
+ Intrinsic<[mips_v2q15_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_pick_qb: GCCBuiltin<"__builtin_mips_pick_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrReadMem]>;
+def int_mips_pick_ph: GCCBuiltin<"__builtin_mips_pick_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrReadMem]>;
+
+def int_mips_mthlip: GCCBuiltin<"__builtin_mips_mthlip">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty], []>;
+
+def int_mips_bposge32: GCCBuiltin<"__builtin_mips_bposge32">,
+ Intrinsic<[llvm_i32_ty], [], [IntrReadMem]>;
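+// bposge32 materializes the branch condition (DSPControl.pos >= 32) as an
+// i32, which is why it is marked as reading implicit state (IntrReadMem)
+// despite taking no operands.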
+
+def int_mips_lbux: GCCBuiltin<"__builtin_mips_lbux">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+def int_mips_lhx: GCCBuiltin<"__builtin_mips_lhx">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>;
+
+//===----------------------------------------------------------------------===//
+// MIPS DSP Rev 2
+
+def int_mips_absq_s_qb: GCCBuiltin<"__builtin_mips_absq_s_qb">,
+ Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>;
+
+def int_mips_addqh_ph: GCCBuiltin<"__builtin_mips_addqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_ph: GCCBuiltin<"__builtin_mips_addqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_w: GCCBuiltin<"__builtin_mips_addqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_addqh_r_w: GCCBuiltin<"__builtin_mips_addqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_addu_ph: GCCBuiltin<"__builtin_mips_addu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_addu_s_ph: GCCBuiltin<"__builtin_mips_addu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_adduh_qb: GCCBuiltin<"__builtin_mips_adduh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty],
+ [IntrNoMem, Commutative]>;
+
+def int_mips_append: GCCBuiltin<"__builtin_mips_append">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>;
+def int_mips_cmpgdu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgdu_lt_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+def int_mips_cmpgdu_le_qb: GCCBuiltin<"__builtin_mips_cmpgdu_le_qb">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], []>;
+
+def int_mips_dpa_w_ph: GCCBuiltin<"__builtin_mips_dpa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dps_w_ph: GCCBuiltin<"__builtin_mips_dps_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpaqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpaqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpax_w_ph: GCCBuiltin<"__builtin_mips_dpax_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsx_w_ph: GCCBuiltin<"__builtin_mips_dpsx_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_s_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+def int_mips_dpsqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_sa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>;
+
+def int_mips_mul_ph: GCCBuiltin<"__builtin_mips_mul_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+def int_mips_mul_s_ph: GCCBuiltin<"__builtin_mips_mul_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>;
+
+def int_mips_mulq_rs_w: GCCBuiltin<"__builtin_mips_mulq_rs_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulq_s_ph: GCCBuiltin<"__builtin_mips_mulq_s_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>;
+def int_mips_mulq_s_w: GCCBuiltin<"__builtin_mips_mulq_s_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>;
+def int_mips_mulsa_w_ph: GCCBuiltin<"__builtin_mips_mulsa_w_ph">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty],
+ [IntrNoMem]>;
+
+def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shra_r_qb: GCCBuiltin<"__builtin_mips_shra_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shrl_ph: GCCBuiltin<"__builtin_mips_shrl_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_subqh_ph: GCCBuiltin<"__builtin_mips_subqh_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_r_ph: GCCBuiltin<"__builtin_mips_subqh_r_ph">,
+ Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>;
+def int_mips_subqh_w: GCCBuiltin<"__builtin_mips_subqh_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+def int_mips_subqh_r_w: GCCBuiltin<"__builtin_mips_subqh_r_w">,
+ Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>;
+
+def int_mips_subu_ph: GCCBuiltin<"__builtin_mips_subu_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+def int_mips_subu_s_ph: GCCBuiltin<"__builtin_mips_subu_s_ph">,
+ Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>;
+
+def int_mips_subuh_qb: GCCBuiltin<"__builtin_mips_subuh_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+def int_mips_subuh_r_qb: GCCBuiltin<"__builtin_mips_subuh_r_qb">,
+ Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// MIPS MSA
+
+//===----------------------------------------------------------------------===//
+// Addition/subtraction
+
+def int_mips_add_a_b : GCCBuiltin<"__builtin_msa_add_a_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_add_a_h : GCCBuiltin<"__builtin_msa_add_a_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_add_a_w : GCCBuiltin<"__builtin_msa_add_a_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_add_a_d : GCCBuiltin<"__builtin_msa_add_a_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
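+// From C these map onto the __builtin_msa_* builtins (a sketch, assuming the
+// usual v16i8-style vector typedefs, e.g. from msa.h):
+//   v16i8 r = __builtin_msa_add_a_b(a, b);  // r[i] = |a[i]| + |b[i]|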
+
+def int_mips_adds_a_b : GCCBuiltin<"__builtin_msa_adds_a_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_a_h : GCCBuiltin<"__builtin_msa_adds_a_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_a_w : GCCBuiltin<"__builtin_msa_adds_a_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_a_d : GCCBuiltin<"__builtin_msa_adds_a_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_adds_s_b : GCCBuiltin<"__builtin_msa_adds_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_s_h : GCCBuiltin<"__builtin_msa_adds_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_s_w : GCCBuiltin<"__builtin_msa_adds_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_s_d : GCCBuiltin<"__builtin_msa_adds_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_adds_u_b : GCCBuiltin<"__builtin_msa_adds_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_u_h : GCCBuiltin<"__builtin_msa_adds_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_u_w : GCCBuiltin<"__builtin_msa_adds_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_adds_u_d : GCCBuiltin<"__builtin_msa_adds_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_addv_b : GCCBuiltin<"__builtin_msa_addv_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addv_h : GCCBuiltin<"__builtin_msa_addv_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addv_w : GCCBuiltin<"__builtin_msa_addv_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addv_d : GCCBuiltin<"__builtin_msa_addv_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_addvi_b : GCCBuiltin<"__builtin_msa_addvi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addvi_h : GCCBuiltin<"__builtin_msa_addvi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addvi_w : GCCBuiltin<"__builtin_msa_addvi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_addvi_d : GCCBuiltin<"__builtin_msa_addvi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_and_v : GCCBuiltin<"__builtin_msa_and_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_andi_b : GCCBuiltin<"__builtin_msa_andi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_asub_s_b : GCCBuiltin<"__builtin_msa_asub_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_asub_s_h : GCCBuiltin<"__builtin_msa_asub_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_asub_s_w : GCCBuiltin<"__builtin_msa_asub_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_asub_s_d : GCCBuiltin<"__builtin_msa_asub_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_asub_u_b : GCCBuiltin<"__builtin_msa_asub_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_asub_u_h : GCCBuiltin<"__builtin_msa_asub_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_asub_u_w : GCCBuiltin<"__builtin_msa_asub_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_asub_u_d : GCCBuiltin<"__builtin_msa_asub_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ave_s_b : GCCBuiltin<"__builtin_msa_ave_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_s_h : GCCBuiltin<"__builtin_msa_ave_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_s_w : GCCBuiltin<"__builtin_msa_ave_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_s_d : GCCBuiltin<"__builtin_msa_ave_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_ave_u_b : GCCBuiltin<"__builtin_msa_ave_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_u_h : GCCBuiltin<"__builtin_msa_ave_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_u_w : GCCBuiltin<"__builtin_msa_ave_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_ave_u_d : GCCBuiltin<"__builtin_msa_ave_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_aver_s_b : GCCBuiltin<"__builtin_msa_aver_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_s_h : GCCBuiltin<"__builtin_msa_aver_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_s_w : GCCBuiltin<"__builtin_msa_aver_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_s_d : GCCBuiltin<"__builtin_msa_aver_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_aver_u_b : GCCBuiltin<"__builtin_msa_aver_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_u_h : GCCBuiltin<"__builtin_msa_aver_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_u_w : GCCBuiltin<"__builtin_msa_aver_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [Commutative, IntrNoMem]>;
+def int_mips_aver_u_d : GCCBuiltin<"__builtin_msa_aver_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [Commutative, IntrNoMem]>;
+
+def int_mips_bclr_b : GCCBuiltin<"__builtin_msa_bclr_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bclr_h : GCCBuiltin<"__builtin_msa_bclr_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bclr_w : GCCBuiltin<"__builtin_msa_bclr_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bclr_d : GCCBuiltin<"__builtin_msa_bclr_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bclri_b : GCCBuiltin<"__builtin_msa_bclri_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_h : GCCBuiltin<"__builtin_msa_bclri_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_w : GCCBuiltin<"__builtin_msa_bclri_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bclri_d : GCCBuiltin<"__builtin_msa_bclri_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_binsl_b : GCCBuiltin<"__builtin_msa_binsl_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_binsl_h : GCCBuiltin<"__builtin_msa_binsl_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_binsl_w : GCCBuiltin<"__builtin_msa_binsl_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsl_d : GCCBuiltin<"__builtin_msa_binsl_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_binsli_b : GCCBuiltin<"__builtin_msa_binsli_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsli_h : GCCBuiltin<"__builtin_msa_binsli_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsli_w : GCCBuiltin<"__builtin_msa_binsli_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsli_d : GCCBuiltin<"__builtin_msa_binsli_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_binsr_b : GCCBuiltin<"__builtin_msa_binsr_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_binsr_h : GCCBuiltin<"__builtin_msa_binsr_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_binsr_w : GCCBuiltin<"__builtin_msa_binsr_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsr_d : GCCBuiltin<"__builtin_msa_binsr_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_binsri_b : GCCBuiltin<"__builtin_msa_binsri_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsri_h : GCCBuiltin<"__builtin_msa_binsri_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsri_w : GCCBuiltin<"__builtin_msa_binsri_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_binsri_d : GCCBuiltin<"__builtin_msa_binsri_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_bmnz_v : GCCBuiltin<"__builtin_msa_bmnz_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+def int_mips_bmnzi_b : GCCBuiltin<"__builtin_msa_bmnzi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_bmz_v : GCCBuiltin<"__builtin_msa_bmz_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+def int_mips_bmzi_b : GCCBuiltin<"__builtin_msa_bmzi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_bneg_b : GCCBuiltin<"__builtin_msa_bneg_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bneg_h : GCCBuiltin<"__builtin_msa_bneg_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bneg_w : GCCBuiltin<"__builtin_msa_bneg_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bneg_d : GCCBuiltin<"__builtin_msa_bneg_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bnegi_b : GCCBuiltin<"__builtin_msa_bnegi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_h : GCCBuiltin<"__builtin_msa_bnegi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_w : GCCBuiltin<"__builtin_msa_bnegi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bnegi_d : GCCBuiltin<"__builtin_msa_bnegi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_bnz_b : GCCBuiltin<"__builtin_msa_bnz_b">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bnz_h : GCCBuiltin<"__builtin_msa_bnz_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bnz_w : GCCBuiltin<"__builtin_msa_bnz_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bnz_d : GCCBuiltin<"__builtin_msa_bnz_d">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bnz_v : GCCBuiltin<"__builtin_msa_bnz_v">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
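+// The per-format bnz.df forms test whether every element is non-zero, while
+// bnz.v tests whether any bit of the whole vector is set; both expose the
+// branch condition as an i32.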
+
+def int_mips_bsel_v : GCCBuiltin<"__builtin_msa_bsel_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+def int_mips_bseli_b : GCCBuiltin<"__builtin_msa_bseli_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_bset_b : GCCBuiltin<"__builtin_msa_bset_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bset_h : GCCBuiltin<"__builtin_msa_bset_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bset_w : GCCBuiltin<"__builtin_msa_bset_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bset_d : GCCBuiltin<"__builtin_msa_bset_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bseti_b : GCCBuiltin<"__builtin_msa_bseti_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_h : GCCBuiltin<"__builtin_msa_bseti_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_w : GCCBuiltin<"__builtin_msa_bseti_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_bseti_d : GCCBuiltin<"__builtin_msa_bseti_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_bz_b : GCCBuiltin<"__builtin_msa_bz_b">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_bz_h : GCCBuiltin<"__builtin_msa_bz_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_bz_w : GCCBuiltin<"__builtin_msa_bz_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_bz_d : GCCBuiltin<"__builtin_msa_bz_d">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_bz_v : GCCBuiltin<"__builtin_msa_bz_v">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_ceq_b : GCCBuiltin<"__builtin_msa_ceq_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ceq_h : GCCBuiltin<"__builtin_msa_ceq_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ceq_w : GCCBuiltin<"__builtin_msa_ceq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ceq_d : GCCBuiltin<"__builtin_msa_ceq_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ceqi_b : GCCBuiltin<"__builtin_msa_ceqi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_h : GCCBuiltin<"__builtin_msa_ceqi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_w : GCCBuiltin<"__builtin_msa_ceqi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ceqi_d : GCCBuiltin<"__builtin_msa_ceqi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_cfcmsa : GCCBuiltin<"__builtin_msa_cfcmsa">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
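+// cfcmsa (and ctcmsa further below) move values out of and into the MSA
+// control registers, so both are left without attributes and treated as
+// having unmodeled side effects.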
+
+def int_mips_cle_s_b : GCCBuiltin<"__builtin_msa_cle_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_cle_s_h : GCCBuiltin<"__builtin_msa_cle_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_cle_s_w : GCCBuiltin<"__builtin_msa_cle_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_cle_s_d : GCCBuiltin<"__builtin_msa_cle_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_cle_u_b : GCCBuiltin<"__builtin_msa_cle_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_cle_u_h : GCCBuiltin<"__builtin_msa_cle_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_cle_u_w : GCCBuiltin<"__builtin_msa_cle_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_cle_u_d : GCCBuiltin<"__builtin_msa_cle_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clei_s_b : GCCBuiltin<"__builtin_msa_clei_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_h : GCCBuiltin<"__builtin_msa_clei_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_w : GCCBuiltin<"__builtin_msa_clei_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_s_d : GCCBuiltin<"__builtin_msa_clei_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clei_u_b : GCCBuiltin<"__builtin_msa_clei_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_h : GCCBuiltin<"__builtin_msa_clei_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_w : GCCBuiltin<"__builtin_msa_clei_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clei_u_d : GCCBuiltin<"__builtin_msa_clei_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clt_s_b : GCCBuiltin<"__builtin_msa_clt_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_clt_s_h : GCCBuiltin<"__builtin_msa_clt_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_clt_s_w : GCCBuiltin<"__builtin_msa_clt_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_clt_s_d : GCCBuiltin<"__builtin_msa_clt_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clt_u_b : GCCBuiltin<"__builtin_msa_clt_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_clt_u_h : GCCBuiltin<"__builtin_msa_clt_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_clt_u_w : GCCBuiltin<"__builtin_msa_clt_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_clt_u_d : GCCBuiltin<"__builtin_msa_clt_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_clti_s_b : GCCBuiltin<"__builtin_msa_clti_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_h : GCCBuiltin<"__builtin_msa_clti_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_w : GCCBuiltin<"__builtin_msa_clti_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_s_d : GCCBuiltin<"__builtin_msa_clti_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_clti_u_b : GCCBuiltin<"__builtin_msa_clti_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_h : GCCBuiltin<"__builtin_msa_clti_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_w : GCCBuiltin<"__builtin_msa_clti_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_clti_u_d : GCCBuiltin<"__builtin_msa_clti_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_copy_s_b : GCCBuiltin<"__builtin_msa_copy_s_b">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_h : GCCBuiltin<"__builtin_msa_copy_s_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_w : GCCBuiltin<"__builtin_msa_copy_s_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_s_d : GCCBuiltin<"__builtin_msa_copy_s_d">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_copy_u_b : GCCBuiltin<"__builtin_msa_copy_u_b">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_h : GCCBuiltin<"__builtin_msa_copy_u_h">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_w : GCCBuiltin<"__builtin_msa_copy_u_w">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_copy_u_d : GCCBuiltin<"__builtin_msa_copy_u_d">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_ctcmsa : GCCBuiltin<"__builtin_msa_ctcmsa">,
+ Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_mips_div_s_b : GCCBuiltin<"__builtin_msa_div_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_div_s_h : GCCBuiltin<"__builtin_msa_div_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_div_s_w : GCCBuiltin<"__builtin_msa_div_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_div_s_d : GCCBuiltin<"__builtin_msa_div_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_div_u_b : GCCBuiltin<"__builtin_msa_div_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_div_u_h : GCCBuiltin<"__builtin_msa_div_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_div_u_w : GCCBuiltin<"__builtin_msa_div_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_div_u_d : GCCBuiltin<"__builtin_msa_div_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+// This instruction is part of the MSA spec, but its builtin does not use the
+// __builtin_msa prefix because it operates on general-purpose registers.
+def int_mips_dlsa : GCCBuiltin<"__builtin_mips_dlsa">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_dotp_s_h : GCCBuiltin<"__builtin_msa_dotp_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_dotp_s_w : GCCBuiltin<"__builtin_msa_dotp_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_dotp_s_d : GCCBuiltin<"__builtin_msa_dotp_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_dotp_u_h : GCCBuiltin<"__builtin_msa_dotp_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_dotp_u_w : GCCBuiltin<"__builtin_msa_dotp_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_dotp_u_d : GCCBuiltin<"__builtin_msa_dotp_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_dpadd_s_h : GCCBuiltin<"__builtin_msa_dpadd_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpadd_s_w : GCCBuiltin<"__builtin_msa_dpadd_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpadd_s_d : GCCBuiltin<"__builtin_msa_dpadd_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpadd_u_h : GCCBuiltin<"__builtin_msa_dpadd_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpadd_u_w : GCCBuiltin<"__builtin_msa_dpadd_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpadd_u_d : GCCBuiltin<"__builtin_msa_dpadd_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpsub_s_h : GCCBuiltin<"__builtin_msa_dpsub_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsub_s_w : GCCBuiltin<"__builtin_msa_dpsub_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsub_s_d : GCCBuiltin<"__builtin_msa_dpsub_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_dpsub_u_h : GCCBuiltin<"__builtin_msa_dpsub_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_dpsub_u_w : GCCBuiltin<"__builtin_msa_dpsub_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_dpsub_u_d : GCCBuiltin<"__builtin_msa_dpsub_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
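+// Each dotp.df multiplies adjacent element pairs and sums each pair into a
+// double-width result element; the dpadd/dpsub forms additionally accumulate
+// into (or subtract from) the vector passed as the first operand.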
+
+def int_mips_fadd_w : GCCBuiltin<"__builtin_msa_fadd_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fadd_d : GCCBuiltin<"__builtin_msa_fadd_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcaf_w : GCCBuiltin<"__builtin_msa_fcaf_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcaf_d : GCCBuiltin<"__builtin_msa_fcaf_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fceq_w : GCCBuiltin<"__builtin_msa_fceq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fceq_d : GCCBuiltin<"__builtin_msa_fceq_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcle_w : GCCBuiltin<"__builtin_msa_fcle_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcle_d : GCCBuiltin<"__builtin_msa_fcle_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fclt_w : GCCBuiltin<"__builtin_msa_fclt_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fclt_d : GCCBuiltin<"__builtin_msa_fclt_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fclass_w : GCCBuiltin<"__builtin_msa_fclass_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fclass_d : GCCBuiltin<"__builtin_msa_fclass_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcne_w : GCCBuiltin<"__builtin_msa_fcne_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcne_d : GCCBuiltin<"__builtin_msa_fcne_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcor_w : GCCBuiltin<"__builtin_msa_fcor_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcor_d : GCCBuiltin<"__builtin_msa_fcor_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcueq_w : GCCBuiltin<"__builtin_msa_fcueq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcueq_d : GCCBuiltin<"__builtin_msa_fcueq_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcule_w : GCCBuiltin<"__builtin_msa_fcule_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcule_d : GCCBuiltin<"__builtin_msa_fcule_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcult_w : GCCBuiltin<"__builtin_msa_fcult_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcult_d : GCCBuiltin<"__builtin_msa_fcult_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcun_w : GCCBuiltin<"__builtin_msa_fcun_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcun_d : GCCBuiltin<"__builtin_msa_fcun_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fcune_w : GCCBuiltin<"__builtin_msa_fcune_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fcune_d : GCCBuiltin<"__builtin_msa_fcune_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fdiv_w : GCCBuiltin<"__builtin_msa_fdiv_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fdiv_d : GCCBuiltin<"__builtin_msa_fdiv_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fexdo_h : GCCBuiltin<"__builtin_msa_fexdo_h">,
+ Intrinsic<[llvm_v8f16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fexdo_w : GCCBuiltin<"__builtin_msa_fexdo_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fexp2_w : GCCBuiltin<"__builtin_msa_fexp2_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_fexp2_d : GCCBuiltin<"__builtin_msa_fexp2_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_fexupl_w : GCCBuiltin<"__builtin_msa_fexupl_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
+def int_mips_fexupl_d : GCCBuiltin<"__builtin_msa_fexupl_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+def int_mips_fexupr_w : GCCBuiltin<"__builtin_msa_fexupr_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8f16_ty], [IntrNoMem]>;
+def int_mips_fexupr_d : GCCBuiltin<"__builtin_msa_fexupr_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
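+// fexdo packs two full-width vectors down to one half-width vector, while
+// fexupl/fexupr widen the left/right half of the source to full width (hence
+// the v8f16 <-> v4f32 <-> v2f64 signatures).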
+
+def int_mips_ffint_s_w : GCCBuiltin<"__builtin_msa_ffint_s_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ffint_s_d : GCCBuiltin<"__builtin_msa_ffint_s_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ffint_u_w : GCCBuiltin<"__builtin_msa_ffint_u_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ffint_u_d : GCCBuiltin<"__builtin_msa_ffint_u_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ffql_w : GCCBuiltin<"__builtin_msa_ffql_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ffql_d : GCCBuiltin<"__builtin_msa_ffql_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_ffqr_w : GCCBuiltin<"__builtin_msa_ffqr_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ffqr_d : GCCBuiltin<"__builtin_msa_ffqr_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_fill_b : GCCBuiltin<"__builtin_msa_fill_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_h : GCCBuiltin<"__builtin_msa_fill_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_w : GCCBuiltin<"__builtin_msa_fill_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_fill_d : GCCBuiltin<"__builtin_msa_fill_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_i64_ty], [IntrNoMem]>;
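+// fill.df replicates the scalar GPR operand into every element, e.g.
+// __builtin_msa_fill_w(x) yields the vector {x, x, x, x}.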
+
+def int_mips_flog2_w : GCCBuiltin<"__builtin_msa_flog2_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_flog2_d : GCCBuiltin<"__builtin_msa_flog2_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmadd_w : GCCBuiltin<"__builtin_msa_fmadd_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_mips_fmadd_d : GCCBuiltin<"__builtin_msa_fmadd_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+
+def int_mips_fmax_w : GCCBuiltin<"__builtin_msa_fmax_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmax_d : GCCBuiltin<"__builtin_msa_fmax_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmax_a_w : GCCBuiltin<"__builtin_msa_fmax_a_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmax_a_d : GCCBuiltin<"__builtin_msa_fmax_a_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmin_w : GCCBuiltin<"__builtin_msa_fmin_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmin_d : GCCBuiltin<"__builtin_msa_fmin_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmin_a_w : GCCBuiltin<"__builtin_msa_fmin_a_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmin_a_d : GCCBuiltin<"__builtin_msa_fmin_a_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fmsub_w : GCCBuiltin<"__builtin_msa_fmsub_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_mips_fmsub_d : GCCBuiltin<"__builtin_msa_fmsub_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+
+def int_mips_fmul_w : GCCBuiltin<"__builtin_msa_fmul_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fmul_d : GCCBuiltin<"__builtin_msa_fmul_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frint_w : GCCBuiltin<"__builtin_msa_frint_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frint_d : GCCBuiltin<"__builtin_msa_frint_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frcp_w : GCCBuiltin<"__builtin_msa_frcp_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frcp_d : GCCBuiltin<"__builtin_msa_frcp_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_frsqrt_w : GCCBuiltin<"__builtin_msa_frsqrt_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_frsqrt_d : GCCBuiltin<"__builtin_msa_frsqrt_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsaf_w : GCCBuiltin<"__builtin_msa_fsaf_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsaf_d : GCCBuiltin<"__builtin_msa_fsaf_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fseq_w : GCCBuiltin<"__builtin_msa_fseq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fseq_d : GCCBuiltin<"__builtin_msa_fseq_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsle_w : GCCBuiltin<"__builtin_msa_fsle_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsle_d : GCCBuiltin<"__builtin_msa_fsle_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fslt_w : GCCBuiltin<"__builtin_msa_fslt_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fslt_d : GCCBuiltin<"__builtin_msa_fslt_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsne_w : GCCBuiltin<"__builtin_msa_fsne_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsne_d : GCCBuiltin<"__builtin_msa_fsne_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsor_w : GCCBuiltin<"__builtin_msa_fsor_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsor_d : GCCBuiltin<"__builtin_msa_fsor_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsqrt_w : GCCBuiltin<"__builtin_msa_fsqrt_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsqrt_d : GCCBuiltin<"__builtin_msa_fsqrt_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsub_w : GCCBuiltin<"__builtin_msa_fsub_w">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsub_d : GCCBuiltin<"__builtin_msa_fsub_d">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsueq_w : GCCBuiltin<"__builtin_msa_fsueq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsueq_d : GCCBuiltin<"__builtin_msa_fsueq_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsule_w : GCCBuiltin<"__builtin_msa_fsule_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsule_d : GCCBuiltin<"__builtin_msa_fsule_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsult_w : GCCBuiltin<"__builtin_msa_fsult_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsult_d : GCCBuiltin<"__builtin_msa_fsult_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsun_w : GCCBuiltin<"__builtin_msa_fsun_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsun_d : GCCBuiltin<"__builtin_msa_fsun_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_fsune_w : GCCBuiltin<"__builtin_msa_fsune_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_fsune_d : GCCBuiltin<"__builtin_msa_fsune_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftint_s_w : GCCBuiltin<"__builtin_msa_ftint_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftint_s_d : GCCBuiltin<"__builtin_msa_ftint_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftint_u_w : GCCBuiltin<"__builtin_msa_ftint_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftint_u_d : GCCBuiltin<"__builtin_msa_ftint_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftq_h : GCCBuiltin<"__builtin_msa_ftq_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftq_w : GCCBuiltin<"__builtin_msa_ftq_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftrunc_s_w : GCCBuiltin<"__builtin_msa_ftrunc_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftrunc_s_d : GCCBuiltin<"__builtin_msa_ftrunc_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_ftrunc_u_w : GCCBuiltin<"__builtin_msa_ftrunc_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_mips_ftrunc_u_d : GCCBuiltin<"__builtin_msa_ftrunc_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+def int_mips_hadd_s_h : GCCBuiltin<"__builtin_msa_hadd_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hadd_s_w : GCCBuiltin<"__builtin_msa_hadd_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hadd_s_d : GCCBuiltin<"__builtin_msa_hadd_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hadd_u_h : GCCBuiltin<"__builtin_msa_hadd_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hadd_u_w : GCCBuiltin<"__builtin_msa_hadd_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hadd_u_d : GCCBuiltin<"__builtin_msa_hadd_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hsub_s_h : GCCBuiltin<"__builtin_msa_hsub_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hsub_s_w : GCCBuiltin<"__builtin_msa_hsub_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hsub_s_d : GCCBuiltin<"__builtin_msa_hsub_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_hsub_u_h : GCCBuiltin<"__builtin_msa_hsub_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_hsub_u_w : GCCBuiltin<"__builtin_msa_hsub_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_hsub_u_d : GCCBuiltin<"__builtin_msa_hsub_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_ilvev_b : GCCBuiltin<"__builtin_msa_ilvev_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvev_h : GCCBuiltin<"__builtin_msa_ilvev_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvev_w : GCCBuiltin<"__builtin_msa_ilvev_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvev_d : GCCBuiltin<"__builtin_msa_ilvev_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvl_b : GCCBuiltin<"__builtin_msa_ilvl_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvl_h : GCCBuiltin<"__builtin_msa_ilvl_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvl_w : GCCBuiltin<"__builtin_msa_ilvl_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvl_d : GCCBuiltin<"__builtin_msa_ilvl_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvod_b : GCCBuiltin<"__builtin_msa_ilvod_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvod_h : GCCBuiltin<"__builtin_msa_ilvod_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvod_w : GCCBuiltin<"__builtin_msa_ilvod_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvod_d : GCCBuiltin<"__builtin_msa_ilvod_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_ilvr_b : GCCBuiltin<"__builtin_msa_ilvr_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_ilvr_h : GCCBuiltin<"__builtin_msa_ilvr_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_ilvr_w : GCCBuiltin<"__builtin_msa_ilvr_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_ilvr_d : GCCBuiltin<"__builtin_msa_ilvr_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_insert_b : GCCBuiltin<"__builtin_msa_insert_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_insert_h : GCCBuiltin<"__builtin_msa_insert_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_insert_w : GCCBuiltin<"__builtin_msa_insert_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_insert_d : GCCBuiltin<"__builtin_msa_insert_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_insve_b : GCCBuiltin<"__builtin_msa_insve_b">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_insve_h : GCCBuiltin<"__builtin_msa_insve_h">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_insve_w : GCCBuiltin<"__builtin_msa_insve_w">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_insve_d : GCCBuiltin<"__builtin_msa_insve_d">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_ld_b : GCCBuiltin<"__builtin_msa_ld_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_mips_ld_h : GCCBuiltin<"__builtin_msa_ld_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_mips_ld_w : GCCBuiltin<"__builtin_msa_ld_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+def int_mips_ld_d : GCCBuiltin<"__builtin_msa_ld_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+def int_mips_ldi_b : GCCBuiltin<"__builtin_msa_ldi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_h : GCCBuiltin<"__builtin_msa_ldi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_w : GCCBuiltin<"__builtin_msa_ldi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_mips_ldi_d : GCCBuiltin<"__builtin_msa_ldi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+// This instruction is part of the MSA spec, but it does not share the
+// __builtin_msa prefix because it operates on general-purpose registers
+// (GPRs) rather than on the MSA vector registers.
+def int_mips_lsa : GCCBuiltin<"__builtin_mips_lsa">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
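As an editorial aside, a minimal C sketch (not part of the patch) of how this builtin is typically reached: lsa folds a left-shift-by-immediate into an add, computing roughly (a << sa) + b entirely in GPRs. The operand order, the immediate range, and the scaled_add helper name are assumptions here, not taken from the patch; consult the MSA specification before relying on them.

    /* Hypothetical usage sketch; requires a MIPS compiler with MSA support. */
    int scaled_add(int a, int b) {
      /* assumed to compute roughly (a << 2) + b in a single instruction */
      return __builtin_mips_lsa(a, b, 2);
    }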
+
+def int_mips_madd_q_h : GCCBuiltin<"__builtin_msa_madd_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_madd_q_w : GCCBuiltin<"__builtin_msa_madd_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_maddr_q_h : GCCBuiltin<"__builtin_msa_maddr_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_maddr_q_w : GCCBuiltin<"__builtin_msa_maddr_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_maddv_b : GCCBuiltin<"__builtin_msa_maddv_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_maddv_h : GCCBuiltin<"__builtin_msa_maddv_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_maddv_w : GCCBuiltin<"__builtin_msa_maddv_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_maddv_d : GCCBuiltin<"__builtin_msa_maddv_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_max_a_b : GCCBuiltin<"__builtin_msa_max_a_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_a_h : GCCBuiltin<"__builtin_msa_max_a_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_a_w : GCCBuiltin<"__builtin_msa_max_a_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_a_d : GCCBuiltin<"__builtin_msa_max_a_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_max_s_b : GCCBuiltin<"__builtin_msa_max_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_s_h : GCCBuiltin<"__builtin_msa_max_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_s_w : GCCBuiltin<"__builtin_msa_max_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_s_d : GCCBuiltin<"__builtin_msa_max_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_max_u_b : GCCBuiltin<"__builtin_msa_max_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_max_u_h : GCCBuiltin<"__builtin_msa_max_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_max_u_w : GCCBuiltin<"__builtin_msa_max_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_max_u_d : GCCBuiltin<"__builtin_msa_max_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_maxi_s_b : GCCBuiltin<"__builtin_msa_maxi_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_h : GCCBuiltin<"__builtin_msa_maxi_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_w : GCCBuiltin<"__builtin_msa_maxi_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_s_d : GCCBuiltin<"__builtin_msa_maxi_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_maxi_u_b : GCCBuiltin<"__builtin_msa_maxi_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_h : GCCBuiltin<"__builtin_msa_maxi_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_w : GCCBuiltin<"__builtin_msa_maxi_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_maxi_u_d : GCCBuiltin<"__builtin_msa_maxi_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_min_a_b : GCCBuiltin<"__builtin_msa_min_a_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_a_h : GCCBuiltin<"__builtin_msa_min_a_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_a_w : GCCBuiltin<"__builtin_msa_min_a_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_a_d : GCCBuiltin<"__builtin_msa_min_a_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_min_s_b : GCCBuiltin<"__builtin_msa_min_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_s_h : GCCBuiltin<"__builtin_msa_min_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_s_w : GCCBuiltin<"__builtin_msa_min_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_s_d : GCCBuiltin<"__builtin_msa_min_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_min_u_b : GCCBuiltin<"__builtin_msa_min_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_min_u_h : GCCBuiltin<"__builtin_msa_min_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_min_u_w : GCCBuiltin<"__builtin_msa_min_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_min_u_d : GCCBuiltin<"__builtin_msa_min_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_mini_s_b : GCCBuiltin<"__builtin_msa_mini_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_h : GCCBuiltin<"__builtin_msa_mini_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_w : GCCBuiltin<"__builtin_msa_mini_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_s_d : GCCBuiltin<"__builtin_msa_mini_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_mini_u_b : GCCBuiltin<"__builtin_msa_mini_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_h : GCCBuiltin<"__builtin_msa_mini_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_w : GCCBuiltin<"__builtin_msa_mini_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_mini_u_d : GCCBuiltin<"__builtin_msa_mini_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_mod_s_b : GCCBuiltin<"__builtin_msa_mod_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mod_s_h : GCCBuiltin<"__builtin_msa_mod_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mod_s_w : GCCBuiltin<"__builtin_msa_mod_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mod_s_d : GCCBuiltin<"__builtin_msa_mod_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_mod_u_b : GCCBuiltin<"__builtin_msa_mod_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mod_u_h : GCCBuiltin<"__builtin_msa_mod_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mod_u_w : GCCBuiltin<"__builtin_msa_mod_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mod_u_d : GCCBuiltin<"__builtin_msa_mod_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_move_v : GCCBuiltin<"__builtin_msa_move_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_msub_q_h : GCCBuiltin<"__builtin_msa_msub_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_msub_q_w : GCCBuiltin<"__builtin_msa_msub_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_msubr_q_h : GCCBuiltin<"__builtin_msa_msubr_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_msubr_q_w : GCCBuiltin<"__builtin_msa_msubr_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_msubv_b : GCCBuiltin<"__builtin_msa_msubv_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_msubv_h : GCCBuiltin<"__builtin_msa_msubv_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_msubv_w : GCCBuiltin<"__builtin_msa_msubv_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_msubv_d : GCCBuiltin<"__builtin_msa_msubv_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_mul_q_h : GCCBuiltin<"__builtin_msa_mul_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mul_q_w : GCCBuiltin<"__builtin_msa_mul_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_mulr_q_h : GCCBuiltin<"__builtin_msa_mulr_q_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mulr_q_w : GCCBuiltin<"__builtin_msa_mulr_q_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+
+def int_mips_mulv_b : GCCBuiltin<"__builtin_msa_mulv_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_mulv_h : GCCBuiltin<"__builtin_msa_mulv_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_mulv_w : GCCBuiltin<"__builtin_msa_mulv_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_mulv_d : GCCBuiltin<"__builtin_msa_mulv_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nloc_b : GCCBuiltin<"__builtin_msa_nloc_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_nloc_h : GCCBuiltin<"__builtin_msa_nloc_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_nloc_w : GCCBuiltin<"__builtin_msa_nloc_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_nloc_d : GCCBuiltin<"__builtin_msa_nloc_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nlzc_b : GCCBuiltin<"__builtin_msa_nlzc_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_nlzc_h : GCCBuiltin<"__builtin_msa_nlzc_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_nlzc_w : GCCBuiltin<"__builtin_msa_nlzc_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_nlzc_d : GCCBuiltin<"__builtin_msa_nlzc_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_nor_v : GCCBuiltin<"__builtin_msa_nor_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_nori_b : GCCBuiltin<"__builtin_msa_nori_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_or_v : GCCBuiltin<"__builtin_msa_or_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_ori_b : GCCBuiltin<"__builtin_msa_ori_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_pckev_b : GCCBuiltin<"__builtin_msa_pckev_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pckev_h : GCCBuiltin<"__builtin_msa_pckev_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pckev_w : GCCBuiltin<"__builtin_msa_pckev_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pckev_d : GCCBuiltin<"__builtin_msa_pckev_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_pckod_b : GCCBuiltin<"__builtin_msa_pckod_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pckod_h : GCCBuiltin<"__builtin_msa_pckod_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pckod_w : GCCBuiltin<"__builtin_msa_pckod_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pckod_d : GCCBuiltin<"__builtin_msa_pckod_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_pcnt_b : GCCBuiltin<"__builtin_msa_pcnt_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_pcnt_h : GCCBuiltin<"__builtin_msa_pcnt_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_pcnt_w : GCCBuiltin<"__builtin_msa_pcnt_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_pcnt_d : GCCBuiltin<"__builtin_msa_pcnt_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_sat_s_b : GCCBuiltin<"__builtin_msa_sat_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_h : GCCBuiltin<"__builtin_msa_sat_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_w : GCCBuiltin<"__builtin_msa_sat_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_s_d : GCCBuiltin<"__builtin_msa_sat_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sat_u_b : GCCBuiltin<"__builtin_msa_sat_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_h : GCCBuiltin<"__builtin_msa_sat_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_w : GCCBuiltin<"__builtin_msa_sat_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_sat_u_d : GCCBuiltin<"__builtin_msa_sat_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_shf_b : GCCBuiltin<"__builtin_msa_shf_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shf_h : GCCBuiltin<"__builtin_msa_shf_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_shf_w : GCCBuiltin<"__builtin_msa_shf_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sld_b : GCCBuiltin<"__builtin_msa_sld_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sld_h : GCCBuiltin<"__builtin_msa_sld_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sld_w : GCCBuiltin<"__builtin_msa_sld_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sld_d : GCCBuiltin<"__builtin_msa_sld_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_sldi_b : GCCBuiltin<"__builtin_msa_sldi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sldi_h : GCCBuiltin<"__builtin_msa_sldi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sldi_w : GCCBuiltin<"__builtin_msa_sldi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+def int_mips_sldi_d : GCCBuiltin<"__builtin_msa_sldi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+def int_mips_sll_b : GCCBuiltin<"__builtin_msa_sll_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_sll_h : GCCBuiltin<"__builtin_msa_sll_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_sll_w : GCCBuiltin<"__builtin_msa_sll_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_sll_d : GCCBuiltin<"__builtin_msa_sll_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_slli_b : GCCBuiltin<"__builtin_msa_slli_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_h : GCCBuiltin<"__builtin_msa_slli_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_w : GCCBuiltin<"__builtin_msa_slli_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_slli_d : GCCBuiltin<"__builtin_msa_slli_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_splat_b : GCCBuiltin<"__builtin_msa_splat_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_h : GCCBuiltin<"__builtin_msa_splat_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_w : GCCBuiltin<"__builtin_msa_splat_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splat_d : GCCBuiltin<"__builtin_msa_splat_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_splati_b : GCCBuiltin<"__builtin_msa_splati_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_h : GCCBuiltin<"__builtin_msa_splati_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_w : GCCBuiltin<"__builtin_msa_splati_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_splati_d : GCCBuiltin<"__builtin_msa_splati_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_sra_b : GCCBuiltin<"__builtin_msa_sra_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_sra_h : GCCBuiltin<"__builtin_msa_sra_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_sra_w : GCCBuiltin<"__builtin_msa_sra_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_sra_d : GCCBuiltin<"__builtin_msa_sra_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srai_b : GCCBuiltin<"__builtin_msa_srai_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_h : GCCBuiltin<"__builtin_msa_srai_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_w : GCCBuiltin<"__builtin_msa_srai_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srai_d : GCCBuiltin<"__builtin_msa_srai_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srar_b : GCCBuiltin<"__builtin_msa_srar_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srar_h : GCCBuiltin<"__builtin_msa_srar_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srar_w : GCCBuiltin<"__builtin_msa_srar_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srar_d : GCCBuiltin<"__builtin_msa_srar_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srari_b : GCCBuiltin<"__builtin_msa_srari_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_h : GCCBuiltin<"__builtin_msa_srari_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_w : GCCBuiltin<"__builtin_msa_srari_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srari_d : GCCBuiltin<"__builtin_msa_srari_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srl_b : GCCBuiltin<"__builtin_msa_srl_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srl_h : GCCBuiltin<"__builtin_msa_srl_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srl_w : GCCBuiltin<"__builtin_msa_srl_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srl_d : GCCBuiltin<"__builtin_msa_srl_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srli_b : GCCBuiltin<"__builtin_msa_srli_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_h : GCCBuiltin<"__builtin_msa_srli_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_w : GCCBuiltin<"__builtin_msa_srli_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srli_d : GCCBuiltin<"__builtin_msa_srli_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_srlr_b : GCCBuiltin<"__builtin_msa_srlr_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_srlr_h : GCCBuiltin<"__builtin_msa_srlr_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_srlr_w : GCCBuiltin<"__builtin_msa_srlr_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_srlr_d : GCCBuiltin<"__builtin_msa_srlr_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_srlri_b : GCCBuiltin<"__builtin_msa_srlri_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_h : GCCBuiltin<"__builtin_msa_srlri_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_w : GCCBuiltin<"__builtin_msa_srlri_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_srlri_d : GCCBuiltin<"__builtin_msa_srlri_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_st_b : GCCBuiltin<"__builtin_msa_st_b">,
+ Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_mips_st_h : GCCBuiltin<"__builtin_msa_st_h">,
+ Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_mips_st_w : GCCBuiltin<"__builtin_msa_st_w">,
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+def int_mips_st_d : GCCBuiltin<"__builtin_msa_st_d">,
+ Intrinsic<[], [llvm_v2i64_ty, llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
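For orientation, a minimal C sketch (not part of the patch) of how the ld/st builtins above pair up: each takes a base pointer plus a constant byte offset, matching the [llvm_ptr_ty, llvm_i32_ty] operand lists in the definitions. The copy16 helper is hypothetical, and <msa.h> (which provides the v16i8 vector typedef) is assumed to be available.

    #include <msa.h>  /* assumed to provide the v16i8 vector typedef */

    /* Hypothetical helper: copy one 16-byte MSA vector. */
    void copy16(const signed char *src, signed char *dst) {
      v16i8 v = __builtin_msa_ld_b((void *)src, 0); /* load 16 bytes at src+0 */
      __builtin_msa_st_b(v, dst, 0);                /* store them at dst+0 */
    }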
+
+def int_mips_subs_s_b : GCCBuiltin<"__builtin_msa_subs_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subs_s_h : GCCBuiltin<"__builtin_msa_subs_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subs_s_w : GCCBuiltin<"__builtin_msa_subs_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subs_s_d : GCCBuiltin<"__builtin_msa_subs_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subs_u_b : GCCBuiltin<"__builtin_msa_subs_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subs_u_h : GCCBuiltin<"__builtin_msa_subs_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subs_u_w : GCCBuiltin<"__builtin_msa_subs_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subs_u_d : GCCBuiltin<"__builtin_msa_subs_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subsus_u_b : GCCBuiltin<"__builtin_msa_subsus_u_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subsus_u_h : GCCBuiltin<"__builtin_msa_subsus_u_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subsus_u_w : GCCBuiltin<"__builtin_msa_subsus_u_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subsus_u_d : GCCBuiltin<"__builtin_msa_subsus_u_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subsuu_s_b : GCCBuiltin<"__builtin_msa_subsuu_s_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_h : GCCBuiltin<"__builtin_msa_subsuu_s_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_w : GCCBuiltin<"__builtin_msa_subsuu_s_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subsuu_s_d : GCCBuiltin<"__builtin_msa_subsuu_s_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subv_b : GCCBuiltin<"__builtin_msa_subv_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+def int_mips_subv_h : GCCBuiltin<"__builtin_msa_subv_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+def int_mips_subv_w : GCCBuiltin<"__builtin_msa_subv_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_mips_subv_d : GCCBuiltin<"__builtin_msa_subv_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+def int_mips_subvi_b : GCCBuiltin<"__builtin_msa_subvi_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_h : GCCBuiltin<"__builtin_msa_subvi_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_w : GCCBuiltin<"__builtin_msa_subvi_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_mips_subvi_d : GCCBuiltin<"__builtin_msa_subvi_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+def int_mips_vshf_b : GCCBuiltin<"__builtin_msa_vshf_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+def int_mips_vshf_h : GCCBuiltin<"__builtin_msa_vshf_h">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+def int_mips_vshf_w : GCCBuiltin<"__builtin_msa_vshf_w">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_mips_vshf_d : GCCBuiltin<"__builtin_msa_vshf_d">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+def int_mips_xor_v : GCCBuiltin<"__builtin_msa_xor_v">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+def int_mips_xori_b : GCCBuiltin<"__builtin_msa_xori_b">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i32_ty], [IntrNoMem]>;
+}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsNVVM.td b/contrib/llvm/include/llvm/IR/IntrinsicsNVVM.td
new file mode 100644
index 0000000..9deed41
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -0,0 +1,3746 @@
+//===- IntrinsicsNVVM.td - Defines NVVM intrinsics ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the NVVM-specific intrinsics for use with NVPTX.
+//
+//===----------------------------------------------------------------------===//
+
+def llvm_anyi64ptr_ty : LLVMAnyPointerType<llvm_i64_ty>; // (space)i64*
+
+//
+// MISC
+//
+
+ def int_nvvm_clz_i : GCCBuiltin<"__nvvm_clz_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_clz_ll : GCCBuiltin<"__nvvm_clz_ll">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_popc_i : GCCBuiltin<"__nvvm_popc_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_popc_ll : GCCBuiltin<"__nvvm_popc_ll">,
+ Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_prmt : GCCBuiltin<"__nvvm_prmt">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+//
+// Min Max
+//
+
+ def int_nvvm_min_i : GCCBuiltin<"__nvvm_min_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_min_ui : GCCBuiltin<"__nvvm_min_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_min_ll : GCCBuiltin<"__nvvm_min_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_min_ull : GCCBuiltin<"__nvvm_min_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_max_i : GCCBuiltin<"__nvvm_max_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_max_ui : GCCBuiltin<"__nvvm_max_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_max_ll : GCCBuiltin<"__nvvm_max_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_max_ull : GCCBuiltin<"__nvvm_max_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmin_f : GCCBuiltin<"__nvvm_fmin_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmin_ftz_f : GCCBuiltin<"__nvvm_fmin_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmax_f : GCCBuiltin<"__nvvm_fmax_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmax_ftz_f : GCCBuiltin<"__nvvm_fmax_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fmin_d : GCCBuiltin<"__nvvm_fmin_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fmax_d : GCCBuiltin<"__nvvm_fmax_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
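For context, a minimal C sketch (not part of the patch): each GCCBuiltin<> name above is exposed as a compiler builtin when clang targets NVPTX, and a call to it lowers to the matching llvm.nvvm.* intrinsic (int_nvvm_fmin_f becomes llvm.nvvm.fmin.f). An NVPTX-targeted clang is assumed, and the clamp01 helper is hypothetical.

    /* Hypothetical helper; compile with clang --target=nvptx64-nvidia-cuda. */
    float clamp01(float x) {
      /* lowers to calls of @llvm.nvvm.fmax.f and @llvm.nvvm.fmin.f */
      return __nvvm_fmax_f(0.0f, __nvvm_fmin_f(x, 1.0f));
    }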
+
+//
+// Multiplication
+//
+
+ def int_nvvm_mulhi_i : GCCBuiltin<"__nvvm_mulhi_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mulhi_ui : GCCBuiltin<"__nvvm_mulhi_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mulhi_ll : GCCBuiltin<"__nvvm_mulhi_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mulhi_ull : GCCBuiltin<"__nvvm_mulhi_ull">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul_rn_ftz_f : GCCBuiltin<"__nvvm_mul_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rn_f : GCCBuiltin<"__nvvm_mul_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_ftz_f : GCCBuiltin<"__nvvm_mul_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_f : GCCBuiltin<"__nvvm_mul_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_ftz_f : GCCBuiltin<"__nvvm_mul_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_f : GCCBuiltin<"__nvvm_mul_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_ftz_f : GCCBuiltin<"__nvvm_mul_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_f : GCCBuiltin<"__nvvm_mul_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul_rn_d : GCCBuiltin<"__nvvm_mul_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rz_d : GCCBuiltin<"__nvvm_mul_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rm_d : GCCBuiltin<"__nvvm_mul_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul_rp_d : GCCBuiltin<"__nvvm_mul_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_mul24_i : GCCBuiltin<"__nvvm_mul24_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_mul24_ui : GCCBuiltin<"__nvvm_mul24_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Div
+//
+
+ def int_nvvm_div_approx_ftz_f : GCCBuiltin<"__nvvm_div_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_div_approx_f : GCCBuiltin<"__nvvm_div_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_div_rn_ftz_f : GCCBuiltin<"__nvvm_div_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_div_rn_f : GCCBuiltin<"__nvvm_div_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_div_rz_ftz_f : GCCBuiltin<"__nvvm_div_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_div_rz_f : GCCBuiltin<"__nvvm_div_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_div_rm_ftz_f : GCCBuiltin<"__nvvm_div_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_div_rm_f : GCCBuiltin<"__nvvm_div_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_div_rp_ftz_f : GCCBuiltin<"__nvvm_div_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_div_rp_f : GCCBuiltin<"__nvvm_div_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_div_rn_d : GCCBuiltin<"__nvvm_div_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_div_rz_d : GCCBuiltin<"__nvvm_div_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_div_rm_d : GCCBuiltin<"__nvvm_div_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_div_rp_d : GCCBuiltin<"__nvvm_div_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty], [IntrNoMem]>;
+
+//
+// Brev
+//
+
+ def int_nvvm_brev32 : GCCBuiltin<"__nvvm_brev32">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_brev64 : GCCBuiltin<"__nvvm_brev64">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+//
+// Sad
+//
+
+ def int_nvvm_sad_i : GCCBuiltin<"__nvvm_sad_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_sad_ui : GCCBuiltin<"__nvvm_sad_ui">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Floor Ceil
+//
+
+ def int_nvvm_floor_ftz_f : GCCBuiltin<"__nvvm_floor_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_floor_f : GCCBuiltin<"__nvvm_floor_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_floor_d : GCCBuiltin<"__nvvm_floor_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_ceil_ftz_f : GCCBuiltin<"__nvvm_ceil_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ceil_f : GCCBuiltin<"__nvvm_ceil_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ceil_d : GCCBuiltin<"__nvvm_ceil_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Abs
+//
+
+ def int_nvvm_abs_i : GCCBuiltin<"__nvvm_abs_i">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_abs_ll : GCCBuiltin<"__nvvm_abs_ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_fabs_ftz_f : GCCBuiltin<"__nvvm_fabs_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_fabs_f : GCCBuiltin<"__nvvm_fabs_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_fabs_d : GCCBuiltin<"__nvvm_fabs_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Round
+//
+
+ def int_nvvm_round_ftz_f : GCCBuiltin<"__nvvm_round_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_round_f : GCCBuiltin<"__nvvm_round_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_round_d : GCCBuiltin<"__nvvm_round_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Trunc
+//
+
+ def int_nvvm_trunc_ftz_f : GCCBuiltin<"__nvvm_trunc_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_trunc_f : GCCBuiltin<"__nvvm_trunc_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_trunc_d : GCCBuiltin<"__nvvm_trunc_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Saturate
+//
+
+ def int_nvvm_saturate_ftz_f : GCCBuiltin<"__nvvm_saturate_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_saturate_f : GCCBuiltin<"__nvvm_saturate_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_saturate_d : GCCBuiltin<"__nvvm_saturate_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Exp2 Log2
+//
+
+ def int_nvvm_ex2_approx_ftz_f : GCCBuiltin<"__nvvm_ex2_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ex2_approx_f : GCCBuiltin<"__nvvm_ex2_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_ex2_approx_d : GCCBuiltin<"__nvvm_ex2_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_lg2_approx_ftz_f : GCCBuiltin<"__nvvm_lg2_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_lg2_approx_f : GCCBuiltin<"__nvvm_lg2_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_lg2_approx_d : GCCBuiltin<"__nvvm_lg2_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Sin Cos
+//
+
+ def int_nvvm_sin_approx_ftz_f : GCCBuiltin<"__nvvm_sin_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sin_approx_f : GCCBuiltin<"__nvvm_sin_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_cos_approx_ftz_f : GCCBuiltin<"__nvvm_cos_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_cos_approx_f : GCCBuiltin<"__nvvm_cos_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+//
+// Fma
+//
+
+ def int_nvvm_fma_rn_ftz_f : GCCBuiltin<"__nvvm_fma_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rn_f : GCCBuiltin<"__nvvm_fma_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_ftz_f : GCCBuiltin<"__nvvm_fma_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_f : GCCBuiltin<"__nvvm_fma_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_ftz_f : GCCBuiltin<"__nvvm_fma_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_f : GCCBuiltin<"__nvvm_fma_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_ftz_f : GCCBuiltin<"__nvvm_fma_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_f : GCCBuiltin<"__nvvm_fma_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_fma_rn_d : GCCBuiltin<"__nvvm_fma_rn_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rz_d : GCCBuiltin<"__nvvm_fma_rz_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rm_d : GCCBuiltin<"__nvvm_fma_rm_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_fma_rp_d : GCCBuiltin<"__nvvm_fma_rp_d">,
+ Intrinsic<[llvm_double_ty],
+ [llvm_double_ty, llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+
+//
+// Rcp
+//
+
+ def int_nvvm_rcp_rn_ftz_f : GCCBuiltin<"__nvvm_rcp_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rn_f : GCCBuiltin<"__nvvm_rcp_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_ftz_f : GCCBuiltin<"__nvvm_rcp_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_f : GCCBuiltin<"__nvvm_rcp_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_ftz_f : GCCBuiltin<"__nvvm_rcp_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_f : GCCBuiltin<"__nvvm_rcp_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_ftz_f : GCCBuiltin<"__nvvm_rcp_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_f : GCCBuiltin<"__nvvm_rcp_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_rcp_rn_d : GCCBuiltin<"__nvvm_rcp_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rz_d : GCCBuiltin<"__nvvm_rcp_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rm_d : GCCBuiltin<"__nvvm_rcp_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_rcp_rp_d : GCCBuiltin<"__nvvm_rcp_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_rcp_approx_ftz_d : GCCBuiltin<"__nvvm_rcp_approx_ftz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Sqrt
+//
+
+ def int_nvvm_sqrt_f : GCCBuiltin<"__nvvm_sqrt_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rn_ftz_f : GCCBuiltin<"__nvvm_sqrt_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rn_f : GCCBuiltin<"__nvvm_sqrt_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_ftz_f : GCCBuiltin<"__nvvm_sqrt_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_f : GCCBuiltin<"__nvvm_sqrt_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_ftz_f : GCCBuiltin<"__nvvm_sqrt_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_f : GCCBuiltin<"__nvvm_sqrt_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_ftz_f : GCCBuiltin<"__nvvm_sqrt_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_f : GCCBuiltin<"__nvvm_sqrt_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_approx_ftz_f : GCCBuiltin<"__nvvm_sqrt_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_approx_f : GCCBuiltin<"__nvvm_sqrt_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_sqrt_rn_d : GCCBuiltin<"__nvvm_sqrt_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rz_d : GCCBuiltin<"__nvvm_sqrt_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rm_d : GCCBuiltin<"__nvvm_sqrt_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_sqrt_rp_d : GCCBuiltin<"__nvvm_sqrt_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
+
+//
+// Rsqrt
+//
+
+ def int_nvvm_rsqrt_approx_ftz_f : GCCBuiltin<"__nvvm_rsqrt_approx_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rsqrt_approx_f : GCCBuiltin<"__nvvm_rsqrt_approx_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_rsqrt_approx_d : GCCBuiltin<"__nvvm_rsqrt_approx_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem]>;
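+
+// Illustrative CUDA sketch (not part of this interface): the _rn variants
+// are correctly rounded per the PTX sqrt.rn semantics, while the _approx
+// variants trade accuracy for speed.
+//
+//   __device__ float norm_check(float x, float y) {
+//     float d2 = x * x + y * y;
+//     float s = __nvvm_sqrt_rn_f(d2);       // correctly rounded sqrt
+//     float r = __nvvm_rsqrt_approx_f(d2);  // fast approximate 1/sqrt
+//     return s * r;                         // close to 1.0f when d2 > 0
+//   }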
+
+//
+// Add
+//
+
+ def int_nvvm_add_rn_ftz_f : GCCBuiltin<"__nvvm_add_rn_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rn_f : GCCBuiltin<"__nvvm_add_rn_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_ftz_f : GCCBuiltin<"__nvvm_add_rz_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_f : GCCBuiltin<"__nvvm_add_rz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_ftz_f : GCCBuiltin<"__nvvm_add_rm_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_f : GCCBuiltin<"__nvvm_add_rm_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_ftz_f : GCCBuiltin<"__nvvm_add_rp_ftz_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_f : GCCBuiltin<"__nvvm_add_rp_f">,
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_add_rn_d : GCCBuiltin<"__nvvm_add_rn_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rz_d : GCCBuiltin<"__nvvm_add_rz_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rm_d : GCCBuiltin<"__nvvm_add_rm_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
+ def int_nvvm_add_rp_d : GCCBuiltin<"__nvvm_add_rp_d">,
+ Intrinsic<[llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem, Commutative]>;
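+
+// Naming convention (illustrative note): the _rn/_rz/_rm/_rp suffixes select
+// the rounding mode (to nearest even, toward zero, toward -infinity, toward
+// +infinity), and the _ftz variants flush subnormals to zero. A minimal CUDA
+// sketch using the GCCBuiltin bindings declared above:
+//
+//   __device__ void add_modes(float a, float b, float *out) {
+//     out[0] = __nvvm_add_rn_f(a, b);  // round to nearest even
+//     out[1] = __nvvm_add_rm_f(a, b);  // round toward -infinity
+//     out[2] = __nvvm_add_rp_f(a, b);  // round toward +infinity
+//     // out[1] and out[2] bracket the exact sum: out[1] <= a+b <= out[2].
+//   }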
+
+//
+// Convert
+//
+
+ def int_nvvm_d2f_rn_ftz : GCCBuiltin<"__nvvm_d2f_rn_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rn : GCCBuiltin<"__nvvm_d2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rz_ftz : GCCBuiltin<"__nvvm_d2f_rz_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rz : GCCBuiltin<"__nvvm_d2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rm_ftz : GCCBuiltin<"__nvvm_d2f_rm_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rm : GCCBuiltin<"__nvvm_d2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rp_ftz : GCCBuiltin<"__nvvm_d2f_rp_ftz">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2f_rp : GCCBuiltin<"__nvvm_d2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2i_rn : GCCBuiltin<"__nvvm_d2i_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rz : GCCBuiltin<"__nvvm_d2i_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rm : GCCBuiltin<"__nvvm_d2i_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_rp : GCCBuiltin<"__nvvm_d2i_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ui_rn : GCCBuiltin<"__nvvm_d2ui_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rz : GCCBuiltin<"__nvvm_d2ui_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rm : GCCBuiltin<"__nvvm_d2ui_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ui_rp : GCCBuiltin<"__nvvm_d2ui_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_i2d_rn : GCCBuiltin<"__nvvm_i2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rz : GCCBuiltin<"__nvvm_i2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rm : GCCBuiltin<"__nvvm_i2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2d_rp : GCCBuiltin<"__nvvm_i2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_ui2d_rn : GCCBuiltin<"__nvvm_ui2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rz : GCCBuiltin<"__nvvm_ui2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rm : GCCBuiltin<"__nvvm_ui2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2d_rp : GCCBuiltin<"__nvvm_ui2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2i_rn_ftz : GCCBuiltin<"__nvvm_f2i_rn_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rn : GCCBuiltin<"__nvvm_f2i_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rz_ftz : GCCBuiltin<"__nvvm_f2i_rz_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rz : GCCBuiltin<"__nvvm_f2i_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rm_ftz : GCCBuiltin<"__nvvm_f2i_rm_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rm : GCCBuiltin<"__nvvm_f2i_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rp_ftz : GCCBuiltin<"__nvvm_f2i_rp_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2i_rp : GCCBuiltin<"__nvvm_f2i_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2ui_rn_ftz : GCCBuiltin<"__nvvm_f2ui_rn_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rn : GCCBuiltin<"__nvvm_f2ui_rn">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rz_ftz : GCCBuiltin<"__nvvm_f2ui_rz_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rz : GCCBuiltin<"__nvvm_f2ui_rz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rm_ftz : GCCBuiltin<"__nvvm_f2ui_rm_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rm : GCCBuiltin<"__nvvm_f2ui_rm">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rp_ftz : GCCBuiltin<"__nvvm_f2ui_rp_ftz">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ui_rp : GCCBuiltin<"__nvvm_f2ui_rp">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_i2f_rn : GCCBuiltin<"__nvvm_i2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rz : GCCBuiltin<"__nvvm_i2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rm : GCCBuiltin<"__nvvm_i2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_i2f_rp : GCCBuiltin<"__nvvm_i2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_ui2f_rn : GCCBuiltin<"__nvvm_ui2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rz : GCCBuiltin<"__nvvm_ui2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rm : GCCBuiltin<"__nvvm_ui2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_nvvm_ui2f_rp : GCCBuiltin<"__nvvm_ui2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_lohi_i2d : GCCBuiltin<"__nvvm_lohi_i2d">,
+ Intrinsic<[llvm_double_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, Commutative]>;
+
+ def int_nvvm_d2i_lo : GCCBuiltin<"__nvvm_d2i_lo">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2i_hi : GCCBuiltin<"__nvvm_d2i_hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2ll_rn_ftz : GCCBuiltin<"__nvvm_f2ll_rn_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rn : GCCBuiltin<"__nvvm_f2ll_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rz_ftz : GCCBuiltin<"__nvvm_f2ll_rz_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rz : GCCBuiltin<"__nvvm_f2ll_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rm_ftz : GCCBuiltin<"__nvvm_f2ll_rm_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rm : GCCBuiltin<"__nvvm_f2ll_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rp_ftz : GCCBuiltin<"__nvvm_f2ll_rp_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ll_rp : GCCBuiltin<"__nvvm_f2ll_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2ull_rn_ftz : GCCBuiltin<"__nvvm_f2ull_rn_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rn : GCCBuiltin<"__nvvm_f2ull_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rz_ftz : GCCBuiltin<"__nvvm_f2ull_rz_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rz : GCCBuiltin<"__nvvm_f2ull_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rm_ftz : GCCBuiltin<"__nvvm_f2ull_rm_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rm : GCCBuiltin<"__nvvm_f2ull_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rp_ftz : GCCBuiltin<"__nvvm_f2ull_rp_ftz">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2ull_rp : GCCBuiltin<"__nvvm_f2ull_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ll_rn : GCCBuiltin<"__nvvm_d2ll_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rz : GCCBuiltin<"__nvvm_d2ll_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rm : GCCBuiltin<"__nvvm_d2ll_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ll_rp : GCCBuiltin<"__nvvm_d2ll_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_d2ull_rn : GCCBuiltin<"__nvvm_d2ull_rn">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rz : GCCBuiltin<"__nvvm_d2ull_rz">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rm : GCCBuiltin<"__nvvm_d2ull_rm">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+ def int_nvvm_d2ull_rp : GCCBuiltin<"__nvvm_d2ull_rp">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
+
+ def int_nvvm_ll2f_rn : GCCBuiltin<"__nvvm_ll2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rz : GCCBuiltin<"__nvvm_ll2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rm : GCCBuiltin<"__nvvm_ll2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2f_rp : GCCBuiltin<"__nvvm_ll2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rn : GCCBuiltin<"__nvvm_ull2f_rn">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rz : GCCBuiltin<"__nvvm_ull2f_rz">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rm : GCCBuiltin<"__nvvm_ull2f_rm">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2f_rp : GCCBuiltin<"__nvvm_ull2f_rp">,
+ Intrinsic<[llvm_float_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_ll2d_rn : GCCBuiltin<"__nvvm_ll2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rz : GCCBuiltin<"__nvvm_ll2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rm : GCCBuiltin<"__nvvm_ll2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ll2d_rp : GCCBuiltin<"__nvvm_ll2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rn : GCCBuiltin<"__nvvm_ull2d_rn">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rz : GCCBuiltin<"__nvvm_ull2d_rz">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rm : GCCBuiltin<"__nvvm_ull2d_rm">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_ull2d_rp : GCCBuiltin<"__nvvm_ull2d_rp">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_nvvm_f2h_rn_ftz : GCCBuiltin<"__nvvm_f2h_rn_ftz">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_f2h_rn : GCCBuiltin<"__nvvm_f2h_rn">,
+ Intrinsic<[llvm_i16_ty], [llvm_float_ty], [IntrNoMem]>;
+
+ def int_nvvm_h2f : GCCBuiltin<"__nvvm_h2f">,
+ Intrinsic<[llvm_float_ty], [llvm_i16_ty], [IntrNoMem]>;
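+
+// Illustrative CUDA sketch: the rounding-mode suffixes make float->int
+// conversion behave like floor/ceil, and f2h/h2f round-trip a float through
+// the 16 raw bits of a half (the i16 carries the half's bit pattern).
+//
+//   __device__ float convert_demo(double d, float f) {
+//     int lo = __nvvm_d2i_rm(d);            // round toward -infinity (floor)
+//     int hi = __nvvm_d2i_rp(d);            // round toward +infinity (ceil)
+//     unsigned short h = __nvvm_f2h_rn(f);  // float -> half bits
+//     return __nvvm_h2f(h) + (float)(hi - lo);
+//   }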
+
+//
+// Bitcast
+//
+
+ def int_nvvm_bitcast_f2i : GCCBuiltin<"__nvvm_bitcast_f2i">,
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
+ def int_nvvm_bitcast_i2f : GCCBuiltin<"__nvvm_bitcast_i2f">,
+ Intrinsic<[llvm_float_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_nvvm_bitcast_ll2d : GCCBuiltin<"__nvvm_bitcast_ll2d">,
+ Intrinsic<[llvm_double_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_nvvm_bitcast_d2ll : GCCBuiltin<"__nvvm_bitcast_d2ll">,
+ Intrinsic<[llvm_i64_ty], [llvm_double_ty], [IntrNoMem]>;
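+
+// Illustrative CUDA sketch: the bitcast intrinsics reinterpret bits without
+// any value conversion, e.g. to inspect a float's sign bit directly.
+//
+//   __device__ bool sign_bit(float f) {
+//     int bits = __nvvm_bitcast_f2i(f);  // raw IEEE-754 bits as an i32
+//     return (bits >> 31) & 1;           // true for -0.0f as well
+//   }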
+
+
+// Atomic operations not available as LLVM intrinsics.
+ def int_nvvm_atomic_load_add_f32 : Intrinsic<[llvm_float_ty],
+ [LLVMAnyPointerType<llvm_float_ty>, llvm_float_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
+ [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
+ [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
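+
+// Sequential statement of what llvm.nvvm.atomic.load.add.f32 does (the whole
+// body executes as a single atomic read-modify-write); CUDA's
+// atomicAdd(float*, float) exposes the same operation at source level,
+// though whether a frontend routes it through this intrinsic is an
+// implementation detail.
+//
+//   __device__ float atomic_add_f32_semantics(float *p, float v) {
+//     float old = *p;   // atomically...
+//     *p = old + v;     // ...add and store,
+//     return old;       // ...returning the previous value
+//   }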
+
+// Bar.Sync
+ def int_cuda_syncthreads : GCCBuiltin<"__syncthreads">,
+ Intrinsic<[], [], [IntrNoDuplicate]>;
+ def int_nvvm_barrier0 : GCCBuiltin<"__nvvm_bar0">,
+ Intrinsic<[], [], [IntrNoDuplicate]>;
+ def int_nvvm_barrier0_popc : GCCBuiltin<"__nvvm_bar0_popc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
+ def int_nvvm_barrier0_and : GCCBuiltin<"__nvvm_bar0_and">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
+ def int_nvvm_barrier0_or : GCCBuiltin<"__nvvm_bar0_or">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoDuplicate]>;
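+
+// Illustrative CUDA sketch using the GCCBuiltin bindings above: bar0_popc
+// synchronizes the CTA and returns the number of threads whose predicate was
+// nonzero (a PTX bar.red.popc-style reduction barrier).
+//
+//   __device__ int count_active(int pred) {
+//     return __nvvm_bar0_popc(pred != 0);  // all CTA threads must reach this
+//   }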
+
+ // Membar
+ def int_nvvm_membar_cta : GCCBuiltin<"__nvvm_membar_cta">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_membar_gl : GCCBuiltin<"__nvvm_membar_gl">,
+ Intrinsic<[], [], []>;
+ def int_nvvm_membar_sys : GCCBuiltin<"__nvvm_membar_sys">,
+ Intrinsic<[], [], []>;
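+
+// Illustrative CUDA sketch (assuming the usual PTX membar.{cta,gl,sys} scope
+// semantics): a membar orders this thread's prior memory accesses before its
+// later ones, as observed at the given scope.
+//
+//   __device__ void publish(int *data, volatile int *flag, int v) {
+//     *data = v;
+//     __nvvm_membar_gl();  // device-scope fence: data visible before flag
+//     *flag = 1;
+//   }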
+
+
+// Accessing special registers
+ def int_nvvm_read_ptx_sreg_tid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_x">;
+ def int_nvvm_read_ptx_sreg_tid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_y">;
+ def int_nvvm_read_ptx_sreg_tid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_tid_z">;
+
+ def int_nvvm_read_ptx_sreg_ntid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_x">;
+ def int_nvvm_read_ptx_sreg_ntid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_y">;
+ def int_nvvm_read_ptx_sreg_ntid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ntid_z">;
+
+ def int_nvvm_read_ptx_sreg_ctaid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_x">;
+ def int_nvvm_read_ptx_sreg_ctaid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_y">;
+ def int_nvvm_read_ptx_sreg_ctaid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_ctaid_z">;
+
+ def int_nvvm_read_ptx_sreg_nctaid_x :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_x">;
+ def int_nvvm_read_ptx_sreg_nctaid_y :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_y">;
+ def int_nvvm_read_ptx_sreg_nctaid_z :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_nctaid_z">;
+
+ def int_nvvm_read_ptx_sreg_warpsize :
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_warpsize">;
+
+
+// Generated within nvvm. Use for ldu on sm_20 or later.
+def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.i">;
+def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.f">;
+def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldu.global.p">;
+
+// Generated within nvvm. Use for ldg on sm_35 or later.
+def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldg.global.i">;
+def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldg.global.f">;
+def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
+ [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+ [IntrReadMem, NoCapture<0>],
+ "llvm.nvvm.ldg.global.p">;
+
+// Use for generic pointers
+// - These intrinsics are used to convert pointers between address spaces.
+// - The input pointer and output pointer must have the same type, except for
+//   the address space. (This restriction is not enforced here, as there is
+//   currently no way to describe it.)
+// - This complements the LLVM bitcast, which can be used to cast one type
+//   of pointer to another type of pointer while the address space remains
+//   the same.
+def int_nvvm_ptr_local_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.local.to.gen">;
+def int_nvvm_ptr_shared_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.shared.to.gen">;
+def int_nvvm_ptr_global_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.global.to.gen">;
+def int_nvvm_ptr_constant_to_gen: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.constant.to.gen">;
+
+def int_nvvm_ptr_gen_to_global: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.gen.to.global">;
+def int_nvvm_ptr_gen_to_shared: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.gen.to.shared">;
+def int_nvvm_ptr_gen_to_local: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.gen.to.local">;
+def int_nvvm_ptr_gen_to_constant: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty], [IntrNoMem],
+ "llvm.nvvm.ptr.gen.to.constant">;
+
+// Used internally by nvvm to help address-space optimization and PTX code
+// generation. This is for params that are passed to kernel functions by
+// pointer by value.
+def int_nvvm_ptr_gen_to_param: Intrinsic<[llvm_anyptr_ty],
+ [llvm_anyptr_ty],
+ [IntrNoMem],
+ "llvm.nvvm.ptr.gen.to.param">;
+
+// Move intrinsics, used in nvvm internally
+
+def int_nvvm_move_i16 : Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem],
+ "llvm.nvvm.move.i16">;
+def int_nvvm_move_i32 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem],
+ "llvm.nvvm.move.i32">;
+def int_nvvm_move_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.move.i64">;
+def int_nvvm_move_float : Intrinsic<[llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem], "llvm.nvvm.move.float">;
+def int_nvvm_move_double : Intrinsic<[llvm_double_ty], [llvm_double_ty],
+ [IntrNoMem], "llvm.nvvm.move.double">;
+def int_nvvm_move_ptr : Intrinsic<[llvm_anyptr_ty], [llvm_anyptr_ty],
+ [IntrNoMem, NoCapture<0>], "llvm.nvvm.move.ptr">;
+
+
+// For getting the handle from a texture or surface variable
+def int_nvvm_texsurf_handle
+ : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyi64ptr_ty],
+ [IntrNoMem], "llvm.nvvm.texsurf.handle">;
+def int_nvvm_texsurf_handle_internal
+ : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
+ [IntrNoMem], "llvm.nvvm.texsurf.handle.internal">;
+
+/// Error / Warn
+def int_nvvm_compiler_error :
+ Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.error">;
+def int_nvvm_compiler_warn :
+ Intrinsic<[], [llvm_anyptr_ty], [], "llvm.nvvm.compiler.warn">;
+
+def int_nvvm_reflect :
+ Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty], [IntrNoMem], "llvm.nvvm.reflect">;
+
+// isspacep.{const, global, local, shared}
+def int_nvvm_isspacep_const
+ : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+ "llvm.nvvm.isspacep.const">,
+ GCCBuiltin<"__nvvm_isspacep_const">;
+def int_nvvm_isspacep_global
+ : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+ "llvm.nvvm.isspacep.global">,
+ GCCBuiltin<"__nvvm_isspacep_global">;
+def int_nvvm_isspacep_local
+ : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+ "llvm.nvvm.isspacep.local">,
+ GCCBuiltin<"__nvvm_isspacep_local">;
+def int_nvvm_isspacep_shared
+ : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], [IntrNoMem],
+ "llvm.nvvm.isspacep.shared">,
+ GCCBuiltin<"__nvvm_isspacep_shared">;
+
+// Environment register read
+def int_nvvm_read_ptx_sreg_envreg0
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg0">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg0">;
+def int_nvvm_read_ptx_sreg_envreg1
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg1">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg1">;
+def int_nvvm_read_ptx_sreg_envreg2
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg2">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg2">;
+def int_nvvm_read_ptx_sreg_envreg3
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg3">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg3">;
+def int_nvvm_read_ptx_sreg_envreg4
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg4">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg4">;
+def int_nvvm_read_ptx_sreg_envreg5
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg5">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg5">;
+def int_nvvm_read_ptx_sreg_envreg6
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg6">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg6">;
+def int_nvvm_read_ptx_sreg_envreg7
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg7">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg7">;
+def int_nvvm_read_ptx_sreg_envreg8
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg8">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg8">;
+def int_nvvm_read_ptx_sreg_envreg9
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg9">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg9">;
+def int_nvvm_read_ptx_sreg_envreg10
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg10">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg10">;
+def int_nvvm_read_ptx_sreg_envreg11
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg11">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg11">;
+def int_nvvm_read_ptx_sreg_envreg12
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg12">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg12">;
+def int_nvvm_read_ptx_sreg_envreg13
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg13">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg13">;
+def int_nvvm_read_ptx_sreg_envreg14
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg14">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg14">;
+def int_nvvm_read_ptx_sreg_envreg15
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg15">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg15">;
+def int_nvvm_read_ptx_sreg_envreg16
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg16">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg16">;
+def int_nvvm_read_ptx_sreg_envreg17
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg17">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg17">;
+def int_nvvm_read_ptx_sreg_envreg18
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg18">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg18">;
+def int_nvvm_read_ptx_sreg_envreg19
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg19">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg19">;
+def int_nvvm_read_ptx_sreg_envreg20
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg20">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg20">;
+def int_nvvm_read_ptx_sreg_envreg21
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg21">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg21">;
+def int_nvvm_read_ptx_sreg_envreg22
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg22">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg22">;
+def int_nvvm_read_ptx_sreg_envreg23
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg23">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg23">;
+def int_nvvm_read_ptx_sreg_envreg24
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg24">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg24">;
+def int_nvvm_read_ptx_sreg_envreg25
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg25">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg25">;
+def int_nvvm_read_ptx_sreg_envreg26
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg26">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg26">;
+def int_nvvm_read_ptx_sreg_envreg27
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg27">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg27">;
+def int_nvvm_read_ptx_sreg_envreg28
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg28">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg28">;
+def int_nvvm_read_ptx_sreg_envreg29
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg29">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg29">;
+def int_nvvm_read_ptx_sreg_envreg30
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg30">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg30">;
+def int_nvvm_read_ptx_sreg_envreg31
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem],
+ "llvm.nvvm.read.ptx.sreg.envreg31">,
+ GCCBuiltin<"__nvvm_read_ptx_sreg_envreg31">;
+
+
+// Texture Fetch
+// texmode_independent
+def int_nvvm_tex_1d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.v4f32.s32">;
+def int_nvvm_tex_1d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.v4f32.f32">;
+def int_nvvm_tex_1d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.level.v4f32.f32">;
+def int_nvvm_tex_1d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.grad.v4f32.f32">;
+def int_nvvm_tex_1d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.v4s32.s32">;
+def int_nvvm_tex_1d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.v4s32.f32">;
+def int_nvvm_tex_1d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.level.v4s32.f32">;
+def int_nvvm_tex_1d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.grad.v4s32.f32">;
+def int_nvvm_tex_1d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.v4u32.s32">;
+def int_nvvm_tex_1d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.v4u32.f32">;
+def int_nvvm_tex_1d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.level.v4u32.f32">;
+def int_nvvm_tex_1d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.grad.v4u32.f32">;
+
+def int_nvvm_tex_1d_array_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.array.v4f32.s32">;
+def int_nvvm_tex_1d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.v4f32.f32">;
+def int_nvvm_tex_1d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.level.v4f32.f32">;
+def int_nvvm_tex_1d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.grad.v4f32.f32">;
+def int_nvvm_tex_1d_array_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.array.v4s32.s32">;
+def int_nvvm_tex_1d_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.v4s32.f32">;
+def int_nvvm_tex_1d_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.level.v4s32.f32">;
+def int_nvvm_tex_1d_array_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.grad.v4s32.f32">;
+def int_nvvm_tex_1d_array_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.1d.array.v4u32.s32">;
+def int_nvvm_tex_1d_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.v4u32.f32">;
+def int_nvvm_tex_1d_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.level.v4u32.f32">;
+def int_nvvm_tex_1d_array_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.1d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_2d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.v4f32.s32">;
+def int_nvvm_tex_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.v4f32.f32">;
+def int_nvvm_tex_2d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.level.v4f32.f32">;
+def int_nvvm_tex_2d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.grad.v4f32.f32">;
+def int_nvvm_tex_2d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.v4s32.s32">;
+def int_nvvm_tex_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.v4s32.f32">;
+def int_nvvm_tex_2d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.level.v4s32.f32">;
+def int_nvvm_tex_2d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.grad.v4s32.f32">;
+def int_nvvm_tex_2d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.v4u32.s32">;
+def int_nvvm_tex_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.v4u32.f32">;
+def int_nvvm_tex_2d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.level.v4u32.f32">;
+def int_nvvm_tex_2d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.grad.v4u32.f32">;
+
+def int_nvvm_tex_2d_array_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.array.v4f32.s32">;
+def int_nvvm_tex_2d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.v4f32.f32">;
+def int_nvvm_tex_2d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.level.v4f32.f32">;
+def int_nvvm_tex_2d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.grad.v4f32.f32">;
+def int_nvvm_tex_2d_array_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.array.v4s32.s32">;
+def int_nvvm_tex_2d_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.v4s32.f32">;
+def int_nvvm_tex_2d_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.level.v4s32.f32">;
+def int_nvvm_tex_2d_array_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.grad.v4s32.f32">;
+def int_nvvm_tex_2d_array_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.2d.array.v4u32.s32">;
+def int_nvvm_tex_2d_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.v4u32.f32">;
+def int_nvvm_tex_2d_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.level.v4u32.f32">;
+def int_nvvm_tex_2d_array_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.2d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_3d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.3d.v4f32.s32">;
+def int_nvvm_tex_3d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.v4f32.f32">;
+def int_nvvm_tex_3d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.level.v4f32.f32">;
+def int_nvvm_tex_3d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.grad.v4f32.f32">;
+def int_nvvm_tex_3d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.3d.v4s32.s32">;
+def int_nvvm_tex_3d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.v4s32.f32">;
+def int_nvvm_tex_3d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.level.v4s32.f32">;
+def int_nvvm_tex_3d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.grad.v4s32.f32">;
+def int_nvvm_tex_3d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.3d.v4u32.s32">;
+def int_nvvm_tex_3d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.v4u32.f32">;
+def int_nvvm_tex_3d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.level.v4u32.f32">;
+def int_nvvm_tex_3d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.3d.grad.v4u32.f32">;
+
+def int_nvvm_tex_cube_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.v4f32.f32">;
+def int_nvvm_tex_cube_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.level.v4f32.f32">;
+def int_nvvm_tex_cube_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.v4s32.f32">;
+def int_nvvm_tex_cube_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.level.v4s32.f32">;
+def int_nvvm_tex_cube_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.v4u32.f32">;
+def int_nvvm_tex_cube_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.level.v4u32.f32">;
+
+def int_nvvm_tex_cube_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.v4f32.f32">;
+def int_nvvm_tex_cube_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.level.v4f32.f32">;
+def int_nvvm_tex_cube_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.v4s32.f32">;
+def int_nvvm_tex_cube_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.level.v4s32.f32">;
+def int_nvvm_tex_cube_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.v4u32.f32">;
+def int_nvvm_tex_cube_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.cube.array.level.v4u32.f32">;
+
+def int_nvvm_tld4_r_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.r.2d.v4f32.f32">;
+def int_nvvm_tld4_g_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.g.2d.v4f32.f32">;
+def int_nvvm_tld4_b_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.b.2d.v4f32.f32">;
+def int_nvvm_tld4_a_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.a.2d.v4f32.f32">;
+def int_nvvm_tld4_r_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.r.2d.v4s32.f32">;
+def int_nvvm_tld4_g_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.g.2d.v4s32.f32">;
+def int_nvvm_tld4_b_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.b.2d.v4s32.f32">;
+def int_nvvm_tld4_a_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.a.2d.v4s32.f32">;
+def int_nvvm_tld4_r_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.r.2d.v4u32.f32">;
+def int_nvvm_tld4_g_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.g.2d.v4u32.f32">;
+def int_nvvm_tld4_b_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.b.2d.v4u32.f32">;
+def int_nvvm_tld4_a_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.a.2d.v4u32.f32">;
+
+
+// texmode_unified
+def int_nvvm_tex_unified_1d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4f32.s32">;
+def int_nvvm_tex_unified_1d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4f32.f32">;
+def int_nvvm_tex_unified_1d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.level.v4f32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_1d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4s32.s32">;
+def int_nvvm_tex_unified_1d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4s32.f32">;
+def int_nvvm_tex_unified_1d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.level.v4s32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_1d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4u32.s32">;
+def int_nvvm_tex_unified_1d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.v4u32.f32">;
+def int_nvvm_tex_unified_1d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.level.v4u32.f32">;
+def int_nvvm_tex_unified_1d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_1d_array_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4f32.s32">;
+def int_nvvm_tex_unified_1d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.grad.v4f32.f32">;
+def int_nvvm_tex_unified_1d_array_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4s32.s32">;
+def int_nvvm_tex_unified_1d_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.grad.v4s32.f32">;
+def int_nvvm_tex_unified_1d_array_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4u32.s32">;
+def int_nvvm_tex_unified_1d_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.v4u32.f32">;
+def int_nvvm_tex_unified_1d_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.level.v4u32.f32">;
+def int_nvvm_tex_unified_1d_array_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.1d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_2d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4f32.s32">;
+def int_nvvm_tex_unified_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4f32.f32">;
+def int_nvvm_tex_unified_2d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.level.v4f32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_2d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4s32.s32">;
+def int_nvvm_tex_unified_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4s32.f32">;
+def int_nvvm_tex_unified_2d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.level.v4s32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_2d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4u32.s32">;
+def int_nvvm_tex_unified_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.v4u32.f32">;
+def int_nvvm_tex_unified_2d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.level.v4u32.f32">;
+def int_nvvm_tex_unified_2d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_2d_array_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4f32.s32">;
+def int_nvvm_tex_unified_2d_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.grad.v4f32.f32">;
+def int_nvvm_tex_unified_2d_array_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4s32.s32">;
+def int_nvvm_tex_unified_2d_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.grad.v4s32.f32">;
+def int_nvvm_tex_unified_2d_array_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4u32.s32">;
+def int_nvvm_tex_unified_2d_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.v4u32.f32">;
+def int_nvvm_tex_unified_2d_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.level.v4u32.f32">;
+def int_nvvm_tex_unified_2d_array_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.2d.array.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_3d_v4f32_s32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.unified.3d.v4f32.s32">;
+def int_nvvm_tex_unified_3d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.v4f32.f32">;
+def int_nvvm_tex_unified_3d_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.level.v4f32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.grad.v4f32.f32">;
+def int_nvvm_tex_unified_3d_v4s32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.unified.3d.v4s32.s32">;
+def int_nvvm_tex_unified_3d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.v4s32.f32">;
+def int_nvvm_tex_unified_3d_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.level.v4s32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.grad.v4s32.f32">;
+def int_nvvm_tex_unified_3d_v4u32_s32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [], "llvm.nvvm.tex.unified.3d.v4u32.s32">;
+def int_nvvm_tex_unified_3d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.v4u32.f32">;
+def int_nvvm_tex_unified_3d_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.level.v4u32.f32">;
+def int_nvvm_tex_unified_3d_grad_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.3d.grad.v4u32.f32">;
+
+def int_nvvm_tex_unified_cube_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.v4f32.f32">;
+def int_nvvm_tex_unified_cube_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.level.v4f32.f32">;
+def int_nvvm_tex_unified_cube_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.v4s32.f32">;
+def int_nvvm_tex_unified_cube_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.level.v4s32.f32">;
+def int_nvvm_tex_unified_cube_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.v4u32.f32">;
+def int_nvvm_tex_unified_cube_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.level.v4u32.f32">;
+
+def int_nvvm_tex_unified_cube_array_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.v4f32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.level.v4f32.f32">;
+def int_nvvm_tex_unified_cube_array_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.v4s32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.level.v4s32.f32">;
+def int_nvvm_tex_unified_cube_array_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.v4u32.f32">;
+def int_nvvm_tex_unified_cube_array_level_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty,
+ llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tex.unified.cube.array.level.v4u32.f32">;
+
+def int_nvvm_tld4_unified_r_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.r.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.g.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.b.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4f32_f32
+ : Intrinsic<[llvm_float_ty, llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.a.2d.v4f32.f32">;
+def int_nvvm_tld4_unified_r_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.r.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.g.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.b.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4s32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.a.2d.v4s32.f32">;
+def int_nvvm_tld4_unified_r_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.r.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_g_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.g.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_b_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.b.2d.v4u32.f32">;
+def int_nvvm_tld4_unified_a_2d_v4u32_f32
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_float_ty, llvm_float_ty], [],
+ "llvm.nvvm.tld4.unified.a.2d.v4u32.f32">;
+
+
+//===- Surface Load --------------------------------------------------------===//
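+// The suld.b intrinsics below perform unformatted (raw) surface loads. The
+// i8 variants return llvm_i16_ty because i8 is not a register type on this
+// target; only the low 8 bits are meaningful. The .clamp/.trap/.zero suffix
+// is assumed to select the PTX out-of-range behavior: clamp the coordinates,
+// raise a trap, or return zero, respectively.
+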
+// .clamp variants
+def int_nvvm_suld_1d_i8_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i8.clamp">;
+def int_nvvm_suld_1d_i16_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i16.clamp">;
+def int_nvvm_suld_1d_i32_clamp
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i32.clamp">;
+def int_nvvm_suld_1d_i64_clamp
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i64.clamp">;
+def int_nvvm_suld_1d_v2i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i8.clamp">;
+def int_nvvm_suld_1d_v2i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i16.clamp">;
+def int_nvvm_suld_1d_v2i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i32.clamp">;
+def int_nvvm_suld_1d_v2i64_clamp
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i64.clamp">;
+def int_nvvm_suld_1d_v4i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i8.clamp">;
+def int_nvvm_suld_1d_v4i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i16.clamp">;
+def int_nvvm_suld_1d_v4i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i32.clamp">;
+
+def int_nvvm_suld_1d_array_i8_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i8.clamp">;
+def int_nvvm_suld_1d_array_i16_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i16.clamp">;
+def int_nvvm_suld_1d_array_i32_clamp
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i32.clamp">;
+def int_nvvm_suld_1d_array_i64_clamp
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i64.clamp">;
+def int_nvvm_suld_1d_array_v2i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i8.clamp">;
+def int_nvvm_suld_1d_array_v2i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i16.clamp">;
+def int_nvvm_suld_1d_array_v2i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i32.clamp">;
+def int_nvvm_suld_1d_array_v2i64_clamp
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i64.clamp">;
+def int_nvvm_suld_1d_array_v4i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i8.clamp">;
+def int_nvvm_suld_1d_array_v4i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i16.clamp">;
+def int_nvvm_suld_1d_array_v4i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i32.clamp">;
+
+def int_nvvm_suld_2d_i8_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i8.clamp">;
+def int_nvvm_suld_2d_i16_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i16.clamp">;
+def int_nvvm_suld_2d_i32_clamp
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i32.clamp">;
+def int_nvvm_suld_2d_i64_clamp
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i64.clamp">;
+def int_nvvm_suld_2d_v2i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i8.clamp">;
+def int_nvvm_suld_2d_v2i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i16.clamp">;
+def int_nvvm_suld_2d_v2i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i32.clamp">;
+def int_nvvm_suld_2d_v2i64_clamp
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i64.clamp">;
+def int_nvvm_suld_2d_v4i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i8.clamp">;
+def int_nvvm_suld_2d_v4i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i16.clamp">;
+def int_nvvm_suld_2d_v4i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i32.clamp">;
+
+def int_nvvm_suld_2d_array_i8_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i8.clamp">;
+def int_nvvm_suld_2d_array_i16_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i16.clamp">;
+def int_nvvm_suld_2d_array_i32_clamp
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i32.clamp">;
+def int_nvvm_suld_2d_array_i64_clamp
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i64.clamp">;
+def int_nvvm_suld_2d_array_v2i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i8.clamp">;
+def int_nvvm_suld_2d_array_v2i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i16.clamp">;
+def int_nvvm_suld_2d_array_v2i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i32.clamp">;
+def int_nvvm_suld_2d_array_v2i64_clamp
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i64.clamp">;
+def int_nvvm_suld_2d_array_v4i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i8.clamp">;
+def int_nvvm_suld_2d_array_v4i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i16.clamp">;
+def int_nvvm_suld_2d_array_v4i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i32.clamp">;
+
+def int_nvvm_suld_3d_i8_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i8.clamp">;
+def int_nvvm_suld_3d_i16_clamp
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i16.clamp">;
+def int_nvvm_suld_3d_i32_clamp
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i32.clamp">;
+def int_nvvm_suld_3d_i64_clamp
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i64.clamp">;
+def int_nvvm_suld_3d_v2i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i8.clamp">;
+def int_nvvm_suld_3d_v2i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i16.clamp">;
+def int_nvvm_suld_3d_v2i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i32.clamp">;
+def int_nvvm_suld_3d_v2i64_clamp
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i64.clamp">;
+def int_nvvm_suld_3d_v4i8_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i8.clamp">;
+def int_nvvm_suld_3d_v4i16_clamp
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i16.clamp">;
+def int_nvvm_suld_3d_v4i32_clamp
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i32.clamp">;
+
+// .trap variants
+def int_nvvm_suld_1d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i8.trap">;
+def int_nvvm_suld_1d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i16.trap">;
+def int_nvvm_suld_1d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i32.trap">;
+def int_nvvm_suld_1d_i64_trap
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i64.trap">;
+def int_nvvm_suld_1d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i8.trap">;
+def int_nvvm_suld_1d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i16.trap">;
+def int_nvvm_suld_1d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i32.trap">;
+def int_nvvm_suld_1d_v2i64_trap
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i64.trap">;
+def int_nvvm_suld_1d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i8.trap">;
+def int_nvvm_suld_1d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i16.trap">;
+def int_nvvm_suld_1d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i32.trap">;
+
+def int_nvvm_suld_1d_array_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i8.trap">;
+def int_nvvm_suld_1d_array_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i16.trap">;
+def int_nvvm_suld_1d_array_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i32.trap">;
+def int_nvvm_suld_1d_array_i64_trap
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i64.trap">;
+def int_nvvm_suld_1d_array_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i8.trap">;
+def int_nvvm_suld_1d_array_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i16.trap">;
+def int_nvvm_suld_1d_array_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i32.trap">;
+def int_nvvm_suld_1d_array_v2i64_trap
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i64.trap">;
+def int_nvvm_suld_1d_array_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i8.trap">;
+def int_nvvm_suld_1d_array_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i16.trap">;
+def int_nvvm_suld_1d_array_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i32.trap">;
+
+def int_nvvm_suld_2d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i8.trap">;
+def int_nvvm_suld_2d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i16.trap">;
+def int_nvvm_suld_2d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i32.trap">;
+def int_nvvm_suld_2d_i64_trap
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i64.trap">;
+def int_nvvm_suld_2d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i8.trap">;
+def int_nvvm_suld_2d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i16.trap">;
+def int_nvvm_suld_2d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i32.trap">;
+def int_nvvm_suld_2d_v2i64_trap
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i64.trap">;
+def int_nvvm_suld_2d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i8.trap">;
+def int_nvvm_suld_2d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i16.trap">;
+def int_nvvm_suld_2d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i32.trap">;
+
+def int_nvvm_suld_2d_array_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i8.trap">;
+def int_nvvm_suld_2d_array_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i16.trap">;
+def int_nvvm_suld_2d_array_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i32.trap">;
+def int_nvvm_suld_2d_array_i64_trap
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i64.trap">;
+def int_nvvm_suld_2d_array_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i8.trap">;
+def int_nvvm_suld_2d_array_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i16.trap">;
+def int_nvvm_suld_2d_array_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i32.trap">;
+def int_nvvm_suld_2d_array_v2i64_trap
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i64.trap">;
+def int_nvvm_suld_2d_array_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i8.trap">;
+def int_nvvm_suld_2d_array_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i16.trap">;
+def int_nvvm_suld_2d_array_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i32.trap">;
+
+def int_nvvm_suld_3d_i8_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i8.trap">;
+def int_nvvm_suld_3d_i16_trap
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i16.trap">;
+def int_nvvm_suld_3d_i32_trap
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i32.trap">;
+def int_nvvm_suld_3d_i64_trap
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i64.trap">;
+def int_nvvm_suld_3d_v2i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i8.trap">;
+def int_nvvm_suld_3d_v2i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i16.trap">;
+def int_nvvm_suld_3d_v2i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i32.trap">;
+def int_nvvm_suld_3d_v2i64_trap
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i64.trap">;
+def int_nvvm_suld_3d_v4i8_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i8.trap">;
+def int_nvvm_suld_3d_v4i16_trap
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i16.trap">;
+def int_nvvm_suld_3d_v4i32_trap
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i32.trap">;
+
+// .zero variants
+def int_nvvm_suld_1d_i8_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i8.zero">;
+def int_nvvm_suld_1d_i16_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i16.zero">;
+def int_nvvm_suld_1d_i32_zero
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i32.zero">;
+def int_nvvm_suld_1d_i64_zero
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.i64.zero">;
+def int_nvvm_suld_1d_v2i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i8.zero">;
+def int_nvvm_suld_1d_v2i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i16.zero">;
+def int_nvvm_suld_1d_v2i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i32.zero">;
+def int_nvvm_suld_1d_v2i64_zero
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v2i64.zero">;
+def int_nvvm_suld_1d_v4i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i8.zero">;
+def int_nvvm_suld_1d_v4i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i16.zero">;
+def int_nvvm_suld_1d_v4i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.v4i32.zero">;
+
+def int_nvvm_suld_1d_array_i8_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i8.zero">;
+def int_nvvm_suld_1d_array_i16_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i16.zero">;
+def int_nvvm_suld_1d_array_i32_zero
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i32.zero">;
+def int_nvvm_suld_1d_array_i64_zero
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.i64.zero">;
+def int_nvvm_suld_1d_array_v2i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i8.zero">;
+def int_nvvm_suld_1d_array_v2i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i16.zero">;
+def int_nvvm_suld_1d_array_v2i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i32.zero">;
+def int_nvvm_suld_1d_array_v2i64_zero
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v2i64.zero">;
+def int_nvvm_suld_1d_array_v4i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i8.zero">;
+def int_nvvm_suld_1d_array_v4i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i16.zero">;
+def int_nvvm_suld_1d_array_v4i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.1d.array.v4i32.zero">;
+
+def int_nvvm_suld_2d_i8_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i8.zero">;
+def int_nvvm_suld_2d_i16_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i16.zero">;
+def int_nvvm_suld_2d_i32_zero
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i32.zero">;
+def int_nvvm_suld_2d_i64_zero
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.i64.zero">;
+def int_nvvm_suld_2d_v2i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i8.zero">;
+def int_nvvm_suld_2d_v2i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i16.zero">;
+def int_nvvm_suld_2d_v2i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i32.zero">;
+def int_nvvm_suld_2d_v2i64_zero
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v2i64.zero">;
+def int_nvvm_suld_2d_v4i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i8.zero">;
+def int_nvvm_suld_2d_v4i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i16.zero">;
+def int_nvvm_suld_2d_v4i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.v4i32.zero">;
+
+def int_nvvm_suld_2d_array_i8_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i8.zero">;
+def int_nvvm_suld_2d_array_i16_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i16.zero">;
+def int_nvvm_suld_2d_array_i32_zero
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i32.zero">;
+def int_nvvm_suld_2d_array_i64_zero
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.i64.zero">;
+def int_nvvm_suld_2d_array_v2i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i8.zero">;
+def int_nvvm_suld_2d_array_v2i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i16.zero">;
+def int_nvvm_suld_2d_array_v2i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i32.zero">;
+def int_nvvm_suld_2d_array_v2i64_zero
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v2i64.zero">;
+def int_nvvm_suld_2d_array_v4i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i8.zero">;
+def int_nvvm_suld_2d_array_v4i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i16.zero">;
+def int_nvvm_suld_2d_array_v4i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.2d.array.v4i32.zero">;
+
+def int_nvvm_suld_3d_i8_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i8.zero">;
+def int_nvvm_suld_3d_i16_zero
+ : Intrinsic<[llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i16.zero">;
+def int_nvvm_suld_3d_i32_zero
+ : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i32.zero">;
+def int_nvvm_suld_3d_i64_zero
+ : Intrinsic<[llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.i64.zero">;
+def int_nvvm_suld_3d_v2i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i8.zero">;
+def int_nvvm_suld_3d_v2i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i16.zero">;
+def int_nvvm_suld_3d_v2i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i32.zero">;
+def int_nvvm_suld_3d_v2i64_zero
+ : Intrinsic<[llvm_i64_ty, llvm_i64_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v2i64.zero">;
+def int_nvvm_suld_3d_v4i8_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i8.zero">;
+def int_nvvm_suld_3d_v4i16_zero
+ : Intrinsic<[llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i16.zero">;
+def int_nvvm_suld_3d_v4i32_zero
+ : Intrinsic<[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.suld.3d.v4i32.zero">;
+
+//===- Texture Query ------------------------------------------------------===//
+
+def int_nvvm_txq_channel_order
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.channel.order">,
+ GCCBuiltin<"__nvvm_txq_channel_order">;
+def int_nvvm_txq_channel_data_type
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.channel.data.type">,
+ GCCBuiltin<"__nvvm_txq_channel_data_type">;
+def int_nvvm_txq_width
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.width">,
+ GCCBuiltin<"__nvvm_txq_width">;
+def int_nvvm_txq_height
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.height">,
+ GCCBuiltin<"__nvvm_txq_height">;
+def int_nvvm_txq_depth
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.depth">,
+ GCCBuiltin<"__nvvm_txq_depth">;
+def int_nvvm_txq_array_size
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.array.size">,
+ GCCBuiltin<"__nvvm_txq_array_size">;
+def int_nvvm_txq_num_samples
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.num.samples">,
+ GCCBuiltin<"__nvvm_txq_num_samples">;
+def int_nvvm_txq_num_mipmap_levels
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.txq.num.mipmap.levels">,
+ GCCBuiltin<"__nvvm_txq_num_mipmap_levels">;
+
+//===- Surface Query ------------------------------------------------------===//
+
+def int_nvvm_suq_channel_order
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.channel.order">,
+ GCCBuiltin<"__nvvm_suq_channel_order">;
+def int_nvvm_suq_channel_data_type
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.channel.data.type">,
+ GCCBuiltin<"__nvvm_suq_channel_data_type">;
+def int_nvvm_suq_width
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.width">,
+ GCCBuiltin<"__nvvm_suq_width">;
+def int_nvvm_suq_height
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.height">,
+ GCCBuiltin<"__nvvm_suq_height">;
+def int_nvvm_suq_depth
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.depth">,
+ GCCBuiltin<"__nvvm_suq_depth">;
+def int_nvvm_suq_array_size
+ : Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.suq.array.size">,
+ GCCBuiltin<"__nvvm_suq_array_size">;
+
+
+//===- Handle Query -------------------------------------------------------===//
+
+def int_nvvm_istypep_sampler
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.sampler">,
+ GCCBuiltin<"__nvvm_istypep_sampler">;
+def int_nvvm_istypep_surface
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.surface">,
+ GCCBuiltin<"__nvvm_istypep_surface">;
+def int_nvvm_istypep_texture
+ : Intrinsic<[llvm_i1_ty], [llvm_i64_ty], [IntrNoMem],
+ "llvm.nvvm.istypep.texture">,
+ GCCBuiltin<"__nvvm_istypep_texture">;
+
+
+
+//===- Surface Stores -----------------------------------------------------===//
+
+// Unformatted
+// .clamp variants
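+// Operand order for the sust.b stores is handle, coordinates, then the data
+// to store; "b" marks an unformatted (bit-pattern) store. A hedged sketch in
+// LLVM IR with placeholder values:
+//   call void @llvm.nvvm.sust.b.1d.v4i32.clamp(i64 %surf, i32 %x,
+//                                              i32 %v0, i32 %v1, i32 %v2, i32 %v3)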
+def int_nvvm_sust_b_1d_i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i8_clamp">;
+def int_nvvm_sust_b_1d_i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i16_clamp">;
+def int_nvvm_sust_b_1d_i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i32_clamp">;
+def int_nvvm_sust_b_1d_i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i64_clamp">;
+def int_nvvm_sust_b_1d_v2i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i8_clamp">;
+def int_nvvm_sust_b_1d_v2i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i16_clamp">;
+def int_nvvm_sust_b_1d_v2i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i32_clamp">;
+def int_nvvm_sust_b_1d_v2i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i64_clamp">;
+def int_nvvm_sust_b_1d_v4i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i8_clamp">;
+def int_nvvm_sust_b_1d_v4i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i16_clamp">;
+def int_nvvm_sust_b_1d_v4i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_1d_array_i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i8_clamp">;
+def int_nvvm_sust_b_1d_array_i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i16_clamp">;
+def int_nvvm_sust_b_1d_array_i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i32_clamp">;
+def int_nvvm_sust_b_1d_array_i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i64_clamp">;
+def int_nvvm_sust_b_1d_array_v2i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_clamp">;
+def int_nvvm_sust_b_1d_array_v2i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_clamp">;
+def int_nvvm_sust_b_1d_array_v2i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_clamp">;
+def int_nvvm_sust_b_1d_array_v2i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_clamp">;
+def int_nvvm_sust_b_1d_array_v4i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_clamp">;
+def int_nvvm_sust_b_1d_array_v4i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_clamp">;
+def int_nvvm_sust_b_1d_array_v4i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_2d_i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i8_clamp">;
+def int_nvvm_sust_b_2d_i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i16_clamp">;
+def int_nvvm_sust_b_2d_i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i32_clamp">;
+def int_nvvm_sust_b_2d_i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i64_clamp">;
+def int_nvvm_sust_b_2d_v2i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i8_clamp">;
+def int_nvvm_sust_b_2d_v2i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i16_clamp">;
+def int_nvvm_sust_b_2d_v2i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i32_clamp">;
+def int_nvvm_sust_b_2d_v2i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i64_clamp">;
+def int_nvvm_sust_b_2d_v4i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i8_clamp">;
+def int_nvvm_sust_b_2d_v4i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i16_clamp">;
+def int_nvvm_sust_b_2d_v4i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_2d_array_i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i8_clamp">;
+def int_nvvm_sust_b_2d_array_i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i16_clamp">;
+def int_nvvm_sust_b_2d_array_i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i32_clamp">;
+def int_nvvm_sust_b_2d_array_i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i64_clamp">;
+def int_nvvm_sust_b_2d_array_v2i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_clamp">;
+def int_nvvm_sust_b_2d_array_v2i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_clamp">;
+def int_nvvm_sust_b_2d_array_v2i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_clamp">;
+def int_nvvm_sust_b_2d_array_v2i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_clamp">;
+def int_nvvm_sust_b_2d_array_v4i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_clamp">;
+def int_nvvm_sust_b_2d_array_v4i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_clamp">;
+def int_nvvm_sust_b_2d_array_v4i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_clamp">;
+
+
+def int_nvvm_sust_b_3d_i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i8_clamp">;
+def int_nvvm_sust_b_3d_i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i16_clamp">;
+def int_nvvm_sust_b_3d_i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i32_clamp">;
+def int_nvvm_sust_b_3d_i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i64_clamp">;
+def int_nvvm_sust_b_3d_v2i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i8_clamp">;
+def int_nvvm_sust_b_3d_v2i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i16_clamp">;
+def int_nvvm_sust_b_3d_v2i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i32_clamp">;
+def int_nvvm_sust_b_3d_v2i64_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i64.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i64_clamp">;
+def int_nvvm_sust_b_3d_v4i8_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i8.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i8_clamp">;
+def int_nvvm_sust_b_3d_v4i16_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i16.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i16_clamp">;
+def int_nvvm_sust_b_3d_v4i32_clamp
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i32.clamp">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i32_clamp">;
+
+
+// .trap variants
+def int_nvvm_sust_b_1d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i8_trap">;
+def int_nvvm_sust_b_1d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i16_trap">;
+def int_nvvm_sust_b_1d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i32_trap">;
+def int_nvvm_sust_b_1d_i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i64_trap">;
+def int_nvvm_sust_b_1d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i8_trap">;
+def int_nvvm_sust_b_1d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i16_trap">;
+def int_nvvm_sust_b_1d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i32_trap">;
+def int_nvvm_sust_b_1d_v2i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i64_trap">;
+def int_nvvm_sust_b_1d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i8_trap">;
+def int_nvvm_sust_b_1d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i16_trap">;
+def int_nvvm_sust_b_1d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_1d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i8_trap">;
+def int_nvvm_sust_b_1d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i16_trap">;
+def int_nvvm_sust_b_1d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i32_trap">;
+def int_nvvm_sust_b_1d_array_i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i64_trap">;
+def int_nvvm_sust_b_1d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_trap">;
+def int_nvvm_sust_b_1d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_trap">;
+def int_nvvm_sust_b_1d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_trap">;
+def int_nvvm_sust_b_1d_array_v2i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_trap">;
+def int_nvvm_sust_b_1d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_trap">;
+def int_nvvm_sust_b_1d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_trap">;
+def int_nvvm_sust_b_1d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i8_trap">;
+def int_nvvm_sust_b_2d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i16_trap">;
+def int_nvvm_sust_b_2d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i32_trap">;
+def int_nvvm_sust_b_2d_i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i64_trap">;
+def int_nvvm_sust_b_2d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i8_trap">;
+def int_nvvm_sust_b_2d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i16_trap">;
+def int_nvvm_sust_b_2d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i32_trap">;
+def int_nvvm_sust_b_2d_v2i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i64_trap">;
+def int_nvvm_sust_b_2d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i8_trap">;
+def int_nvvm_sust_b_2d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i16_trap">;
+def int_nvvm_sust_b_2d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_b_2d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i8_trap">;
+def int_nvvm_sust_b_2d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i16_trap">;
+def int_nvvm_sust_b_2d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i32_trap">;
+def int_nvvm_sust_b_2d_array_i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i64_trap">;
+def int_nvvm_sust_b_2d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_trap">;
+def int_nvvm_sust_b_2d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_trap">;
+def int_nvvm_sust_b_2d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_trap">;
+def int_nvvm_sust_b_2d_array_v2i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_trap">;
+def int_nvvm_sust_b_2d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_trap">;
+def int_nvvm_sust_b_2d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_trap">;
+def int_nvvm_sust_b_2d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_b_3d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i8_trap">;
+def int_nvvm_sust_b_3d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i16_trap">;
+def int_nvvm_sust_b_3d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i32_trap">;
+def int_nvvm_sust_b_3d_i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i64_trap">;
+def int_nvvm_sust_b_3d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i8_trap">;
+def int_nvvm_sust_b_3d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i16_trap">;
+def int_nvvm_sust_b_3d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i32_trap">;
+def int_nvvm_sust_b_3d_v2i64_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i64.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i64_trap">;
+def int_nvvm_sust_b_3d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i8_trap">;
+def int_nvvm_sust_b_3d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i16_trap">;
+def int_nvvm_sust_b_3d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i32_trap">;
+
+
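+// For reference, a sketch of how these surface-store intrinsic names
+// decompose (reading inferred from the definitions in this file):
+//   int_nvvm_sust_b_2d_array_v4i16_zero
+//     sust.b   -> unformatted (binary) surface store
+//     2d.array -> geometry: i64 surface handle, i32 layer index, then
+//                 the i32 x/y coordinates
+//     v4i16    -> four i16 data operands follow the coordinates
+//     zero     -> out-of-range handling (.trap and .zero variants exist)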
+// .zero variants: identical signatures to the .trap forms above, using the
+// .zero out-of-range handling mode instead of .trap.
+def int_nvvm_sust_b_1d_i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i8_zero">;
+def int_nvvm_sust_b_1d_i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i16_zero">;
+def int_nvvm_sust_b_1d_i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i32_zero">;
+def int_nvvm_sust_b_1d_i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_i64_zero">;
+def int_nvvm_sust_b_1d_v2i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i8_zero">;
+def int_nvvm_sust_b_1d_v2i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i16_zero">;
+def int_nvvm_sust_b_1d_v2i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i32_zero">;
+def int_nvvm_sust_b_1d_v2i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.v2i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v2i64_zero">;
+def int_nvvm_sust_b_1d_v4i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i8_zero">;
+def int_nvvm_sust_b_1d_v4i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i16_zero">;
+def int_nvvm_sust_b_1d_v4i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.v4i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_v4i32_zero">;
+
+
+def int_nvvm_sust_b_1d_array_i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i8_zero">;
+def int_nvvm_sust_b_1d_array_i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i16_zero">;
+def int_nvvm_sust_b_1d_array_i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i32_zero">;
+def int_nvvm_sust_b_1d_array_i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_i64_zero">;
+def int_nvvm_sust_b_1d_array_v2i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i8_zero">;
+def int_nvvm_sust_b_1d_array_v2i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i16_zero">;
+def int_nvvm_sust_b_1d_array_v2i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i32_zero">;
+def int_nvvm_sust_b_1d_array_v2i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v2i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v2i64_zero">;
+def int_nvvm_sust_b_1d_array_v4i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i8_zero">;
+def int_nvvm_sust_b_1d_array_v4i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i16_zero">;
+def int_nvvm_sust_b_1d_array_v4i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.1d.array.v4i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_1d_array_v4i32_zero">;
+
+
+def int_nvvm_sust_b_2d_i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i8_zero">;
+def int_nvvm_sust_b_2d_i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i16_zero">;
+def int_nvvm_sust_b_2d_i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i32_zero">;
+def int_nvvm_sust_b_2d_i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_i64_zero">;
+def int_nvvm_sust_b_2d_v2i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i8_zero">;
+def int_nvvm_sust_b_2d_v2i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i16_zero">;
+def int_nvvm_sust_b_2d_v2i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i32_zero">;
+def int_nvvm_sust_b_2d_v2i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.v2i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v2i64_zero">;
+def int_nvvm_sust_b_2d_v4i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i8_zero">;
+def int_nvvm_sust_b_2d_v4i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i16_zero">;
+def int_nvvm_sust_b_2d_v4i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.v4i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_v4i32_zero">;
+
+
+def int_nvvm_sust_b_2d_array_i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i8_zero">;
+def int_nvvm_sust_b_2d_array_i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i16_zero">;
+def int_nvvm_sust_b_2d_array_i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i32_zero">;
+def int_nvvm_sust_b_2d_array_i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_i64_zero">;
+def int_nvvm_sust_b_2d_array_v2i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i8_zero">;
+def int_nvvm_sust_b_2d_array_v2i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i16_zero">;
+def int_nvvm_sust_b_2d_array_v2i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i32_zero">;
+def int_nvvm_sust_b_2d_array_v2i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v2i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v2i64_zero">;
+def int_nvvm_sust_b_2d_array_v4i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i8_zero">;
+def int_nvvm_sust_b_2d_array_v4i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i16_zero">;
+def int_nvvm_sust_b_2d_array_v4i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.2d.array.v4i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_2d_array_v4i32_zero">;
+
+
+def int_nvvm_sust_b_3d_i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i8_zero">;
+def int_nvvm_sust_b_3d_i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i16_zero">;
+def int_nvvm_sust_b_3d_i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i32_zero">;
+def int_nvvm_sust_b_3d_i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_i64_zero">;
+def int_nvvm_sust_b_3d_v2i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i8_zero">;
+def int_nvvm_sust_b_3d_v2i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i16_zero">;
+def int_nvvm_sust_b_3d_v2i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i32_zero">;
+def int_nvvm_sust_b_3d_v2i64_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i64_ty, llvm_i64_ty], [],
+ "llvm.nvvm.sust.b.3d.v2i64.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v2i64_zero">;
+def int_nvvm_sust_b_3d_v4i8_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i8.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i8_zero">;
+def int_nvvm_sust_b_3d_v4i16_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i16.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i16_zero">;
+def int_nvvm_sust_b_3d_v4i32_zero
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.b.3d.v4i32.zero">,
+ GCCBuiltin<"__nvvm_sust_b_3d_v4i32_zero">;
+
+
+
+// Formatted surface stores (sust.p): the data is converted to the surface
+// format on store, unlike the raw sust.b stores above.
+
+def int_nvvm_sust_p_1d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i8_trap">;
+def int_nvvm_sust_p_1d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i16_trap">;
+def int_nvvm_sust_p_1d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_i32_trap">;
+def int_nvvm_sust_p_1d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i8_trap">;
+def int_nvvm_sust_p_1d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i16_trap">;
+def int_nvvm_sust_p_1d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v2i32_trap">;
+def int_nvvm_sust_p_1d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i8_trap">;
+def int_nvvm_sust_p_1d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i16_trap">;
+def int_nvvm_sust_p_1d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_1d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i8_trap">;
+def int_nvvm_sust_p_1d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i16_trap">;
+def int_nvvm_sust_p_1d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_i32_trap">;
+def int_nvvm_sust_p_1d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i8_trap">;
+def int_nvvm_sust_p_1d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i16_trap">;
+def int_nvvm_sust_p_1d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v2i32_trap">;
+def int_nvvm_sust_p_1d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i8_trap">;
+def int_nvvm_sust_p_1d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i16_trap">;
+def int_nvvm_sust_p_1d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.1d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_1d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i8_trap">;
+def int_nvvm_sust_p_2d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i16_trap">;
+def int_nvvm_sust_p_2d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_i32_trap">;
+def int_nvvm_sust_p_2d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i8_trap">;
+def int_nvvm_sust_p_2d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i16_trap">;
+def int_nvvm_sust_p_2d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v2i32_trap">;
+def int_nvvm_sust_p_2d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i8_trap">;
+def int_nvvm_sust_p_2d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i16_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i16_trap">;
+def int_nvvm_sust_p_2d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_v4i32_trap">;
+
+
+def int_nvvm_sust_p_2d_array_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i8_trap">;
+def int_nvvm_sust_p_2d_array_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i16_trap">;
+def int_nvvm_sust_p_2d_array_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_i32_trap">;
+def int_nvvm_sust_p_2d_array_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i8_trap">;
+def int_nvvm_sust_p_2d_array_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i16_trap">;
+def int_nvvm_sust_p_2d_array_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v2i32_trap">;
+def int_nvvm_sust_p_2d_array_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i8_trap">;
+def int_nvvm_sust_p_2d_array_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i16_trap">;
+def int_nvvm_sust_p_2d_array_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.2d.array.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_2d_array_v4i32_trap">;
+
+
+def int_nvvm_sust_p_3d_i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i8_trap">;
+def int_nvvm_sust_p_3d_i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i16_trap">;
+def int_nvvm_sust_p_3d_i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_i32_trap">;
+def int_nvvm_sust_p_3d_v2i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i8_trap">;
+def int_nvvm_sust_p_3d_v2i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i16_trap">;
+def int_nvvm_sust_p_3d_v2i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.v2i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v2i32_trap">;
+def int_nvvm_sust_p_3d_v4i8_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i8.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i8_trap">;
+def int_nvvm_sust_p_3d_v4i16_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i16_ty, llvm_i16_ty, llvm_i16_ty, llvm_i16_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i16.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i16_trap">;
+def int_nvvm_sust_p_3d_v4i32_trap
+ : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [],
+ "llvm.nvvm.sust.p.3d.v4i32.trap">,
+ GCCBuiltin<"__nvvm_sust_p_3d_v4i32_trap">;
+
+
+def int_nvvm_rotate_b32
+ : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem], "llvm.nvvm.rotate.b32">,
+ GCCBuiltin<"__nvvm_rotate_b32">;
+
+def int_nvvm_rotate_b64
+  : Intrinsic<[llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem], "llvm.nvvm.rotate.b64">,
+ GCCBuiltin<"__nvvm_rotate_b64">;
+
+def int_nvvm_rotate_right_b64
+ : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty],
+ [IntrNoMem], "llvm.nvvm.rotate.right.b64">,
+ GCCBuiltin<"__nvvm_rotate_right_b64">;
+
+def int_nvvm_swap_lo_hi_b64
+ : Intrinsic<[llvm_i64_ty], [llvm_i64_ty],
+ [IntrNoMem], "llvm.nvvm.swap.lo.hi.b64">,
+ GCCBuiltin<"__nvvm_swap_lo_hi_b64">;
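+// For reference (worked values shown only as illustration): rotate.b32 and
+// rotate.b64 rotate the first operand left by the bit count in the second,
+// rotate.right.b64 rotates right, and swap.lo.hi.b64 exchanges the two
+// 32-bit halves of its operand (a rotate by 32), e.g.
+//   llvm.nvvm.rotate.b32(0x12345678, 8)          -> 0x34567812
+//   llvm.nvvm.swap.lo.hi.b64(0x1111222233334444) -> 0x3333444411112222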
+
+
+// Old PTX back-end intrinsics retained here for backwards-compatibility
+
+multiclass PTXReadSpecialRegisterIntrinsic_v4i32<string prefix> {
+// FIXME: Do we need the 128-bit integer type version?
+// def _r128 : Intrinsic<[llvm_i128_ty], [], [IntrNoMem]>;
+
+// FIXME: Enable this once v4i32 support is enabled in back-end.
+// def _v4i32 : Intrinsic<[llvm_v4i32_ty], [], [IntrNoMem]>;
+
+ def _x : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_x")>;
+ def _y : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_y")>;
+ def _z : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_z")>;
+ def _w : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<!strconcat(prefix, "_w")>;
+}
+
+class PTXReadSpecialRegisterIntrinsic_r32<string name>
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+class PTXReadSpecialRegisterIntrinsic_r64<string name>
+ : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>,
+ GCCBuiltin<name>;
+
+defm int_ptx_read_tid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_tid">;
+defm int_ptx_read_ntid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_ntid">;
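+// For illustration: each defm above instantiates the multiclass with the
+// defm name as a prefix, so int_ptx_read_tid expands to four records of
+// the form (a sketch of the tblgen output, not additional definitions):
+//   def int_ptx_read_tid_x : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>,
+//                            GCCBuiltin<"__builtin_ptx_read_tid_x">;
+// and likewise _y, _z and _w, matched in IR as llvm.ptx.read.tid.x etc.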
+
+def int_ptx_read_laneid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_laneid">;
+def int_ptx_read_warpid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_warpid">;
+def int_ptx_read_nwarpid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_nwarpid">;
+
+defm int_ptx_read_ctaid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_ctaid">;
+defm int_ptx_read_nctaid : PTXReadSpecialRegisterIntrinsic_v4i32
+ <"__builtin_ptx_read_nctaid">;
+
+def int_ptx_read_smid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_smid">;
+def int_ptx_read_nsmid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_nsmid">;
+def int_ptx_read_gridid : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_gridid">;
+
+def int_ptx_read_lanemask_eq : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_eq">;
+def int_ptx_read_lanemask_le : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_le">;
+def int_ptx_read_lanemask_lt : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_lt">;
+def int_ptx_read_lanemask_ge : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_ge">;
+def int_ptx_read_lanemask_gt : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_lanemask_gt">;
+
+def int_ptx_read_clock : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_clock">;
+def int_ptx_read_clock64 : PTXReadSpecialRegisterIntrinsic_r64
+ <"__builtin_ptx_read_clock64">;
+
+def int_ptx_read_pm0 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm0">;
+def int_ptx_read_pm1 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm1">;
+def int_ptx_read_pm2 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm2">;
+def int_ptx_read_pm3 : PTXReadSpecialRegisterIntrinsic_r32
+ <"__builtin_ptx_read_pm3">;
+
+def int_ptx_bar_sync : Intrinsic<[], [llvm_i32_ty], []>,
+ GCCBuiltin<"__builtin_ptx_bar_sync">;
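+// (bar_sync takes the i32 barrier number; it lowers to the PTX bar.sync
+// instruction.)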
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td
new file mode 100644
index 0000000..06dfc32
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -0,0 +1,985 @@
+//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the PowerPC-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Definitions for all PowerPC intrinsics.
+//
+
+// Non-altivec intrinsics.
+let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
+  // dcba/dcbf/dcbi/dcbst/dcbt/dcbtst/dcbz/dcbzl (PPC970) instructions.
+ def int_ppc_dcba : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbf : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbi : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbst : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbt : Intrinsic<[], [llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_ppc_dcbtst: Intrinsic<[], [llvm_ptr_ty],
+ [IntrReadWriteArgMem, NoCapture<0>]>;
+ def int_ppc_dcbz : Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_ppc_dcbzl : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  // sync instruction (i.e., sync 0, a.k.a. hwsync)
+ def int_ppc_sync : Intrinsic<[], [], []>;
+ // lwsync is sync 1
+ def int_ppc_lwsync : Intrinsic<[], [], []>;
+
+ // Intrinsics used to generate ctr-based loops. These should only be
+ // generated by the PowerPC backend!
+ def int_ppc_mtctr : Intrinsic<[], [llvm_anyint_ty], []>;
+ def int_ppc_is_decremented_ctr_nonzero : Intrinsic<[llvm_i1_ty], [], []>;
+
+ // Intrinsics for [double]word extended forms of divide instructions
+ def int_ppc_divwe : GCCBuiltin<"__builtin_divwe">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_divweu : GCCBuiltin<"__builtin_divweu">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_divde : GCCBuiltin<"__builtin_divde">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_divdeu : GCCBuiltin<"__builtin_divdeu">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+ // Bit permute doubleword
+ def int_ppc_bpermd : GCCBuiltin<"__builtin_bpermd">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+}
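+// Note on naming: with TargetPrefix = "ppc", a record named int_ppc_dcbf
+// is matched in IR as "llvm.ppc.dcbf" (underscores after the "int_" prefix
+// become dots). A minimal IR usage sketch, assuming some i8* value %p:
+//   call void @llvm.ppc.dcbf(i8* %p)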
+
+
+let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
+ /// PowerPC_Vec_Intrinsic - Base class for all altivec intrinsics.
+ class PowerPC_Vec_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_altivec_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+
+ /// PowerPC_VSX_Intrinsic - Base class for all VSX intrinsics.
+ class PowerPC_VSX_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_vsx_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+}
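+// A minimal sketch of how these base classes are used (hypothetical
+// mnemonic "vfoo", shown only to illustrate the parameter layout):
+//   def int_ppc_altivec_vfoo
+//       : PowerPC_Vec_Intrinsic<"vfoo", [llvm_v4i32_ty],
+//                               [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+// This binds __builtin_altivec_vfoo to the intrinsic llvm.ppc.altivec.vfoo.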
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Class Definitions.
+//
+
+/// PowerPC_Vec_FF_Intrinsic - A PowerPC intrinsic that takes one v4f32
+/// vector and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_FF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+/// PowerPC_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_FFF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2i64
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_DDD_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_Vec_QQQ_Intrinsic - A PowerPC intrinsic that takes two v1i128
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_Vec_QQQ_Intrinsic<string GCCIntSuffix>
+ : PowerPC_Vec_Intrinsic<GCCIntSuffix,
+ [llvm_v1i128_ty], [llvm_v1i128_ty, llvm_v1i128_ty],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC VSX Intrinsic Class Definitions.
+//
+
+/// PowerPC_VSX_Vec_DDD_Intrinsic - A PowerPC intrinsic that takes two v2f64
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_VSX_Vec_DDD_Intrinsic<string GCCIntSuffix>
+ : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+ [llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_VSX_Vec_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f32
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_VSX_Vec_FFF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_VSX_Sca_DDD_Intrinsic - A PowerPC intrinsic that takes two f64
+/// scalars and returns one. These intrinsics have no side effects.
+class PowerPC_VSX_Sca_DDD_Intrinsic<string GCCIntSuffix>
+ : PowerPC_VSX_Intrinsic<GCCIntSuffix,
+ [llvm_double_ty], [llvm_double_ty, llvm_double_ty],
+ [IntrNoMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC Altivec Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
+ // Data Stream Control.
+ def int_ppc_altivec_dss : GCCBuiltin<"__builtin_altivec_dss">,
+ Intrinsic<[], [llvm_i32_ty], []>;
+ def int_ppc_altivec_dssall : GCCBuiltin<"__builtin_altivec_dssall">,
+ Intrinsic<[], [], []>;
+ def int_ppc_altivec_dst : GCCBuiltin<"__builtin_altivec_dst">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+ []>;
+ def int_ppc_altivec_dstt : GCCBuiltin<"__builtin_altivec_dstt">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+ []>;
+ def int_ppc_altivec_dstst : GCCBuiltin<"__builtin_altivec_dstst">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+ []>;
+ def int_ppc_altivec_dststt : GCCBuiltin<"__builtin_altivec_dststt">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
+ []>;
+
+ // VSCR access.
+ def int_ppc_altivec_mfvscr : GCCBuiltin<"__builtin_altivec_mfvscr">,
+ Intrinsic<[llvm_v8i16_ty], [], [IntrReadMem]>;
+ def int_ppc_altivec_mtvscr : GCCBuiltin<"__builtin_altivec_mtvscr">,
+ Intrinsic<[], [llvm_v4i32_ty], []>;
+
+
+ // Loads. These don't map directly to GCC builtins because they represent the
+ // source address with a single pointer.
+ def int_ppc_altivec_lvx :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_ppc_altivec_lvxl :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_ppc_altivec_lvebx :
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_ppc_altivec_lvehx :
+ Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_ppc_altivec_lvewx :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+
+  // Stores. These don't map directly to GCC builtins because they represent
+  // the destination address with a single pointer.
+ def int_ppc_altivec_stvx :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+ def int_ppc_altivec_stvxl :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+ def int_ppc_altivec_stvebx :
+ Intrinsic<[], [llvm_v16i8_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+ def int_ppc_altivec_stvehx :
+ Intrinsic<[], [llvm_v8i16_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+ def int_ppc_altivec_stvewx :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+
+ // Comparisons setting a vector.
+ def int_ppc_altivec_vcmpbfp : GCCBuiltin<"__builtin_altivec_vcmpbfp">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpeqfp : GCCBuiltin<"__builtin_altivec_vcmpeqfp">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgefp : GCCBuiltin<"__builtin_altivec_vcmpgefp">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtfp : GCCBuiltin<"__builtin_altivec_vcmpgtfp">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequd : GCCBuiltin<"__builtin_altivec_vcmpequd">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsd : GCCBuiltin<"__builtin_altivec_vcmpgtsd">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtud : GCCBuiltin<"__builtin_altivec_vcmpgtud">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequw : GCCBuiltin<"__builtin_altivec_vcmpequw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsw : GCCBuiltin<"__builtin_altivec_vcmpgtsw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtuw : GCCBuiltin<"__builtin_altivec_vcmpgtuw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsh : GCCBuiltin<"__builtin_altivec_vcmpgtsh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtuh : GCCBuiltin<"__builtin_altivec_vcmpgtuh">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequb : GCCBuiltin<"__builtin_altivec_vcmpequb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsb : GCCBuiltin<"__builtin_altivec_vcmpgtsb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtub : GCCBuiltin<"__builtin_altivec_vcmpgtub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+  // Predicate Comparisons. The first operand specifies the interpretation of
+  // CR6 (see the note after this group).
+ def int_ppc_altivec_vcmpbfp_p : GCCBuiltin<"__builtin_altivec_vcmpbfp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpeqfp_p : GCCBuiltin<"__builtin_altivec_vcmpeqfp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgefp_p : GCCBuiltin<"__builtin_altivec_vcmpgefp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtfp_p : GCCBuiltin<"__builtin_altivec_vcmpgtfp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequd_p : GCCBuiltin<"__builtin_altivec_vcmpequd_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsd_p : GCCBuiltin<"__builtin_altivec_vcmpgtsd_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtud_p : GCCBuiltin<"__builtin_altivec_vcmpgtud_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2i64_ty,llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequw_p : GCCBuiltin<"__builtin_altivec_vcmpequw_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsw_p : GCCBuiltin<"__builtin_altivec_vcmpgtsw_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtuw_p : GCCBuiltin<"__builtin_altivec_vcmpgtuw_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsh_p : GCCBuiltin<"__builtin_altivec_vcmpgtsh_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtuh_p : GCCBuiltin<"__builtin_altivec_vcmpgtuh_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vcmpequb_p : GCCBuiltin<"__builtin_altivec_vcmpequb_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtsb_p : GCCBuiltin<"__builtin_altivec_vcmpgtsb_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcmpgtub_p : GCCBuiltin<"__builtin_altivec_vcmpgtub_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v16i8_ty,llvm_v16i8_ty],
+ [IntrNoMem]>;
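+  // For reference (the convention used by clang's altivec.h, noted here
+  // as context): the first operand selects which CR6 condition the i32
+  // result reports: 0 = all elements compare false, 1 = any element true,
+  // 2 = all elements compare true, 3 = any element false.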
+}
+
+// Vector average.
+def int_ppc_altivec_vavgsb : PowerPC_Vec_BBB_Intrinsic<"vavgsb">;
+def int_ppc_altivec_vavgsh : PowerPC_Vec_HHH_Intrinsic<"vavgsh">;
+def int_ppc_altivec_vavgsw : PowerPC_Vec_WWW_Intrinsic<"vavgsw">;
+def int_ppc_altivec_vavgub : PowerPC_Vec_BBB_Intrinsic<"vavgub">;
+def int_ppc_altivec_vavguh : PowerPC_Vec_HHH_Intrinsic<"vavguh">;
+def int_ppc_altivec_vavguw : PowerPC_Vec_WWW_Intrinsic<"vavguw">;
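+// For reference, the helper classes make e.g. int_ppc_altivec_vavgsb above
+// shorthand for the following (expansion per PowerPC_Vec_BBB_Intrinsic,
+// shown only as illustration):
+//   def int_ppc_altivec_vavgsb : GCCBuiltin<"__builtin_altivec_vavgsb">,
+//       Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+//                 [IntrNoMem]>;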
+
+// Vector maximum.
+def int_ppc_altivec_vmaxfp : PowerPC_Vec_FFF_Intrinsic<"vmaxfp">;
+def int_ppc_altivec_vmaxsb : PowerPC_Vec_BBB_Intrinsic<"vmaxsb">;
+def int_ppc_altivec_vmaxsh : PowerPC_Vec_HHH_Intrinsic<"vmaxsh">;
+def int_ppc_altivec_vmaxsw : PowerPC_Vec_WWW_Intrinsic<"vmaxsw">;
+def int_ppc_altivec_vmaxsd : PowerPC_Vec_DDD_Intrinsic<"vmaxsd">;
+def int_ppc_altivec_vmaxub : PowerPC_Vec_BBB_Intrinsic<"vmaxub">;
+def int_ppc_altivec_vmaxuh : PowerPC_Vec_HHH_Intrinsic<"vmaxuh">;
+def int_ppc_altivec_vmaxuw : PowerPC_Vec_WWW_Intrinsic<"vmaxuw">;
+def int_ppc_altivec_vmaxud : PowerPC_Vec_DDD_Intrinsic<"vmaxud">;
+
+// Vector minimum.
+def int_ppc_altivec_vminfp : PowerPC_Vec_FFF_Intrinsic<"vminfp">;
+def int_ppc_altivec_vminsb : PowerPC_Vec_BBB_Intrinsic<"vminsb">;
+def int_ppc_altivec_vminsh : PowerPC_Vec_HHH_Intrinsic<"vminsh">;
+def int_ppc_altivec_vminsw : PowerPC_Vec_WWW_Intrinsic<"vminsw">;
+def int_ppc_altivec_vminsd : PowerPC_Vec_DDD_Intrinsic<"vminsd">;
+def int_ppc_altivec_vminub : PowerPC_Vec_BBB_Intrinsic<"vminub">;
+def int_ppc_altivec_vminuh : PowerPC_Vec_HHH_Intrinsic<"vminuh">;
+def int_ppc_altivec_vminuw : PowerPC_Vec_WWW_Intrinsic<"vminuw">;
+def int_ppc_altivec_vminud : PowerPC_Vec_DDD_Intrinsic<"vminud">;
+
+// Saturating adds. (vaddcuw/vaddcuq produce the unsigned carry-out of the
+// addition, not a saturated sum.)
+def int_ppc_altivec_vaddubs : PowerPC_Vec_BBB_Intrinsic<"vaddubs">;
+def int_ppc_altivec_vaddsbs : PowerPC_Vec_BBB_Intrinsic<"vaddsbs">;
+def int_ppc_altivec_vadduhs : PowerPC_Vec_HHH_Intrinsic<"vadduhs">;
+def int_ppc_altivec_vaddshs : PowerPC_Vec_HHH_Intrinsic<"vaddshs">;
+def int_ppc_altivec_vadduws : PowerPC_Vec_WWW_Intrinsic<"vadduws">;
+def int_ppc_altivec_vaddsws : PowerPC_Vec_WWW_Intrinsic<"vaddsws">;
+def int_ppc_altivec_vaddcuw : PowerPC_Vec_WWW_Intrinsic<"vaddcuw">;
+def int_ppc_altivec_vaddcuq : PowerPC_Vec_QQQ_Intrinsic<"vaddcuq">;
+
+// Saturating subtracts. (vsubcuw/vsubcuq produce the carry-out of the
+// subtraction, not a saturated difference.)
+def int_ppc_altivec_vsububs : PowerPC_Vec_BBB_Intrinsic<"vsububs">;
+def int_ppc_altivec_vsubsbs : PowerPC_Vec_BBB_Intrinsic<"vsubsbs">;
+def int_ppc_altivec_vsubuhs : PowerPC_Vec_HHH_Intrinsic<"vsubuhs">;
+def int_ppc_altivec_vsubshs : PowerPC_Vec_HHH_Intrinsic<"vsubshs">;
+def int_ppc_altivec_vsubuws : PowerPC_Vec_WWW_Intrinsic<"vsubuws">;
+def int_ppc_altivec_vsubsws : PowerPC_Vec_WWW_Intrinsic<"vsubsws">;
+def int_ppc_altivec_vsubcuw : PowerPC_Vec_WWW_Intrinsic<"vsubcuw">;
+def int_ppc_altivec_vsubcuq : PowerPC_Vec_QQQ_Intrinsic<"vsubcuq">;
+
+let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
+ // Saturating multiply-adds.
+ def int_ppc_altivec_vmhaddshs : GCCBuiltin<"__builtin_altivec_vmhaddshs">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmhraddshs : GCCBuiltin<"__builtin_altivec_vmhraddshs">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_ppc_altivec_vmaddfp : GCCBuiltin<"__builtin_altivec_vmaddfp">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vnmsubfp : GCCBuiltin<"__builtin_altivec_vnmsubfp">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Sum Instructions.
+ def int_ppc_altivec_vmsummbm : GCCBuiltin<"__builtin_altivec_vmsummbm">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumshm : GCCBuiltin<"__builtin_altivec_vmsumshm">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumshs : GCCBuiltin<"__builtin_altivec_vmsumshs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumubm : GCCBuiltin<"__builtin_altivec_vmsumubm">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vmsumuhs : GCCBuiltin<"__builtin_altivec_vmsumuhs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+  // Vector Multiply Instructions.
+ def int_ppc_altivec_vmulesb : GCCBuiltin<"__builtin_altivec_vmulesb">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulesh : GCCBuiltin<"__builtin_altivec_vmulesh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulesw : GCCBuiltin<"__builtin_altivec_vmulesw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmuleub : GCCBuiltin<"__builtin_altivec_vmuleub">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmuleuh : GCCBuiltin<"__builtin_altivec_vmuleuh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmuleuw : GCCBuiltin<"__builtin_altivec_vmuleuw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ def int_ppc_altivec_vmulosb : GCCBuiltin<"__builtin_altivec_vmulosb">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulosh : GCCBuiltin<"__builtin_altivec_vmulosh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulosw : GCCBuiltin<"__builtin_altivec_vmulosw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmuloub : GCCBuiltin<"__builtin_altivec_vmuloub">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulouh : GCCBuiltin<"__builtin_altivec_vmulouh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vmulouw : GCCBuiltin<"__builtin_altivec_vmulouw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+  // Vector Sum Instructions.
+ def int_ppc_altivec_vsumsws : GCCBuiltin<"__builtin_altivec_vsumsws">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vsum2sws : GCCBuiltin<"__builtin_altivec_vsum2sws">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vsum4sbs : GCCBuiltin<"__builtin_altivec_vsum4sbs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vsum4shs : GCCBuiltin<"__builtin_altivec_vsum4shs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vsum4ubs : GCCBuiltin<"__builtin_altivec_vsum4ubs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+
+ // Other multiplies.
+ def int_ppc_altivec_vmladduhm : GCCBuiltin<"__builtin_altivec_vmladduhm">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ // Packs.
+ def int_ppc_altivec_vpkpx : GCCBuiltin<"__builtin_altivec_vpkpx">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpkshss : GCCBuiltin<"__builtin_altivec_vpkshss">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpkshus : GCCBuiltin<"__builtin_altivec_vpkshus">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpkswss : GCCBuiltin<"__builtin_altivec_vpkswss">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpkswus : GCCBuiltin<"__builtin_altivec_vpkswus">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpksdss : GCCBuiltin<"__builtin_altivec_vpksdss">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vpksdus : GCCBuiltin<"__builtin_altivec_vpksdus">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ // vpkuhum is lowered to a shuffle.
+ def int_ppc_altivec_vpkuhus : GCCBuiltin<"__builtin_altivec_vpkuhus">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ // vpkuwum is lowered to a shuffle.
+ def int_ppc_altivec_vpkuwus : GCCBuiltin<"__builtin_altivec_vpkuwus">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ // vpkudum is lowered to a shuffle.
+ def int_ppc_altivec_vpkudus : GCCBuiltin<"__builtin_altivec_vpkudus">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
+ // Unpacks.
+ def int_ppc_altivec_vupkhpx : GCCBuiltin<"__builtin_altivec_vupkhpx">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupkhsb : GCCBuiltin<"__builtin_altivec_vupkhsb">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupkhsh : GCCBuiltin<"__builtin_altivec_vupkhsh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupkhsw : GCCBuiltin<"__builtin_altivec_vupkhsw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupklpx : GCCBuiltin<"__builtin_altivec_vupklpx">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupklsb : GCCBuiltin<"__builtin_altivec_vupklsb">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupklsh : GCCBuiltin<"__builtin_altivec_vupklsh">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vupklsw : GCCBuiltin<"__builtin_altivec_vupklsw">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+
+ // FP <-> integer conversion.
+ def int_ppc_altivec_vcfsx : GCCBuiltin<"__builtin_altivec_vcfsx">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vcfux : GCCBuiltin<"__builtin_altivec_vcfux">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vctsxs : GCCBuiltin<"__builtin_altivec_vctsxs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vctuxs : GCCBuiltin<"__builtin_altivec_vctuxs">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
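+
+ // A hedged note on the trailing i32 above: it carries the 5-bit UIM
+ // scale field of the underlying instruction (0 means no scaling;
+ // illustrative IR only):
+ //   %f = call <4 x float> @llvm.ppc.altivec.vcfsx(<4 x i32> %n, i32 0)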
+
+ def int_ppc_altivec_vrfim : GCCBuiltin<"__builtin_altivec_vrfim">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vrfin : GCCBuiltin<"__builtin_altivec_vrfin">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vrfip : GCCBuiltin<"__builtin_altivec_vrfip">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vrfiz : GCCBuiltin<"__builtin_altivec_vrfiz">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+
+ // Add Extended Quadword
+ def int_ppc_altivec_vaddeuqm : GCCBuiltin<"__builtin_altivec_vaddeuqm">,
+ Intrinsic<[llvm_v1i128_ty],
+ [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vaddecuq : GCCBuiltin<"__builtin_altivec_vaddecuq">,
+ Intrinsic<[llvm_v1i128_ty],
+ [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+ [IntrNoMem]>;
+
+ // Sub Extended Quadword
+ def int_ppc_altivec_vsubeuqm : GCCBuiltin<"__builtin_altivec_vsubeuqm">,
+ Intrinsic<[llvm_v1i128_ty],
+ [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+ [IntrNoMem]>;
+ def int_ppc_altivec_vsubecuq : GCCBuiltin<"__builtin_altivec_vsubecuq">,
+ Intrinsic<[llvm_v1i128_ty],
+ [llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
+ [IntrNoMem]>;
+}
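+
+// For reference (a sketch, not part of the definitions): TableGen derives
+// the IR-level name from the def name with underscores turned into dots,
+// so int_ppc_altivec_vsumsws above is reached as:
+//   %r = call <4 x i32> @llvm.ppc.altivec.vsumsws(<4 x i32> %a, <4 x i32> %b)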
+
+def int_ppc_altivec_vsl : PowerPC_Vec_WWW_Intrinsic<"vsl">;
+def int_ppc_altivec_vslo : PowerPC_Vec_WWW_Intrinsic<"vslo">;
+
+def int_ppc_altivec_vslb : PowerPC_Vec_BBB_Intrinsic<"vslb">;
+def int_ppc_altivec_vslh : PowerPC_Vec_HHH_Intrinsic<"vslh">;
+def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">;
+
+// Right Shifts.
+def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">;
+def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">;
+
+def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
+def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
+def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
+def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">;
+def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">;
+def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">;
+
+// Rotates.
+def int_ppc_altivec_vrlb : PowerPC_Vec_BBB_Intrinsic<"vrlb">;
+def int_ppc_altivec_vrlh : PowerPC_Vec_HHH_Intrinsic<"vrlh">;
+def int_ppc_altivec_vrlw : PowerPC_Vec_WWW_Intrinsic<"vrlw">;
+def int_ppc_altivec_vrld : PowerPC_Vec_DDD_Intrinsic<"vrld">;
+
+let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
+ // Miscellaneous.
+ def int_ppc_altivec_lvsl :
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
+ def int_ppc_altivec_lvsr :
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+ def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vgbbd : GCCBuiltin<"__builtin_altivec_vgbbd">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_ppc_altivec_vbpermq : GCCBuiltin<"__builtin_altivec_vbpermq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+}
+
+def int_ppc_altivec_vexptefp : PowerPC_Vec_FF_Intrinsic<"vexptefp">;
+def int_ppc_altivec_vlogefp : PowerPC_Vec_FF_Intrinsic<"vlogefp">;
+def int_ppc_altivec_vrefp : PowerPC_Vec_FF_Intrinsic<"vrefp">;
+def int_ppc_altivec_vrsqrtefp : PowerPC_Vec_FF_Intrinsic<"vrsqrtefp">;
+
+// Power8 Intrinsics
+// Crypto
+let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
+ def int_ppc_altivec_crypto_vsbox :
+ GCCBuiltin<"__builtin_altivec_crypto_vsbox">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+ def int_ppc_altivec_crypto_vpermxor :
+ GCCBuiltin<"__builtin_altivec_crypto_vpermxor">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ def int_ppc_altivec_crypto_vshasigmad :
+ GCCBuiltin<"__builtin_altivec_crypto_vshasigmad">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_ppc_altivec_crypto_vshasigmaw :
+ GCCBuiltin<"__builtin_altivec_crypto_vshasigmaw">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+def int_ppc_altivec_crypto_vcipher :
+ PowerPC_Vec_DDD_Intrinsic<"crypto_vcipher">;
+def int_ppc_altivec_crypto_vcipherlast :
+ PowerPC_Vec_DDD_Intrinsic<"crypto_vcipherlast">;
+def int_ppc_altivec_crypto_vncipher :
+ PowerPC_Vec_DDD_Intrinsic<"crypto_vncipher">;
+def int_ppc_altivec_crypto_vncipherlast :
+ PowerPC_Vec_DDD_Intrinsic<"crypto_vncipherlast">;
+def int_ppc_altivec_crypto_vpmsumb :
+ PowerPC_Vec_BBB_Intrinsic<"crypto_vpmsumb">;
+def int_ppc_altivec_crypto_vpmsumh :
+ PowerPC_Vec_HHH_Intrinsic<"crypto_vpmsumh">;
+def int_ppc_altivec_crypto_vpmsumw :
+ PowerPC_Vec_WWW_Intrinsic<"crypto_vpmsumw">;
+def int_ppc_altivec_crypto_vpmsumd :
+ PowerPC_Vec_DDD_Intrinsic<"crypto_vpmsumd">;
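+
+// A hedged usage sketch for the SHA-2 helpers above: the two i32 operands
+// carry the instruction's ST and SIX immediate fields (the 0 values are
+// placeholders, not a recommendation):
+//   %h = call <2 x i64> @llvm.ppc.altivec.crypto.vshasigmad(<2 x i64> %x,
+//            i32 0, i32 0)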
+
+//===----------------------------------------------------------------------===//
+// PowerPC VSX Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
+
+// Vector load.
+def int_ppc_vsx_lxvw4x :
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+def int_ppc_vsx_lxvd2x :
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+
+// Vector store.
+def int_ppc_vsx_stxvw4x :
+ Intrinsic<[], [llvm_v4i32_ty, llvm_ptr_ty], [IntrReadWriteArgMem]>;
+def int_ppc_vsx_stxvd2x :
+ Intrinsic<[], [llvm_v2f64_ty, llvm_ptr_ty], [IntrReadWriteArgMem]>;
+
+// Vector and scalar maximum.
+def int_ppc_vsx_xvmaxdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmaxdp">;
+def int_ppc_vsx_xvmaxsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvmaxsp">;
+def int_ppc_vsx_xsmaxdp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmaxdp">;
+
+// Vector and scalar minimum.
+def int_ppc_vsx_xvmindp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvmindp">;
+def int_ppc_vsx_xvminsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvminsp">;
+def int_ppc_vsx_xsmindp : PowerPC_VSX_Sca_DDD_Intrinsic<"xsmindp">;
+
+// Vector divide.
+def int_ppc_vsx_xvdivdp : PowerPC_VSX_Vec_DDD_Intrinsic<"xvdivdp">;
+def int_ppc_vsx_xvdivsp : PowerPC_VSX_Vec_FFF_Intrinsic<"xvdivsp">;
+
+// Vector round toward +infinity (ceil)
+def int_ppc_vsx_xvrspip :
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvrdpip :
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector reciprocal estimate
+def int_ppc_vsx_xvresp : GCCBuiltin<"__builtin_vsx_xvresp">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvredp : GCCBuiltin<"__builtin_vsx_xvredp">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector rsqrte
+def int_ppc_vsx_xvrsqrtesp : GCCBuiltin<"__builtin_vsx_xvrsqrtesp">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvrsqrtedp : GCCBuiltin<"__builtin_vsx_xvrsqrtedp">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+
+// Vector compare
+def int_ppc_vsx_xvcmpeqdp :
+ PowerPC_VSX_Intrinsic<"xvcmpeqdp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqdp_p : GCCBuiltin<"__builtin_vsx_xvcmpeqdp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqsp :
+ PowerPC_VSX_Intrinsic<"xvcmpeqsp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpeqsp_p : GCCBuiltin<"__builtin_vsx_xvcmpeqsp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgedp :
+ PowerPC_VSX_Intrinsic<"xvcmpgedp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgedp_p : GCCBuiltin<"__builtin_vsx_xvcmpgedp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgesp :
+ PowerPC_VSX_Intrinsic<"xvcmpgesp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgesp_p : GCCBuiltin<"__builtin_vsx_xvcmpgesp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtdp :
+ PowerPC_VSX_Intrinsic<"xvcmpgtdp", [llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtdp_p : GCCBuiltin<"__builtin_vsx_xvcmpgtdp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v2f64_ty,llvm_v2f64_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtsp :
+ PowerPC_VSX_Intrinsic<"xvcmpgtsp", [llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcmpgtsp_p : GCCBuiltin<"__builtin_vsx_xvcmpgtsp_p">,
+ Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4f32_ty,llvm_v4f32_ty],
+ [IntrNoMem]>;
+def int_ppc_vsx_xxleqv :
+ PowerPC_VSX_Intrinsic<"xxleqv", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+}
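+
+// A hedged sketch of the two comparison flavours above: the plain form
+// yields a per-lane mask, while the "_p" form takes a leading i32 selector
+// and returns a scalar i32 (the selector is assumed to follow the AltiVec
+// predicate convention; 0 is a placeholder):
+//   %m = call <2 x i64> @llvm.ppc.vsx.xvcmpeqdp(<2 x double> %a, <2 x double> %b)
+//   %p = call i32 @llvm.ppc.vsx.xvcmpeqdp.p(i32 0, <2 x double> %a, <2 x double> %b)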
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsics.
+//
+
+let TargetPrefix = "ppc" in { // All PPC intrinsics start with "llvm.ppc.".
+ /// PowerPC_QPX_Intrinsic - Base class for all QPX intrinsics.
+ class PowerPC_QPX_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
+ list<LLVMType> param_types,
+ list<IntrinsicProperty> properties>
+ : GCCBuiltin<!strconcat("__builtin_qpx_", GCCIntSuffix)>,
+ Intrinsic<ret_types, param_types, properties>;
+}
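+
+// Concrete reading of the class above (a sketch, no new defs): an
+// instantiation such as PowerPC_QPX_FF_Intrinsic<"qvfabs"> below pairs the
+// builtin __builtin_qpx_qvfabs with the IR intrinsic llvm.ppc.qpx.qvfabs.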
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsic Class Definitions.
+//
+
+/// PowerPC_QPX_FF_Intrinsic - A PowerPC intrinsic that takes one v4f64
+/// vector and returns one. These intrinsics have no side effects.
+class PowerPC_QPX_FF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+
+/// PowerPC_QPX_FFF_Intrinsic - A PowerPC intrinsic that takes two v4f64
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_QPX_FFF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_QPX_FFFF_Intrinsic - A PowerPC intrinsic that takes three v4f64
+/// vectors and returns one. These intrinsics have no side effects.
+class PowerPC_QPX_FFFF_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+
+/// PowerPC_QPX_Load_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and returns a v4f64.
+class PowerPC_QPX_Load_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+
+/// PowerPC_QPX_LoadPerm_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and returns a v4f64 permutation.
+class PowerPC_QPX_LoadPerm_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [llvm_v4f64_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+/// PowerPC_QPX_Store_Intrinsic - A PowerPC intrinsic that takes a pointer
+/// and stores a v4f64.
+class PowerPC_QPX_Store_Intrinsic<string GCCIntSuffix>
+ : PowerPC_QPX_Intrinsic<GCCIntSuffix,
+ [], [llvm_v4f64_ty, llvm_ptr_ty],
+ [IntrReadWriteArgMem]>;
+
+//===----------------------------------------------------------------------===//
+// PowerPC QPX Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
+ // Add Instructions
+ def int_ppc_qpx_qvfadd : PowerPC_QPX_FFF_Intrinsic<"qvfadd">;
+ def int_ppc_qpx_qvfadds : PowerPC_QPX_FFF_Intrinsic<"qvfadds">;
+ def int_ppc_qpx_qvfsub : PowerPC_QPX_FFF_Intrinsic<"qvfsub">;
+ def int_ppc_qpx_qvfsubs : PowerPC_QPX_FFF_Intrinsic<"qvfsubs">;
+
+ // Estimate Instructions
+ def int_ppc_qpx_qvfre : PowerPC_QPX_FF_Intrinsic<"qvfre">;
+ def int_ppc_qpx_qvfres : PowerPC_QPX_FF_Intrinsic<"qvfres">;
+ def int_ppc_qpx_qvfrsqrte : PowerPC_QPX_FF_Intrinsic<"qvfrsqrte">;
+ def int_ppc_qpx_qvfrsqrtes : PowerPC_QPX_FF_Intrinsic<"qvfrsqrtes">;
+
+ // Multiply Instructions
+ def int_ppc_qpx_qvfmul : PowerPC_QPX_FFF_Intrinsic<"qvfmul">;
+ def int_ppc_qpx_qvfmuls : PowerPC_QPX_FFF_Intrinsic<"qvfmuls">;
+ def int_ppc_qpx_qvfxmul : PowerPC_QPX_FFF_Intrinsic<"qvfxmul">;
+ def int_ppc_qpx_qvfxmuls : PowerPC_QPX_FFF_Intrinsic<"qvfxmuls">;
+
+ // Multiply-add instructions
+ def int_ppc_qpx_qvfmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfmadd">;
+ def int_ppc_qpx_qvfmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfmadds">;
+ def int_ppc_qpx_qvfnmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfnmadd">;
+ def int_ppc_qpx_qvfnmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfnmadds">;
+ def int_ppc_qpx_qvfmsub : PowerPC_QPX_FFFF_Intrinsic<"qvfmsub">;
+ def int_ppc_qpx_qvfmsubs : PowerPC_QPX_FFFF_Intrinsic<"qvfmsubs">;
+ def int_ppc_qpx_qvfnmsub : PowerPC_QPX_FFFF_Intrinsic<"qvfnmsub">;
+ def int_ppc_qpx_qvfnmsubs : PowerPC_QPX_FFFF_Intrinsic<"qvfnmsubs">;
+ def int_ppc_qpx_qvfxmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxmadd">;
+ def int_ppc_qpx_qvfxmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxmadds">;
+ def int_ppc_qpx_qvfxxnpmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxnpmadd">;
+ def int_ppc_qpx_qvfxxnpmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxnpmadds">;
+ def int_ppc_qpx_qvfxxcpnmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxcpnmadd">;
+ def int_ppc_qpx_qvfxxcpnmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxcpnmadds">;
+ def int_ppc_qpx_qvfxxmadd : PowerPC_QPX_FFFF_Intrinsic<"qvfxxmadd">;
+ def int_ppc_qpx_qvfxxmadds : PowerPC_QPX_FFFF_Intrinsic<"qvfxxmadds">;
+
+ // Select Instruction
+ def int_ppc_qpx_qvfsel : PowerPC_QPX_FFFF_Intrinsic<"qvfsel">;
+
+ // Permute Instruction
+ def int_ppc_qpx_qvfperm : PowerPC_QPX_FFFF_Intrinsic<"qvfperm">;
+
+ // Convert and Round Instructions
+ def int_ppc_qpx_qvfctid : PowerPC_QPX_FF_Intrinsic<"qvfctid">;
+ def int_ppc_qpx_qvfctidu : PowerPC_QPX_FF_Intrinsic<"qvfctidu">;
+ def int_ppc_qpx_qvfctidz : PowerPC_QPX_FF_Intrinsic<"qvfctidz">;
+ def int_ppc_qpx_qvfctiduz : PowerPC_QPX_FF_Intrinsic<"qvfctiduz">;
+ def int_ppc_qpx_qvfctiw : PowerPC_QPX_FF_Intrinsic<"qvfctiw">;
+ def int_ppc_qpx_qvfctiwu : PowerPC_QPX_FF_Intrinsic<"qvfctiwu">;
+ def int_ppc_qpx_qvfctiwz : PowerPC_QPX_FF_Intrinsic<"qvfctiwz">;
+ def int_ppc_qpx_qvfctiwuz : PowerPC_QPX_FF_Intrinsic<"qvfctiwuz">;
+ def int_ppc_qpx_qvfcfid : PowerPC_QPX_FF_Intrinsic<"qvfcfid">;
+ def int_ppc_qpx_qvfcfidu : PowerPC_QPX_FF_Intrinsic<"qvfcfidu">;
+ def int_ppc_qpx_qvfcfids : PowerPC_QPX_FF_Intrinsic<"qvfcfids">;
+ def int_ppc_qpx_qvfcfidus : PowerPC_QPX_FF_Intrinsic<"qvfcfidus">;
+ def int_ppc_qpx_qvfrsp : PowerPC_QPX_FF_Intrinsic<"qvfrsp">;
+ def int_ppc_qpx_qvfriz : PowerPC_QPX_FF_Intrinsic<"qvfriz">;
+ def int_ppc_qpx_qvfrin : PowerPC_QPX_FF_Intrinsic<"qvfrin">;
+ def int_ppc_qpx_qvfrip : PowerPC_QPX_FF_Intrinsic<"qvfrip">;
+ def int_ppc_qpx_qvfrim : PowerPC_QPX_FF_Intrinsic<"qvfrim">;
+
+ // Move Instructions
+ def int_ppc_qpx_qvfneg : PowerPC_QPX_FF_Intrinsic<"qvfneg">;
+ def int_ppc_qpx_qvfabs : PowerPC_QPX_FF_Intrinsic<"qvfabs">;
+ def int_ppc_qpx_qvfnabs : PowerPC_QPX_FF_Intrinsic<"qvfnabs">;
+ def int_ppc_qpx_qvfcpsgn : PowerPC_QPX_FFF_Intrinsic<"qvfcpsgn">;
+
+ // Compare Instructions
+ def int_ppc_qpx_qvftstnan : PowerPC_QPX_FFF_Intrinsic<"qvftstnan">;
+ def int_ppc_qpx_qvfcmplt : PowerPC_QPX_FFF_Intrinsic<"qvfcmplt">;
+ def int_ppc_qpx_qvfcmpgt : PowerPC_QPX_FFF_Intrinsic<"qvfcmpgt">;
+ def int_ppc_qpx_qvfcmpeq : PowerPC_QPX_FFF_Intrinsic<"qvfcmpeq">;
+
+ // Load instructions
+ def int_ppc_qpx_qvlfd : PowerPC_QPX_Load_Intrinsic<"qvlfd">;
+ def int_ppc_qpx_qvlfda : PowerPC_QPX_Load_Intrinsic<"qvlfda">;
+ def int_ppc_qpx_qvlfs : PowerPC_QPX_Load_Intrinsic<"qvlfs">;
+ def int_ppc_qpx_qvlfsa : PowerPC_QPX_Load_Intrinsic<"qvlfsa">;
+
+ def int_ppc_qpx_qvlfcda : PowerPC_QPX_Load_Intrinsic<"qvlfcda">;
+ def int_ppc_qpx_qvlfcd : PowerPC_QPX_Load_Intrinsic<"qvlfcd">;
+ def int_ppc_qpx_qvlfcsa : PowerPC_QPX_Load_Intrinsic<"qvlfcsa">;
+ def int_ppc_qpx_qvlfcs : PowerPC_QPX_Load_Intrinsic<"qvlfcs">;
+ def int_ppc_qpx_qvlfiwaa : PowerPC_QPX_Load_Intrinsic<"qvlfiwaa">;
+ def int_ppc_qpx_qvlfiwa : PowerPC_QPX_Load_Intrinsic<"qvlfiwa">;
+ def int_ppc_qpx_qvlfiwza : PowerPC_QPX_Load_Intrinsic<"qvlfiwza">;
+ def int_ppc_qpx_qvlfiwz : PowerPC_QPX_Load_Intrinsic<"qvlfiwz">;
+
+ def int_ppc_qpx_qvlpcld : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcld">;
+ def int_ppc_qpx_qvlpcls : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcls">;
+ def int_ppc_qpx_qvlpcrd : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcrd">;
+ def int_ppc_qpx_qvlpcrs : PowerPC_QPX_LoadPerm_Intrinsic<"qvlpcrs">;
+
+ // Store instructions
+ def int_ppc_qpx_qvstfd : PowerPC_QPX_Store_Intrinsic<"qvstfd">;
+ def int_ppc_qpx_qvstfda : PowerPC_QPX_Store_Intrinsic<"qvstfda">;
+ def int_ppc_qpx_qvstfs : PowerPC_QPX_Store_Intrinsic<"qvstfs">;
+ def int_ppc_qpx_qvstfsa : PowerPC_QPX_Store_Intrinsic<"qvstfsa">;
+
+ def int_ppc_qpx_qvstfcda : PowerPC_QPX_Store_Intrinsic<"qvstfcda">;
+ def int_ppc_qpx_qvstfcd : PowerPC_QPX_Store_Intrinsic<"qvstfcd">;
+ def int_ppc_qpx_qvstfcsa : PowerPC_QPX_Store_Intrinsic<"qvstfcsa">;
+ def int_ppc_qpx_qvstfcs : PowerPC_QPX_Store_Intrinsic<"qvstfcs">;
+ def int_ppc_qpx_qvstfiwa : PowerPC_QPX_Store_Intrinsic<"qvstfiwa">;
+ def int_ppc_qpx_qvstfiw : PowerPC_QPX_Store_Intrinsic<"qvstfiw">;
+
+ // Logical and permutation formation
+ def int_ppc_qpx_qvflogical : PowerPC_QPX_Intrinsic<"qvflogical",
+ [llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_ppc_qpx_qvgpci : PowerPC_QPX_Intrinsic<"qvgpci",
+ [llvm_v4f64_ty], [llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// PowerPC HTM Intrinsic Definitions.
+
+let TargetPrefix = "ppc" in { // All intrinsics start with "llvm.ppc.".
+
+def int_ppc_tbegin : GCCBuiltin<"__builtin_tbegin">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_tend : GCCBuiltin<"__builtin_tend">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+
+def int_ppc_tabort : GCCBuiltin<"__builtin_tabort">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_tabortwc : GCCBuiltin<"__builtin_tabortwc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortwci : GCCBuiltin<"__builtin_tabortwci">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortdc : GCCBuiltin<"__builtin_tabortdc">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+def int_ppc_tabortdci : GCCBuiltin<"__builtin_tabortdci">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
+
+def int_ppc_tcheck : GCCBuiltin<"__builtin_tcheck">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_treclaim : GCCBuiltin<"__builtin_treclaim">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+def int_ppc_trechkpt : GCCBuiltin<"__builtin_trechkpt">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tsr : GCCBuiltin<"__builtin_tsr">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty], []>;
+
+def int_ppc_get_texasr : GCCBuiltin<"__builtin_get_texasr">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_texasru : GCCBuiltin<"__builtin_get_texasru">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_tfhar : GCCBuiltin<"__builtin_get_tfhar">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+def int_ppc_get_tfiar : GCCBuiltin<"__builtin_get_tfiar">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+
+def int_ppc_set_texasr : GCCBuiltin<"__builtin_set_texasr">,
+ Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_texasru : GCCBuiltin<"__builtin_set_texasru">,
+ Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_tfhar : GCCBuiltin<"__builtin_set_tfhar">,
+ Intrinsic<[], [llvm_i64_ty], []>;
+def int_ppc_set_tfiar : GCCBuiltin<"__builtin_set_tfiar">,
+ Intrinsic<[], [llvm_i64_ty], []>;
+
+// Extended mnemonics
+def int_ppc_tendall : GCCBuiltin<"__builtin_tendall">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tresume : GCCBuiltin<"__builtin_tresume">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+def int_ppc_tsuspend : GCCBuiltin<"__builtin_tsuspend">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+
+def int_ppc_ttest : GCCBuiltin<"__builtin_ttest">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+}
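+
+// A hedged usage sketch for the HTM pair above (the i32 mirrors the
+// argument of the corresponding __builtin_tbegin/__builtin_tend; 0 is a
+// placeholder):
+//   %begin = call i32 @llvm.ppc.tbegin(i32 0)
+//   %end   = call i32 @llvm.ppc.tend(i32 0)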
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsSystemZ.td b/contrib/llvm/include/llvm/IR/IntrinsicsSystemZ.td
new file mode 100644
index 0000000..96e7ca5
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsSystemZ.td
@@ -0,0 +1,376 @@
+//===- IntrinsicsSystemZ.td - Defines SystemZ intrinsics ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the SystemZ-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+class SystemZUnaryConv<string name, LLVMType result, LLVMType arg>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[result], [arg], [IntrNoMem]>;
+
+class SystemZUnary<string name, LLVMType type>
+ : SystemZUnaryConv<name, type, type>;
+
+class SystemZUnaryConvCC<LLVMType result, LLVMType arg>
+ : Intrinsic<[result, llvm_i32_ty], [arg], [IntrNoMem]>;
+
+class SystemZUnaryCC<LLVMType type>
+ : SystemZUnaryConvCC<type, type>;
+
+class SystemZBinaryConv<string name, LLVMType result, LLVMType arg>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[result], [arg, arg], [IntrNoMem]>;
+
+class SystemZBinary<string name, LLVMType type>
+ : SystemZBinaryConv<name, type, type>;
+
+class SystemZBinaryInt<string name, LLVMType type>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[type], [type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
+ : Intrinsic<[result, llvm_i32_ty], [arg, arg], [IntrNoMem]>;
+
+class SystemZBinaryConvIntCC<LLVMType result, LLVMType arg>
+ : Intrinsic<[result, llvm_i32_ty], [arg, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZBinaryCC<LLVMType type>
+ : SystemZBinaryConvCC<type, type>;
+
+class SystemZTernaryConv<string name, LLVMType result, LLVMType arg>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[result], [arg, arg, result], [IntrNoMem]>;
+
+class SystemZTernary<string name, LLVMType type>
+ : SystemZTernaryConv<name, type, type>;
+
+class SystemZTernaryInt<string name, LLVMType type>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZTernaryIntCC<LLVMType type>
+ : Intrinsic<[type, llvm_i32_ty], [type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZQuaternaryInt<string name, LLVMType type>
+ : GCCBuiltin<"__builtin_s390_" ## name>,
+ Intrinsic<[type], [type, type, type, llvm_i32_ty], [IntrNoMem]>;
+
+class SystemZQuaternaryIntCC<LLVMType type>
+ : Intrinsic<[type, llvm_i32_ty], [type, type, type, llvm_i32_ty],
+ [IntrNoMem]>;
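+
+// Concrete reading of these helpers (a sketch, no new defs): a use such as
+// SystemZBinary<"vaq", llvm_v16i8_ty> below pairs __builtin_s390_vaq with
+// the IR intrinsic llvm.s390.vaq, taking and returning <16 x i8>.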
+
+multiclass SystemZUnaryExtBHF<string name> {
+ def b : SystemZUnaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZUnaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZUnaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
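+
+// Sketch of how the multiclasses expand: "defm int_s390_vuph :
+// SystemZUnaryExtBHF<"vuph">" (used below) defines int_s390_vuphb,
+// int_s390_vuphh and int_s390_vuphf, widening byte, halfword and word
+// elements respectively.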
+
+multiclass SystemZUnaryExtBHWF<string name> {
+ def b : SystemZUnaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def hw : SystemZUnaryConv<name##"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZUnaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZUnaryBHF<string name> {
+ def b : SystemZUnary<name##"b", llvm_v16i8_ty>;
+ def h : SystemZUnary<name##"h", llvm_v8i16_ty>;
+ def f : SystemZUnary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZUnaryBHFG<string name> : SystemZUnaryBHF<name> {
+ def g : SystemZUnary<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZUnaryCCBHF {
+ def bs : SystemZUnaryCC<llvm_v16i8_ty>;
+ def hs : SystemZUnaryCC<llvm_v8i16_ty>;
+ def fs : SystemZUnaryCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryTruncHFG<string name> {
+ def h : SystemZBinaryConv<name##"h", llvm_v16i8_ty, llvm_v8i16_ty>;
+ def f : SystemZBinaryConv<name##"f", llvm_v8i16_ty, llvm_v4i32_ty>;
+ def g : SystemZBinaryConv<name##"g", llvm_v4i32_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryTruncCCHFG {
+ def hs : SystemZBinaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
+ def fs : SystemZBinaryConvCC<llvm_v8i16_ty, llvm_v4i32_ty>;
+ def gs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryExtBHF<string name> {
+ def b : SystemZBinaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZBinaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZBinaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryExtBHFG<string name> : SystemZBinaryExtBHF<name> {
+ def g : SystemZBinaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryBHF<string name> {
+ def b : SystemZBinary<name##"b", llvm_v16i8_ty>;
+ def h : SystemZBinary<name##"h", llvm_v8i16_ty>;
+ def f : SystemZBinary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZBinaryBHFG<string name> : SystemZBinaryBHF<name> {
+ def g : SystemZBinary<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryIntBHFG<string name> {
+ def b : SystemZBinaryInt<name##"b", llvm_v16i8_ty>;
+ def h : SystemZBinaryInt<name##"h", llvm_v8i16_ty>;
+ def f : SystemZBinaryInt<name##"f", llvm_v4i32_ty>;
+ def g : SystemZBinaryInt<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZBinaryCCBHF {
+ def bs : SystemZBinaryCC<llvm_v16i8_ty>;
+ def hs : SystemZBinaryCC<llvm_v8i16_ty>;
+ def fs : SystemZBinaryCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZCompareBHFG<string name> {
+ def bs : SystemZBinaryCC<llvm_v16i8_ty>;
+ def hs : SystemZBinaryCC<llvm_v8i16_ty>;
+ def fs : SystemZBinaryCC<llvm_v4i32_ty>;
+ def gs : SystemZBinaryCC<llvm_v2i64_ty>;
+}
+
+multiclass SystemZTernaryExtBHF<string name> {
+ def b : SystemZTernaryConv<name##"b", llvm_v8i16_ty, llvm_v16i8_ty>;
+ def h : SystemZTernaryConv<name##"h", llvm_v4i32_ty, llvm_v8i16_ty>;
+ def f : SystemZTernaryConv<name##"f", llvm_v2i64_ty, llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryExtBHFG<string name> : SystemZTernaryExtBHF<name> {
+ def g : SystemZTernaryConv<name##"g", llvm_v16i8_ty, llvm_v2i64_ty>;
+}
+
+multiclass SystemZTernaryBHF<string name> {
+ def b : SystemZTernary<name##"b", llvm_v16i8_ty>;
+ def h : SystemZTernary<name##"h", llvm_v8i16_ty>;
+ def f : SystemZTernary<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryIntBHF<string name> {
+ def b : SystemZTernaryInt<name##"b", llvm_v16i8_ty>;
+ def h : SystemZTernaryInt<name##"h", llvm_v8i16_ty>;
+ def f : SystemZTernaryInt<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZTernaryIntCCBHF {
+ def bs : SystemZTernaryIntCC<llvm_v16i8_ty>;
+ def hs : SystemZTernaryIntCC<llvm_v8i16_ty>;
+ def fs : SystemZTernaryIntCC<llvm_v4i32_ty>;
+}
+
+multiclass SystemZQuaternaryIntBHF<string name> {
+ def b : SystemZQuaternaryInt<name##"b", llvm_v16i8_ty>;
+ def h : SystemZQuaternaryInt<name##"h", llvm_v8i16_ty>;
+ def f : SystemZQuaternaryInt<name##"f", llvm_v4i32_ty>;
+}
+
+multiclass SystemZQuaternaryIntBHFG<string name>
+    : SystemZQuaternaryIntBHF<name> {
+ def g : SystemZQuaternaryInt<name##"g", llvm_v2i64_ty>;
+}
+
+multiclass SystemZQuaternaryIntCCBHF {
+ def bs : SystemZQuaternaryIntCC<llvm_v16i8_ty>;
+ def hs : SystemZQuaternaryIntCC<llvm_v8i16_ty>;
+ def fs : SystemZQuaternaryIntCC<llvm_v4i32_ty>;
+}
+
+//===----------------------------------------------------------------------===//
+//
+// Transactional-execution intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "s390" in {
+ def int_s390_tbegin : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoDuplicate]>;
+
+ def int_s390_tbegin_nofloat : Intrinsic<[llvm_i32_ty],
+ [llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoDuplicate]>;
+
+ def int_s390_tbeginc : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoDuplicate]>;
+
+ def int_s390_tabort : Intrinsic<[], [llvm_i64_ty],
+ [IntrNoReturn, Throws]>;
+
+ def int_s390_tend : GCCBuiltin<"__builtin_tend">,
+ Intrinsic<[llvm_i32_ty], []>;
+
+ def int_s390_etnd : GCCBuiltin<"__builtin_tx_nesting_depth">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+
+ def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_s390_ppa_txassist : GCCBuiltin<"__builtin_tx_assist">,
+ Intrinsic<[], [llvm_i32_ty]>;
+}
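+
+// A hedged usage sketch for int_s390_tbegin above: the pointer is the
+// transaction-diagnostic-block address (may be null) and the i32 is the
+// TBEGIN control field; both values here are placeholders:
+//   %cc = call i32 @llvm.s390.tbegin(i8* null, i32 0)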
+
+//===----------------------------------------------------------------------===//
+//
+// Vector intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "s390" in {
+ def int_s390_lcbb : GCCBuiltin<"__builtin_s390_lcbb">,
+ Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_s390_vlbb : GCCBuiltin<"__builtin_s390_vlbb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_s390_vll : GCCBuiltin<"__builtin_s390_vll">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
+ [IntrReadArgMem]>;
+
+ def int_s390_vpdi : GCCBuiltin<"__builtin_s390_vpdi">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_s390_vperm : GCCBuiltin<"__builtin_s390_vperm">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+ defm int_s390_vpks : SystemZBinaryTruncHFG<"vpks">;
+ defm int_s390_vpks : SystemZBinaryTruncCCHFG;
+
+ defm int_s390_vpkls : SystemZBinaryTruncHFG<"vpkls">;
+ defm int_s390_vpkls : SystemZBinaryTruncCCHFG;
+
+ def int_s390_vstl : GCCBuiltin<"__builtin_s390_vstl">,
+ Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
+ // This is in fact write-only, but there is
+ // no property to express that.
+ [IntrReadWriteArgMem]>;
+
+ defm int_s390_vupl : SystemZUnaryExtBHWF<"vupl">;
+ defm int_s390_vupll : SystemZUnaryExtBHF<"vupll">;
+
+ defm int_s390_vuph : SystemZUnaryExtBHF<"vuph">;
+ defm int_s390_vuplh : SystemZUnaryExtBHF<"vuplh">;
+
+ defm int_s390_vacc : SystemZBinaryBHFG<"vacc">;
+
+ def int_s390_vaq : SystemZBinary<"vaq", llvm_v16i8_ty>;
+ def int_s390_vacq : SystemZTernary<"vacq", llvm_v16i8_ty>;
+ def int_s390_vaccq : SystemZBinary<"vaccq", llvm_v16i8_ty>;
+ def int_s390_vacccq : SystemZTernary<"vacccq", llvm_v16i8_ty>;
+
+ defm int_s390_vavg : SystemZBinaryBHFG<"vavg">;
+ defm int_s390_vavgl : SystemZBinaryBHFG<"vavgl">;
+
+ def int_s390_vcksm : SystemZBinary<"vcksm", llvm_v4i32_ty>;
+
+ defm int_s390_vgfm : SystemZBinaryExtBHFG<"vgfm">;
+ defm int_s390_vgfma : SystemZTernaryExtBHFG<"vgfma">;
+
+ defm int_s390_vmah : SystemZTernaryBHF<"vmah">;
+ defm int_s390_vmalh : SystemZTernaryBHF<"vmalh">;
+ defm int_s390_vmae : SystemZTernaryExtBHF<"vmae">;
+ defm int_s390_vmale : SystemZTernaryExtBHF<"vmale">;
+ defm int_s390_vmao : SystemZTernaryExtBHF<"vmao">;
+ defm int_s390_vmalo : SystemZTernaryExtBHF<"vmalo">;
+
+ defm int_s390_vmh : SystemZBinaryBHF<"vmh">;
+ defm int_s390_vmlh : SystemZBinaryBHF<"vmlh">;
+ defm int_s390_vme : SystemZBinaryExtBHF<"vme">;
+ defm int_s390_vmle : SystemZBinaryExtBHF<"vmle">;
+ defm int_s390_vmo : SystemZBinaryExtBHF<"vmo">;
+ defm int_s390_vmlo : SystemZBinaryExtBHF<"vmlo">;
+
+ defm int_s390_verllv : SystemZBinaryBHFG<"verllv">;
+ defm int_s390_verll : SystemZBinaryIntBHFG<"verll">;
+ defm int_s390_verim : SystemZQuaternaryIntBHFG<"verim">;
+
+ def int_s390_vsl : SystemZBinary<"vsl", llvm_v16i8_ty>;
+ def int_s390_vslb : SystemZBinary<"vslb", llvm_v16i8_ty>;
+ def int_s390_vsra : SystemZBinary<"vsra", llvm_v16i8_ty>;
+ def int_s390_vsrab : SystemZBinary<"vsrab", llvm_v16i8_ty>;
+ def int_s390_vsrl : SystemZBinary<"vsrl", llvm_v16i8_ty>;
+ def int_s390_vsrlb : SystemZBinary<"vsrlb", llvm_v16i8_ty>;
+
+ def int_s390_vsldb : GCCBuiltin<"__builtin_s390_vsldb">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ defm int_s390_vscbi : SystemZBinaryBHFG<"vscbi">;
+
+ def int_s390_vsq : SystemZBinary<"vsq", llvm_v16i8_ty>;
+ def int_s390_vsbiq : SystemZTernary<"vsbiq", llvm_v16i8_ty>;
+ def int_s390_vscbiq : SystemZBinary<"vscbiq", llvm_v16i8_ty>;
+ def int_s390_vsbcbiq : SystemZTernary<"vsbcbiq", llvm_v16i8_ty>;
+
+ def int_s390_vsumb : SystemZBinaryConv<"vsumb", llvm_v4i32_ty, llvm_v16i8_ty>;
+ def int_s390_vsumh : SystemZBinaryConv<"vsumh", llvm_v4i32_ty, llvm_v8i16_ty>;
+
+ def int_s390_vsumgh : SystemZBinaryConv<"vsumgh", llvm_v2i64_ty,
+ llvm_v8i16_ty>;
+ def int_s390_vsumgf : SystemZBinaryConv<"vsumgf", llvm_v2i64_ty,
+ llvm_v4i32_ty>;
+
+ def int_s390_vsumqf : SystemZBinaryConv<"vsumqf", llvm_v16i8_ty,
+ llvm_v4i32_ty>;
+ def int_s390_vsumqg : SystemZBinaryConv<"vsumqg", llvm_v16i8_ty,
+ llvm_v2i64_ty>;
+
+ def int_s390_vtm : SystemZBinaryConv<"vtm", llvm_i32_ty, llvm_v16i8_ty>;
+
+ defm int_s390_vceq : SystemZCompareBHFG<"vceq">;
+ defm int_s390_vch : SystemZCompareBHFG<"vch">;
+ defm int_s390_vchl : SystemZCompareBHFG<"vchl">;
+
+ defm int_s390_vfae : SystemZTernaryIntBHF<"vfae">;
+ defm int_s390_vfae : SystemZTernaryIntCCBHF;
+ defm int_s390_vfaez : SystemZTernaryIntBHF<"vfaez">;
+ defm int_s390_vfaez : SystemZTernaryIntCCBHF;
+
+ defm int_s390_vfee : SystemZBinaryBHF<"vfee">;
+ defm int_s390_vfee : SystemZBinaryCCBHF;
+ defm int_s390_vfeez : SystemZBinaryBHF<"vfeez">;
+ defm int_s390_vfeez : SystemZBinaryCCBHF;
+
+ defm int_s390_vfene : SystemZBinaryBHF<"vfene">;
+ defm int_s390_vfene : SystemZBinaryCCBHF;
+ defm int_s390_vfenez : SystemZBinaryBHF<"vfenez">;
+ defm int_s390_vfenez : SystemZBinaryCCBHF;
+
+ defm int_s390_vistr : SystemZUnaryBHF<"vistr">;
+ defm int_s390_vistr : SystemZUnaryCCBHF;
+
+ defm int_s390_vstrc : SystemZQuaternaryIntBHF<"vstrc">;
+ defm int_s390_vstrc : SystemZQuaternaryIntCCBHF;
+ defm int_s390_vstrcz : SystemZQuaternaryIntBHF<"vstrcz">;
+ defm int_s390_vstrcz : SystemZQuaternaryIntCCBHF;
+
+ def int_s390_vfcedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+ def int_s390_vfchdbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+ def int_s390_vfchedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+
+ def int_s390_vftcidb : SystemZBinaryConvIntCC<llvm_v2i64_ty, llvm_v2f64_ty>;
+
+ def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
new file mode 100644
index 0000000..3953aef
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -0,0 +1,22 @@
+//===- IntrinsicsWebAssembly.td - Defines wasm intrinsics --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file defines all of the WebAssembly-specific intrinsics.
+///
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "wasm" in { // All intrinsics start with "llvm.wasm.".
+
+// Note that memory_size is not IntrNoMem because it must be sequenced with
+// respect to grow_memory calls.
+def int_wasm_memory_size : Intrinsic<[llvm_anyint_ty], [], [IntrReadMem]>;
+def int_wasm_grow_memory : Intrinsic<[], [llvm_anyint_ty], []>;
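+
+// A hedged sketch of the overloaded forms above: the llvm_anyint_ty
+// overload is resolved with a type suffix in the IR-level name, e.g.:
+//   %pages = call i32 @llvm.wasm.memory.size.i32()
+//   call void @llvm.wasm.grow.memory.i32(i32 %delta)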
+
+}
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsX86.td b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
new file mode 100644
index 0000000..18390f8
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsX86.td
@@ -0,0 +1,7540 @@
+//===- IntrinsicsX86.td - Defines X86 intrinsics -----------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the X86-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Interrupt traps
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_int : Intrinsic<[], [llvm_i8_ty]>;
+}
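+
+// A hedged usage sketch (i8 3 would request interrupt vector 3, i.e. a
+// breakpoint-style trap; the value is illustrative):
+//   call void @llvm.x86.int(i8 3)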
+
+//===----------------------------------------------------------------------===//
+// SEH intrinsics for Windows
+let TargetPrefix = "x86" in {
+ def int_x86_seh_lsda : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+ // Marks the EH registration node created in LLVM IR prior to code generation.
+ def int_x86_seh_ehregnode : Intrinsic<[], [llvm_ptr_ty], []>;
+
+ // Given a pointer to the end of an EH registration object, returns the true
+ // parent frame address that can be used with llvm.localrecover.
+ def int_x86_seh_recoverfp : Intrinsic<[llvm_ptr_ty],
+ [llvm_ptr_ty, llvm_ptr_ty],
+ [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Read Time Stamp Counter.
+let TargetPrefix = "x86" in {
+ def int_x86_rdtsc : GCCBuiltin<"__builtin_ia32_rdtsc">,
+ Intrinsic<[llvm_i64_ty], [], []>;
+ def int_x86_rdtscp : GCCBuiltin<"__builtin_ia32_rdtscp">,
+ Intrinsic<[llvm_i64_ty], [llvm_ptr_ty], [IntrReadWriteArgMem]>;
+}
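+
+// Usage sketch (a hedged example; rdtscp also stores the 32-bit processor
+// ID through its pointer operand, %aux being a placeholder buffer):
+//   %tsc  = call i64 @llvm.x86.rdtsc()
+//   %tsc2 = call i64 @llvm.x86.rdtscp(i8* %aux)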
+
+// Read Performance-Monitoring Counter.
+let TargetPrefix = "x86" in {
+ def int_x86_rdpmc : GCCBuiltin<"__builtin_ia32_rdpmc">,
+ Intrinsic<[llvm_i64_ty], [llvm_i32_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// 3DNow!
+
+let TargetPrefix = "x86" in {
+ def int_x86_3dnow_pavgusb : GCCBuiltin<"__builtin_ia32_pavgusb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pf2id : GCCBuiltin<"__builtin_ia32_pf2id">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnow_pfacc : GCCBuiltin<"__builtin_ia32_pfacc">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfadd : GCCBuiltin<"__builtin_ia32_pfadd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfcmpeq : GCCBuiltin<"__builtin_ia32_pfcmpeq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfcmpge : GCCBuiltin<"__builtin_ia32_pfcmpge">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfcmpgt : GCCBuiltin<"__builtin_ia32_pfcmpgt">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfmax : GCCBuiltin<"__builtin_ia32_pfmax">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfmin : GCCBuiltin<"__builtin_ia32_pfmin">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfmul : GCCBuiltin<"__builtin_ia32_pfmul">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfrcp : GCCBuiltin<"__builtin_ia32_pfrcp">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnow_pfrcpit1 : GCCBuiltin<"__builtin_ia32_pfrcpit1">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfrcpit2 : GCCBuiltin<"__builtin_ia32_pfrcpit2">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfrsqrt : GCCBuiltin<"__builtin_ia32_pfrsqrt">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnow_pfrsqit1 : GCCBuiltin<"__builtin_ia32_pfrsqit1">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfsub : GCCBuiltin<"__builtin_ia32_pfsub">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pfsubr : GCCBuiltin<"__builtin_ia32_pfsubr">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnow_pi2fd : GCCBuiltin<"__builtin_ia32_pi2fd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnow_pmulhrw : GCCBuiltin<"__builtin_ia32_pmulhrw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// 3DNow! extensions
+
+let TargetPrefix = "x86" in {
+ def int_x86_3dnowa_pf2iw : GCCBuiltin<"__builtin_ia32_pf2iw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnowa_pfnacc : GCCBuiltin<"__builtin_ia32_pfnacc">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnowa_pfpnacc : GCCBuiltin<"__builtin_ia32_pfpnacc">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_3dnowa_pi2fw : GCCBuiltin<"__builtin_ia32_pi2fw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_3dnowa_pswapd :
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE1
+
+// Arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_add_ss : GCCBuiltin<"__builtin_ia32_addss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_sub_ss : GCCBuiltin<"__builtin_ia32_subss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_mul_ss : GCCBuiltin<"__builtin_ia32_mulss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_div_ss : GCCBuiltin<"__builtin_ia32_divss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_sqrt_ps : GCCBuiltin<"__builtin_ia32_sqrtps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_rcp_ss : GCCBuiltin<"__builtin_ia32_rcpss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_rcp_ps : GCCBuiltin<"__builtin_ia32_rcpps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_rsqrt_ss : GCCBuiltin<"__builtin_ia32_rsqrtss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_rsqrt_ps : GCCBuiltin<"__builtin_ia32_rsqrtps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_min_ss : GCCBuiltin<"__builtin_ia32_minss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_min_ps : GCCBuiltin<"__builtin_ia32_minps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_max_ss : GCCBuiltin<"__builtin_ia32_maxss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_max_ps : GCCBuiltin<"__builtin_ia32_maxps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+}
+
+// Comparison ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_cmp_ss : GCCBuiltin<"__builtin_ia32_cmpss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse_cmp_ps : GCCBuiltin<"__builtin_ia32_cmpps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse_comieq_ss : GCCBuiltin<"__builtin_ia32_comieq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_comilt_ss : GCCBuiltin<"__builtin_ia32_comilt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_comile_ss : GCCBuiltin<"__builtin_ia32_comile">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_comigt_ss : GCCBuiltin<"__builtin_ia32_comigt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_comige_ss : GCCBuiltin<"__builtin_ia32_comige">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_comineq_ss : GCCBuiltin<"__builtin_ia32_comineq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomieq_ss : GCCBuiltin<"__builtin_ia32_ucomieq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomilt_ss : GCCBuiltin<"__builtin_ia32_ucomilt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomile_ss : GCCBuiltin<"__builtin_ia32_ucomile">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomigt_ss : GCCBuiltin<"__builtin_ia32_ucomigt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomige_ss : GCCBuiltin<"__builtin_ia32_ucomige">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_ucomineq_ss : GCCBuiltin<"__builtin_ia32_ucomineq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+}
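+
+// A hedged sketch of the packed compare above: the trailing i8 is the SSE
+// comparison-predicate immediate (0 encodes EQ; shown for illustration):
+//   %m = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a,
+//            <4 x float> %b, i8 0)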
+
+
+// Conversion ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_cvtss2si : GCCBuiltin<"__builtin_ia32_cvtss2si">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtss2si64 : GCCBuiltin<"__builtin_ia32_cvtss2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvttss2si : GCCBuiltin<"__builtin_ia32_cvttss2si">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvttss2si64 : GCCBuiltin<"__builtin_ia32_cvttss2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtsi2ss : GCCBuiltin<"__builtin_ia32_cvtsi2ss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtsi642ss : GCCBuiltin<"__builtin_ia32_cvtsi642ss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i64_ty], [IntrNoMem]>;
+
+ def int_x86_sse_cvtps2pi : GCCBuiltin<"__builtin_ia32_cvtps2pi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvttps2pi: GCCBuiltin<"__builtin_ia32_cvttps2pi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtpi2ps : GCCBuiltin<"__builtin_ia32_cvtpi2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// SIMD store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+}
+
+// Cacheability support ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">,
+ Intrinsic<[], [], []>;
+}
+
+// Control register.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_stmxcsr :
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_sse_ldmxcsr :
+ Intrinsic<[], [llvm_ptr_ty], []>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse_movmsk_ps : GCCBuiltin<"__builtin_ia32_movmskps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE2
+
+// FP arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_add_sd : GCCBuiltin<"__builtin_ia32_addsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_sub_sd : GCCBuiltin<"__builtin_ia32_subsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_mul_sd : GCCBuiltin<"__builtin_ia32_mulsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_div_sd : GCCBuiltin<"__builtin_ia32_divsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_sse2_sqrt_pd : GCCBuiltin<"__builtin_ia32_sqrtpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_sse2_min_sd : GCCBuiltin<"__builtin_ia32_minsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_min_pd : GCCBuiltin<"__builtin_ia32_minpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_max_sd : GCCBuiltin<"__builtin_ia32_maxsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_max_pd : GCCBuiltin<"__builtin_ia32_maxpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// FP comparison ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_cmp_sd : GCCBuiltin<"__builtin_ia32_cmpsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_cmp_pd : GCCBuiltin<"__builtin_ia32_cmppd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_comieq_sd : GCCBuiltin<"__builtin_ia32_comisdeq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_comilt_sd : GCCBuiltin<"__builtin_ia32_comisdlt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_comile_sd : GCCBuiltin<"__builtin_ia32_comisdle">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_comigt_sd : GCCBuiltin<"__builtin_ia32_comisdgt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_comige_sd : GCCBuiltin<"__builtin_ia32_comisdge">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_comineq_sd : GCCBuiltin<"__builtin_ia32_comisdneq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomieq_sd : GCCBuiltin<"__builtin_ia32_ucomisdeq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomilt_sd : GCCBuiltin<"__builtin_ia32_ucomisdlt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomile_sd : GCCBuiltin<"__builtin_ia32_ucomisdle">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomigt_sd : GCCBuiltin<"__builtin_ia32_ucomisdgt">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomige_sd : GCCBuiltin<"__builtin_ia32_ucomisdge">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_ucomineq_sd : GCCBuiltin<"__builtin_ia32_ucomisdneq">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pminu_b : GCCBuiltin<"__builtin_ia32_pminub128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_sse2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem, Commutative]>;
+}
+
+// Integer shift ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_psll_w : GCCBuiltin<"__builtin_ia32_psllw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_psll_d : GCCBuiltin<"__builtin_ia32_pslld128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psll_q : GCCBuiltin<"__builtin_ia32_psllq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_sse2_psra_w : GCCBuiltin<"__builtin_ia32_psraw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_psra_d : GCCBuiltin<"__builtin_ia32_psrad128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_sse2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
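+
+// The psll/psrl/psra forms above take the shift count in the low 64 bits of
+// the second vector operand, while the pslli/psrli/psrai forms take an i32
+// immediate. A minimal C sketch of the distinction (assuming Clang and the
+// Intel names from <emmintrin.h>; helper names are illustrative only):
+//   #include <emmintrin.h>
+//   __m128i shl_by_vec(__m128i v, __m128i count) {
+//     return _mm_sll_epi16(v, count);   // maps onto llvm.x86.sse2.psll.w
+//   }
+//   __m128i shl_by_three(__m128i v) {
+//     return _mm_slli_epi16(v, 3);      // maps onto llvm.x86.sse2.pslli.w
+//   }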
+
+// Conversion ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_cvtdq2pd : GCCBuiltin<"__builtin_ia32_cvtdq2pd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtdq2ps : GCCBuiltin<"__builtin_ia32_cvtdq2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtpd2dq : GCCBuiltin<"__builtin_ia32_cvtpd2dq">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvttpd2dq : GCCBuiltin<"__builtin_ia32_cvttpd2dq">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtpd2ps : GCCBuiltin<"__builtin_ia32_cvtpd2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtps2dq : GCCBuiltin<"__builtin_ia32_cvtps2dq">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvttps2dq : GCCBuiltin<"__builtin_ia32_cvttps2dq">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtps2pd : GCCBuiltin<"__builtin_ia32_cvtps2pd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtsd2si : GCCBuiltin<"__builtin_ia32_cvtsd2si">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtsd2si64 : GCCBuiltin<"__builtin_ia32_cvtsd2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvttsd2si : GCCBuiltin<"__builtin_ia32_cvttsd2si">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_cvttsd2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtsi2sd : GCCBuiltin<"__builtin_ia32_cvtsi2sd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtsi642sd : GCCBuiltin<"__builtin_ia32_cvtsi642sd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtsd2ss : GCCBuiltin<"__builtin_ia32_cvtsd2ss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_cvtss2sd : GCCBuiltin<"__builtin_ia32_cvtss2sd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtpd2pi : GCCBuiltin<"__builtin_ia32_cvtpd2pi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+  def int_x86_sse_cvttpd2pi : GCCBuiltin<"__builtin_ia32_cvttpd2pi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse_cvtpi2pd : GCCBuiltin<"__builtin_ia32_cvtpi2pd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+}
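+
+// The cvt forms convert using the current MXCSR rounding mode; the cvtt
+// forms truncate toward zero. A minimal C sketch (assuming Clang and
+// <emmintrin.h>; helper names are illustrative only):
+//   #include <emmintrin.h>
+//   int cvt_rounded(__m128d v)   { return _mm_cvtsd_si32(v);  } // cvtsd2si
+//   int cvt_truncated(__m128d v) { return _mm_cvttsd_si32(v); } // cvttsd2si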
+
+// SIMD store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2f64_ty], [IntrReadWriteArgMem]>;
+ def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v16i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4i32_ty], [IntrReadWriteArgMem]>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse2_packsswb_128 : GCCBuiltin<"__builtin_ia32_packsswb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_packssdw_128 : GCCBuiltin<"__builtin_ia32_packssdw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sse2_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_sse2_movmsk_pd : GCCBuiltin<"__builtin_ia32_movmskpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse2_pmovmskb_128 : GCCBuiltin<"__builtin_ia32_pmovmskb128">,
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_maskmov_dqu : GCCBuiltin<"__builtin_ia32_maskmovdqu">,
+ Intrinsic<[], [llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_ptr_ty], []>;
+ def int_x86_sse2_clflush : GCCBuiltin<"__builtin_ia32_clflush">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_sse2_lfence : GCCBuiltin<"__builtin_ia32_lfence">,
+ Intrinsic<[], [], []>;
+ def int_x86_sse2_mfence : GCCBuiltin<"__builtin_ia32_mfence">,
+ Intrinsic<[], [], []>;
+ def int_x86_sse2_pause : GCCBuiltin<"__builtin_ia32_pause">,
+ Intrinsic<[], [], []>;
+}
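+
+// A minimal C sketch of the fence and cache-line ops above (assuming Clang
+// and <emmintrin.h>; the publish/spin helpers are illustrative only):
+//   #include <emmintrin.h>
+//   void publish(double *slot, __m128d v) {
+//     _mm_storeu_pd(slot, v);   // llvm.x86.sse2.storeu.pd
+//     _mm_mfence();             // llvm.x86.sse2.mfence: order the store
+//     _mm_clflush(slot);        // llvm.x86.sse2.clflush: flush the line
+//   }
+//   void spin_hint(void) { _mm_pause(); }  // llvm.x86.sse2.pause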
+
+//===----------------------------------------------------------------------===//
+// SSE3
+
+// Addition / subtraction ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse3_addsub_ps : GCCBuiltin<"__builtin_ia32_addsubps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse3_addsub_pd : GCCBuiltin<"__builtin_ia32_addsubpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// Horizontal ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse3_hadd_ps : GCCBuiltin<"__builtin_ia32_haddps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse3_hadd_pd : GCCBuiltin<"__builtin_ia32_haddpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_sse3_hsub_ps : GCCBuiltin<"__builtin_ia32_hsubps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_sse3_hsub_pd : GCCBuiltin<"__builtin_ia32_hsubpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+}
+
+// Specialized unaligned load.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse3_ldu_dq : GCCBuiltin<"__builtin_ia32_lddqu">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// Thread synchronization ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse3_monitor : GCCBuiltin<"__builtin_ia32_monitor">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_sse3_mwait : GCCBuiltin<"__builtin_ia32_mwait">,
+ Intrinsic<[], [llvm_i32_ty,
+ llvm_i32_ty], []>;
+}
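+
+// MONITOR/MWAIT are normally executable only at CPL 0, so the mapping below
+// (via <pmmintrin.h>) is a sketch of the builtin plumbing rather than
+// portable user-space code:
+//   #include <pmmintrin.h>
+//   void wait_for_write(const volatile void *p) {
+//     _mm_monitor((const void *)p, 0, 0);  // llvm.x86.sse3.monitor
+//     _mm_mwait(0, 0);                     // llvm.x86.sse3.mwait
+//   }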
+
+//===----------------------------------------------------------------------===//
+// SSSE3
+
+// Horizontal arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_ssse3_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phadd_w_128 : GCCBuiltin<"__builtin_ia32_phaddw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phadd_d_128 : GCCBuiltin<"__builtin_ia32_phaddd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phadd_sw_128 : GCCBuiltin<"__builtin_ia32_phaddsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phsub_w_128 : GCCBuiltin<"__builtin_ia32_phsubw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phsub_d_128 : GCCBuiltin<"__builtin_ia32_phsubd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_phsub_sw_128 : GCCBuiltin<"__builtin_ia32_phsubsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pmadd_ub_sw_128 : GCCBuiltin<"__builtin_ia32_pmaddubsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
+}
+
+// Packed multiply high with round and scale
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_ssse3_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_ssse3_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem, Commutative]>;
+}
+
+// Shuffle ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_ssse3_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pshuf_b_128 : GCCBuiltin<"__builtin_ia32_pshufb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_sse2_pshuf_d : GCCBuiltin<"__builtin_ia32_pshufd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse2_pshufl_w : GCCBuiltin<"__builtin_ia32_pshuflw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse2_pshufh_w : GCCBuiltin<"__builtin_ia32_pshufhw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse_pshuf_w : GCCBuiltin<"__builtin_ia32_pshufw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
+// Sign ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_ssse3_psign_b : GCCBuiltin<"__builtin_ia32_psignb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_psign_b_128 : GCCBuiltin<"__builtin_ia32_psignb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
+ llvm_v16i8_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_psign_w : GCCBuiltin<"__builtin_ia32_psignw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_psign_w_128 : GCCBuiltin<"__builtin_ia32_psignw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_psign_d : GCCBuiltin<"__builtin_ia32_psignd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_psign_d_128 : GCCBuiltin<"__builtin_ia32_psignd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+}
+
+// Absolute value ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_ssse3_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pabs_b_128 : GCCBuiltin<"__builtin_ia32_pabsb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pabs_w_128 : GCCBuiltin<"__builtin_ia32_pabsw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+
+ def int_x86_ssse3_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_ssse3_pabs_d_128 : GCCBuiltin<"__builtin_ia32_pabsd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SSE4.1
+
+// FP rounding ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_round_ss : GCCBuiltin<"__builtin_ia32_roundss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse41_round_ps : GCCBuiltin<"__builtin_ia32_roundps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse41_round_sd : GCCBuiltin<"__builtin_ia32_roundsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_sse41_round_pd : GCCBuiltin<"__builtin_ia32_roundpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
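+
+// The trailing i32 is the ROUNDPS/ROUNDPD immediate: bits 0-1 select the
+// rounding mode, bit 2 defers to MXCSR.RC, and bit 3 suppresses precision
+// exceptions. A minimal C sketch (assuming Clang and <smmintrin.h>):
+//   #include <smmintrin.h>
+//   __m128 round_nearest(__m128 v) {
+//     return _mm_round_ps(v, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+//   }
+//   __m128 floor4(__m128 v) { return _mm_floor_ps(v); }  // same intrinsic,
+//                                                        // immediate 0x1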
+
+// Vector sign and zero extend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_pmovsxbd : GCCBuiltin<"__builtin_ia32_pmovsxbd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovsxbq : GCCBuiltin<"__builtin_ia32_pmovsxbq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovsxbw : GCCBuiltin<"__builtin_ia32_pmovsxbw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovsxdq : GCCBuiltin<"__builtin_ia32_pmovsxdq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovsxwd : GCCBuiltin<"__builtin_ia32_pmovsxwd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovsxwq : GCCBuiltin<"__builtin_ia32_pmovsxwq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxbw : GCCBuiltin<"__builtin_ia32_pmovzxbw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
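+
+// A minimal C sketch of the sign- versus zero-extension pair (assuming Clang
+// and <smmintrin.h>; helper names are illustrative only):
+//   #include <smmintrin.h>
+//   __m128i widen_s8_to_s32(__m128i v) {
+//     return _mm_cvtepi8_epi32(v);  // llvm.x86.sse41.pmovsxbd
+//   }
+//   __m128i widen_u8_to_u32(__m128i v) {
+//     return _mm_cvtepu8_epi32(v);  // llvm.x86.sse41.pmovzxbd
+//   }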
+
+// Horizontal minimum of unsigned words
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_phminposuw : GCCBuiltin<"__builtin_ia32_phminposuw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
+
+// Vector compare, min, max
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_pmaxsb : GCCBuiltin<"__builtin_ia32_pmaxsb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pmaxsd : GCCBuiltin<"__builtin_ia32_pmaxsd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pmaxud : GCCBuiltin<"__builtin_ia32_pmaxud128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pmaxuw : GCCBuiltin<"__builtin_ia32_pmaxuw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pminsb : GCCBuiltin<"__builtin_ia32_pminsb128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pminsd : GCCBuiltin<"__builtin_ia32_pminsd128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pminud : GCCBuiltin<"__builtin_ia32_pminud128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_pminuw : GCCBuiltin<"__builtin_ia32_pminuw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem, Commutative]>;
+}
+
+// Advanced Encryption Standard (AES) Instructions
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_aesni_aesimc : GCCBuiltin<"__builtin_ia32_aesimc128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesenc : GCCBuiltin<"__builtin_ia32_aesenc128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesenclast : GCCBuiltin<"__builtin_ia32_aesenclast128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesdec : GCCBuiltin<"__builtin_ia32_aesdec128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aesdeclast : GCCBuiltin<"__builtin_ia32_aesdeclast128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_aesni_aeskeygenassist :
+ GCCBuiltin<"__builtin_ia32_aeskeygenassist128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
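+
+// An AES-128 block encryption is nine AESENC rounds plus one AESENCLAST. A
+// minimal C sketch (assuming Clang, <wmmintrin.h>, and round keys expanded
+// into rk[0..10] beforehand; illustrative only):
+//   #include <wmmintrin.h>
+//   __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11]) {
+//     block = _mm_xor_si128(block, rk[0]);
+//     for (int i = 1; i < 10; ++i)
+//       block = _mm_aesenc_si128(block, rk[i]);    // llvm.x86.aesni.aesenc
+//     return _mm_aesenclast_si128(block, rk[10]);  // ...aesni.aesenclast
+//   }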
+
+// PCLMUL instruction
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_pclmulqdq : GCCBuiltin<"__builtin_ia32_pclmulqdq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
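+
+// The i8 immediate selects which 64-bit half of each source feeds the
+// carry-less multiply: bit 0 picks the qword of the first operand, bit 4
+// that of the second. A minimal C sketch (assuming Clang, <wmmintrin.h>):
+//   #include <wmmintrin.h>
+//   __m128i clmul_hi_hi(__m128i a, __m128i b) {
+//     return _mm_clmulepi64_si128(a, b, 0x11);  // high(a) * high(b)
+//   }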
+
+// Vector pack
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_packusdw : GCCBuiltin<"__builtin_ia32_packusdw128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+}
+
+// Vector multiply
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_pmuldq : GCCBuiltin<"__builtin_ia32_pmuldq128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, Commutative]>;
+}
+
+// Vector extract
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_pextrb :
+ Intrinsic<[llvm_i32_ty], [llvm_v16i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pextrd :
+ Intrinsic<[llvm_i32_ty], [llvm_v4i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_pextrq :
+ Intrinsic<[llvm_i64_ty], [llvm_v2i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_extractps : GCCBuiltin<"__builtin_ia32_extractps128">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Vector insert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_insertps : GCCBuiltin<"__builtin_ia32_insertps128">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
+// Vector blend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_pblendvb : GCCBuiltin<"__builtin_ia32_pblendvb128">,
+    Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_blendvpd : GCCBuiltin<"__builtin_ia32_blendvpd">,
+    Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_blendvps : GCCBuiltin<"__builtin_ia32_blendvps">,
+    Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+}
+
+// Vector dot product
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_dppd : GCCBuiltin<"__builtin_ia32_dppd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem, Commutative]>;
+ def int_x86_sse41_dpps : GCCBuiltin<"__builtin_ia32_dpps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem, Commutative]>;
+}
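+
+// For DPPS/DPPD the high nibble of the immediate selects which input lanes
+// enter the dot product and the low nibble selects which result lanes
+// receive the sum. A minimal C sketch (assuming Clang, <smmintrin.h>):
+//   #include <smmintrin.h>
+//   float dot4(__m128 a, __m128 b) {
+//     // 0xF1: multiply all four lanes, write the sum to lane 0 only.
+//     return _mm_cvtss_f32(_mm_dp_ps(a, b, 0xF1));
+//   }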
+
+// Vector sum of absolute differences
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw128">,
+    Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem, Commutative]>;
+}
+
+// Cacheability support ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// Test instruction with bitwise comparison.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse41_ptestz : GCCBuiltin<"__builtin_ia32_ptestz128">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_ptestc : GCCBuiltin<"__builtin_ia32_ptestc128">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_sse41_ptestnzc : GCCBuiltin<"__builtin_ia32_ptestnzc128">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+}
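+
+// PTEST sets ZF when (a & b) == 0 and CF when (~a & b) == 0; ptestz, ptestc,
+// and ptestnzc read back ZF, CF, and !ZF && !CF respectively. A minimal C
+// sketch (assuming Clang, <smmintrin.h>):
+//   #include <smmintrin.h>
+//   int is_all_zero(__m128i v) {
+//     return _mm_testz_si128(v, v);  // llvm.x86.sse41.ptestz
+//   }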
+
+//===----------------------------------------------------------------------===//
+// SSE4.2
+
+// CRC32 instructions.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse42_crc32_32_8 : GCCBuiltin<"__builtin_ia32_crc32qi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_crc32_32_16 : GCCBuiltin<"__builtin_ia32_crc32hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_crc32_32_32 : GCCBuiltin<"__builtin_ia32_crc32si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_crc32_64_64 : GCCBuiltin<"__builtin_ia32_crc32di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+}
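+
+// These accumulate a CRC-32C checksum (Castagnoli polynomial 0x1EDC6F41),
+// not the zlib CRC-32. A minimal C sketch of byte-wise accumulation
+// (assuming Clang and <nmmintrin.h>; helper name is illustrative only):
+//   #include <nmmintrin.h>
+//   #include <stddef.h>
+//   unsigned crc32c(unsigned crc, const unsigned char *p, size_t n) {
+//     while (n--)
+//       crc = _mm_crc32_u8(crc, *p++);  // llvm.x86.sse42.crc32.32.8
+//     return crc;
+//   }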
+
+// String/text processing ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse42_pcmpistrm128 : GCCBuiltin<"__builtin_ia32_pcmpistrm128">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistri128 : GCCBuiltin<"__builtin_ia32_pcmpistri128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistria128 : GCCBuiltin<"__builtin_ia32_pcmpistria128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistric128 : GCCBuiltin<"__builtin_ia32_pcmpistric128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistrio128 : GCCBuiltin<"__builtin_ia32_pcmpistrio128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistris128 : GCCBuiltin<"__builtin_ia32_pcmpistris128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpistriz128 : GCCBuiltin<"__builtin_ia32_pcmpistriz128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestrm128 : GCCBuiltin<"__builtin_ia32_pcmpestrm128">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestri128 : GCCBuiltin<"__builtin_ia32_pcmpestri128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestria128 : GCCBuiltin<"__builtin_ia32_pcmpestria128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestric128 : GCCBuiltin<"__builtin_ia32_pcmpestric128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestrio128 : GCCBuiltin<"__builtin_ia32_pcmpestrio128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestris128 : GCCBuiltin<"__builtin_ia32_pcmpestris128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse42_pcmpestriz128 : GCCBuiltin<"__builtin_ia32_pcmpestriz128">,
+ Intrinsic<[llvm_i32_ty],
+ [llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i8_ty],
+ [IntrNoMem]>;
+}
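+
+// In these names pcmpistri/pcmpestri return an index while the "m" forms
+// return a mask; the variants ending in a/c/o/s/z instead read back a
+// condition derived from the comparison's flags. A minimal C sketch of the
+// implicit-length index form, which stops at a NUL byte (assuming Clang,
+// <nmmintrin.h>; helper name is illustrative only):
+//   #include <nmmintrin.h>
+//   int first_mismatch(__m128i a, __m128i b) {
+//     // Index of the first differing byte, or 16 if the chunks match.
+//     return _mm_cmpistri(a, b, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH |
+//                               _SIDD_NEGATIVE_POLARITY);
+//   }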
+
+//===----------------------------------------------------------------------===//
+// SSE4A
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_sse4a_extrqi : GCCBuiltin<"__builtin_ia32_extrqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sse4a_extrq : GCCBuiltin<"__builtin_ia32_extrq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v16i8_ty], [IntrNoMem]>;
+
+ def int_x86_sse4a_insertqi : GCCBuiltin<"__builtin_ia32_insertqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_sse4a_insertq : GCCBuiltin<"__builtin_ia32_insertq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+
+ def int_x86_sse4a_movnt_ss : GCCBuiltin<"__builtin_ia32_movntss">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty], []>;
+ def int_x86_sse4a_movnt_sd : GCCBuiltin<"__builtin_ia32_movntsd">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// AVX
+
+// Arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_addsub_pd_256 : GCCBuiltin<"__builtin_ia32_addsubpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_addsub_ps_256 : GCCBuiltin<"__builtin_ia32_addsubps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_max_pd_256 : GCCBuiltin<"__builtin_ia32_maxpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_max_ps_256 : GCCBuiltin<"__builtin_ia32_maxps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_min_pd_256 : GCCBuiltin<"__builtin_ia32_minpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_min_ps_256 : GCCBuiltin<"__builtin_ia32_minps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_sqrt_pd_256 : GCCBuiltin<"__builtin_ia32_sqrtpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_sqrt_ps_256 : GCCBuiltin<"__builtin_ia32_sqrtps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_rsqrt_ps_256 : GCCBuiltin<"__builtin_ia32_rsqrtps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_rcp_ps_256 : GCCBuiltin<"__builtin_ia32_rcpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_round_pd_256 : GCCBuiltin<"__builtin_ia32_roundpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx_round_ps_256 : GCCBuiltin<"__builtin_ia32_roundps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Horizontal ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_hadd_pd_256 : GCCBuiltin<"__builtin_ia32_haddpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_hsub_ps_256 : GCCBuiltin<"__builtin_ia32_hsubps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_hsub_pd_256 : GCCBuiltin<"__builtin_ia32_hsubpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_hadd_ps_256 : GCCBuiltin<"__builtin_ia32_haddps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector permutation
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vpermilvar_pd : GCCBuiltin<"__builtin_ia32_vpermilvarpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermilvar_ps : GCCBuiltin<"__builtin_ia32_vpermilvarps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vpermilvar_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_vpermilvar_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx_vperm2f128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vperm2f128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_vperm2f128_si_256 :
+ GCCBuiltin<"__builtin_ia32_vperm2f128_si256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_d_128 :
+ GCCBuiltin<"__builtin_ia32_vpermi2vard128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_d_256 :
+ GCCBuiltin<"__builtin_ia32_vpermi2vard256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_d_512 :
+ GCCBuiltin<"__builtin_ia32_vpermi2vard512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_hi_128 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varhi128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_hi_256 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varhi256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_hi_512 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varhi512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_q_128 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_q_256 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermi2var_q_512 :
+ GCCBuiltin<"__builtin_ia32_vpermi2varq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_d_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2vard512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_q_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_ps_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i32_ty,
+ llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_pd_512:
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8i64_ty,
+ llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_d_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2vard128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_d_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2vard128_maskz">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_d_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2vard256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_d_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2vard256_maskz">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_d_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2vard512_maskz">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_hi_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_hi_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi128_maskz">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_hi_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_hi_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi256_maskz">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_hi_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_hi_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varhi512_maskz">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd128_maskz">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2i64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd256_maskz">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varpd512_maskz">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8i64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varps128_maskz">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varps256_maskz">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varps512_maskz">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16i32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_q_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_q_128 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varq128_maskz">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermt2var_q_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_q_256 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varq256_maskz">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vpermt2var_q_512 :
+ GCCBuiltin<"__builtin_ia32_vpermt2varq512_maskz">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vpermilpd_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vpermilps_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermil_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarpd_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vpermilvar_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vpermilvarps_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pshuf_b_128 :
+ GCCBuiltin<"__builtin_ia32_pshufb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pshuf_b_256 :
+ GCCBuiltin<"__builtin_ia32_pshufb256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pshuf_b_512 :
+ GCCBuiltin<"__builtin_ia32_pshufb512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_f32x4_256 :
+ GCCBuiltin<"__builtin_ia32_shuf_f32x4_256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_f32x4 :
+ GCCBuiltin<"__builtin_ia32_shuf_f32x4_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_f64x2_256 :
+ GCCBuiltin<"__builtin_ia32_shuf_f64x2_256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_f64x2 :
+ GCCBuiltin<"__builtin_ia32_shuf_f64x2_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_i32x4_256 :
+ GCCBuiltin<"__builtin_ia32_shuf_i32x4_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_i32x4 :
+ GCCBuiltin<"__builtin_ia32_shuf_i32x4_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_i64x2_256 :
+ GCCBuiltin<"__builtin_ia32_shuf_i64x2_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_i64x2 :
+ GCCBuiltin<"__builtin_ia32_shuf_i64x2_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_pd_128 :
+ GCCBuiltin<"__builtin_ia32_shufpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_pd_256 :
+ GCCBuiltin<"__builtin_ia32_shufpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_pd_512 :
+ GCCBuiltin<"__builtin_ia32_shufpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_ps_128 :
+ GCCBuiltin<"__builtin_ia32_shufps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_ps_256 :
+ GCCBuiltin<"__builtin_ia32_shufps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_shuf_ps_512 :
+ GCCBuiltin<"__builtin_ia32_shufps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movshdup_128 :
+ GCCBuiltin<"__builtin_ia32_movshdup128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movshdup_256 :
+ GCCBuiltin<"__builtin_ia32_movshdup256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movshdup_512 :
+ GCCBuiltin<"__builtin_ia32_movshdup512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movsldup_128 :
+ GCCBuiltin<"__builtin_ia32_movsldup128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movsldup_256 :
+ GCCBuiltin<"__builtin_ia32_movsldup256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movsldup_512 :
+ GCCBuiltin<"__builtin_ia32_movsldup512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movddup_128 :
+ GCCBuiltin<"__builtin_ia32_movddup128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movddup_256 :
+ GCCBuiltin<"__builtin_ia32_movddup256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_movddup_512 :
+ GCCBuiltin<"__builtin_ia32_movddup512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
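+
+// Naming convention for the AVX-512 entries above: "mask" variants take a
+// passthrough vector whose lanes survive where the write-mask bit is clear,
+// while "maskz" variants zero those lanes. A minimal C sketch (assuming
+// Clang, <immintrin.h>, and AVX-512F; helper names are illustrative only):
+//   #include <immintrin.h>
+//   __m512i permute_blend(__m512i a, __mmask16 k, __m512i idx, __m512i b) {
+//     return _mm512_mask_permutex2var_epi32(a, k, idx, b);   // _mask form
+//   }
+//   __m512i permute_zero(__mmask16 k, __m512i a, __m512i idx, __m512i b) {
+//     return _mm512_maskz_permutex2var_epi32(k, a, idx, b);  // _maskz form
+//   }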
+
+// Vector blend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_blendv_pd_256 : GCCBuiltin<"__builtin_ia32_blendvpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_blendv_ps_256 : GCCBuiltin<"__builtin_ia32_blendvps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector dot product
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_dp_ps_256 : GCCBuiltin<"__builtin_ia32_dpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector compare
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_cmp_pd_256 : GCCBuiltin<"__builtin_ia32_cmppd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx_cmp_ps_256 : GCCBuiltin<"__builtin_ia32_cmpps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector convert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_cvtdq2_pd_256 : GCCBuiltin<"__builtin_ia32_cvtdq2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtdq2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtdq2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_pd2_ps_256 : GCCBuiltin<"__builtin_ia32_cvtpd2ps256">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvtps2dq256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_ps2_pd_256 : GCCBuiltin<"__builtin_ia32_cvtps2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvttpd2dq256">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvt_pd2dq_256 : GCCBuiltin<"__builtin_ia32_cvtpd2dq256">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_cvtt_ps2dq_256 : GCCBuiltin<"__builtin_ia32_cvttps2dq256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector bit test
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vtestz_pd : GCCBuiltin<"__builtin_ia32_vtestzpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_pd : GCCBuiltin<"__builtin_ia32_vtestcpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_pd : GCCBuiltin<"__builtin_ia32_vtestnzcpd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_ps : GCCBuiltin<"__builtin_ia32_vtestzps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_ps : GCCBuiltin<"__builtin_ia32_vtestcps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_ps : GCCBuiltin<"__builtin_ia32_vtestnzcps">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_pd_256 : GCCBuiltin<"__builtin_ia32_vtestzpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestcpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_pd_256 : GCCBuiltin<"__builtin_ia32_vtestnzcpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty,
+ llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestz_ps_256 : GCCBuiltin<"__builtin_ia32_vtestzps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestcps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_vtestnzc_ps_256 : GCCBuiltin<"__builtin_ia32_vtestnzcps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty,
+ llvm_v8f32_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestz_256 : GCCBuiltin<"__builtin_ia32_ptestz256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestc_256 : GCCBuiltin<"__builtin_ia32_ptestc256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx_ptestnzc_256 : GCCBuiltin<"__builtin_ia32_ptestnzc256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ptestm_d_512 : GCCBuiltin<"__builtin_ia32_ptestmd512">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ptestm_q_512 : GCCBuiltin<"__builtin_ia32_ptestmq512">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_pd_128 :
+ GCCBuiltin<"__builtin_ia32_fpclasspd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_pd_256 :
+ GCCBuiltin<"__builtin_ia32_fpclasspd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f64_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_pd_512 :
+ GCCBuiltin<"__builtin_ia32_fpclasspd512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_ps_128 :
+ GCCBuiltin<"__builtin_ia32_fpclassps128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_ps_256 :
+ GCCBuiltin<"__builtin_ia32_fpclassps256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8f32_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_ps_512 :
+ GCCBuiltin<"__builtin_ia32_fpclassps512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_sd :
+ GCCBuiltin<"__builtin_ia32_fpclasssd">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_fpclass_ss :
+ GCCBuiltin<"__builtin_ia32_fpclassss">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
+// Vector extract sign mask
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_movmsk_pd_256 : GCCBuiltin<"__builtin_ia32_movmskpd256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_avx_movmsk_ps_256 : GCCBuiltin<"__builtin_ia32_movmskps256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+}
+
+// Vector zero
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vzeroall : GCCBuiltin<"__builtin_ia32_vzeroall">,
+ Intrinsic<[], [], []>;
+ def int_x86_avx_vzeroupper : GCCBuiltin<"__builtin_ia32_vzeroupper">,
+ Intrinsic<[], [], []>;
+}
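+
+// VZEROUPPER clears the upper 128 bits of every YMM register to avoid the
+// AVX-to-SSE transition penalty; compilers normally emit it automatically at
+// AVX/SSE boundaries. A minimal C sketch (assuming Clang, <immintrin.h>):
+//   #include <immintrin.h>
+//   void done_with_avx(void) {
+//     _mm256_zeroupper();  // llvm.x86.avx.vzeroupper
+//   }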
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_vbroadcastf128_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_x86_avx_vbroadcastf128_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+}
+
+// SIMD load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_ldu_dq_256 : GCCBuiltin<"__builtin_ia32_lddqu256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+// SIMD store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_storeu_pd_256 : GCCBuiltin<"__builtin_ia32_storeupd256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx_storeu_ps_256 : GCCBuiltin<"__builtin_ia32_storeups256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx_storeu_dq_256 : GCCBuiltin<"__builtin_ia32_storedqu256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], [IntrReadWriteArgMem]>;
+}
+
+// Conditional load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_ps_512 : GCCBuiltin<"__builtin_ia32_loadups512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_pd_512 : GCCBuiltin<"__builtin_ia32_loadupd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_load_ps_512 : GCCBuiltin<"__builtin_ia32_loadaps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_load_pd_512 : GCCBuiltin<"__builtin_ia32_loadapd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_mask_move_ss : GCCBuiltin<"__builtin_ia32_movss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_move_sd : GCCBuiltin<"__builtin_ia32_movsd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
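+
+// Operand sketch, read off the signatures above: the AVX forms take
+// (pointer, mask) and load only the lanes whose mask element has its sign
+// bit set; the AVX-512 "mask_" forms take (pointer, passthru, kmask) and
+// fill masked-off lanes from the passthru operand. For example:
+//   declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
+//   %v = call <2 x double> @llvm.x86.avx.maskload.pd(i8* %p, <2 x i64> %m)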
+
+// Conditional store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v2i64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4i32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx_maskstore_pd_256 :
+ GCCBuiltin<"__builtin_ia32_maskstorepd256">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v4i64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx_maskstore_ps_256 :
+ GCCBuiltin<"__builtin_ia32_maskstoreps256">,
+ Intrinsic<[], [llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_ps_512 :
+ GCCBuiltin<"__builtin_ia32_storeups512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_pd_512 :
+ GCCBuiltin<"__builtin_ia32_storeupd512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_store_ps_512 :
+ GCCBuiltin<"__builtin_ia32_storeaps512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_store_pd_512 :
+ GCCBuiltin<"__builtin_ia32_storeapd512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_store_ss :
+ GCCBuiltin<"__builtin_ia32_storess_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+}
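+
+// Note the operand order: the AVX maskstore forms above are
+// (pointer, mask, data), whereas the AVX-512 "mask_" store forms are
+// (pointer, data, kmask). All of them carry IntrReadWriteArgMem, so only
+// memory reachable through the pointer argument is considered touched.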
+
+//===----------------------------------------------------------------------===//
+// AVX2
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmul_dq : GCCBuiltin<"__builtin_ia32_pmuldq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+}
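+
+// Illustrative IR for the saturating forms above (same name mangling as
+// before):
+//   declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>)
+//   %r = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %a, <32 x i8> %b)
+// The Commutative property licenses the optimizer to swap the two vector
+// operands, e.g. when canonicalizing operand order for CSE.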
+
+// Vector min, max
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxu_w : GCCBuiltin<"__builtin_ia32_pmaxuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxu_d : GCCBuiltin<"__builtin_ia32_pmaxud256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_b : GCCBuiltin<"__builtin_ia32_pmaxsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmaxs_d : GCCBuiltin<"__builtin_ia32_pmaxsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_b : GCCBuiltin<"__builtin_ia32_pminub256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_w : GCCBuiltin<"__builtin_ia32_pminuw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pminu_d : GCCBuiltin<"__builtin_ia32_pminud256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_b : GCCBuiltin<"__builtin_ia32_pminsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_pmins_d : GCCBuiltin<"__builtin_ia32_pminsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx512_mask_pmaxs_b_128 : GCCBuiltin<"__builtin_ia32_pmaxsb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_b_256 : GCCBuiltin<"__builtin_ia32_pmaxsb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_b_512 : GCCBuiltin<"__builtin_ia32_pmaxsb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_b_128 : GCCBuiltin<"__builtin_ia32_pmaxub128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_b_256 : GCCBuiltin<"__builtin_ia32_pmaxub256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_b_512 : GCCBuiltin<"__builtin_ia32_pmaxub512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_w_128 : GCCBuiltin<"__builtin_ia32_pmaxsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_w_256 : GCCBuiltin<"__builtin_ia32_pmaxsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_w_512 : GCCBuiltin<"__builtin_ia32_pmaxsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_w_128 : GCCBuiltin<"__builtin_ia32_pmaxuw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_w_256 : GCCBuiltin<"__builtin_ia32_pmaxuw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_w_512 : GCCBuiltin<"__builtin_ia32_pmaxuw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_b_128 : GCCBuiltin<"__builtin_ia32_pminsb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_b_256 : GCCBuiltin<"__builtin_ia32_pminsb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_b_512 : GCCBuiltin<"__builtin_ia32_pminsb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_b_128 : GCCBuiltin<"__builtin_ia32_pminub128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_b_256 : GCCBuiltin<"__builtin_ia32_pminub256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_b_512 : GCCBuiltin<"__builtin_ia32_pminub512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_w_128 : GCCBuiltin<"__builtin_ia32_pminsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_w_256 : GCCBuiltin<"__builtin_ia32_pminsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_w_512 : GCCBuiltin<"__builtin_ia32_pminsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_w_128 : GCCBuiltin<"__builtin_ia32_pminuw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_w_256 : GCCBuiltin<"__builtin_ia32_pminuw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_w_512 : GCCBuiltin<"__builtin_ia32_pminuw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_d_512 : GCCBuiltin<"__builtin_ia32_pmaxud512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_d_256 : GCCBuiltin<"__builtin_ia32_pmaxud256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_d_128 : GCCBuiltin<"__builtin_ia32_pmaxud128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_d_512 : GCCBuiltin<"__builtin_ia32_pmaxsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_d_256 : GCCBuiltin<"__builtin_ia32_pmaxsd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_d_128 : GCCBuiltin<"__builtin_ia32_pmaxsd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_q_512 : GCCBuiltin<"__builtin_ia32_pmaxuq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_q_256 : GCCBuiltin<"__builtin_ia32_pmaxuq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxu_q_128 : GCCBuiltin<"__builtin_ia32_pmaxuq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_q_512 : GCCBuiltin<"__builtin_ia32_pmaxsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_q_256 : GCCBuiltin<"__builtin_ia32_pmaxsq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaxs_q_128 : GCCBuiltin<"__builtin_ia32_pmaxsq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_d_512 : GCCBuiltin<"__builtin_ia32_pminud512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_d_256 : GCCBuiltin<"__builtin_ia32_pminud256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_d_128 : GCCBuiltin<"__builtin_ia32_pminud128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_d_512 : GCCBuiltin<"__builtin_ia32_pminsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_d_256 : GCCBuiltin<"__builtin_ia32_pminsd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_d_128 : GCCBuiltin<"__builtin_ia32_pminsd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_q_512 : GCCBuiltin<"__builtin_ia32_pminuq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_q_256 : GCCBuiltin<"__builtin_ia32_pminuq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pminu_q_128 : GCCBuiltin<"__builtin_ia32_pminuq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_q_512 : GCCBuiltin<"__builtin_ia32_pminsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_q_256 : GCCBuiltin<"__builtin_ia32_pminsq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmins_q_128 : GCCBuiltin<"__builtin_ia32_pminsq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
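+
+// The AVX-512 "mask_" defs above all follow one shape: (a, b, passthru,
+// kmask), where the scalar integer mask carries one bit per vector lane
+// (i16 for 16 lanes, i32 for 32, i64 for 64). Lanes whose mask bit is
+// clear take the passthru element instead of min/max(a, b).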
+
+// Integer shift ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psll_w : GCCBuiltin<"__builtin_ia32_psllw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psll_d : GCCBuiltin<"__builtin_ia32_pslld256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psll_q : GCCBuiltin<"__builtin_ia32_psllq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_d : GCCBuiltin<"__builtin_ia32_psrld256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx2_psra_w : GCCBuiltin<"__builtin_ia32_psraw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psra_d : GCCBuiltin<"__builtin_ia32_psrad256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v4i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx2_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_psrai_d : GCCBuiltin<"__builtin_ia32_psradi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrai_d : GCCBuiltin<"__builtin_ia32_psradi512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrai_q : GCCBuiltin<"__builtin_ia32_psraqi512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psrl_w_128 : GCCBuiltin<"__builtin_ia32_psrlw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_w_256 : GCCBuiltin<"__builtin_ia32_psrlw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_w_512 : GCCBuiltin<"__builtin_ia32_psrlw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_wi_128 : GCCBuiltin<"__builtin_ia32_psrlwi128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
+ llvm_i8_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_wi_256 : GCCBuiltin<"__builtin_ia32_psrlwi256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_i8_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_wi_512 : GCCBuiltin<"__builtin_ia32_psrlwi512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty,
+ llvm_i8_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psll_d : GCCBuiltin<"__builtin_ia32_pslld512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psll_q : GCCBuiltin<"__builtin_ia32_psllq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_d : GCCBuiltin<"__builtin_ia32_psrld512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_d : GCCBuiltin<"__builtin_ia32_psrad512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psra_q : GCCBuiltin<"__builtin_ia32_psraq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
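+
+// Two count conventions appear above: the psll/psrl/psra forms take the
+// shift count from the low 64 bits of a full 128-bit vector operand, while
+// the "*i" immediate forms take an i32 count. The masked 512-bit variants
+// append the usual (passthru, kmask) pair to either shape.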
+
+// Pack ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_packsswb : GCCBuiltin<"__builtin_ia32_packsswb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_packssdw : GCCBuiltin<"__builtin_ia32_packssdw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_packuswb : GCCBuiltin<"__builtin_ia32_packuswb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_packusdw : GCCBuiltin<"__builtin_ia32_packusdw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+}
+
+// Absolute value ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pabs_b : GCCBuiltin<"__builtin_ia32_pabsb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pabs_w : GCCBuiltin<"__builtin_ia32_pabsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pabs_d : GCCBuiltin<"__builtin_ia32_pabsd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_b_128 :
+ GCCBuiltin<"__builtin_ia32_pabsb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_b_256 :
+ GCCBuiltin<"__builtin_ia32_pabsb256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_b_512 :
+ GCCBuiltin<"__builtin_ia32_pabsb512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_d_128 :
+ GCCBuiltin<"__builtin_ia32_pabsd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_d_256 :
+ GCCBuiltin<"__builtin_ia32_pabsd256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_d_512 :
+ GCCBuiltin<"__builtin_ia32_pabsd512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_q_128 :
+ GCCBuiltin<"__builtin_ia32_pabsq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_q_256 :
+ GCCBuiltin<"__builtin_ia32_pabsq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_q_512 :
+ GCCBuiltin<"__builtin_ia32_pabsq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_w_128 :
+ GCCBuiltin<"__builtin_ia32_pabsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_w_256 :
+ GCCBuiltin<"__builtin_ia32_pabsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pabs_w_512 :
+ GCCBuiltin<"__builtin_ia32_pabsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Horizontal arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_phadd_w : GCCBuiltin<"__builtin_ia32_phaddw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phadd_d : GCCBuiltin<"__builtin_ia32_phaddd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_phadd_sw : GCCBuiltin<"__builtin_ia32_phaddsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_w : GCCBuiltin<"__builtin_ia32_phsubw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_d : GCCBuiltin<"__builtin_ia32_phsubd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx2_phsub_sw : GCCBuiltin<"__builtin_ia32_phsubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_pmadd_ub_sw : GCCBuiltin<"__builtin_ia32_pmaddubsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+}
+
+// Sign ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psign_b : GCCBuiltin<"__builtin_ia32_psignb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_psign_w : GCCBuiltin<"__builtin_ia32_psignw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx2_psign_d : GCCBuiltin<"__builtin_ia32_psignd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
+ llvm_v8i32_ty], [IntrNoMem]>;
+}
+
+// Packed multiply high with round and scale
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmul_hr_sw : GCCBuiltin<"__builtin_ia32_pmulhrsw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
+ llvm_v16i16_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx512_mask_pmul_hr_sw_128 : GCCBuiltin<"__builtin_ia32_pmulhrsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_hr_sw_256 : GCCBuiltin<"__builtin_ia32_pmulhrsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_hr_sw_512 : GCCBuiltin<"__builtin_ia32_pmulhrsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Vector sign and zero extend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmovsxbd : GCCBuiltin<"__builtin_ia32_pmovsxbd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxbq : GCCBuiltin<"__builtin_ia32_pmovsxbq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxbw : GCCBuiltin<"__builtin_ia32_pmovsxbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxdq : GCCBuiltin<"__builtin_ia32_pmovsxdq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxwd : GCCBuiltin<"__builtin_ia32_pmovsxwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovsxwq : GCCBuiltin<"__builtin_ia32_pmovsxwq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxbw : GCCBuiltin<"__builtin_ia32_pmovzxbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
+
+// Vector blend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pblendvb : GCCBuiltin<"__builtin_ia32_pblendvb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+}
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_pbroadcast_d_gpr_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd512_gpr_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pbroadcast_q_gpr_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq512_gpr_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pbroadcast_q_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq512_mem_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector permutation
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_permd : GCCBuiltin<"__builtin_ia32_permvarsi256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_permps : GCCBuiltin<"__builtin_ia32_permvarsf256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_vperm2i128 : GCCBuiltin<"__builtin_ia32_permti256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Vector extract and insert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_vextractf32x4_512 :
+ GCCBuiltin<"__builtin_ia32_extractf32x4_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v16f32_ty, llvm_i32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti32x4_512 :
+ GCCBuiltin<"__builtin_ia32_extracti32x4_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i32_ty, llvm_i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextractf32x4_256 :
+ GCCBuiltin<"__builtin_ia32_extractf32x4_256_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti32x4_256 :
+ GCCBuiltin<"__builtin_ia32_extracti32x4_256_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i32_ty, llvm_i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextractf64x2_256 :
+ GCCBuiltin<"__builtin_ia32_extractf64x2_256_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti64x2_256 :
+ GCCBuiltin<"__builtin_ia32_extracti64x2_256_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i64_ty, llvm_i32_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextractf64x2_512 :
+ GCCBuiltin<"__builtin_ia32_extractf64x2_512_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v8f64_ty, llvm_i32_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti64x2_512 :
+ GCCBuiltin<"__builtin_ia32_extracti64x2_512_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i64_ty, llvm_i32_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextractf32x8_512 :
+ GCCBuiltin<"__builtin_ia32_extractf32x8_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v16f32_ty, llvm_i32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti32x8_512 :
+ GCCBuiltin<"__builtin_ia32_extracti32x8_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v16i32_ty, llvm_i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextractf64x4_512 :
+ GCCBuiltin<"__builtin_ia32_extractf64x4_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v8f64_ty, llvm_i32_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vextracti64x4_512 :
+ GCCBuiltin<"__builtin_ia32_extracti64x4_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i64_ty, llvm_i32_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf32x4_256 :
+ GCCBuiltin<"__builtin_ia32_insertf32x4_256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf32x4_512 :
+ GCCBuiltin<"__builtin_ia32_insertf32x4_512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf32x8_512 :
+ GCCBuiltin<"__builtin_ia32_insertf32x8_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v8f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf64x2_256 :
+ GCCBuiltin<"__builtin_ia32_insertf64x2_256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf64x2_512 :
+ GCCBuiltin<"__builtin_ia32_insertf64x2_512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_insertf64x4_512 :
+ GCCBuiltin<"__builtin_ia32_insertf64x4_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v4f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti32x4_256 :
+ GCCBuiltin<"__builtin_ia32_inserti32x4_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti32x4_512 :
+ GCCBuiltin<"__builtin_ia32_inserti32x4_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v16i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti32x8_512 :
+ GCCBuiltin<"__builtin_ia32_inserti32x8_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti64x2_256 :
+ GCCBuiltin<"__builtin_ia32_inserti64x2_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti64x2_512 :
+ GCCBuiltin<"__builtin_ia32_inserti64x2_512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_inserti64x4_512 :
+ GCCBuiltin<"__builtin_ia32_inserti64x4_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
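+
+// In the extract/insert defs above, the llvm_i32_ty operand is the
+// lane-group index (an immediate in the instruction encoding), e.g.
+//   declare <4 x float> @llvm.x86.avx512.mask.vextractf32x4.512(
+//       <16 x float>, i32, <4 x float>, i8)
+// selects one of the four 128-bit groups of the 512-bit source before
+// applying the usual (passthru, kmask) blending.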
+
+// Conditional load ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_d_512 : GCCBuiltin<"__builtin_ia32_loaddqusi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_mask_loadu_q_512 : GCCBuiltin<"__builtin_ia32_loaddqudi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+}
+
+// Conditional store ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx2_maskstore_d_256 :
+ GCCBuiltin<"__builtin_ia32_maskstored256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx2_maskstore_q_256 :
+ GCCBuiltin<"__builtin_ia32_maskstoreq256">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_d_512 :
+ GCCBuiltin<"__builtin_ia32_storedqusi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_storeu_q_512 :
+ GCCBuiltin<"__builtin_ia32_storedqudi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+}
+
+// Variable bit shift ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_psllv_d : GCCBuiltin<"__builtin_ia32_psllv4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_d_256 : GCCBuiltin<"__builtin_ia32_psllv8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_q : GCCBuiltin<"__builtin_ia32_psllv2di">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psllv_q_256 : GCCBuiltin<"__builtin_ia32_psllv4di">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx2_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_d_256 : GCCBuiltin<"__builtin_ia32_psrlv8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_q : GCCBuiltin<"__builtin_ia32_psrlv2di">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrlv_q_256 : GCCBuiltin<"__builtin_ia32_psrlv4di">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx2_psrav_d : GCCBuiltin<"__builtin_ia32_psrav4si">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx2_psrav_d_256 : GCCBuiltin<"__builtin_ia32_psrav8si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_psllv_d : GCCBuiltin<"__builtin_ia32_psllv16si_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_psllv_q : GCCBuiltin<"__builtin_ia32_psllv8di_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_psrav_d : GCCBuiltin<"__builtin_ia32_psrav16si_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_psrav_q : GCCBuiltin<"__builtin_ia32_psrav8di_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv_d : GCCBuiltin<"__builtin_ia32_psrlv16si_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_psrlv_q : GCCBuiltin<"__builtin_ia32_psrlv8di_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psll_dq_512 : GCCBuiltin<"__builtin_ia32_pslldq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psrl_dq_512 : GCCBuiltin<"__builtin_ia32_psrldq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Gather ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_gather_d_pd : GCCBuiltin<"__builtin_ia32_gatherd_pd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_pd_256 : GCCBuiltin<"__builtin_ia32_gatherd_pd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_pd : GCCBuiltin<"__builtin_ia32_gatherq_pd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_pd_256 : GCCBuiltin<"__builtin_ia32_gatherq_pd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_ps : GCCBuiltin<"__builtin_ia32_gatherd_ps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_ps_256 : GCCBuiltin<"__builtin_ia32_gatherd_ps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_ps : GCCBuiltin<"__builtin_ia32_gatherq_ps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_ps_256 : GCCBuiltin<"__builtin_ia32_gatherq_ps256">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx2_gather_d_q : GCCBuiltin<"__builtin_ia32_gatherd_q">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_q_256 : GCCBuiltin<"__builtin_ia32_gatherd_q256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_q : GCCBuiltin<"__builtin_ia32_gatherq_q">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_q_256 : GCCBuiltin<"__builtin_ia32_gatherq_q256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_d : GCCBuiltin<"__builtin_ia32_gatherd_d">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_d_d_256 : GCCBuiltin<"__builtin_ia32_gatherd_d256">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_d : GCCBuiltin<"__builtin_ia32_gatherq_d">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx2_gather_q_d_256 : GCCBuiltin<"__builtin_ia32_gatherq_d256">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadArgMem]>;
+}
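+
+// Gather operand sketch, taken directly from the signatures above:
+// (passthru, base pointer, index vector, mask, scale), where scale is an
+// i8 immediate restricted to 1, 2, 4 or 8. For example:
+//   declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*,
+//       <4 x i32>, <2 x double>, i8)
+// Lanes whose mask element has a clear sign bit are not loaded at all and
+// keep the passthru value, which is why these are only IntrReadArgMem.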
+
+// Misc.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx2_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_pshuf_b : GCCBuiltin<"__builtin_ia32_pshufb256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
+ llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx2_mpsadbw : GCCBuiltin<"__builtin_ia32_mpsadbw256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_i8_ty], [IntrNoMem, Commutative]>;
+ def int_x86_avx2_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// FMA3 and FMA4
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_fma_vfmadd_ss : GCCBuiltin<"__builtin_ia32_vfmaddss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmadd_sd : GCCBuiltin<"__builtin_ia32_vfmaddsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmadd_ps : GCCBuiltin<"__builtin_ia32_vfmaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmadd_pd : GCCBuiltin<"__builtin_ia32_vfmaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfmaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfmaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_fma_vfmsub_ss : GCCBuiltin<"__builtin_ia32_vfmsubss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsub_sd : GCCBuiltin<"__builtin_ia32_vfmsubsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsub_ps : GCCBuiltin<"__builtin_ia32_vfmsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsub_pd : GCCBuiltin<"__builtin_ia32_vfmsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfmsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfmsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_ss : GCCBuiltin<"__builtin_ia32_vfnmaddss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_sd : GCCBuiltin<"__builtin_ia32_vfnmaddsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_ps : GCCBuiltin<"__builtin_ia32_vfnmaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_pd : GCCBuiltin<"__builtin_ia32_vfnmaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmadd_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_ss : GCCBuiltin<"__builtin_ia32_vfnmsubss">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_sd : GCCBuiltin<"__builtin_ia32_vfnmsubsd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_ps : GCCBuiltin<"__builtin_ia32_vfnmsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_pd : GCCBuiltin<"__builtin_ia32_vfnmsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_ps_256 : GCCBuiltin<"__builtin_ia32_vfnmsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfnmsub_pd_256 : GCCBuiltin<"__builtin_ia32_vfnmsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_ps : GCCBuiltin<"__builtin_ia32_vfmaddsubps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_pd : GCCBuiltin<"__builtin_ia32_vfmaddsubpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsubadd_ps : GCCBuiltin<"__builtin_ia32_vfmsubaddps">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsubadd_pd : GCCBuiltin<"__builtin_ia32_vfmsubaddpd">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsubadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddps256">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty],
+ [IntrNoMem]>;
+ def int_x86_fma_vfmsubadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddpd256">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmadd_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd128_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd128_maskz">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
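+// Naming sketch for the three masked FMA flavors defined here: "mask"
+// blends masked-off lanes from the first source operand, "mask3" blends
+// them from the third, and "maskz" zeroes them. The 512-bit variants below
+// additionally carry a trailing llvm_i32_ty rounding-mode operand.
+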
+ def int_x86_avx512_mask_vfmadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd256_mask3">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd256_maskz">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmadd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd512_mask3">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddpd512_maskz">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmadd_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps128_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps128_maskz">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps256_mask3">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps256_maskz">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmadd_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmadd_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps512_mask3">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmadd_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddps512_maskz">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd128_maskz">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_mask3">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd256_maskz">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_mask3">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubpd512_maskz">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps128_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps128_maskz">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256_mask3">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps256_maskz">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfmaddsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmaddsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps512_mask3">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_vfmaddsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmaddsubps512_maskz">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmsubpd128_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubpd256_mask3">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmsubpd512_mask3">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmsubps128_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubps256_mask3">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmsubps512_mask3">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddpd128_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddpd256_mask3">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddpd512_mask3">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddps128_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddps256_mask3">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfmsubadd_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfmsubaddps512_mask3">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmadd_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmaddps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_pd_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd128_mask3">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_pd_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd256_mask3">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_pd_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubpd512_mask3">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_ps_128 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps128_mask3">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_ps_256 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps256_mask3">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_vfnmsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask3_vfnmsub_ps_512 :
+ GCCBuiltin<"__builtin_ia32_vfnmsubps512_mask3">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+}
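+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// the mask/mask3/maskz variants above differ in where masked-off lanes come
+// from: mask blends with the first source operand, mask3 with the third
+// (accumulator) operand, and maskz zeroes them. The trailing llvm_i32_ty on
+// the 512-bit forms is the rounding-mode immediate.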
+
+//===----------------------------------------------------------------------===//
+// XOP
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_xop_vpermil2pd : GCCBuiltin<"__builtin_ia32_vpermil2pd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpermil2pd_256 :
+ GCCBuiltin<"__builtin_ia32_vpermil2pd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpermil2ps : GCCBuiltin<"__builtin_ia32_vpermil2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+  def int_x86_xop_vpermil2ps_256 :
+ GCCBuiltin<"__builtin_ia32_vpermil2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vfrcz_pd : GCCBuiltin<"__builtin_ia32_vfrczpd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ps : GCCBuiltin<"__builtin_ia32_vfrczps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_sd : GCCBuiltin<"__builtin_ia32_vfrczsd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ss : GCCBuiltin<"__builtin_ia32_vfrczss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_pd_256 : GCCBuiltin<"__builtin_ia32_vfrczpd256">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty], [IntrNoMem]>;
+ def int_x86_xop_vfrcz_ps_256 : GCCBuiltin<"__builtin_ia32_vfrczps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty], [IntrNoMem]>;
+
+ def int_x86_xop_vpcmov :
+ GCCBuiltin<"__builtin_ia32_vpcmov">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpcmov_256 :
+ GCCBuiltin<"__builtin_ia32_vpcmov_256">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpcomb : GCCBuiltin<"__builtin_ia32_vpcomb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomw : GCCBuiltin<"__builtin_ia32_vpcomw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomd : GCCBuiltin<"__builtin_ia32_vpcomd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomq : GCCBuiltin<"__builtin_ia32_vpcomq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomub : GCCBuiltin<"__builtin_ia32_vpcomub">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomuw : GCCBuiltin<"__builtin_ia32_vpcomuw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomud : GCCBuiltin<"__builtin_ia32_vpcomud">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vpcomuq : GCCBuiltin<"__builtin_ia32_vpcomuq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
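+// Illustrative note (an editorial sketch, not part of the imported source):
+// the trailing llvm_i8_ty operand of the vpcom* defs above selects the
+// comparison predicate; per AMD's XOP VPCOM encoding, 0=LT, 1=LE, 2=GT,
+// 3=GE, 4=EQ, 5=NEQ, 6=FALSE, 7=TRUE. A hedged LLVM IR sketch:
+//   %lt = call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %a, <16 x i8> %b, i8 0)
+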
+ def int_x86_xop_vphaddbd :
+ GCCBuiltin<"__builtin_ia32_vphaddbd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddbq :
+ GCCBuiltin<"__builtin_ia32_vphaddbq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddbw :
+ GCCBuiltin<"__builtin_ia32_vphaddbw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadddq :
+ GCCBuiltin<"__builtin_ia32_vphadddq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubd :
+ GCCBuiltin<"__builtin_ia32_vphaddubd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubq :
+ GCCBuiltin<"__builtin_ia32_vphaddubq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddubw :
+ GCCBuiltin<"__builtin_ia32_vphaddubw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddudq :
+ GCCBuiltin<"__builtin_ia32_vphaddudq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadduwd :
+ GCCBuiltin<"__builtin_ia32_vphadduwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphadduwq :
+ GCCBuiltin<"__builtin_ia32_vphadduwq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddwd :
+ GCCBuiltin<"__builtin_ia32_vphaddwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphaddwq :
+ GCCBuiltin<"__builtin_ia32_vphaddwq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubbw :
+ GCCBuiltin<"__builtin_ia32_vphsubbw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubdq :
+ GCCBuiltin<"__builtin_ia32_vphsubdq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_xop_vphsubwd :
+ GCCBuiltin<"__builtin_ia32_vphsubwd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_xop_vpmacsdd :
+ GCCBuiltin<"__builtin_ia32_vpmacsdd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsdqh :
+ GCCBuiltin<"__builtin_ia32_vpmacsdqh">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsdql :
+ GCCBuiltin<"__builtin_ia32_vpmacsdql">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdd :
+ GCCBuiltin<"__builtin_ia32_vpmacssdd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdqh :
+ GCCBuiltin<"__builtin_ia32_vpmacssdqh">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssdql :
+ GCCBuiltin<"__builtin_ia32_vpmacssdql">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsswd :
+ GCCBuiltin<"__builtin_ia32_vpmacsswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacssww :
+ GCCBuiltin<"__builtin_ia32_vpmacssww">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacswd :
+ GCCBuiltin<"__builtin_ia32_vpmacswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmacsww :
+ GCCBuiltin<"__builtin_ia32_vpmacsww">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmadcsswd :
+ GCCBuiltin<"__builtin_ia32_vpmadcsswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpmadcswd :
+ GCCBuiltin<"__builtin_ia32_vpmadcswd">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpperm :
+ GCCBuiltin<"__builtin_ia32_vpperm">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vprotb : GCCBuiltin<"__builtin_ia32_vprotb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotd : GCCBuiltin<"__builtin_ia32_vprotd">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotq : GCCBuiltin<"__builtin_ia32_vprotq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotw : GCCBuiltin<"__builtin_ia32_vprotw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotbi : GCCBuiltin<"__builtin_ia32_vprotbi">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotdi : GCCBuiltin<"__builtin_ia32_vprotdi">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotqi : GCCBuiltin<"__builtin_ia32_vprotqi">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vprotwi : GCCBuiltin<"__builtin_ia32_vprotwi">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_xop_vpshab :
+ GCCBuiltin<"__builtin_ia32_vpshab">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshad :
+ GCCBuiltin<"__builtin_ia32_vpshad">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshaq :
+ GCCBuiltin<"__builtin_ia32_vpshaq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshaw :
+ GCCBuiltin<"__builtin_ia32_vpshaw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlb :
+ GCCBuiltin<"__builtin_ia32_vpshlb">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshld :
+ GCCBuiltin<"__builtin_ia32_vpshld">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlq :
+ GCCBuiltin<"__builtin_ia32_vpshlq">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+ def int_x86_xop_vpshlw :
+ GCCBuiltin<"__builtin_ia32_vpshlw">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
+ [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// MMX
+
+// Empty MMX state op.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_emms : GCCBuiltin<"__builtin_ia32_emms">,
+ Intrinsic<[], [], []>;
+ def int_x86_mmx_femms : GCCBuiltin<"__builtin_ia32_femms">,
+ Intrinsic<[], [], []>;
+}
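+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// EMMS/FEMMS clear the MMX state so later x87 code sees an empty register
+// stack; a hedged LLVM IR use site:
+//   call void @llvm.x86.mmx.emms()   ; after the last MMX op, before x87 FP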
+
+// Integer arithmetic ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ // Addition
+ def int_x86_mmx_padd_b : GCCBuiltin<"__builtin_ia32_paddb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_w : GCCBuiltin<"__builtin_ia32_paddw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_d : GCCBuiltin<"__builtin_ia32_paddd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_padd_q : GCCBuiltin<"__builtin_ia32_paddq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+
+ def int_x86_mmx_padds_b : GCCBuiltin<"__builtin_ia32_paddsb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_padds_w : GCCBuiltin<"__builtin_ia32_paddsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ def int_x86_mmx_paddus_b : GCCBuiltin<"__builtin_ia32_paddusb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_paddus_w : GCCBuiltin<"__builtin_ia32_paddusw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ // Subtraction
+ def int_x86_mmx_psub_b : GCCBuiltin<"__builtin_ia32_psubb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_w : GCCBuiltin<"__builtin_ia32_psubw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_d : GCCBuiltin<"__builtin_ia32_psubd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_psub_q : GCCBuiltin<"__builtin_ia32_psubq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+
+ def int_x86_mmx_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_psubus_b : GCCBuiltin<"__builtin_ia32_psubusb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psubus_w : GCCBuiltin<"__builtin_ia32_psubusw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+
+ // Multiplication
+ def int_x86_mmx_pmulh_w : GCCBuiltin<"__builtin_ia32_pmulhw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmull_w : GCCBuiltin<"__builtin_ia32_pmullw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmulu_dq : GCCBuiltin<"__builtin_ia32_pmuludq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmadd_wd : GCCBuiltin<"__builtin_ia32_pmaddwd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ // Bitwise operations
+ def int_x86_mmx_pand : GCCBuiltin<"__builtin_ia32_pand">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_pandn : GCCBuiltin<"__builtin_ia32_pandn">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_por : GCCBuiltin<"__builtin_ia32_por">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_pxor : GCCBuiltin<"__builtin_ia32_pxor">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+
+ // Averages
+ def int_x86_mmx_pavg_b : GCCBuiltin<"__builtin_ia32_pavgb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pavg_w : GCCBuiltin<"__builtin_ia32_pavgw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ // Maximum
+ def int_x86_mmx_pmaxu_b : GCCBuiltin<"__builtin_ia32_pmaxub">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmaxs_w : GCCBuiltin<"__builtin_ia32_pmaxsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ // Minimum
+ def int_x86_mmx_pminu_b : GCCBuiltin<"__builtin_ia32_pminub">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pmins_w : GCCBuiltin<"__builtin_ia32_pminsw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ // Packed sum of absolute differences
+ def int_x86_mmx_psad_bw : GCCBuiltin<"__builtin_ia32_psadbw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+}
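+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// the Commutative property marks an intrinsic as commutative in its first
+// two operands, letting instruction selection swap them; it is deliberately
+// absent from the non-commutative psub* defs above.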
+
+// Integer shift ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ // Shift left logical
+ def int_x86_mmx_psll_w : GCCBuiltin<"__builtin_ia32_psllw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psll_d : GCCBuiltin<"__builtin_ia32_pslld">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psll_q : GCCBuiltin<"__builtin_ia32_psllq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_psrl_w : GCCBuiltin<"__builtin_ia32_psrlw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psrl_d : GCCBuiltin<"__builtin_ia32_psrld">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psrl_q : GCCBuiltin<"__builtin_ia32_psrlq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_psra_w : GCCBuiltin<"__builtin_ia32_psraw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_psra_d : GCCBuiltin<"__builtin_ia32_psrad">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_pslli_w : GCCBuiltin<"__builtin_ia32_psllwi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_pslli_d : GCCBuiltin<"__builtin_ia32_pslldi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_pslli_q : GCCBuiltin<"__builtin_ia32_psllqi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_psrli_w : GCCBuiltin<"__builtin_ia32_psrlwi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_psrli_d : GCCBuiltin<"__builtin_ia32_psrldi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_psrli_q : GCCBuiltin<"__builtin_ia32_psrlqi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_psrai_w : GCCBuiltin<"__builtin_ia32_psrawi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_mmx_psrai_d : GCCBuiltin<"__builtin_ia32_psradi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Pack ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_packsswb : GCCBuiltin<"__builtin_ia32_packsswb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_packssdw : GCCBuiltin<"__builtin_ia32_packssdw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_packuswb : GCCBuiltin<"__builtin_ia32_packuswb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// Unpacking ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_punpckhbw : GCCBuiltin<"__builtin_ia32_punpckhbw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckhwd : GCCBuiltin<"__builtin_ia32_punpckhwd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckhdq : GCCBuiltin<"__builtin_ia32_punpckhdq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpcklbw : GCCBuiltin<"__builtin_ia32_punpcklbw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpcklwd : GCCBuiltin<"__builtin_ia32_punpcklwd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+ def int_x86_mmx_punpckldq : GCCBuiltin<"__builtin_ia32_punpckldq">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty, llvm_x86mmx_ty],
+ [IntrNoMem]>;
+}
+
+// Integer comparison ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_pcmpeq_b : GCCBuiltin<"__builtin_ia32_pcmpeqb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pcmpeq_w : GCCBuiltin<"__builtin_ia32_pcmpeqw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+ def int_x86_mmx_pcmpeq_d : GCCBuiltin<"__builtin_ia32_pcmpeqd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem, Commutative]>;
+
+ def int_x86_mmx_pcmpgt_b : GCCBuiltin<"__builtin_ia32_pcmpgtb">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_pcmpgt_w : GCCBuiltin<"__builtin_ia32_pcmpgtw">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+ def int_x86_mmx_pcmpgt_d : GCCBuiltin<"__builtin_ia32_pcmpgtd">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty], [IntrNoMem]>;
+}
+
+// Misc.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_mmx_maskmovq : GCCBuiltin<"__builtin_ia32_maskmovq">,
+ Intrinsic<[], [llvm_x86mmx_ty, llvm_x86mmx_ty, llvm_ptr_ty], []>;
+
+ def int_x86_mmx_pmovmskb : GCCBuiltin<"__builtin_ia32_pmovmskb">,
+ Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_movnt_dq : GCCBuiltin<"__builtin_ia32_movntq">,
+ Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
+
+ def int_x86_mmx_palignr_b : GCCBuiltin<"__builtin_ia32_palignr">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_x86mmx_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_mmx_pextr_w : GCCBuiltin<"__builtin_ia32_vec_ext_v4hi">,
+ Intrinsic<[llvm_i32_ty], [llvm_x86mmx_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_mmx_pinsr_w : GCCBuiltin<"__builtin_ia32_vec_set_v4hi">,
+ Intrinsic<[llvm_x86mmx_ty], [llvm_x86mmx_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// BMI
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_bmi_bextr_32 : GCCBuiltin<"__builtin_ia32_bextr_u32">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_bextr_64 : GCCBuiltin<"__builtin_ia32_bextr_u64">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_bzhi_32 : GCCBuiltin<"__builtin_ia32_bzhi_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_bzhi_64 : GCCBuiltin<"__builtin_ia32_bzhi_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_pdep_32 : GCCBuiltin<"__builtin_ia32_pdep_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_pdep_64 : GCCBuiltin<"__builtin_ia32_pdep_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_bmi_pext_32 : GCCBuiltin<"__builtin_ia32_pext_si">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_bmi_pext_64 : GCCBuiltin<"__builtin_ia32_pext_di">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+}
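+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// pext gathers the source bits selected by set mask bits into the low bits
+// of the result, and pdep is the inverse scatter. A worked example and a
+// hedged LLVM IR sketch:
+//   pext(0b10110010, 0b11110000) = 0b1011   (the mask picks bits 7..4)
+//   %r = call i32 @llvm.x86.bmi.pext.32(i32 %src, i32 %mask)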
+
+//===----------------------------------------------------------------------===//
+// FS/GS Base
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_rdfsbase_32 : GCCBuiltin<"__builtin_ia32_rdfsbase32">,
+ Intrinsic<[llvm_i32_ty], []>;
+ def int_x86_rdgsbase_32 : GCCBuiltin<"__builtin_ia32_rdgsbase32">,
+ Intrinsic<[llvm_i32_ty], []>;
+ def int_x86_rdfsbase_64 : GCCBuiltin<"__builtin_ia32_rdfsbase64">,
+ Intrinsic<[llvm_i64_ty], []>;
+ def int_x86_rdgsbase_64 : GCCBuiltin<"__builtin_ia32_rdgsbase64">,
+ Intrinsic<[llvm_i64_ty], []>;
+ def int_x86_wrfsbase_32 : GCCBuiltin<"__builtin_ia32_wrfsbase32">,
+ Intrinsic<[], [llvm_i32_ty]>;
+ def int_x86_wrgsbase_32 : GCCBuiltin<"__builtin_ia32_wrgsbase32">,
+ Intrinsic<[], [llvm_i32_ty]>;
+ def int_x86_wrfsbase_64 : GCCBuiltin<"__builtin_ia32_wrfsbase64">,
+ Intrinsic<[], [llvm_i64_ty]>;
+ def int_x86_wrgsbase_64 : GCCBuiltin<"__builtin_ia32_wrgsbase64">,
+ Intrinsic<[], [llvm_i64_ty]>;
+}
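+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// a hedged LLVM IR sketch that reads and then rewrites the FS base:
+//   %base = call i32 @llvm.x86.rdfsbase.32()
+//   call void @llvm.x86.wrfsbase.32(i32 %base)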
+
+//===----------------------------------------------------------------------===//
+// FXSR
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_fxrstor : GCCBuiltin<"__builtin_ia32_fxrstor">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_fxrstor64 : GCCBuiltin<"__builtin_ia32_fxrstor64">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_fxsave : GCCBuiltin<"__builtin_ia32_fxsave">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+ def int_x86_fxsave64 : GCCBuiltin<"__builtin_ia32_fxsave64">,
+ Intrinsic<[], [llvm_ptr_ty], []>;
+}
+
+//===----------------------------------------------------------------------===//
+// XSAVE
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_xsave :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsave64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xrstor :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xrstor64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsaveopt :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsaveopt64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xrstors :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xrstors64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsavec :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsavec64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsaves :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+ def int_x86_xsaves64 :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty], []>;
+}
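+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// for the XSAVE family above, the two llvm_i32_ty operands appear to carry
+// the high and low halves (EDX:EAX) of the 64-bit requested-feature mask,
+// and the pointer names the save area. A hedged LLVM IR sketch:
+//   call void @llvm.x86.xsave(i8* %area, i32 %mask_hi, i32 %mask_lo)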
+
+//===----------------------------------------------------------------------===//
+// Protection key (PKU) support
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_rdpkru : GCCBuiltin <"__builtin_ia32_rdpkru">,
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+ def int_x86_wrpkru : GCCBuiltin<"__builtin_ia32_wrpkru">,
+ Intrinsic<[], [llvm_i32_ty], [IntrNoMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Half float conversion
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtph2ps_512 : GCCBuiltin<"__builtin_ia32_vcvtph2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16i16_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtph2ps_256 : GCCBuiltin<"__builtin_ia32_vcvtph2ps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8i16_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtph2ps_128 : GCCBuiltin<"__builtin_ia32_vcvtph2ps_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v8i16_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtps2ph_512 : GCCBuiltin<"__builtin_ia32_vcvtps2ph512_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16f32_ty, llvm_i32_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtps2ph_256 : GCCBuiltin<"__builtin_ia32_vcvtps2ph256_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8f32_ty, llvm_i32_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_vcvtps2ph_128 : GCCBuiltin<"__builtin_ia32_vcvtps2ph_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4f32_ty, llvm_i32_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+}
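+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// for vcvtps2ph the llvm_i32_ty operand is the rounding-control immediate
+// (0 = round to nearest even, 4 = use MXCSR.RC); the 128-bit form returns
+// <8 x i16> with only the low four halves significant. A hedged sketch:
+//   %h = call <8 x i16> @llvm.x86.vcvtps2ph.128(<4 x float> %x, i32 0)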
+
+//===----------------------------------------------------------------------===//
+// TBM
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_tbm_bextri_u32 : GCCBuiltin<"__builtin_ia32_bextri_u32">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_tbm_bextri_u64 : GCCBuiltin<"__builtin_ia32_bextri_u64">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
+}
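+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// the second bextri operand is the BEXTR control word: bits [7:0] give the
+// start bit and bits [15:8] the field length. A hedged sketch extracting
+// four bits starting at bit 4 (control 0x0404):
+//   %f = call i32 @llvm.x86.tbm.bextri.u32(i32 %x, i32 1028)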
+
+//===----------------------------------------------------------------------===//
+// RDRAND intrinsics - Return a random value and whether it is valid.
+// RDSEED intrinsics - Return a NIST SP800-90B & C compliant random value and
+// whether it is valid.
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ // These are declared side-effecting so they don't get eliminated by CSE or
+ // LICM.
+ def int_x86_rdrand_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdseed_16 : Intrinsic<[llvm_i16_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdseed_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>;
+ def int_x86_rdseed_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>;
+}
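+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// each of these returns an aggregate pair whose second i32 reflects CF and
+// is nonzero only when the hardware produced a valid value. A hedged sketch:
+//   %pair = call { i32, i32 } @llvm.x86.rdrand.32()
+//   %val  = extractvalue { i32, i32 } %pair, 0
+//   %ok   = extractvalue { i32, i32 } %pair, 1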
+
+//===----------------------------------------------------------------------===//
+// ADX
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_addcarryx_u32: GCCBuiltin<"__builtin_ia32_addcarryx_u32">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+ def int_x86_addcarryx_u64: GCCBuiltin<"__builtin_ia32_addcarryx_u64">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+ def int_x86_addcarry_u32: GCCBuiltin<"__builtin_ia32_addcarry_u32">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+ def int_x86_addcarry_u64: GCCBuiltin<"__builtin_ia32_addcarry_u64">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+ def int_x86_subborrow_u32: GCCBuiltin<"__builtin_ia32_subborrow_u32">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i32_ty, llvm_i32_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+ def int_x86_subborrow_u64: GCCBuiltin<"__builtin_ia32_subborrow_u64">,
+ Intrinsic<[llvm_i8_ty], [llvm_i8_ty, llvm_i64_ty, llvm_i64_ty,
+ llvm_ptr_ty], [IntrReadWriteArgMem]>;
+}
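+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// these take a carry-in byte and two addends, store the sum through the
+// pointer operand (hence IntrReadWriteArgMem), and return the carry-out
+// byte. A hedged LLVM IR sketch:
+//   %cf = call i8 @llvm.x86.addcarry.u32(i8 %cin, i32 %a, i32 %b, i8* %out)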
+
+//===----------------------------------------------------------------------===//
+// RTM intrinsics. Transactional Memory support.
+
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_xbegin : GCCBuiltin<"__builtin_ia32_xbegin">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+ def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">,
+ Intrinsic<[], [], []>;
+ def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">,
+ Intrinsic<[], [llvm_i8_ty], [IntrNoReturn]>;
+ def int_x86_xtest : GCCBuiltin<"__builtin_ia32_xtest">,
+ Intrinsic<[llvm_i32_ty], [], []>;
+}
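+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// xbegin returns -1 (_XBEGIN_STARTED) when the transaction begins and an
+// abort status code otherwise. A hedged sketch of the usual pattern:
+//   %status  = call i32 @llvm.x86.xbegin()
+//   %started = icmp eq i32 %status, -1
+//   ; ... transactional region, ending with:
+//   call void @llvm.x86.xend()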
+
+//===----------------------------------------------------------------------===//
+// AVX512
+
+// Mask ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ // Mask instructions
+ // 16-bit mask
+ def int_x86_avx512_kand_w : GCCBuiltin<"__builtin_ia32_kandhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kandn_w : GCCBuiltin<"__builtin_ia32_kandnhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_knot_w : GCCBuiltin<"__builtin_ia32_knothi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_kor_w : GCCBuiltin<"__builtin_ia32_korhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kxor_w : GCCBuiltin<"__builtin_ia32_kxorhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kxnor_w : GCCBuiltin<"__builtin_ia32_kxnorhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kunpck_bw : GCCBuiltin<"__builtin_ia32_kunpckhi">,
+ Intrinsic<[llvm_i16_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kunpck_wd : GCCBuiltin<"__builtin_ia32_kunpcksi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kunpck_dq : GCCBuiltin<"__builtin_ia32_kunpckdi">,
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kortestz_w : GCCBuiltin<"__builtin_ia32_kortestzhi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_kortestc_w : GCCBuiltin<"__builtin_ia32_kortestchi">,
+ Intrinsic<[llvm_i32_ty], [llvm_i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+}
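+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// these model AVX-512 k-register masks as plain i16 values; kortestz
+// returns 1 exactly when the OR of its operands is all zeros. A hedged
+// LLVM IR sketch:
+//   %k = call i16 @llvm.x86.avx512.kand.w(i16 %a, i16 %b)
+//   %z = call i32 @llvm.x86.avx512.kortestz.w(i16 %a, i16 %b)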
+
+// Conversion ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_cvtss2usi : GCCBuiltin<"__builtin_ia32_cvtss2usi">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtss2usi64 : GCCBuiltin<"__builtin_ia32_cvtss2usi64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttss2si : GCCBuiltin<"__builtin_ia32_vcvttss2si32">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttss2si64 : GCCBuiltin<"__builtin_ia32_vcvttss2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttss2usi : GCCBuiltin<"__builtin_ia32_vcvttss2usi32">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttss2usi64 : GCCBuiltin<"__builtin_ia32_vcvttss2usi64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtusi2ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss32">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtusi642ss : GCCBuiltin<"__builtin_ia32_cvtusi2ss64">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtsd2usi : GCCBuiltin<"__builtin_ia32_cvtsd2usi">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtsd2usi64 : GCCBuiltin<"__builtin_ia32_cvtsd2usi64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttsd2si : GCCBuiltin<"__builtin_ia32_vcvttsd2si32">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttsd2si64 : GCCBuiltin<"__builtin_ia32_vcvttsd2si64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttsd2usi : GCCBuiltin<"__builtin_ia32_vcvttsd2usi32">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvttsd2usi64 : GCCBuiltin<"__builtin_ia32_vcvttsd2usi64">,
+ Intrinsic<[llvm_i64_ty], [llvm_v2f64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtusi2sd : GCCBuiltin<"__builtin_ia32_cvtusi2sd32">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtusi642sd : GCCBuiltin<"__builtin_ia32_cvtusi2sd64">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtsi2ss32 : GCCBuiltin<"__builtin_ia32_cvtsi2ss32">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtsi2ss64 : GCCBuiltin<"__builtin_ia32_cvtsi2ss64">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtsi2sd32 : GCCBuiltin<"__builtin_ia32_cvtsi2sd32">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtsi2sd64 : GCCBuiltin<"__builtin_ia32_cvtsi2sd64">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
+ llvm_i64_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtb2mask_128 : GCCBuiltin<"__builtin_ia32_cvtb2mask128">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtb2mask_256 : GCCBuiltin<"__builtin_ia32_cvtb2mask256">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtb2mask_512 : GCCBuiltin<"__builtin_ia32_cvtb2mask512">,
+ Intrinsic<[llvm_i64_ty], [llvm_v64i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtw2mask_128 : GCCBuiltin<"__builtin_ia32_cvtw2mask128">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtw2mask_256 : GCCBuiltin<"__builtin_ia32_cvtw2mask256">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtw2mask_512 : GCCBuiltin<"__builtin_ia32_cvtw2mask512">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtd2mask_128 : GCCBuiltin<"__builtin_ia32_cvtd2mask128">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtd2mask_256 : GCCBuiltin<"__builtin_ia32_cvtd2mask256">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtd2mask_512 : GCCBuiltin<"__builtin_ia32_cvtd2mask512">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtq2mask_128 : GCCBuiltin<"__builtin_ia32_cvtq2mask128">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtq2mask_256 : GCCBuiltin<"__builtin_ia32_cvtq2mask256">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtq2mask_512 : GCCBuiltin<"__builtin_ia32_cvtq2mask512">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtmask2b_128 : GCCBuiltin<"__builtin_ia32_cvtmask2b128">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2b_256 : GCCBuiltin<"__builtin_ia32_cvtmask2b256">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2b_512 : GCCBuiltin<"__builtin_ia32_cvtmask2b512">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_i64_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtmask2w_128 : GCCBuiltin<"__builtin_ia32_cvtmask2w128">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2w_256 : GCCBuiltin<"__builtin_ia32_cvtmask2w256">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2w_512 : GCCBuiltin<"__builtin_ia32_cvtmask2w512">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtmask2d_128 : GCCBuiltin<"__builtin_ia32_cvtmask2d128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2d_256 : GCCBuiltin<"__builtin_ia32_cvtmask2d256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2d_512 : GCCBuiltin<"__builtin_ia32_cvtmask2d512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_cvtmask2q_128 : GCCBuiltin<"__builtin_ia32_cvtmask2q128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2q_256 : GCCBuiltin<"__builtin_ia32_cvtmask2q256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_cvtmask2q_512 : GCCBuiltin<"__builtin_ia32_cvtmask2q512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+
+}
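+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// the cvt*2mask defs above model VPMOV*2M, packing each element's sign bit
+// into a scalar mask, and cvtmask2* is the inverse broadcast (VPMOVM2*).
+// A hedged sketch:
+//   %m = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %v)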
+
+// Pack ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_packsswb_128 : GCCBuiltin<"__builtin_ia32_packsswb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packsswb_256 : GCCBuiltin<"__builtin_ia32_packsswb256_mask">,
+          Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packsswb_512 : GCCBuiltin<"__builtin_ia32_packsswb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packssdw_128 : GCCBuiltin<"__builtin_ia32_packssdw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packssdw_256 : GCCBuiltin<"__builtin_ia32_packssdw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packssdw_512 : GCCBuiltin<"__builtin_ia32_packssdw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packuswb_128 : GCCBuiltin<"__builtin_ia32_packuswb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packuswb_256 : GCCBuiltin<"__builtin_ia32_packuswb256_mask">,
+          Intrinsic<[llvm_v32i8_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packuswb_512 : GCCBuiltin<"__builtin_ia32_packuswb512_mask">,
+          Intrinsic<[llvm_v64i8_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packusdw_128 : GCCBuiltin<"__builtin_ia32_packusdw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packusdw_256 : GCCBuiltin<"__builtin_ia32_packusdw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_packusdw_512 : GCCBuiltin<"__builtin_ia32_packusdw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+}
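+
+// Illustrative note (an editorial sketch, not part of the imported source):
+// in these masked defs the trailing vector operand is the passthrough
+// source and the trailing integer operand is the k-mask, one bit per result
+// element. A hedged LLVM IR sketch:
+//   %r = call <16 x i8> @llvm.x86.avx512.mask.packsswb.128(
+//            <8 x i16> %a, <8 x i16> %b, <16 x i8> %passthru, i16 %k)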
+
+// Unpack ops.
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_unpckh_pd_128 :
+ GCCBuiltin<"__builtin_ia32_unpckhpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckh_pd_256 :
+ GCCBuiltin<"__builtin_ia32_unpckhpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckh_pd_512 :
+ GCCBuiltin<"__builtin_ia32_unpckhpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckh_ps_128 :
+ GCCBuiltin<"__builtin_ia32_unpckhps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckh_ps_256 :
+ GCCBuiltin<"__builtin_ia32_unpckhps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckh_ps_512 :
+ GCCBuiltin<"__builtin_ia32_unpckhps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_pd_128 :
+ GCCBuiltin<"__builtin_ia32_unpcklpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_pd_256 :
+ GCCBuiltin<"__builtin_ia32_unpcklpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_pd_512 :
+ GCCBuiltin<"__builtin_ia32_unpcklpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_ps_128 :
+ GCCBuiltin<"__builtin_ia32_unpcklps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_ps_256 :
+ GCCBuiltin<"__builtin_ia32_unpcklps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_unpckl_ps_512 :
+ GCCBuiltin<"__builtin_ia32_unpcklps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhb_w_128 :
+ GCCBuiltin<"__builtin_ia32_punpckhbw128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhb_w_256 :
+ GCCBuiltin<"__builtin_ia32_punpckhbw256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhb_w_512 :
+ GCCBuiltin<"__builtin_ia32_punpckhbw512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhd_q_128 :
+ GCCBuiltin<"__builtin_ia32_punpckhdq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhd_q_256 :
+ GCCBuiltin<"__builtin_ia32_punpckhdq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhd_q_512 :
+ GCCBuiltin<"__builtin_ia32_punpckhdq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhqd_q_128 :
+ GCCBuiltin<"__builtin_ia32_punpckhqdq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhqd_q_256 :
+ GCCBuiltin<"__builtin_ia32_punpckhqdq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhqd_q_512 :
+ GCCBuiltin<"__builtin_ia32_punpckhqdq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhw_d_128 :
+ GCCBuiltin<"__builtin_ia32_punpckhwd128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhw_d_256 :
+ GCCBuiltin<"__builtin_ia32_punpckhwd256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckhw_d_512 :
+ GCCBuiltin<"__builtin_ia32_punpckhwd512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklb_w_128 :
+ GCCBuiltin<"__builtin_ia32_punpcklbw128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklb_w_256 :
+ GCCBuiltin<"__builtin_ia32_punpcklbw256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklb_w_512 :
+ GCCBuiltin<"__builtin_ia32_punpcklbw512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckld_q_128 :
+ GCCBuiltin<"__builtin_ia32_punpckldq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckld_q_256 :
+ GCCBuiltin<"__builtin_ia32_punpckldq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpckld_q_512 :
+ GCCBuiltin<"__builtin_ia32_punpckldq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklqd_q_128 :
+ GCCBuiltin<"__builtin_ia32_punpcklqdq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklqd_q_256 :
+ GCCBuiltin<"__builtin_ia32_punpcklqdq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklqd_q_512 :
+ GCCBuiltin<"__builtin_ia32_punpcklqdq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklw_d_128 :
+ GCCBuiltin<"__builtin_ia32_punpcklwd128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklw_d_256 :
+ GCCBuiltin<"__builtin_ia32_punpcklwd256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_punpcklw_d_512 :
+ GCCBuiltin<"__builtin_ia32_punpcklwd512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+}
+
+// Vector convert
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_cvtdq2pd_128 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtdq2pd_256 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtdq2pd_512 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtdq2ps_128 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtdq2ps_256 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2ps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtdq2ps_512 :
+ GCCBuiltin<"__builtin_ia32_cvtdq2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2dq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2dq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2dq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2dq256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2dq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2dq512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2ps_256 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2ps256_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2ps_512 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2ps512_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f64_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtsd2ss_round :
+ GCCBuiltin<"__builtin_ia32_cvtsd2ss_round">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtss2sd_round :
+ GCCBuiltin<"__builtin_ia32_cvtss2sd_round">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2ps :
+ GCCBuiltin<"__builtin_ia32_cvtpd2ps_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2f64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2qq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2qq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2qq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2qq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2qq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2qq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2udq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2udq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2udq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2udq256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2udq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2udq512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2uqq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2uqq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2uqq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2uqq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtpd2uqq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtpd2uqq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2dq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtps2dq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2dq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtps2dq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2dq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtps2dq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2pd_128 :
+ GCCBuiltin<"__builtin_ia32_cvtps2pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v4f32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2pd_256 :
+ GCCBuiltin<"__builtin_ia32_cvtps2pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2pd_512 :
+ GCCBuiltin<"__builtin_ia32_cvtps2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2qq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtps2qq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4f32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2qq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtps2qq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2qq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtps2qq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2udq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtps2udq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2udq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtps2udq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2udq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtps2udq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2uqq_128 :
+ GCCBuiltin<"__builtin_ia32_cvtps2uqq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4f32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2uqq_256 :
+ GCCBuiltin<"__builtin_ia32_cvtps2uqq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtps2uqq_512 :
+ GCCBuiltin<"__builtin_ia32_cvtps2uqq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2pd_128 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2pd_256 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2pd_512 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2ps_128 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2ps_256 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2ps256_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtqq2ps_512 :
+ GCCBuiltin<"__builtin_ia32_cvtqq2ps512_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i64_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2dq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2dq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2dq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2dq256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2dq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2dq512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2qq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2qq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2qq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2qq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2qq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2qq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2udq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2udq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2udq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2udq256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2udq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2udq512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f64_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2uqq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2uqq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2f64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2uqq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2uqq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttpd2uqq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttpd2uqq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f64_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2dq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttps2dq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2dq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttps2dq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2dq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttps2dq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2qq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttps2qq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4f32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2qq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttps2qq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2qq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttps2qq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2udq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttps2udq128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4f32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2udq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttps2udq256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8f32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2udq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttps2udq512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16f32_ty, llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2uqq_128 :
+ GCCBuiltin<"__builtin_ia32_cvttps2uqq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v4f32_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2uqq_256 :
+ GCCBuiltin<"__builtin_ia32_cvttps2uqq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4f32_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvttps2uqq_512 :
+ GCCBuiltin<"__builtin_ia32_cvttps2uqq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8f32_ty, llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2pd_128 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v4i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2pd_256 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2pd_512 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8i32_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2ps_128 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2ps_256 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2ps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtudq2ps_512 :
+ GCCBuiltin<"__builtin_ia32_cvtudq2ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2pd_128 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2i64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2pd_256 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4i64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2pd_512 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8i64_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2ps_128 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v2i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2ps_256 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2ps256_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4i64_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cvtuqq2ps_512 :
+ GCCBuiltin<"__builtin_ia32_cvtuqq2ps512_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8i64_ty, llvm_v8f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_rndscale_pd_128 : GCCBuiltin<"__builtin_ia32_rndscalepd_128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_pd_256 : GCCBuiltin<"__builtin_ia32_rndscalepd_256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_pd_512 : GCCBuiltin<"__builtin_ia32_rndscalepd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_ps_128 : GCCBuiltin<"__builtin_ia32_rndscaleps_128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_ps_256 : GCCBuiltin<"__builtin_ia32_rndscaleps_256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_ps_512 : GCCBuiltin<"__builtin_ia32_rndscaleps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_pd_128 : GCCBuiltin<"__builtin_ia32_reducepd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_i32_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_pd_256 : GCCBuiltin<"__builtin_ia32_reducepd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i32_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_pd_512 : GCCBuiltin<"__builtin_ia32_reducepd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_ps_128 : GCCBuiltin<"__builtin_ia32_reduceps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_i32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_ps_256 : GCCBuiltin<"__builtin_ia32_reduceps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_i32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_ps_512 : GCCBuiltin<"__builtin_ia32_reduceps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_pd_128 : GCCBuiltin<"__builtin_ia32_rangepd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_pd_256 : GCCBuiltin<"__builtin_ia32_rangepd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i32_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_pd_512 : GCCBuiltin<"__builtin_ia32_rangepd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i32_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_ps_128 : GCCBuiltin<"__builtin_ia32_rangeps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_ps_256 : GCCBuiltin<"__builtin_ia32_rangeps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_range_ps_512 : GCCBuiltin<"__builtin_ia32_rangeps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+}
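
Note that the 512-bit conversions (and the scalar _round forms) carry a trailing llvm_i32_ty operand that the 128/256-bit variants lack: the rounding-mode immediate. A hedged sketch of driving one of them from IR; the value 4 is _MM_FROUND_CUR_DIRECTION, i.e. "use the current MXCSR rounding mode":

; Hypothetical caller, not part of the patch.
declare <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32>, <16 x float>, i16, i32)

define <16 x float> @cvt_dwords(<16 x i32> %src, <16 x float> %passthru, i16 %mask) {
  ; i32 4 == _MM_FROUND_CUR_DIRECTION; other encodings select static rounding.
  %r = call <16 x float> @llvm.x86.avx512.mask.cvtdq2ps.512(<16 x i32> %src, <16 x float> %passthru, i16 %mask, i32 4)
  ret <16 x float> %r
}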
+
+// Vector load with broadcast
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_vbroadcast_ss_512 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastss512">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_broadcast_ss_ps_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastss512">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v4f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_broadcast_ss_ps_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastss256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v4f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_broadcast_ss_ps_128 :
+ GCCBuiltin<"__builtin_ia32_broadcastss128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_vbroadcast_sd_512 :
+ GCCBuiltin<"__builtin_ia32_vbroadcastsd512">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_broadcast_sd_pd_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastsd512">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v2f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_broadcast_sd_pd_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastsd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v2f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_pbroadcastb_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastb_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastb256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v16i8_ty, llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastb_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastb512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v16i8_ty, llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastw_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastw_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastw256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v8i16_ty, llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastw_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastw512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v8i16_ty, llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastd_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastd_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastd_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastd512">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastq_128 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastq_256 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_pbroadcastq_512 :
+ GCCBuiltin<"__builtin_ia32_pbroadcastq512">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf32x2_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastf32x2_256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v4f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf32x2_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastf32x2_512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v4f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x2_128 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x2_128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x2_256 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x2_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x2_512 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x2_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf32x4_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastf32x4_256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v4f32_ty, llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf32x4_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastf32x4_512">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v4f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf32x8_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastf32x8_512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v8f32_ty, llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf64x2_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastf64x2_256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v2f64_ty, llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf64x2_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastf64x2_512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v2f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcastf64x4_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastf64x4_512">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v4f64_ty, llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x4_256 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x4_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v4i32_ty, llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x4_512 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x4_512">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v4i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti32x8_512 :
+ GCCBuiltin<"__builtin_ia32_broadcasti32x8_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v8i32_ty, llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti64x2_256 :
+ GCCBuiltin<"__builtin_ia32_broadcasti64x2_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v2i64_ty, llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti64x2_512 :
+ GCCBuiltin<"__builtin_ia32_broadcasti64x2_512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v2i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_broadcasti64x4_512 :
+ GCCBuiltin<"__builtin_ia32_broadcasti64x4_512">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v4i64_ty, llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_pbroadcastd_i32_512 :
+ Intrinsic<[llvm_v16i32_ty], [llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_pbroadcastq_i64_512 :
+ Intrinsic<[llvm_v8i64_ty], [llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmw_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastmw512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmw_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastmw256">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmw_128 :
+ GCCBuiltin<"__builtin_ia32_broadcastmw128">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmb_512 :
+ GCCBuiltin<"__builtin_ia32_broadcastmb512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmb_256 :
+ GCCBuiltin<"__builtin_ia32_broadcastmb256">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_broadcastmb_128 :
+ GCCBuiltin<"__builtin_ia32_broadcastmb128">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_i8_ty], [IntrNoMem]>;
+}
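
In the broadcast family the first operand is a narrow source vector whose low element (or low subvector, for the 32x4/64x2/32x8/64x4 forms) is replicated across the result; the remaining operands are the familiar pass-through vector and write-mask. A sketch under those assumptions:

; Hypothetical caller, not part of the patch.
declare <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32>, <16 x i32>, i16)

define <16 x i32> @splat_dword(<4 x i32> %v, <16 x i32> %passthru, i16 %mask) {
  ; Element 0 of %v fills every unmasked lane; masked lanes come from %passthru.
  %r = call <16 x i32> @llvm.x86.avx512.pbroadcastd.512(<4 x i32> %v, <16 x i32> %passthru, i16 %mask)
  ret <16 x i32> %r
}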
+
+// Vector sign and zero extend
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_pmovzxbq : GCCBuiltin<"__builtin_ia32_pmovzxbq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_pmovzxwd : GCCBuiltin<"__builtin_ia32_pmovzxwd512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_pmovzxbd : GCCBuiltin<"__builtin_ia32_pmovzxbd512">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_pmovzxwq : GCCBuiltin<"__builtin_ia32_pmovzxwq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_pmovzxdq : GCCBuiltin<"__builtin_ia32_pmovzxdq512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i32_ty],
+ [IntrNoMem]>;
+}
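Unlike the masked intrinsics above, these extensions take only the source vector; each zero-extends the low source elements to the wider result type, e.g.:

; Hypothetical caller, not part of the patch.
declare <16 x i32> @llvm.x86.avx512.pmovzxbd(<16 x i8>)

define <16 x i32> @zext_bytes(<16 x i8> %a) {
  %r = call <16 x i32> @llvm.x86.avx512.pmovzxbd(<16 x i8> %a)
  ; Equivalent generic form: zext <16 x i8> %a to <16 x i32>
  ret <16 x i32> %r
}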
+// Bitwise Ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_pand_d_128 : GCCBuiltin<"__builtin_ia32_pandd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_d_256 : GCCBuiltin<"__builtin_ia32_pandd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_d_512 : GCCBuiltin<"__builtin_ia32_pandd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_q_128 : GCCBuiltin<"__builtin_ia32_pandq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_q_256 : GCCBuiltin<"__builtin_ia32_pandq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pand_q_512 : GCCBuiltin<"__builtin_ia32_pandq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_d_128 : GCCBuiltin<"__builtin_ia32_pandnd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_d_256 : GCCBuiltin<"__builtin_ia32_pandnd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_d_512 : GCCBuiltin<"__builtin_ia32_pandnd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_q_128 : GCCBuiltin<"__builtin_ia32_pandnq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_q_256 : GCCBuiltin<"__builtin_ia32_pandnq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pandn_q_512 : GCCBuiltin<"__builtin_ia32_pandnq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_d_128 : GCCBuiltin<"__builtin_ia32_pord128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_d_256 : GCCBuiltin<"__builtin_ia32_pord256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_d_512 : GCCBuiltin<"__builtin_ia32_pord512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_q_128 : GCCBuiltin<"__builtin_ia32_porq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_q_256 : GCCBuiltin<"__builtin_ia32_porq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_por_q_512 : GCCBuiltin<"__builtin_ia32_porq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_d_128 : GCCBuiltin<"__builtin_ia32_pxord128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_d_256 : GCCBuiltin<"__builtin_ia32_pxord256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_d_512 : GCCBuiltin<"__builtin_ia32_pxord512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_q_128 : GCCBuiltin<"__builtin_ia32_pxorq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_q_256 : GCCBuiltin<"__builtin_ia32_pxorq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pxor_q_512 : GCCBuiltin<"__builtin_ia32_pxorq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+}
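The write-mask type in these records tracks the lane count: i16 for the sixteen-lane d_512 forms, i8 wherever the vector has eight or fewer lanes (only the low bits of the i8 are significant for the four- and two-lane variants). A sketch of the 512-bit masked AND, again assuming the (src1, src2, pass-through, write-mask) ordering:

; Hypothetical caller, not part of the patch.
declare <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)

define <16 x i32> @masked_and(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask) {
  %r = call <16 x i32> @llvm.x86.avx512.mask.pand.d.512(<16 x i32> %a, <16 x i32> %b, <16 x i32> %passthru, i16 %mask)
  ret <16 x i32> %r
}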
+// Arithmetic ops
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+
+ def int_x86_avx512_mask_add_ps_128 : GCCBuiltin<"__builtin_ia32_addps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_ps_256 : GCCBuiltin<"__builtin_ia32_addps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_ps_512 : GCCBuiltin<"__builtin_ia32_addps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_pd_128 : GCCBuiltin<"__builtin_ia32_addpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_pd_256 : GCCBuiltin<"__builtin_ia32_addpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_pd_512 : GCCBuiltin<"__builtin_ia32_addpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_ps_128 : GCCBuiltin<"__builtin_ia32_subps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_ps_256 : GCCBuiltin<"__builtin_ia32_subps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_ps_512 : GCCBuiltin<"__builtin_ia32_subps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_pd_128 : GCCBuiltin<"__builtin_ia32_subpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_pd_256 : GCCBuiltin<"__builtin_ia32_subpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_pd_512 : GCCBuiltin<"__builtin_ia32_subpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_ps_128 : GCCBuiltin<"__builtin_ia32_mulps_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_ps_256 : GCCBuiltin<"__builtin_ia32_mulps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_ps_512 : GCCBuiltin<"__builtin_ia32_mulps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_pd_128 : GCCBuiltin<"__builtin_ia32_mulpd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_pd_256 : GCCBuiltin<"__builtin_ia32_mulpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_pd_512 : GCCBuiltin<"__builtin_ia32_mulpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_ps_128 : GCCBuiltin<"__builtin_ia32_divps_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_ps_256 : GCCBuiltin<"__builtin_ia32_divps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_ps_512 : GCCBuiltin<"__builtin_ia32_divps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_pd_128 : GCCBuiltin<"__builtin_ia32_divpd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_pd_256 : GCCBuiltin<"__builtin_ia32_divpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_pd_512 : GCCBuiltin<"__builtin_ia32_divpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_ps_128 : GCCBuiltin<"__builtin_ia32_maxps_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_ps_256 : GCCBuiltin<"__builtin_ia32_maxps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_ps_512 : GCCBuiltin<"__builtin_ia32_maxps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_pd_128 : GCCBuiltin<"__builtin_ia32_maxpd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_pd_256 : GCCBuiltin<"__builtin_ia32_maxpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_pd_512 : GCCBuiltin<"__builtin_ia32_maxpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_ps_128 : GCCBuiltin<"__builtin_ia32_minps_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_ps_256 : GCCBuiltin<"__builtin_ia32_minps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_ps_512 : GCCBuiltin<"__builtin_ia32_minps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_pd_128 : GCCBuiltin<"__builtin_ia32_minpd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_pd_256 : GCCBuiltin<"__builtin_ia32_minpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_pd_512 : GCCBuiltin<"__builtin_ia32_minpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_add_ss_round : GCCBuiltin<"__builtin_ia32_addss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_ss_round : GCCBuiltin<"__builtin_ia32_divss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_ss_round : GCCBuiltin<"__builtin_ia32_mulss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_ss_round : GCCBuiltin<"__builtin_ia32_subss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_ss_round : GCCBuiltin<"__builtin_ia32_maxss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_ss_round : GCCBuiltin<"__builtin_ia32_minss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_add_sd_round : GCCBuiltin<"__builtin_ia32_addsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_div_sd_round : GCCBuiltin<"__builtin_ia32_divsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_mul_sd_round : GCCBuiltin<"__builtin_ia32_mulsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sub_sd_round : GCCBuiltin<"__builtin_ia32_subsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_max_sd_round : GCCBuiltin<"__builtin_ia32_maxsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_min_sd_round : GCCBuiltin<"__builtin_ia32_minsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_rndscale_ss : GCCBuiltin<"__builtin_ia32_rndscaless_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_rndscale_sd : GCCBuiltin<"__builtin_ia32_rndscalesd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_range_ss : GCCBuiltin<"__builtin_ia32_rangess128_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_range_sd : GCCBuiltin<"__builtin_ia32_rangesd128_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_ss : GCCBuiltin<"__builtin_ia32_reducess">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_reduce_sd : GCCBuiltin<"__builtin_ia32_reducesd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_sd : GCCBuiltin<"__builtin_ia32_scalefsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_ss : GCCBuiltin<"__builtin_ia32_scalefss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_pd_128 : GCCBuiltin<"__builtin_ia32_scalefpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_pd_256 : GCCBuiltin<"__builtin_ia32_scalefpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_pd_512 : GCCBuiltin<"__builtin_ia32_scalefpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_ps_128 : GCCBuiltin<"__builtin_ia32_scalefps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_ps_256 : GCCBuiltin<"__builtin_ia32_scalefps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_scalef_ps_512 : GCCBuiltin<"__builtin_ia32_scalefps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_sqrt_ss : GCCBuiltin<"__builtin_ia32_sqrtss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_sd : GCCBuiltin<"__builtin_ia32_sqrtsd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_sqrt_pd_128 : GCCBuiltin<"__builtin_ia32_sqrtpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_pd_256 : GCCBuiltin<"__builtin_ia32_sqrtpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_pd_512 : GCCBuiltin<"__builtin_ia32_sqrtpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_ps_128 : GCCBuiltin<"__builtin_ia32_sqrtps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_ps_256 : GCCBuiltin<"__builtin_ia32_sqrtps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_sqrt_ps_512 : GCCBuiltin<"__builtin_ia32_sqrtps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_pd_128 : GCCBuiltin<"__builtin_ia32_getexppd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_pd_256 : GCCBuiltin<"__builtin_ia32_getexppd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_pd_512 : GCCBuiltin<"__builtin_ia32_getexppd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_ps_128 : GCCBuiltin<"__builtin_ia32_getexpps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_ps_256 : GCCBuiltin<"__builtin_ia32_getexpps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_ps_512 : GCCBuiltin<"__builtin_ia32_getexpps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getexp_ss : GCCBuiltin<"__builtin_ia32_getexpss_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_getexp_sd : GCCBuiltin<"__builtin_ia32_getexpsd_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_pd_128 :
+ GCCBuiltin<"__builtin_ia32_getmantpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_pd_256 :
+ GCCBuiltin<"__builtin_ia32_getmantpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_i32_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_pd_512 :
+ GCCBuiltin<"__builtin_ia32_getmantpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_i32_ty, llvm_v8f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_ps_128 :
+ GCCBuiltin<"__builtin_ia32_getmantps128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_ps_256 :
+ GCCBuiltin<"__builtin_ia32_getmantps256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_i32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_ps_512 :
+ GCCBuiltin<"__builtin_ia32_getmantps512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_ss :
+ GCCBuiltin<"__builtin_ia32_getmantss_round">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4f32_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_getmant_sd :
+ GCCBuiltin<"__builtin_ia32_getmantsd_round">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty, llvm_v2f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rsqrt14_ss : GCCBuiltin<"__builtin_ia32_rsqrt14ss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_sd : GCCBuiltin<"__builtin_ia32_rsqrt14sd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rsqrt14_pd_128 : GCCBuiltin<"__builtin_ia32_rsqrt14pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_pd_256 : GCCBuiltin<"__builtin_ia32_rsqrt14pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_pd_512 : GCCBuiltin<"__builtin_ia32_rsqrt14pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_ps_128 : GCCBuiltin<"__builtin_ia32_rsqrt14ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_ps_256 : GCCBuiltin<"__builtin_ia32_rsqrt14ps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rsqrt14_ps_512 : GCCBuiltin<"__builtin_ia32_rsqrt14ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ss : GCCBuiltin<"__builtin_ia32_rcp14ss">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_sd : GCCBuiltin<"__builtin_ia32_rcp14sd">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rcp14_pd_128 : GCCBuiltin<"__builtin_ia32_rcp14pd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_pd_256 : GCCBuiltin<"__builtin_ia32_rcp14pd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_pd_512 : GCCBuiltin<"__builtin_ia32_rcp14pd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ps_128 : GCCBuiltin<"__builtin_ia32_rcp14ps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ps_256 : GCCBuiltin<"__builtin_ia32_rcp14ps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp14_ps_512 : GCCBuiltin<"__builtin_ia32_rcp14ps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rcp28_ps : GCCBuiltin<"__builtin_ia32_rcp28ps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_rcp28_pd : GCCBuiltin<"__builtin_ia32_rcp28pd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_exp2_ps : GCCBuiltin<"__builtin_ia32_exp2ps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_exp2_pd : GCCBuiltin<"__builtin_ia32_exp2pd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_rcp28_ss : GCCBuiltin<"__builtin_ia32_rcp28ss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_rcp28_sd : GCCBuiltin<"__builtin_ia32_rcp28sd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_rsqrt28_ps : GCCBuiltin<"__builtin_ia32_rsqrt28ps_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_rsqrt28_pd : GCCBuiltin<"__builtin_ia32_rsqrt28pd_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_rsqrt28_ss : GCCBuiltin<"__builtin_ia32_rsqrt28ss_round">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_rsqrt28_sd : GCCBuiltin<"__builtin_ia32_rsqrt28sd_round">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_psad_bw_512 : GCCBuiltin<"__builtin_ia32_psadbw512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
+ [IntrNoMem]>;
+}
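As with the conversions, only the 512-bit packed forms and the scalar _round forms take the trailing rounding-mode immediate; the narrower forms simply omit it. A hedged sketch of the masked 512-bit add:

; Hypothetical caller, not part of the patch.
declare <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)

define <16 x float> @masked_add(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask) {
  ; i32 4 == _MM_FROUND_CUR_DIRECTION (dynamic rounding from MXCSR).
  %r = call <16 x float> @llvm.x86.avx512.mask.add.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %passthru, i16 %mask, i32 4)
  ret <16 x float> %r
}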
+// FP logical ops
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_and_pd_128 : GCCBuiltin<"__builtin_ia32_andpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_and_pd_256 : GCCBuiltin<"__builtin_ia32_andpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_and_pd_512 : GCCBuiltin<"__builtin_ia32_andpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_and_ps_128 : GCCBuiltin<"__builtin_ia32_andps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_and_ps_256 : GCCBuiltin<"__builtin_ia32_andps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_and_ps_512 : GCCBuiltin<"__builtin_ia32_andps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_pd_128 : GCCBuiltin<"__builtin_ia32_andnpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_pd_256 : GCCBuiltin<"__builtin_ia32_andnpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_pd_512 : GCCBuiltin<"__builtin_ia32_andnpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_ps_128 : GCCBuiltin<"__builtin_ia32_andnps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_ps_256 : GCCBuiltin<"__builtin_ia32_andnps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_andn_ps_512 : GCCBuiltin<"__builtin_ia32_andnps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_pd_128 : GCCBuiltin<"__builtin_ia32_orpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_pd_256 : GCCBuiltin<"__builtin_ia32_orpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_pd_512 : GCCBuiltin<"__builtin_ia32_orpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_ps_128 : GCCBuiltin<"__builtin_ia32_orps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_ps_256 : GCCBuiltin<"__builtin_ia32_orps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_or_ps_512 : GCCBuiltin<"__builtin_ia32_orps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_xor_pd_128 : GCCBuiltin<"__builtin_ia32_xorpd128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_xor_pd_256 : GCCBuiltin<"__builtin_ia32_xorpd256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_v4f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_xor_pd_512 : GCCBuiltin<"__builtin_ia32_xorpd512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_v8f64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_xor_ps_128 : GCCBuiltin<"__builtin_ia32_xorps128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_xor_ps_256 : GCCBuiltin<"__builtin_ia32_xorps256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_v8f32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_xor_ps_512 : GCCBuiltin<"__builtin_ia32_xorps512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_v16f32_ty, llvm_i16_ty], [IntrNoMem]>;
+}
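+// Editorial note on the masked-op convention used in the block above: the
+// operands are (src1, src2, passthru, mask), and each mask bit selects, per
+// element, between the computed result and the passthru lane. A minimal
+// sketch of a call (assuming the usual '_' to '.' name mapping):
+//   %r = call <8 x float> @llvm.x86.avx512.mask.and.ps.256(
+//            <8 x float> %a, <8 x float> %b, <8 x float> %passthru, i8 %m)
+// yields lane i of the bitwise AND where bit i of %m is set, and lane i of
+// %passthru where it is clear.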
+// Integer arithmetic ops
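+// (Editorial note: as the defs below show, the scalar mask type tracks the
+// lane count -- i16 for 16 lanes, i32 for 32, i64 for 64 -- and padds/paddus
+// and psubs/psubus are the signed- and unsigned-saturating variants of
+// padd/psub.)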
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_padd_b_128 : GCCBuiltin<"__builtin_ia32_paddb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_b_256 : GCCBuiltin<"__builtin_ia32_paddb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_b_512 : GCCBuiltin<"__builtin_ia32_paddb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_w_128 : GCCBuiltin<"__builtin_ia32_paddw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_w_256 : GCCBuiltin<"__builtin_ia32_paddw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_w_512 : GCCBuiltin<"__builtin_ia32_paddw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_b_128 : GCCBuiltin<"__builtin_ia32_paddsb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_b_256 : GCCBuiltin<"__builtin_ia32_paddsb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_b_512 : GCCBuiltin<"__builtin_ia32_paddsb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_w_128 : GCCBuiltin<"__builtin_ia32_paddsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_w_256 : GCCBuiltin<"__builtin_ia32_paddsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padds_w_512 : GCCBuiltin<"__builtin_ia32_paddsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_b_128 : GCCBuiltin<"__builtin_ia32_paddusb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_b_256 : GCCBuiltin<"__builtin_ia32_paddusb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_b_512 : GCCBuiltin<"__builtin_ia32_paddusb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_w_128 : GCCBuiltin<"__builtin_ia32_paddusw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_w_256 : GCCBuiltin<"__builtin_ia32_paddusw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_paddus_w_512 : GCCBuiltin<"__builtin_ia32_paddusw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_d_128 : GCCBuiltin<"__builtin_ia32_paddd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_d_256 : GCCBuiltin<"__builtin_ia32_paddd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_d_512 : GCCBuiltin<"__builtin_ia32_paddd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_q_128 : GCCBuiltin<"__builtin_ia32_paddq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_q_256 : GCCBuiltin<"__builtin_ia32_paddq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_padd_q_512 : GCCBuiltin<"__builtin_ia32_paddq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_b_128 : GCCBuiltin<"__builtin_ia32_psubb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_b_256 : GCCBuiltin<"__builtin_ia32_psubb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_b_512 : GCCBuiltin<"__builtin_ia32_psubb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_w_128 : GCCBuiltin<"__builtin_ia32_psubw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_w_256 : GCCBuiltin<"__builtin_ia32_psubw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_w_512 : GCCBuiltin<"__builtin_ia32_psubw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_b_128 : GCCBuiltin<"__builtin_ia32_psubsb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_b_256 : GCCBuiltin<"__builtin_ia32_psubsb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_b_512 : GCCBuiltin<"__builtin_ia32_psubsb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_w_128 : GCCBuiltin<"__builtin_ia32_psubsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_w_256 : GCCBuiltin<"__builtin_ia32_psubsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubs_w_512 : GCCBuiltin<"__builtin_ia32_psubsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_b_128 : GCCBuiltin<"__builtin_ia32_psubusb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_b_256 : GCCBuiltin<"__builtin_ia32_psubusb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_b_512 : GCCBuiltin<"__builtin_ia32_psubusb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_w_128 : GCCBuiltin<"__builtin_ia32_psubusw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_w_256 : GCCBuiltin<"__builtin_ia32_psubusw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psubus_w_512 : GCCBuiltin<"__builtin_ia32_psubusw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_d_128 : GCCBuiltin<"__builtin_ia32_psubd128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_d_256 : GCCBuiltin<"__builtin_ia32_psubd256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_d_512 : GCCBuiltin<"__builtin_ia32_psubd512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_q_128 : GCCBuiltin<"__builtin_ia32_psubq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_q_256 : GCCBuiltin<"__builtin_ia32_psubq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_psub_q_512 : GCCBuiltin<"__builtin_ia32_psubq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
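+  // Editorial note: pmulu_dq/pmul_dq widen -- each multiplies the low 32-bit
+  // half of every 64-bit lane (unsigned resp. signed), which is why a v4i32
+  // source pair produces a v2i64 result below.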
+ def int_x86_avx512_mask_pmulu_dq_128 : GCCBuiltin<"__builtin_ia32_pmuludq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_dq_128 : GCCBuiltin<"__builtin_ia32_pmuldq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulu_dq_256 : GCCBuiltin<"__builtin_ia32_pmuludq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_dq_256 : GCCBuiltin<"__builtin_ia32_pmuldq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_w_128 : GCCBuiltin<"__builtin_ia32_pmullw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_w_256 : GCCBuiltin<"__builtin_ia32_pmullw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_w_512 : GCCBuiltin<"__builtin_ia32_pmullw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_d_128 : GCCBuiltin<"__builtin_ia32_pmulld128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_d_256 : GCCBuiltin<"__builtin_ia32_pmulld256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_d_512 : GCCBuiltin<"__builtin_ia32_pmulld512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_q_128 : GCCBuiltin<"__builtin_ia32_pmullq128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_q_256 : GCCBuiltin<"__builtin_ia32_pmullq256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmull_q_512 : GCCBuiltin<"__builtin_ia32_pmullq512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulh_w_512 : GCCBuiltin<"__builtin_ia32_pmulhw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulhu_w_128 : GCCBuiltin<"__builtin_ia32_pmulhuw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulhu_w_256 : GCCBuiltin<"__builtin_ia32_pmulhuw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulh_w_128 : GCCBuiltin<"__builtin_ia32_pmulhw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmulh_w_256 : GCCBuiltin<"__builtin_ia32_pmulhw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_b_512 : GCCBuiltin<"__builtin_ia32_pavgb512_mask">,
+ Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
+ llvm_v64i8_ty, llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_w_512 : GCCBuiltin<"__builtin_ia32_pavgw512_mask">,
+ Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
+ llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_b_128 : GCCBuiltin<"__builtin_ia32_pavgb128_mask">,
+ Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ llvm_v16i8_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_b_256 : GCCBuiltin<"__builtin_ia32_pavgb256_mask">,
+ Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
+ llvm_v32i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_w_128 : GCCBuiltin<"__builtin_ia32_pavgw128_mask">,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pavg_w_256 : GCCBuiltin<"__builtin_ia32_pavgw256_mask">,
+ Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
+ llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddw_d_128 :
+ GCCBuiltin<"__builtin_ia32_pmaddwd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddw_d_256 :
+ GCCBuiltin<"__builtin_ia32_pmaddwd256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddw_d_512 :
+ GCCBuiltin<"__builtin_ia32_pmaddwd512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddubs_w_128 :
+ GCCBuiltin<"__builtin_ia32_pmaddubsw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddubs_w_256 :
+ GCCBuiltin<"__builtin_ia32_pmaddubsw256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmaddubs_w_512 :
+ GCCBuiltin<"__builtin_ia32_pmaddubsw512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_dbpsadbw_128 :
+ GCCBuiltin<"__builtin_ia32_dbpsadbw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v8i16_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_dbpsadbw_256 :
+ GCCBuiltin<"__builtin_ia32_dbpsadbw256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v16i16_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_dbpsadbw_512 :
+ GCCBuiltin<"__builtin_ia32_dbpsadbw512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v32i16_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+}
+
+// Gather and Scatter ops
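+// Editorial note: the gathers below take (passthru, base pointer, index
+// vector, mask, scale) and only read through the pointer argument
+// (IntrReadArgMem). A minimal sketch of a call, assuming the usual '_' to '.'
+// name mapping:
+//   %v = call <8 x double> @llvm.x86.avx512.gather.dpd.512(
+//            <8 x double> %passthru, i8* %base, <8 x i32> %index,
+//            i8 %mask, i32 8)
+// loads, for each set mask bit, the double at %base + %index[i] * 8.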
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_gather_dpd_512 : GCCBuiltin<"__builtin_ia32_gathersiv8df">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_dps_512 : GCCBuiltin<"__builtin_ia32_gathersiv16sf">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherdiv8df">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_qps_512 : GCCBuiltin<"__builtin_ia32_gatherdiv16sf">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather_dpq_512 : GCCBuiltin<"__builtin_ia32_gathersiv8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_dpi_512 : GCCBuiltin<"__builtin_ia32_gathersiv16si">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_v16i32_ty, llvm_i16_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_qpq_512 : GCCBuiltin<"__builtin_ia32_gatherdiv8di">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+ def int_x86_avx512_gather_qpi_512 : GCCBuiltin<"__builtin_ia32_gatherdiv16si">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_v8i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div2_df :
+ GCCBuiltin<"__builtin_ia32_gather3div2df">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div2_di :
+ GCCBuiltin<"__builtin_ia32_gather3div2di">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div4_df :
+ GCCBuiltin<"__builtin_ia32_gather3div4df">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div4_di :
+ GCCBuiltin<"__builtin_ia32_gather3div4di">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div4_sf :
+ GCCBuiltin<"__builtin_ia32_gather3div4sf">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div4_si :
+ GCCBuiltin<"__builtin_ia32_gather3div4si">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div8_sf :
+ GCCBuiltin<"__builtin_ia32_gather3div8sf">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3div8_si :
+ GCCBuiltin<"__builtin_ia32_gather3div8si">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv2_df :
+ GCCBuiltin<"__builtin_ia32_gather3siv2df">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv2_di :
+ GCCBuiltin<"__builtin_ia32_gather3siv2di">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv4_df :
+ GCCBuiltin<"__builtin_ia32_gather3siv4df">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv4_di :
+ GCCBuiltin<"__builtin_ia32_gather3siv4di">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v4i64_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv4_sf :
+ GCCBuiltin<"__builtin_ia32_gather3siv4sf">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv4_si :
+ GCCBuiltin<"__builtin_ia32_gather3siv4si">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv8_sf :
+ GCCBuiltin<"__builtin_ia32_gather3siv8sf">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+ def int_x86_avx512_gather3siv8_si :
+ GCCBuiltin<"__builtin_ia32_gather3siv8si">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty, llvm_i32_ty],
+ [IntrReadArgMem]>;
+
+// scatter
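+// (Editorial note: scatters reverse the convention -- (base pointer, mask,
+// index vector, value vector, scale) -- and are IntrReadWriteArgMem since
+// they write through the pointer argument.)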
+ def int_x86_avx512_scatter_dpd_512 : GCCBuiltin<"__builtin_ia32_scattersiv8df">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+ llvm_v8i32_ty, llvm_v8f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_dps_512 : GCCBuiltin<"__builtin_ia32_scattersiv16sf">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
+ llvm_v16i32_ty, llvm_v16f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterdiv8df">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+ llvm_v8i64_ty, llvm_v8f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_qps_512 : GCCBuiltin<"__builtin_ia32_scatterdiv16sf">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+ llvm_v8i64_ty, llvm_v8f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatter_dpq_512 : GCCBuiltin<"__builtin_ia32_scattersiv8di">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+ llvm_v8i32_ty, llvm_v8i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_dpi_512 : GCCBuiltin<"__builtin_ia32_scattersiv16si">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i16_ty,
+ llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_qpq_512 : GCCBuiltin<"__builtin_ia32_scatterdiv8di">,
+          Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty,
+                     llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatter_qpi_512 : GCCBuiltin<"__builtin_ia32_scatterdiv16si">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_i8_ty, llvm_v8i64_ty, llvm_v8i32_ty,
+ llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv2_df :
+ GCCBuiltin<"__builtin_ia32_scatterdiv2df">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv2_di :
+ GCCBuiltin<"__builtin_ia32_scatterdiv2di">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv4_df :
+ GCCBuiltin<"__builtin_ia32_scatterdiv4df">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv4_di :
+ GCCBuiltin<"__builtin_ia32_scatterdiv4di">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv4_sf :
+ GCCBuiltin<"__builtin_ia32_scatterdiv4sf">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv4_si :
+ GCCBuiltin<"__builtin_ia32_scatterdiv4si">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v2i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv8_sf :
+ GCCBuiltin<"__builtin_ia32_scatterdiv8sf">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scatterdiv8_si :
+ GCCBuiltin<"__builtin_ia32_scatterdiv8si">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i64_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv2_df :
+ GCCBuiltin<"__builtin_ia32_scattersiv2df">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv2_di :
+ GCCBuiltin<"__builtin_ia32_scattersiv2di">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v2i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv4_df :
+ GCCBuiltin<"__builtin_ia32_scattersiv4df">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv4_di :
+ GCCBuiltin<"__builtin_ia32_scattersiv4di">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i64_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv4_sf :
+ GCCBuiltin<"__builtin_ia32_scattersiv4sf">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv4_si :
+ GCCBuiltin<"__builtin_ia32_scattersiv4si">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv8_sf :
+ GCCBuiltin<"__builtin_ia32_scattersiv8sf">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8f32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_scattersiv8_si :
+ GCCBuiltin<"__builtin_ia32_scattersiv8si">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_i8_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+
+ // gather prefetch
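+  // (Editorial note: operand order here is (mask, index vector, base pointer,
+  // scale, hint); nothing is returned, the addressed lines are only
+  // prefetched.)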
+ def int_x86_avx512_gatherpf_dpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfdpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_dps_512 : GCCBuiltin<"__builtin_ia32_gatherpfdps">,
+ Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_qpd_512 : GCCBuiltin<"__builtin_ia32_gatherpfqpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_gatherpf_qps_512 : GCCBuiltin<"__builtin_ia32_gatherpfqps">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+
+ // scatter prefetch
+ def int_x86_avx512_scatterpf_dpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfdpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_dps_512 : GCCBuiltin<"__builtin_ia32_scatterpfdps">,
+ Intrinsic<[], [llvm_i16_ty, llvm_v16i32_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_qpd_512 : GCCBuiltin<"__builtin_ia32_scatterpfqpd">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_scatterpf_qps_512 : GCCBuiltin<"__builtin_ia32_scatterpfqps">,
+ Intrinsic<[], [llvm_i8_ty, llvm_v8i64_ty, llvm_ptr_ty,
+ llvm_i32_ty, llvm_i32_ty], [IntrReadWriteArgMem]>;
+}
+
+// AVX-512 conflict detection (VPCONFLICT) and leading-zero-count (VPLZCNT)
+// instructions
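+// (Editorial note: VPCONFLICT gives each element a bitmask of which preceding
+// elements of the source hold the same value; VPLZCNT counts leading zero
+// bits per element. Both follow the (src, passthru, mask) masked convention.)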
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_conflict_d_128 :
+ GCCBuiltin<"__builtin_ia32_vpconflictsi_128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_conflict_d_256 :
+ GCCBuiltin<"__builtin_ia32_vpconflictsi_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_conflict_d_512 :
+ GCCBuiltin<"__builtin_ia32_vpconflictsi_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_conflict_q_128 :
+ GCCBuiltin<"__builtin_ia32_vpconflictdi_128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_conflict_q_256 :
+ GCCBuiltin<"__builtin_ia32_vpconflictdi_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_conflict_q_512 :
+ GCCBuiltin<"__builtin_ia32_vpconflictdi_512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_lzcnt_d_128 :
+ GCCBuiltin<"__builtin_ia32_vplzcntd_128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_lzcnt_d_256 :
+ GCCBuiltin<"__builtin_ia32_vplzcntd_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_lzcnt_d_512 :
+ GCCBuiltin<"__builtin_ia32_vplzcntd_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_lzcnt_q_128 :
+ GCCBuiltin<"__builtin_ia32_vplzcntq_128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_lzcnt_q_256 :
+ GCCBuiltin<"__builtin_ia32_vplzcntq_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_lzcnt_q_512 :
+ GCCBuiltin<"__builtin_ia32_vplzcntq_512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+}
+
+// Vector blend
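+// Editorial note: for blendm, each mask bit selects per element between the
+// two sources -- bit set takes the second operand, clear takes the first. A
+// minimal sketch, assuming the usual name mapping:
+//   %r = call <16 x float> @llvm.x86.avx512.mask.blend.ps.512(
+//            <16 x float> %a, <16 x float> %b, i16 %m)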
+let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
+ def int_x86_avx512_mask_blend_ps_512 : GCCBuiltin<"__builtin_ia32_blendmps_512_mask">,
+ Intrinsic<[llvm_v16f32_ty],
+ [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_ps_256 : GCCBuiltin<"__builtin_ia32_blendmps_256_mask">,
+ Intrinsic<[llvm_v8f32_ty],
+ [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_ps_128 : GCCBuiltin<"__builtin_ia32_blendmps_128_mask">,
+ Intrinsic<[llvm_v4f32_ty],
+ [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_pd_512 : GCCBuiltin<"__builtin_ia32_blendmpd_512_mask">,
+ Intrinsic<[llvm_v8f64_ty],
+ [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_pd_256 : GCCBuiltin<"__builtin_ia32_blendmpd_256_mask">,
+ Intrinsic<[llvm_v4f64_ty],
+ [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_pd_128 : GCCBuiltin<"__builtin_ia32_blendmpd_128_mask">,
+ Intrinsic<[llvm_v2f64_ty],
+ [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_blend_d_512 : GCCBuiltin<"__builtin_ia32_blendmd_512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_q_512 : GCCBuiltin<"__builtin_ia32_blendmq_512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_d_256 : GCCBuiltin<"__builtin_ia32_blendmd_256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_q_256 : GCCBuiltin<"__builtin_ia32_blendmq_256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_d_128 : GCCBuiltin<"__builtin_ia32_blendmd_128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_q_128 : GCCBuiltin<"__builtin_ia32_blendmq_128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_blend_w_512 : GCCBuiltin<"__builtin_ia32_blendmw_512_mask">,
+ Intrinsic<[llvm_v32i16_ty],
+ [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_w_256 : GCCBuiltin<"__builtin_ia32_blendmw_256_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_w_128 : GCCBuiltin<"__builtin_ia32_blendmw_128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_b_512 : GCCBuiltin<"__builtin_ia32_blendmb_512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_b_256 : GCCBuiltin<"__builtin_ia32_blendmb_256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_blend_b_128 : GCCBuiltin<"__builtin_ia32_blendmb_128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+
+}
+
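+// Align ops
+// (Editorial note: valign concatenates the two sources and shifts right by
+// the i32 immediate in element units; palignr does the same per 128-bit lane
+// in byte units. The trailing (passthru, mask) pair is the usual writemask.)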
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_valign_q_512 :
+ GCCBuiltin<"__builtin_ia32_alignq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_valign_d_512 :
+ GCCBuiltin<"__builtin_ia32_alignd512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_valign_q_256 :
+ GCCBuiltin<"__builtin_ia32_alignq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_valign_d_256 :
+ GCCBuiltin<"__builtin_ia32_alignd256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_valign_q_128 :
+ GCCBuiltin<"__builtin_ia32_alignq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_valign_d_128 :
+ GCCBuiltin<"__builtin_ia32_alignd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_palignr_128 :
+ GCCBuiltin<"__builtin_ia32_palignr128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_v16i8_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_palignr_256 :
+ GCCBuiltin<"__builtin_ia32_palignr256_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty, llvm_v32i8_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_palignr_512 :
+ GCCBuiltin<"__builtin_ia32_palignr512_mask">,
+ Intrinsic<[llvm_v64i8_ty],
+ [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty, llvm_v64i8_ty,
+ llvm_i64_ty], [IntrNoMem]>;
+}
+
+// Compares
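+// Editorial note: the result is a scalar bitmask whose width equals the lane
+// count (e.g. i64 for 64 bytes). cmp/ucmp take an extra i32 predicate
+// immediate for signed resp. unsigned comparison; the final operand is a
+// mask ANDed into the comparison result. A minimal sketch, assuming the
+// usual name mapping:
+//   %k = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(
+//            <16 x i32> %a, <16 x i32> %b, i16 %m)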
+let TargetPrefix = "x86" in {
+ // 512-bit
+ def int_x86_avx512_vcomi_sd : GCCBuiltin<"__builtin_ia32_vcomisd">,
+ Intrinsic<[llvm_i32_ty], [llvm_v2f64_ty,
+ llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_vcomi_ss : GCCBuiltin<"__builtin_ia32_vcomiss">,
+ Intrinsic<[llvm_i32_ty], [llvm_v4f32_ty,
+ llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_b_512 : GCCBuiltin<"__builtin_ia32_pcmpeqb512_mask">,
+ Intrinsic<[llvm_i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_w_512 : GCCBuiltin<"__builtin_ia32_pcmpeqw512_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_d_512 : GCCBuiltin<"__builtin_ia32_pcmpeqd512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_q_512 : GCCBuiltin<"__builtin_ia32_pcmpeqq512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pcmpgt_b_512: GCCBuiltin<"__builtin_ia32_pcmpgtb512_mask">,
+ Intrinsic<[llvm_i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i64_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_w_512: GCCBuiltin<"__builtin_ia32_pcmpgtw512_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_d_512: GCCBuiltin<"__builtin_ia32_pcmpgtd512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_q_512: GCCBuiltin<"__builtin_ia32_pcmpgtq512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cmp_b_512: GCCBuiltin<"__builtin_ia32_cmpb512_mask">,
+ Intrinsic<[llvm_i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty,
+ llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_w_512: GCCBuiltin<"__builtin_ia32_cmpw512_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_d_512: GCCBuiltin<"__builtin_ia32_cmpd512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+          llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_q_512: GCCBuiltin<"__builtin_ia32_cmpq512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_ucmp_b_512: GCCBuiltin<"__builtin_ia32_ucmpb512_mask">,
+ Intrinsic<[llvm_i64_ty], [llvm_v64i8_ty, llvm_v64i8_ty, llvm_i32_ty,
+ llvm_i64_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_w_512: GCCBuiltin<"__builtin_ia32_ucmpw512_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_d_512: GCCBuiltin<"__builtin_ia32_ucmpd512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_q_512: GCCBuiltin<"__builtin_ia32_ucmpq512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ // 256-bit
+ def int_x86_avx512_mask_pcmpeq_b_256 : GCCBuiltin<"__builtin_ia32_pcmpeqb256_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_w_256 : GCCBuiltin<"__builtin_ia32_pcmpeqw256_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_d_256 : GCCBuiltin<"__builtin_ia32_pcmpeqd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_q_256 : GCCBuiltin<"__builtin_ia32_pcmpeqq256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pcmpgt_b_256: GCCBuiltin<"__builtin_ia32_pcmpgtb256_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_w_256: GCCBuiltin<"__builtin_ia32_pcmpgtw256_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_d_256: GCCBuiltin<"__builtin_ia32_pcmpgtd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_q_256: GCCBuiltin<"__builtin_ia32_pcmpgtq256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cmp_b_256: GCCBuiltin<"__builtin_ia32_cmpb256_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_w_256: GCCBuiltin<"__builtin_ia32_cmpw256_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_d_256: GCCBuiltin<"__builtin_ia32_cmpd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_q_256: GCCBuiltin<"__builtin_ia32_cmpq256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_ucmp_b_256: GCCBuiltin<"__builtin_ia32_ucmpb256_mask">,
+ Intrinsic<[llvm_i32_ty], [llvm_v32i8_ty, llvm_v32i8_ty, llvm_i32_ty,
+ llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_w_256: GCCBuiltin<"__builtin_ia32_ucmpw256_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_d_256: GCCBuiltin<"__builtin_ia32_ucmpd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_q_256: GCCBuiltin<"__builtin_ia32_ucmpq256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ // 128-bit
+ def int_x86_avx512_mask_pcmpeq_b_128 : GCCBuiltin<"__builtin_ia32_pcmpeqb128_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_w_128 : GCCBuiltin<"__builtin_ia32_pcmpeqw128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_d_128 : GCCBuiltin<"__builtin_ia32_pcmpeqd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpeq_q_128 : GCCBuiltin<"__builtin_ia32_pcmpeqq128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pcmpgt_b_128: GCCBuiltin<"__builtin_ia32_pcmpgtb128_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_w_128: GCCBuiltin<"__builtin_ia32_pcmpgtw128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_d_128: GCCBuiltin<"__builtin_ia32_pcmpgtd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pcmpgt_q_128: GCCBuiltin<"__builtin_ia32_pcmpgtq128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+
+ def int_x86_avx512_mask_cmp_b_128: GCCBuiltin<"__builtin_ia32_cmpb128_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_w_128: GCCBuiltin<"__builtin_ia32_cmpw128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_d_128: GCCBuiltin<"__builtin_ia32_cmpd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_q_128: GCCBuiltin<"__builtin_ia32_cmpq128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_ucmp_b_128: GCCBuiltin<"__builtin_ia32_ucmpb128_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_w_128: GCCBuiltin<"__builtin_ia32_ucmpw128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_d_128: GCCBuiltin<"__builtin_ia32_ucmpd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_ucmp_q_128: GCCBuiltin<"__builtin_ia32_ucmpq128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+}
+
+// Compress, Expand
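+// Editorial note: compress packs the mask-selected elements contiguously into
+// the low lanes of the result (remaining lanes come from passthru); expand is
+// the inverse, spreading consecutive source elements into the mask-selected
+// lanes. The store/load variants do the same directly to/from memory, hence
+// IntrReadWriteArgMem resp. IntrReadArgMem.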
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_compress_ps_512 :
+ GCCBuiltin<"__builtin_ia32_compresssf512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_pd_512 :
+ GCCBuiltin<"__builtin_ia32_compressdf512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_ps_256 :
+ GCCBuiltin<"__builtin_ia32_compresssf256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_pd_256 :
+ GCCBuiltin<"__builtin_ia32_compressdf256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_ps_128 :
+ GCCBuiltin<"__builtin_ia32_compresssf128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_pd_128 :
+ GCCBuiltin<"__builtin_ia32_compressdf128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_compress_store_ps_512 :
+ GCCBuiltin<"__builtin_ia32_compressstoresf512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_pd_512 :
+ GCCBuiltin<"__builtin_ia32_compressstoredf512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_ps_256 :
+ GCCBuiltin<"__builtin_ia32_compressstoresf256_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_pd_256 :
+ GCCBuiltin<"__builtin_ia32_compressstoredf256_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_ps_128 :
+ GCCBuiltin<"__builtin_ia32_compressstoresf128_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_pd_128 :
+ GCCBuiltin<"__builtin_ia32_compressstoredf128_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+
+ def int_x86_avx512_mask_compress_d_512 :
+ GCCBuiltin<"__builtin_ia32_compresssi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_q_512 :
+ GCCBuiltin<"__builtin_ia32_compressdi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_d_256 :
+ GCCBuiltin<"__builtin_ia32_compresssi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_q_256 :
+ GCCBuiltin<"__builtin_ia32_compressdi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_d_128 :
+ GCCBuiltin<"__builtin_ia32_compresssi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_compress_q_128 :
+ GCCBuiltin<"__builtin_ia32_compressdi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_compress_store_d_512 :
+ GCCBuiltin<"__builtin_ia32_compressstoresi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_q_512 :
+ GCCBuiltin<"__builtin_ia32_compressstoredi512_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_d_256 :
+ GCCBuiltin<"__builtin_ia32_compressstoresi256_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_q_256 :
+ GCCBuiltin<"__builtin_ia32_compressstoredi256_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_d_128 :
+ GCCBuiltin<"__builtin_ia32_compressstoresi128_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_compress_store_q_128 :
+ GCCBuiltin<"__builtin_ia32_compressstoredi128_mask">,
+ Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrReadWriteArgMem]>;
+
+// expand
+ def int_x86_avx512_mask_expand_ps_512 :
+ GCCBuiltin<"__builtin_ia32_expandsf512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_pd_512 :
+ GCCBuiltin<"__builtin_ia32_expanddf512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_ps_256 :
+ GCCBuiltin<"__builtin_ia32_expandsf256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_pd_256 :
+ GCCBuiltin<"__builtin_ia32_expanddf256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_ps_128 :
+ GCCBuiltin<"__builtin_ia32_expandsf128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_pd_128 :
+ GCCBuiltin<"__builtin_ia32_expanddf128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_expand_load_ps_512 :
+ GCCBuiltin<"__builtin_ia32_expandloadsf512_mask">,
+ Intrinsic<[llvm_v16f32_ty], [llvm_ptr_ty, llvm_v16f32_ty,
+ llvm_i16_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_pd_512 :
+ GCCBuiltin<"__builtin_ia32_expandloaddf512_mask">,
+ Intrinsic<[llvm_v8f64_ty], [llvm_ptr_ty, llvm_v8f64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_ps_256 :
+ GCCBuiltin<"__builtin_ia32_expandloadsf256_mask">,
+ Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_pd_256 :
+ GCCBuiltin<"__builtin_ia32_expandloaddf256_mask">,
+ Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_ps_128 :
+ GCCBuiltin<"__builtin_ia32_expandloadsf128_mask">,
+ Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_pd_128 :
+ GCCBuiltin<"__builtin_ia32_expandloaddf128_mask">,
+ Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+
+ def int_x86_avx512_mask_expand_d_512 :
+ GCCBuiltin<"__builtin_ia32_expandsi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_q_512 :
+ GCCBuiltin<"__builtin_ia32_expanddi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_d_256 :
+ GCCBuiltin<"__builtin_ia32_expandsi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_q_256 :
+ GCCBuiltin<"__builtin_ia32_expanddi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_d_128 :
+ GCCBuiltin<"__builtin_ia32_expandsi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_expand_q_128 :
+ GCCBuiltin<"__builtin_ia32_expanddi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_expand_load_d_512 :
+ GCCBuiltin<"__builtin_ia32_expandloadsi512_mask">,
+ Intrinsic<[llvm_v16i32_ty], [llvm_ptr_ty, llvm_v16i32_ty,
+ llvm_i16_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_q_512 :
+ GCCBuiltin<"__builtin_ia32_expandloaddi512_mask">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty, llvm_v8i64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_d_256 :
+ GCCBuiltin<"__builtin_ia32_expandloadsi256_mask">,
+ Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_q_256 :
+ GCCBuiltin<"__builtin_ia32_expandloaddi256_mask">,
+ Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_d_128 :
+ GCCBuiltin<"__builtin_ia32_expandloadsi128_mask">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+ def int_x86_avx512_mask_expand_load_q_128 :
+ GCCBuiltin<"__builtin_ia32_expandloaddi128_mask">,
+ Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty,
+ llvm_i8_ty], [IntrReadArgMem]>;
+
+}
+
+// truncate
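+// Editorial note: pmov truncates each element to the narrower type; pmovs and
+// pmovus are the signed- and unsigned-saturating variants. The _mem_ forms
+// store the truncated vector through the pointer operand under the mask
+// instead of returning it.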
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_pmov_qb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v2i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i64_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v2i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqw512_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw512_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw512_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i64_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qd_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qd_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovqd128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qd_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qd_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qd_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v2i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qd_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v2i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qd_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqd256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qd_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovqd256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qd_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qd_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qd_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd256_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i64_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qd_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_qd_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqd512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_qd_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovqd512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_qd_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_qd_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsqd512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_qd_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd512_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i64_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_qd_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusqd512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i64_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_db_128 :
+ GCCBuiltin<"__builtin_ia32_pmovdb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_db_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovdb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_db_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_db_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_db_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v4i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_db_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_db_256 :
+ GCCBuiltin<"__builtin_ia32_pmovdb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_db_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovdb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_db_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_db_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_db_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i32_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_db_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_db_512 :
+ GCCBuiltin<"__builtin_ia32_pmovdb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_db_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovdb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_db_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_db_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsdb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_db_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb512_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i32_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_db_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusdb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_dw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovdw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_dw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovdw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_dw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_dw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_dw_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw128_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v4i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_dw_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_dw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovdw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_dw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovdw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_dw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_dw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_dw_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw256_mask">,
+ Intrinsic<[llvm_v8i16_ty],
+ [llvm_v8i32_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_dw_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i32_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_dw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovdw512_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_dw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovdw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_dw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw512_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_dw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovsdw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_dw_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw512_mask">,
+ Intrinsic<[llvm_v16i16_ty],
+ [llvm_v16i32_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_dw_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovusdw512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i32_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_wb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovwb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_wb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovwb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_wb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovswb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_wb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovswb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_wb_128 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb128_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v8i16_ty, llvm_v16i8_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_wb_mem_128 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb128mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v8i16_ty, llvm_i8_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_wb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovwb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_wb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovwb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_wb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovswb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_wb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovswb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_wb_256 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb256_mask">,
+ Intrinsic<[llvm_v16i8_ty],
+ [llvm_v16i16_ty, llvm_v16i8_ty, llvm_i16_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_wb_mem_256 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb256mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v16i16_ty, llvm_i16_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmov_wb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovwb512_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmov_wb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovwb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovs_wb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovswb512_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovs_wb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovswb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+ def int_x86_avx512_mask_pmovus_wb_512 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb512_mask">,
+ Intrinsic<[llvm_v32i8_ty],
+ [llvm_v32i16_ty, llvm_v32i8_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_x86_avx512_mask_pmovus_wb_mem_512 :
+ GCCBuiltin<"__builtin_ia32_pmovuswb512mem_mask">,
+ Intrinsic<[],
+ [llvm_ptr_ty, llvm_v32i16_ty, llvm_i32_ty],
+ [IntrReadWriteArgMem]>;
+}
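+
+// Illustrative IR-level use of the truncating moves above (a sketch, not part
+// of the definitions; %a, %passthru and %mask are assumed values): the
+// register form returns the narrowed vector merged with the pass-through
+// operand under the mask,
+//   %r = call <16 x i8> @llvm.x86.avx512.mask.pmov.qb.128(
+//            <2 x i64> %a, <16 x i8> %passthru, i8 %mask)
+// while the "mem" form stores the masked result through the pointer and
+// returns nothing. The "s"/"us" variants saturate (signed/unsigned) rather
+// than truncate.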
+
+// Bitwise ternary logic
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_pternlog_d_128 :
+ GCCBuiltin<"__builtin_ia32_pternlogd128_mask">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_d_128 :
+ GCCBuiltin<"__builtin_ia32_pternlogd128_maskz">,
+ Intrinsic<[llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pternlog_d_256 :
+ GCCBuiltin<"__builtin_ia32_pternlogd256_mask">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_d_256 :
+ GCCBuiltin<"__builtin_ia32_pternlogd256_maskz">,
+ Intrinsic<[llvm_v8i32_ty],
+ [llvm_v8i32_ty, llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pternlog_d_512 :
+ GCCBuiltin<"__builtin_ia32_pternlogd512_mask">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_d_512 :
+ GCCBuiltin<"__builtin_ia32_pternlogd512_maskz">,
+ Intrinsic<[llvm_v16i32_ty],
+ [llvm_v16i32_ty, llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty,
+ llvm_i16_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pternlog_q_128 :
+ GCCBuiltin<"__builtin_ia32_pternlogq128_mask">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_q_128 :
+ GCCBuiltin<"__builtin_ia32_pternlogq128_maskz">,
+ Intrinsic<[llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pternlog_q_256 :
+ GCCBuiltin<"__builtin_ia32_pternlogq256_mask">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_q_256 :
+ GCCBuiltin<"__builtin_ia32_pternlogq256_maskz">,
+ Intrinsic<[llvm_v4i64_ty],
+ [llvm_v4i64_ty, llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_mask_pternlog_q_512 :
+ GCCBuiltin<"__builtin_ia32_pternlogq512_mask">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_maskz_pternlog_q_512 :
+ GCCBuiltin<"__builtin_ia32_pternlogq512_maskz">,
+ Intrinsic<[llvm_v8i64_ty],
+ [llvm_v8i64_ty, llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty,
+ llvm_i8_ty], [IntrNoMem]>;
+}
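+
+// Illustrative note (not part of the definitions): the i32 immediate encodes
+// an 8-bit truth table indexed by the three source bits, so 0xe8 computes a
+// bitwise majority. A sketch of a fully-unmasked 128-bit form in IR, with
+// %a, %b and %c assumed:
+//   %maj = call <4 x i32> @llvm.x86.avx512.mask.pternlog.d.128(
+//              <4 x i32> %a, <4 x i32> %b, <4 x i32> %c, i32 232, i8 -1)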
+
+// Misc.
+let TargetPrefix = "x86" in {
+ def int_x86_avx512_mask_cmp_ps_512 :
+ GCCBuiltin<"__builtin_ia32_cmpps512_mask">,
+ Intrinsic<[llvm_i16_ty], [llvm_v16f32_ty, llvm_v16f32_ty,
+ llvm_i32_ty, llvm_i16_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_pd_512 :
+ GCCBuiltin<"__builtin_ia32_cmppd512_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8f64_ty, llvm_v8f64_ty,
+ llvm_i32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_ps_256 :
+ GCCBuiltin<"__builtin_ia32_cmpps256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v8f32_ty, llvm_v8f32_ty,
+ llvm_i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_pd_256 :
+ GCCBuiltin<"__builtin_ia32_cmppd256_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f64_ty, llvm_v4f64_ty,
+ llvm_i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_ps_128 :
+ GCCBuiltin<"__builtin_ia32_cmpps128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_pd_128 :
+ GCCBuiltin<"__builtin_ia32_cmppd128_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i32_ty, llvm_i8_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_ss :
+ GCCBuiltin<"__builtin_ia32_cmpss_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v4f32_ty, llvm_v4f32_ty,
+ llvm_i32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+ def int_x86_avx512_mask_cmp_sd :
+ GCCBuiltin<"__builtin_ia32_cmpsd_mask">,
+ Intrinsic<[llvm_i8_ty], [llvm_v2f64_ty, llvm_v2f64_ty,
+ llvm_i32_ty, llvm_i8_ty, llvm_i32_ty], [IntrNoMem]>;
+
+ def int_x86_avx512_movntdqa :
+ GCCBuiltin<"__builtin_ia32_movntdqa512">,
+ Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+}
+
+//===----------------------------------------------------------------------===//
+// SHA intrinsics
+let TargetPrefix = "x86" in {
+ def int_x86_sha1rnds4 : GCCBuiltin<"__builtin_ia32_sha1rnds4">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i8_ty],
+ [IntrNoMem]>;
+ def int_x86_sha1nexte : GCCBuiltin<"__builtin_ia32_sha1nexte">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sha1msg1 : GCCBuiltin<"__builtin_ia32_sha1msg1">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sha1msg2 : GCCBuiltin<"__builtin_ia32_sha1msg2">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sha256rnds2 : GCCBuiltin<"__builtin_ia32_sha256rnds2">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+ def int_x86_sha256msg1 : GCCBuiltin<"__builtin_ia32_sha256msg1">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ def int_x86_sha256msg2 : GCCBuiltin<"__builtin_ia32_sha256msg2">,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+}
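+
+// Usage sketch (illustrative only; %abcd and %msg are assumed values): these
+// map one-to-one onto the SHA ISA extension, e.g. four SHA-1 rounds with
+// round-function selector 0:
+//   %st = call <4 x i32> @llvm.x86.sha1rnds4(<4 x i32> %abcd,
+//                                            <4 x i32> %msg, i8 0)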
diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsXCore.td b/contrib/llvm/include/llvm/IR/IntrinsicsXCore.td
new file mode 100644
index 0000000..b614e1e
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/IntrinsicsXCore.td
@@ -0,0 +1,121 @@
+//==- IntrinsicsXCore.td - XCore intrinsics -*- tablegen -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the XCore-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
+ // Miscellaneous instructions.
+ def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>,
+ GCCBuiltin<"__builtin_bitrev">;
+ def int_xcore_crc8 : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
+ [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_xcore_crc32 : Intrinsic<[llvm_i32_ty],
+ [llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_xcore_sext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_xcore_zext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem]>;
+ def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>,
+ GCCBuiltin<"__builtin_getid">;
+ def int_xcore_getps : Intrinsic<[llvm_i32_ty],[llvm_i32_ty]>,
+ GCCBuiltin<"__builtin_getps">;
+ def int_xcore_setps : Intrinsic<[],[llvm_i32_ty, llvm_i32_ty]>,
+ GCCBuiltin<"__builtin_setps">;
+ def int_xcore_geted : Intrinsic<[llvm_i32_ty],[]>;
+ def int_xcore_getet : Intrinsic<[llvm_i32_ty],[]>;
+ def int_xcore_setsr : Intrinsic<[],[llvm_i32_ty]>;
+ def int_xcore_clrsr : Intrinsic<[],[llvm_i32_ty]>;
+
+ // Resource instructions.
+ def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
+ def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<0>]>;
+ def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_testct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_testwct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_inshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_outshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_settw : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setv : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
+ [NoCapture<0>, NoCapture<1>]>;
+ def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
+ [NoCapture<0>, NoCapture<1>]>;
+ def int_xcore_setpsc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
+ [NoCapture<0>]>;
+ def int_xcore_peek : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_endin : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+
+ // Intrinsics for events.
+ def int_xcore_waitevent : Intrinsic<[llvm_ptr_ty],[], [IntrReadMem]>;
+
+ // If any of the resources owned by the thread are ready this returns the
+ // vector of one of the ready resources. If no resources owned by the thread
+ // are ready then the operand passed to the intrinsic is returned.
+ def int_xcore_checkevent : Intrinsic<[llvm_ptr_ty],[llvm_ptr_ty]>;
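+ // Illustrative IR (a sketch; %fallback is an assumed value that is returned
+ // unchanged when no owned resource is ready):
+ //   %dest = call i8* @llvm.xcore.checkevent(i8* %fallback)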
+
+ def int_xcore_clre : Intrinsic<[],[],[]>;
+
+ // Intrinsics for threads.
+ def int_xcore_getst : Intrinsic <[llvm_anyptr_ty],[llvm_anyptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_ssync : Intrinsic <[],[]>;
+ def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<0>]>;
+ def int_xcore_initsp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_initpc : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_initlr : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_initcp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+ def int_xcore_initdp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
+ [NoCapture<0>]>;
+}
diff --git a/contrib/llvm/include/llvm/IR/LLVMContext.h b/contrib/llvm/include/llvm/IR/LLVMContext.h
new file mode 100644
index 0000000..c546fc3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/LLVMContext.h
@@ -0,0 +1,235 @@
+//===-- llvm/LLVMContext.h - Class for managing "global" state --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares LLVMContext, a container of "global" state in LLVM, such
+// as the global type and constant uniquing tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LLVMCONTEXT_H
+#define LLVM_IR_LLVMCONTEXT_H
+
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Options.h"
+
+namespace llvm {
+
+class LLVMContextImpl;
+class StringRef;
+class Twine;
+class Instruction;
+class Module;
+class SMDiagnostic;
+class DiagnosticInfo;
+template <typename T> class SmallVectorImpl;
+class Function;
+class DebugLoc;
+
+/// This is an important class for using LLVM in a threaded context. It
+/// (opaquely) owns and manages the core "global" data of LLVM's core
+/// infrastructure, including the type and constant uniquing tables.
+/// LLVMContext itself provides no locking guarantees, so you should be careful
+/// to have one context per thread.
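+///
+/// A typical per-thread setup (an illustrative sketch, not part of this
+/// header; Module is declared in llvm/IR/Module.h):
+///   LLVMContext Ctx;
+///   Module M("my_module", Ctx); // all IR built from M is uniqued in Ctx
+/// and neither Ctx nor IR created against it should be shared across threads.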
+class LLVMContext {
+public:
+ LLVMContextImpl *const pImpl;
+ LLVMContext();
+ ~LLVMContext();
+
+ // Pinned metadata names, which always have the same value. This is a
+ // compile-time performance optimization, not a correctness optimization.
+ enum {
+ MD_dbg = 0, // "dbg"
+ MD_tbaa = 1, // "tbaa"
+ MD_prof = 2, // "prof"
+ MD_fpmath = 3, // "fpmath"
+ MD_range = 4, // "range"
+ MD_tbaa_struct = 5, // "tbaa.struct"
+ MD_invariant_load = 6, // "invariant.load"
+ MD_alias_scope = 7, // "alias.scope"
+ MD_noalias = 8, // "noalias"
+ MD_nontemporal = 9, // "nontemporal"
+ MD_mem_parallel_loop_access = 10, // "llvm.mem.parallel_loop_access"
+ MD_nonnull = 11, // "nonnull"
+ MD_dereferenceable = 12, // "dereferenceable"
+ MD_dereferenceable_or_null = 13, // "dereferenceable_or_null"
+ MD_make_implicit = 14, // "make.implicit"
+ MD_unpredictable = 15, // "unpredictable"
+ MD_invariant_group = 16, // "invariant.group"
+ MD_align = 17 // "align"
+ };
+
+ /// Known operand bundle tag IDs, which always have the same value. All
+ /// operand bundle tags that LLVM has special knowledge of are listed here.
+ /// Additionally, this scheme allows LLVM to efficiently check for specific
+ /// operand bundle tags without comparing strings.
+ enum {
+ OB_deopt = 0, // "deopt"
+ OB_funclet = 1, // "funclet"
+ };
+
+ /// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
+ /// This ID is uniqued across modules in the current LLVMContext.
+ unsigned getMDKindID(StringRef Name) const;
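+ //
+ // Illustrative use (a sketch; Ctx, Inst and SomeMDNode are assumed to be in
+ // scope): custom kind IDs are created on first request and remain stable for
+ // the lifetime of the context:
+ //   unsigned KindID = Ctx.getMDKindID("my.custom.kind");
+ //   Inst->setMetadata(KindID, SomeMDNode);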
+
+ /// getMDKindNames - Populate client supplied SmallVector with the name for
+ /// custom metadata IDs registered in this LLVMContext.
+ void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+ /// getOperandBundleTags - Populate client supplied SmallVector with the
+ /// bundle tags registered in this LLVMContext. The bundle tags are ordered
+ /// by increasing bundle IDs.
+ /// \see LLVMContext::getOperandBundleTagID
+ void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
+
+ /// getOperandBundleTagID - Maps a bundle tag to an integer ID. Every bundle
+ /// tag registered with an LLVMContext has an unique ID.
+ uint32_t getOperandBundleTagID(StringRef Tag) const;
+
+ typedef void (*InlineAsmDiagHandlerTy)(const SMDiagnostic&, void *Context,
+ unsigned LocCookie);
+
+ /// Defines the type of a diagnostic handler.
+ /// \see LLVMContext::setDiagnosticHandler.
+ /// \see LLVMContext::diagnose.
+ typedef void (*DiagnosticHandlerTy)(const DiagnosticInfo &DI, void *Context);
+
+ /// Defines the type of a yield callback.
+ /// \see LLVMContext::setYieldCallback.
+ typedef void (*YieldCallbackTy)(LLVMContext *Context, void *OpaqueHandle);
+
+ /// setInlineAsmDiagnosticHandler - This method sets a handler that is invoked
+ /// when problems with inline asm are detected by the backend. The first
+ /// argument is a function pointer and the second is a context pointer that
+ /// gets passed into the DiagHandler.
+ ///
+ /// LLVMContext doesn't take ownership or interpret either of these
+ /// pointers.
+ void setInlineAsmDiagnosticHandler(InlineAsmDiagHandlerTy DiagHandler,
+ void *DiagContext = nullptr);
+
+ /// getInlineAsmDiagnosticHandler - Return the diagnostic handler set by
+ /// setInlineAsmDiagnosticHandler.
+ InlineAsmDiagHandlerTy getInlineAsmDiagnosticHandler() const;
+
+ /// getInlineAsmDiagnosticContext - Return the diagnostic context set by
+ /// setInlineAsmDiagnosticHandler.
+ void *getInlineAsmDiagnosticContext() const;
+
+ /// setDiagnosticHandler - This method sets a handler that is invoked
+ /// when the backend needs to report anything to the user. The first
+ /// argument is a function pointer and the second is a context pointer that
+ /// gets passed into the DiagHandler. The third argument should be set to
+ /// true if the handler only expects enabled diagnostics.
+ ///
+ /// LLVMContext doesn't take ownership or interpret either of these
+ /// pointers.
+ void setDiagnosticHandler(DiagnosticHandlerTy DiagHandler,
+ void *DiagContext = nullptr,
+ bool RespectFilters = false);
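+ //
+ // A minimal handler sketch (illustrative; handleDiag and ErrCount are
+ // assumptions, not part of this header):
+ //   static void handleDiag(const DiagnosticInfo &DI, void *Ctx) {
+ //     if (DI.getSeverity() == DS_Error)
+ //       ++*static_cast<unsigned *>(Ctx); // count errors
+ //   }
+ //   Context.setDiagnosticHandler(handleDiag, &ErrCount);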
+
+ /// getDiagnosticHandler - Return the diagnostic handler set by
+ /// setDiagnosticHandler.
+ DiagnosticHandlerTy getDiagnosticHandler() const;
+
+ /// getDiagnosticContext - Return the diagnostic context set by
+ /// setDiagnosticHandler.
+ void *getDiagnosticContext() const;
+
+ /// \brief Report a message to the currently installed diagnostic handler.
+ ///
+ /// This function returns, in particular in the case of error reporting
+ /// (DI.Severity == \a DS_Error), so the caller should leave the compilation
+ /// process in a self-consistent state, even though the generated code
+ /// need not be correct.
+ ///
+ /// The diagnostic message will be implicitly prefixed with a severity keyword
+ /// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error,
+ /// "warning: " for \a DS_Warning, and "note: " for \a DS_Note.
+ void diagnose(const DiagnosticInfo &DI);
+
+ /// \brief Registers a yield callback with the given context.
+ ///
+ /// The yield callback function may be called by LLVM to transfer control back
+ /// to the client that invoked the LLVM compilation. This can be used to yield
+ /// control of the thread, or perform periodic work needed by the client.
+ /// There is no guaranteed frequency at which callbacks must occur; in fact,
+ /// the client is not guaranteed to ever receive this callback. It is at the
+ /// sole discretion of LLVM to do so and only if it can guarantee that
+ /// suspending the thread won't block any forward progress in other LLVM
+ /// contexts in the same process.
+ ///
+ /// At a suspend point, the state of the current LLVM context is intentionally
+ /// undefined. No assumptions about it can or should be made. Only LLVM
+ /// context API calls that explicitly state that they can be used during a
+ /// yield callback are allowed to be used. Any other API calls into the
+ /// context are not supported until the yield callback function returns
+ /// control to LLVM. Other LLVM contexts are unaffected by this restriction.
+ void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
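+ //
+ // Illustrative registration (a sketch; a capture-less lambda converts to the
+ // required function pointer, and UILoop/Loop are assumed client types):
+ //   Context.setYieldCallback([](LLVMContext *, void *Handle) {
+ //     static_cast<UILoop *>(Handle)->pumpOnce();
+ //   }, &Loop);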
+
+ /// \brief Calls the yield callback (if applicable).
+ ///
+ /// This transfers control of the current thread back to the client, which may
+ /// suspend the current thread. Only call this method when LLVM doesn't hold
+ /// any global mutex or cannot block the execution in another LLVM context.
+ void yield();
+
+ /// emitError - Emit an error message to the currently installed error handler
+ /// with optional location information. This function returns, so code should
+ /// be prepared to drop the erroneous construct on the floor and "not crash".
+ /// The generated code need not be correct. The error message will be
+ /// implicitly prefixed with "error: " and should not end with a ".".
+ void emitError(unsigned LocCookie, const Twine &ErrorStr);
+ void emitError(const Instruction *I, const Twine &ErrorStr);
+ void emitError(const Twine &ErrorStr);
+
+ /// \brief Query for a debug option's value.
+ ///
+ /// This function returns typed data populated from command line parsing.
+ template <typename ValT, typename Base, ValT(Base::*Mem)>
+ ValT getOption() const {
+ return OptionRegistry::instance().template get<ValT, Base, Mem>();
+ }
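+ //
+ // Illustrative query (a sketch; DebugCounter and its Threshold member are
+ // hypothetical options registered through OptionRegistry):
+ //   int T = Ctx.getOption<int, DebugCounter, &DebugCounter::Threshold>();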
+
+private:
+ LLVMContext(LLVMContext&) = delete;
+ void operator=(LLVMContext&) = delete;
+
+ /// addModule - Register a module as being instantiated in this context. If
+ /// the context is deleted, the module will be deleted as well.
+ void addModule(Module*);
+
+ /// removeModule - Unregister a module from this context.
+ void removeModule(Module*);
+
+ // Module needs access to the add/removeModule methods.
+ friend class Module;
+};
+
+/// getGlobalContext - Returns a global context. This is for LLVM clients that
+/// only care about operating on a single thread.
+extern LLVMContext &getGlobalContext();
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)
+
+/* Specialized opaque context conversions.
+ */
+inline LLVMContext **unwrap(LLVMContextRef* Tys) {
+ return reinterpret_cast<LLVMContext**>(Tys);
+}
+
+inline LLVMContextRef *wrap(const LLVMContext **Tys) {
+ return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
+}
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/LegacyPassManager.h b/contrib/llvm/include/llvm/IR/LegacyPassManager.h
new file mode 100644
index 0000000..5257a0e
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/LegacyPassManager.h
@@ -0,0 +1,103 @@
+//===- LegacyPassManager.h - Legacy Container for Passes --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the legacy PassManager class. This class is used to hold,
+// maintain, and optimize execution of Passes. The PassManager class ensures
+// that analysis results are available before a pass runs, and that Passes are
+// destroyed when the PassManager is destroyed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSMANAGER_H
+#define LLVM_IR_LEGACYPASSMANAGER_H
+
+#include "llvm/Pass.h"
+#include "llvm/Support/CBindingWrapping.h"
+
+namespace llvm {
+
+class Pass;
+class Module;
+
+namespace legacy {
+
+class PassManagerImpl;
+class FunctionPassManagerImpl;
+
+/// PassManagerBase - An abstract interface to allow code to add passes to
+/// a pass manager without having to hard-code what kind of pass manager
+/// it is.
+class PassManagerBase {
+public:
+ virtual ~PassManagerBase();
+
+ /// Add a pass to the queue of passes to run. This passes ownership of
+ /// the Pass to the PassManager. When the PassManager is destroyed, the pass
+ /// will be destroyed as well, so there is no need to delete the pass. This
+ /// may even destroy the pass right away if it is found to be redundant. This
+ /// implies that all passes MUST be allocated with 'new'.
+ virtual void add(Pass *P) = 0;
+};
+
+/// PassManager manages ModulePassManagers
+class PassManager : public PassManagerBase {
+public:
+
+ PassManager();
+ ~PassManager() override;
+
+ void add(Pass *P) override;
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool run(Module &M);
+
+private:
+ /// PassManagerImpl is the actual implementation class. PassManager is just
+ /// the wrapper that publishes the simple pass manager interface.
+ PassManagerImpl *PM;
+};
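+
+// Usage sketch (illustrative only; assumes a populated Module M; the verifier
+// factory is declared in llvm/IR/Verifier.h):
+//   legacy::PassManager PM;
+//   PM.add(createVerifierPass()); // PM now owns the pass
+//   bool Changed = PM.run(M);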
+
+/// FunctionPassManager manages FunctionPasses and BasicBlockPassManagers.
+class FunctionPassManager : public PassManagerBase {
+public:
+ /// FunctionPassManager ctor - This initializes the pass manager. It needs,
+ /// but does not take ownership of, the specified Module.
+ explicit FunctionPassManager(Module *M);
+ ~FunctionPassManager() override;
+
+ void add(Pass *P) override;
+
+ /// run - Execute all of the passes scheduled for execution. Keep
+ /// track of whether any of the passes modifies the function, and if
+ /// so, return true.
+ ///
+ bool run(Function &F);
+
+ /// doInitialization - Run all of the initializers for the function passes.
+ ///
+ bool doInitialization();
+
+ /// doFinalization - Run all of the finalizers for the function passes.
+ ///
+ bool doFinalization();
+
+private:
+ FunctionPassManagerImpl *FPM;
+ Module *M;
+};
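+
+// Typical driving sequence (a sketch; assumes a Module M and a factory such
+// as createCFGSimplificationPass() from llvm/Transforms/Scalar.h):
+//   legacy::FunctionPassManager FPM(&M);
+//   FPM.add(createCFGSimplificationPass());
+//   FPM.doInitialization();
+//   for (Function &F : M)
+//     FPM.run(F);
+//   FPM.doFinalization();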
+
+} // End legacy namespace
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(legacy::PassManagerBase, LLVMPassManagerRef)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/LegacyPassManagers.h b/contrib/llvm/include/llvm/IR/LegacyPassManagers.h
new file mode 100644
index 0000000..b8e3347
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/LegacyPassManagers.h
@@ -0,0 +1,508 @@
+//===- LegacyPassManagers.h - Legacy Pass Infrastructure --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LLVM Pass Manager infrastructure.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSMANAGERS_H
+#define LLVM_IR_LEGACYPASSMANAGERS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Pass.h"
+#include <map>
+#include <vector>
+
+//===----------------------------------------------------------------------===//
+// Overview:
+// The Pass Manager Infrastructure manages passes. Its responsibilities are:
+//
+// o Manage optimization pass execution order
+// o Make required Analysis information available before pass P is run
+// o Release memory occupied by dead passes
+// o If Analysis information is dirtied by a pass then regenerate Analysis
+// information before it is consumed by another pass.
+//
+// Pass Manager Infrastructure uses multiple pass managers. They are
+// PassManager, FunctionPassManager, MPPassManager, FPPassManager, BBPassManager.
+// This class hierarchy uses multiple inheritance but pass managers do not
+// derive from another pass manager.
+//
+// PassManager and FunctionPassManager are the two top-level pass managers that
+// represent the external interface of this entire pass manager infrastructure.
+//
+// Important classes :
+//
+// [o] class PMTopLevelManager;
+//
+// Two top level managers, PassManager and FunctionPassManager, derive from
+// PMTopLevelManager. PMTopLevelManager manages information used by top level
+// managers such as last user info.
+//
+// [o] class PMDataManager;
+//
+// PMDataManager manages information, e.g. list of available analysis info,
+// used by a pass manager to manage execution order of passes. It also provides
+// a place to implement common pass manager APIs. All pass managers derive from
+// PMDataManager.
+//
+// [o] class BBPassManager : public FunctionPass, public PMDataManager;
+//
+// BBPassManager manages BasicBlockPasses.
+//
+// [o] class FunctionPassManager;
+//
+// This is an external interface used to manage FunctionPasses. This
+// interface relies on FunctionPassManagerImpl to do all the tasks.
+//
+// [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager,
+// public PMTopLevelManager;
+//
+// FunctionPassManagerImpl is a top level manager. It manages FPPassManagers.
+//
+// [o] class FPPassManager : public ModulePass, public PMDataManager;
+//
+// FPPassManager manages FunctionPasses and BBPassManagers.
+//
+// [o] class MPPassManager : public Pass, public PMDataManager;
+//
+// MPPassManager manages ModulePasses and FPPassManagers.
+//
+// [o] class PassManager;
+//
+// This is an external interface used by various tools to manage passes. It
+// relies on PassManagerImpl to do all the tasks.
+//
+// [o] class PassManagerImpl : public Pass, public PMDataManager,
+// public PMTopLevelManager
+//
+// PassManagerImpl is a top level pass manager responsible for managing
+// MPPassManagers.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace llvm {
+ class Module;
+ class Pass;
+ class StringRef;
+ class Value;
+ class Timer;
+ class PMDataManager;
+
+// enums for debugging strings
+enum PassDebuggingString {
+ EXECUTION_MSG, // "Executing Pass '" + PassName
+ MODIFICATION_MSG, // "Made Modification '" + PassName
+ FREEING_MSG, // " Freeing Pass '" + PassName
+ ON_BASICBLOCK_MSG, // "' on BasicBlock '" + InstructionName + "'...\n"
+ ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
+ ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
+ ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'"
+ ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'"
+ ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'"
+};
+
+/// PassManagerPrettyStackEntry - This is used to print informative information
+/// about what pass is running when/if a stack trace is generated.
+class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
+ Pass *P;
+ Value *V;
+ Module *M;
+
+public:
+ explicit PassManagerPrettyStackEntry(Pass *p)
+ : P(p), V(nullptr), M(nullptr) {} // When P is releaseMemory'd.
+ PassManagerPrettyStackEntry(Pass *p, Value &v)
+ : P(p), V(&v), M(nullptr) {} // When P is run on V
+ PassManagerPrettyStackEntry(Pass *p, Module &m)
+ : P(p), V(nullptr), M(&m) {} // When P is run on M
+
+ /// print - Emit information about this stack frame to OS.
+ void print(raw_ostream &OS) const override;
+};
+
+//===----------------------------------------------------------------------===//
+// PMStack
+//
+/// PMStack - This class implements a stack data structure of PMDataManager
+/// pointers.
+///
+/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
+/// using PMStack. Each Pass implements assignPassManager() to connect itself
+/// with the appropriate manager. assignPassManager() walks the PMStack to
+/// find a suitable manager.
+class PMStack {
+public:
+ typedef std::vector<PMDataManager *>::const_reverse_iterator iterator;
+ iterator begin() const { return S.rbegin(); }
+ iterator end() const { return S.rend(); }
+
+ void pop();
+ PMDataManager *top() const { return S.back(); }
+ void push(PMDataManager *PM);
+ bool empty() const { return S.empty(); }
+
+ void dump() const;
+
+private:
+ std::vector<PMDataManager *> S;
+};
+
+//===----------------------------------------------------------------------===//
+// PMTopLevelManager
+//
+/// PMTopLevelManager manages LastUser info and collects common APIs used by
+/// top level pass managers.
+class PMTopLevelManager {
+protected:
+ explicit PMTopLevelManager(PMDataManager *PMDM);
+
+ unsigned getNumContainedManagers() const {
+ return (unsigned)PassManagers.size();
+ }
+
+ void initializeAllAnalysisInfo();
+
+private:
+ virtual PMDataManager *getAsPMDataManager() = 0;
+ virtual PassManagerType getTopLevelPassManagerType() = 0;
+
+public:
+ /// Schedule pass P for execution. Make sure that passes required by
+ /// P are run before P is run. Update analysis info maintained by
+ /// the manager. Remove dead passes. This is a recursive function.
+ void schedulePass(Pass *P);
+
+ /// Set pass P as the last user of the given analysis passes.
+ void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);
+
+ /// Collect passes whose last user is P
+ void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);
+
+ /// Find the pass that implements Analysis AID. Search immutable
+ /// passes and all pass managers. If the desired pass is not found
+ /// then return NULL.
+ Pass *findAnalysisPass(AnalysisID AID);
+
+ /// Retrieve the PassInfo for an analysis.
+ const PassInfo *findAnalysisPassInfo(AnalysisID AID) const;
+
+ /// Find analysis usage information for the pass P.
+ AnalysisUsage *findAnalysisUsage(Pass *P);
+
+ virtual ~PMTopLevelManager();
+
+ /// Add immutable pass and initialize it.
+ void addImmutablePass(ImmutablePass *P);
+
+ inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
+ return ImmutablePasses;
+ }
+
+ void addPassManager(PMDataManager *Manager) {
+ PassManagers.push_back(Manager);
+ }
+
+ // Add Manager into the list of managers that are not directly
+ // maintained by this top level pass manager
+ inline void addIndirectPassManager(PMDataManager *Manager) {
+ IndirectPassManagers.push_back(Manager);
+ }
+
+ // Print passes managed by this top level manager.
+ void dumpPasses() const;
+ void dumpArguments() const;
+
+ // Active Pass Managers
+ PMStack activeStack;
+
+protected:
+ /// Collection of pass managers
+ SmallVector<PMDataManager *, 8> PassManagers;
+
+private:
+ /// Collection of pass managers that are not directly maintained
+ /// by this pass manager
+ SmallVector<PMDataManager *, 8> IndirectPassManagers;
+
+ // Map to keep track of the last user of an analysis pass.
+ // LastUser->second is the last user of LastUser->first.
+ DenseMap<Pass *, Pass *> LastUser;
+
+ // Map to keep track of passes that are last used by a pass.
+ // This inverse map is initialized at PM->run() based on
+ // LastUser map.
+ DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;
+
+ /// Immutable passes are managed by top level manager.
+ SmallVector<ImmutablePass *, 16> ImmutablePasses;
+
+ /// Map from ID to immutable passes.
+ SmallDenseMap<AnalysisID, ImmutablePass *, 8> ImmutablePassMap;
+
+
+ /// A wrapper around AnalysisUsage for the purpose of uniquing. The wrapper
+ /// is used to avoid needing to make AnalysisUsage itself a folding set node.
+ struct AUFoldingSetNode : public FoldingSetNode {
+ AnalysisUsage AU;
+ AUFoldingSetNode(const AnalysisUsage &AU) : AU(AU) {}
+ void Profile(FoldingSetNodeID &ID) const {
+ Profile(ID, AU);
+ }
+ static void Profile(FoldingSetNodeID &ID, const AnalysisUsage &AU) {
+ // TODO: We could consider sorting the dependency arrays within the
+ // AnalysisUsage (since they are conceptually unordered).
+ ID.AddBoolean(AU.getPreservesAll());
+ auto ProfileVec = [&](const SmallVectorImpl<AnalysisID>& Vec) {
+ ID.AddInteger(Vec.size());
+ for(AnalysisID AID : Vec)
+ ID.AddPointer(AID);
+ };
+ ProfileVec(AU.getRequiredSet());
+ ProfileVec(AU.getRequiredTransitiveSet());
+ ProfileVec(AU.getPreservedSet());
+ ProfileVec(AU.getUsedSet());
+ }
+ };
+
+ // Contains all of the unique combinations of AnalysisUsage. This is helpful
+ // when we have multiple instances of the same pass since they'll usually
+ // have the same analysis usage and can share storage.
+ FoldingSet<AUFoldingSetNode> UniqueAnalysisUsages;
+
+ // Allocator used for allocating AUFoldingSetNodes. This handles deletion of
+ // all allocated nodes in one fell swoop.
+ SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;
+
+ // Maps from a pass to its associated entry in UniqueAnalysisUsages. Does
+ // not own the storage associated with either key or value.
+ DenseMap<Pass *, AnalysisUsage*> AnUsageMap;
+
+ /// Collection of PassInfo objects found via analysis IDs and in this top
+ /// level manager. This is used to memoize queries to the pass registry.
+ /// FIXME: This is an egregious hack because querying the pass registry is
+ /// either slow or racy.
+ mutable DenseMap<AnalysisID, const PassInfo *> AnalysisPassInfos;
+};
+
+//===----------------------------------------------------------------------===//
+// PMDataManager
+
+/// PMDataManager provides the common place to manage the analysis data
+/// used by pass managers.
+class PMDataManager {
+public:
+ explicit PMDataManager() : TPM(nullptr), Depth(0) {
+ initializeAnalysisInfo();
+ }
+
+ virtual ~PMDataManager();
+
+ virtual Pass *getAsPass() = 0;
+
+ /// Augment AvailableAnalysis by adding analysis made available by pass P.
+ void recordAvailableAnalysis(Pass *P);
+
+ /// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
+ void verifyPreservedAnalysis(Pass *P);
+
+ /// Remove Analysis that is not preserved by the pass
+ void removeNotPreservedAnalysis(Pass *P);
+
+ /// Remove dead passes used by P.
+ void removeDeadPasses(Pass *P, StringRef Msg,
+ enum PassDebuggingString);
+
+ /// Remove P.
+ void freePass(Pass *P, StringRef Msg,
+ enum PassDebuggingString);
+
+ /// Add pass P into the PassVector. Update
+ /// AvailableAnalysis appropriately if ProcessAnalysis is true.
+ void add(Pass *P, bool ProcessAnalysis = true);
+
+ /// Add RequiredPass into list of lower level passes required by pass P.
+ /// RequiredPass is run on the fly by Pass Manager when P requests it
+ /// through getAnalysis interface.
+ virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
+
+ virtual Pass *getOnTheFlyPass(Pass *P, AnalysisID PI, Function &F);
+
+ /// Initialize available analysis information.
+ void initializeAnalysisInfo() {
+ AvailableAnalysis.clear();
+ for (unsigned i = 0; i < PMT_Last; ++i)
+ InheritedAnalysis[i] = nullptr;
+ }
+
+ // Return true if P preserves high level analysis used by other
+ // passes that are managed by this manager.
+ bool preserveHigherLevelAnalysis(Pass *P);
+
+ /// Populate UsedPasses with analysis passes that are used or required by
+ /// pass P and are available. Populate ReqPassNotAvailable with analysis
+ /// passes that are required by pass P but are not available.
+ void collectRequiredAndUsedAnalyses(
+ SmallVectorImpl<Pass *> &UsedPasses,
+ SmallVectorImpl<AnalysisID> &ReqPassNotAvailable, Pass *P);
+
+ /// All Required analyses should be available to the pass as it runs! Here
+ /// we fill in the AnalysisImpls member of the pass so that it can
+ /// successfully use the getAnalysis() method to retrieve the
+ /// implementations it needs.
+ void initializeAnalysisImpl(Pass *P);
+
+ /// Find the pass that implements Analysis AID. If the desired pass is not
+ /// found then return NULL.
+ Pass *findAnalysisPass(AnalysisID AID, bool Direction);
+
+ // Access toplevel manager
+ PMTopLevelManager *getTopLevelManager() { return TPM; }
+ void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }
+
+ unsigned getDepth() const { return Depth; }
+ void setDepth(unsigned newDepth) { Depth = newDepth; }
+
+ // Print routines used by debug-pass
+ void dumpLastUses(Pass *P, unsigned Offset) const;
+ void dumpPassArguments() const;
+ void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
+ enum PassDebuggingString S2, StringRef Msg);
+ void dumpRequiredSet(const Pass *P) const;
+ void dumpPreservedSet(const Pass *P) const;
+ void dumpUsedSet(const Pass *P) const;
+
+ unsigned getNumContainedPasses() const {
+ return (unsigned)PassVector.size();
+ }
+
+ virtual PassManagerType getPassManagerType() const {
+ assert ( 0 && "Invalid use of getPassManagerType");
+ return PMT_Unknown;
+ }
+
+ DenseMap<AnalysisID, Pass*> *getAvailableAnalysis() {
+ return &AvailableAnalysis;
+ }
+
+ // Collect AvailableAnalysis from all the active Pass Managers.
+ void populateInheritedAnalysis(PMStack &PMS) {
+ unsigned Index = 0;
+ for (PMStack::iterator I = PMS.begin(), E = PMS.end();
+ I != E; ++I)
+ InheritedAnalysis[Index++] = (*I)->getAvailableAnalysis();
+ }
+
+protected:
+ // Top level manager.
+ PMTopLevelManager *TPM;
+
+ // Collection of passes that are managed by this manager
+ SmallVector<Pass *, 16> PassVector;
+
+ // Collection of Analysis provided by the parent pass manager and
+ // used by the current pass manager. At any time there cannot be
+ // more than PMT_Last active pass managers.
+ DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
+
+ /// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
+ /// or higher is specified.
+ bool isPassDebuggingExecutionsOrMore() const;
+
+private:
+ void dumpAnalysisUsage(StringRef Msg, const Pass *P,
+ const AnalysisUsage::VectorType &Set) const;
+
+ // Set of available Analysis. This information is used while scheduling
+ // passes. If a pass requires an analysis which is not available then
+ // the required analysis pass is scheduled to run before the pass itself is
+ // scheduled to run.
+ DenseMap<AnalysisID, Pass*> AvailableAnalysis;
+
+ // Collection of higher level analysis used by the pass managed by
+ // this manager.
+ SmallVector<Pass *, 16> HigherLevelAnalysis;
+
+ unsigned Depth;
+};
+
+//===----------------------------------------------------------------------===//
+// FPPassManager
+//
+/// FPPassManager manages BBPassManagers and FunctionPasses.
+/// It batches all function passes and basic block pass managers together and
+/// sequences them to process one function at a time before processing the
+/// next function.
+class FPPassManager : public ModulePass, public PMDataManager {
+public:
+ static char ID;
+ explicit FPPassManager()
+ : ModulePass(ID), PMDataManager() { }
+
+ /// run - Execute all of the passes scheduled for execution. Keep track of
+ /// whether any of the passes modifies the module, and if so, return true.
+ bool runOnFunction(Function &F);
+ bool runOnModule(Module &M) override;
+
+ /// cleanup - After running all passes, clean up the pass manager cache.
+ void cleanup();
+
+ /// doInitialization - Overrides ModulePass doInitialization for global
+ /// initialization tasks
+ ///
+ using ModulePass::doInitialization;
+
+ /// doInitialization - Run all of the initializers for the function passes.
+ ///
+ bool doInitialization(Module &M) override;
+
+ /// doFinalization - Overrides ModulePass doFinalization for global
+ /// finalization tasks
+ ///
+ using ModulePass::doFinalization;
+
+ /// doFinalization - Run all of the finalizers for the function passes.
+ ///
+ bool doFinalization(Module &M) override;
+
+ PMDataManager *getAsPMDataManager() override { return this; }
+ Pass *getAsPass() override { return this; }
+
+ /// Pass Manager itself does not invalidate any analysis info.
+ void getAnalysisUsage(AnalysisUsage &Info) const override {
+ Info.setPreservesAll();
+ }
+
+ // Print passes managed by this manager
+ void dumpPassStructure(unsigned Offset) override;
+
+ const char *getPassName() const override {
+ return "Function Pass Manager";
+ }
+
+ FunctionPass *getContainedPass(unsigned N) {
+ assert(N < PassVector.size() && "Pass number out of range!");
+ FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]);
+ return FP;
+ }
+
+ PassManagerType getPassManagerType() const override {
+ return PMT_FunctionPassManager;
+ }
+};
+
+Timer *getPassTimer(Pass *);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/LegacyPassNameParser.h b/contrib/llvm/include/llvm/IR/LegacyPassNameParser.h
new file mode 100644
index 0000000..39ae80d
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/LegacyPassNameParser.h
@@ -0,0 +1,139 @@
+//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PassNameParser and FilteredPassNameParser<> classes,
+// which are used to add command line arguments to a utility for all of the
+// passes that have been registered into the system.
+//
+// The PassNameParser class adds ALL passes linked into the system (that are
+// creatable) as command line arguments to the tool (when instantiated with the
+// appropriate command line option template). The FilteredPassNameParser<>
+// template is used for the same purposes as PassNameParser, except that it only
+// includes passes that have a PassType that are compatible with the filter
+// (which is the template argument).
+//
+// Note that this is part of the legacy pass manager infrastructure and will be
+// (eventually) going away.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
+#define LLVM_IR_LEGACYPASSNAMEPARSER_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// PassNameParser class - Make use of the pass registration mechanism to
+// automatically add a command line argument to opt for each pass.
+//
+class PassNameParser : public PassRegistrationListener,
+ public cl::parser<const PassInfo*> {
+public:
+ PassNameParser(cl::Option &O);
+ ~PassNameParser() override;
+
+ void initialize() {
+ cl::parser<const PassInfo*>::initialize();
+
+ // Add all of the passes to the map that got initialized before 'this' did.
+ enumeratePasses();
+ }
+
+ // ignorablePassImpl - Can be overridden in subclasses to refine the list of
+ // which passes we want to include.
+ //
+ virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }
+
+ inline bool ignorablePass(const PassInfo *P) const {
+ // Ignore non-selectable and non-constructible passes! Ignore
+ // non-optimizations.
+ return P->getPassArgument() == nullptr || *P->getPassArgument() == 0 ||
+ P->getNormalCtor() == nullptr || ignorablePassImpl(P);
+ }
+
+ // Implement the PassRegistrationListener callbacks used to populate our map
+ //
+ void passRegistered(const PassInfo *P) override {
+ if (ignorablePass(P)) return;
+ if (findOption(P->getPassArgument()) != getNumOptions()) {
+ errs() << "Two passes with the same argument (-"
+ << P->getPassArgument() << ") attempted to be registered!\n";
+ llvm_unreachable(nullptr);
+ }
+ addLiteralOption(P->getPassArgument(), P, P->getPassName());
+ }
+ void passEnumerate(const PassInfo *P) override { passRegistered(P); }
+
+ // printOptionInfo - Print out information about this option. Override the
+ // default implementation to sort the table before we print...
+ void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
+ PassNameParser *PNP = const_cast<PassNameParser*>(this);
+ array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValLessThan);
+ cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
+ }
+
+private:
+ // ValLessThan - Provide a sorting comparator for Values elements...
+ static int ValLessThan(const PassNameParser::OptionInfo *VT1,
+ const PassNameParser::OptionInfo *VT2) {
+ return std::strcmp(VT1->Name, VT2->Name);
+ }
+};
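+
+// Example usage (a sketch, mirroring the pattern used by the opt tool; the
+// variable name is illustrative):
+//
+//   static cl::list<const PassInfo *, bool, PassNameParser>
+//       PassList(cl::desc("Optimizations available:"));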
+
+///===----------------------------------------------------------------------===//
+/// FilteredPassNameParser class - Make use of the pass registration
+/// mechanism to automatically add a command line argument to opt for
+/// each pass that satisfies a filter criterion. Filter should return
+/// true for passes to be registered as command-line options.
+///
+template<typename Filter>
+class FilteredPassNameParser : public PassNameParser {
+private:
+ Filter filter;
+
+public:
+ bool ignorablePassImpl(const PassInfo *P) const override {
+ return !filter(*P);
+ }
+};
+
+///===----------------------------------------------------------------------===//
+/// PassArgFilter - A filter for use with FilteredPassNameParser that only
+/// accepts a Pass whose Arg matches certain strings.
+///
+/// Use like this:
+///
+/// extern const char AllowedPassArgs[] = "-anders_aa -dse";
+///
+/// static cl::list<
+/// const PassInfo*,
+/// bool,
+/// FilteredPassNameParser<PassArgFilter<AllowedPassArgs> > >
+/// PassList(cl::desc("Passes available:"));
+///
+/// Only the -anders_aa and -dse options will be available to the user.
+///
+template<const char *Args>
+class PassArgFilter {
+public:
+ bool operator()(const PassInfo &P) const {
+ return std::strstr(Args, P.getPassArgument()) != nullptr;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/MDBuilder.h b/contrib/llvm/include/llvm/IR/MDBuilder.h
new file mode 100644
index 0000000..35341e3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/MDBuilder.h
@@ -0,0 +1,164 @@
+//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MDBuilder class, which is used as a convenient way to
+// create LLVM metadata with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MDBUILDER_H
+#define LLVM_IR_MDBUILDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <utility>
+
+namespace llvm {
+
+class APInt;
+template <typename T> class ArrayRef;
+class LLVMContext;
+class Constant;
+class ConstantAsMetadata;
+class MDNode;
+class MDString;
+
+class MDBuilder {
+ LLVMContext &Context;
+
+public:
+ MDBuilder(LLVMContext &context) : Context(context) {}
+
+ /// \brief Return the given string as metadata.
+ MDString *createString(StringRef Str);
+
+ /// \brief Return the given constant as metadata.
+ ConstantAsMetadata *createConstant(Constant *C);
+
+ //===------------------------------------------------------------------===//
+ // FPMath metadata.
+ //===------------------------------------------------------------------===//
+
+ /// \brief Return metadata with the given settings. The special value 0.0
+ /// for the Accuracy parameter indicates the default (maximal precision)
+ /// setting.
+ MDNode *createFPMath(float Accuracy);
+
+ //===------------------------------------------------------------------===//
+ // Prof metadata.
+ //===------------------------------------------------------------------===//
+
+ /// \brief Return metadata containing two branch weights.
+ MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);
+
+ /// \brief Return metadata containing a number of branch weights.
+ MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);
+
+ /// Return metadata specifying that a branch or switch is unpredictable.
+ MDNode *createUnpredictable();
+
+ /// Return metadata containing the entry count for a function.
+ MDNode *createFunctionEntryCount(uint64_t Count);
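+
+ // Example (a sketch; BI and the weights are illustrative): attach branch
+ // weights to a conditional branch as prof metadata.
+ //
+ //   MDBuilder MDB(BI->getContext());
+ //   BI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(90, 10));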
+
+ //===------------------------------------------------------------------===//
+ // Range metadata.
+ //===------------------------------------------------------------------===//
+
+ /// \brief Return metadata describing the range [Lo, Hi).
+ MDNode *createRange(const APInt &Lo, const APInt &Hi);
+
+ /// \brief Return metadata describing the range [Lo, Hi).
+ MDNode *createRange(Constant *Lo, Constant *Hi);
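+
+ // Example (a sketch; LI is an illustrative i8 load): mark the loaded value
+ // as being in the range [0, 32).
+ //
+ //   MDBuilder MDB(LI->getContext());
+ //   LI->setMetadata(LLVMContext::MD_range,
+ //                   MDB.createRange(APInt(8, 0), APInt(8, 32)));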
+
+ //===------------------------------------------------------------------===//
+ // AA metadata.
+ //===------------------------------------------------------------------===//
+
+protected:
+ /// \brief Return metadata appropriate for an AA root node (scope or TBAA).
+ /// Each returned node is distinct from all other metadata and will never
+ /// be identified (uniqued) with anything else.
+ MDNode *createAnonymousAARoot(StringRef Name = StringRef(),
+ MDNode *Extra = nullptr);
+
+public:
+ /// \brief Return metadata appropriate for a TBAA root node. Each returned
+ /// node is distinct from all other metadata and will never be identified
+ /// (uniqued) with anything else.
+ MDNode *createAnonymousTBAARoot() {
+ return createAnonymousAARoot();
+ }
+
+ /// \brief Return metadata appropriate for an alias scope domain node.
+ /// Each returned node is distinct from all other metadata and will never
+ /// be identified (uniqued) with anything else.
+ MDNode *createAnonymousAliasScopeDomain(StringRef Name = StringRef()) {
+ return createAnonymousAARoot(Name);
+ }
+
+ /// \brief Return metadata appropriate for an alias scope root node.
+ /// Each returned node is distinct from all other metadata and will never
+ /// be identified (uniqued) with anything else.
+ MDNode *createAnonymousAliasScope(MDNode *Domain,
+ StringRef Name = StringRef()) {
+ return createAnonymousAARoot(Name, Domain);
+ }
+
+ /// \brief Return metadata appropriate for a TBAA root node with the given
+ /// name. This may be identified (uniqued) with other roots with the same
+ /// name.
+ MDNode *createTBAARoot(StringRef Name);
+
+ /// \brief Return metadata appropriate for an alias scope domain node with
+ /// the given name. This may be identified (uniqued) with other roots with
+ /// the same name.
+ MDNode *createAliasScopeDomain(StringRef Name);
+
+ /// \brief Return metadata appropriate for an alias scope node with
+ /// the given name. This may be identified (uniqued) with other scopes with
+ /// the same name and domain.
+ MDNode *createAliasScope(StringRef Name, MDNode *Domain);
+
+ /// \brief Return metadata for a non-root TBAA node with the given name,
+ /// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
+ MDNode *createTBAANode(StringRef Name, MDNode *Parent,
+ bool isConstant = false);
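+
+ // Example (a sketch of a minimal scalar TBAA tree; the node names are
+ // illustrative):
+ //
+ //   MDBuilder MDB(Ctx);
+ //   MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
+ //   MDNode *Char = MDB.createTBAANode("omnipotent char", Root);
+ //   MDNode *Int  = MDB.createTBAANode("int", Char);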
+
+ struct TBAAStructField {
+ uint64_t Offset;
+ uint64_t Size;
+ MDNode *TBAA;
+ TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *TBAA) :
+ Offset(Offset), Size(Size), TBAA(TBAA) {}
+ };
+
+ /// \brief Return metadata for a tbaa.struct node with the given
+ /// struct field descriptions.
+ MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);
+
+ /// \brief Return metadata for a TBAA struct node in the type DAG with the
+ /// given name and a list of (field type, offset) pairs for its fields.
+ MDNode *
+ createTBAAStructTypeNode(StringRef Name,
+ ArrayRef<std::pair<MDNode *, uint64_t>> Fields);
+
+ /// \brief Return metadata for a TBAA scalar type node with the
+ /// given name, an offset and a parent in the TBAA type DAG.
+ MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
+ uint64_t Offset = 0);
+
+ /// \brief Return metadata for a TBAA tag node with the given
+ /// base type, access type and offset relative to the base type.
+ MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
+ uint64_t Offset, bool IsConstant = false);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Mangler.h b/contrib/llvm/include/llvm/IR/Mangler.h
new file mode 100644
index 0000000..ea2f0c3
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Mangler.h
@@ -0,0 +1,56 @@
+//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Unified name mangler for various backends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MANGLER_H
+#define LLVM_IR_MANGLER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class DataLayout;
+template <typename T> class SmallVectorImpl;
+class Twine;
+
+class Mangler {
+ /// We need to give global values the same name every time they are mangled.
+ /// This keeps track of the number we give to anonymous ones.
+ mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;
+
+ /// This simple counter is used to unique value names.
+ mutable unsigned NextAnonGlobalID;
+
+public:
+ Mangler() : NextAnonGlobalID(1) {}
+
+ /// Print the appropriate prefix and the specified global variable's name.
+ /// If the global variable doesn't have a name, this fills in a unique name
+ /// for the global.
+ void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const;
+ void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
+ bool CannotUsePrivateLabel) const;
+
+ /// Print the appropriate prefix and the specified name as the global variable
+ /// name. GVName must not be empty.
+ static void getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
+ const DataLayout &DL);
+ static void getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const Twine &GVName, const DataLayout &DL);
+};
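+
+// Example usage (a sketch; GV is an illustrative GlobalValue *):
+//
+//   Mangler Mang;
+//   SmallString<128> Name;
+//   Mang.getNameWithPrefix(Name, GV, /*CannotUsePrivateLabel=*/false);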
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Metadata.def b/contrib/llvm/include/llvm/IR/Metadata.def
new file mode 100644
index 0000000..b1d2217
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Metadata.def
@@ -0,0 +1,123 @@
+//===- llvm/IR/Metadata.def - Metadata definitions --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through all types of metadata.
+//
+//===----------------------------------------------------------------------===//
+
+#if !(defined HANDLE_METADATA || defined HANDLE_METADATA_LEAF || \
+ defined HANDLE_METADATA_BRANCH || defined HANDLE_MDNODE_LEAF || \
+ defined HANDLE_MDNODE_LEAF_UNIQUABLE || defined HANDLE_MDNODE_BRANCH || \
+ defined HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE || \
+ defined HANDLE_SPECIALIZED_MDNODE_LEAF || \
+ defined HANDLE_SPECIALIZED_MDNODE_BRANCH)
+#error "Missing macro definition of HANDLE_METADATA*"
+#endif
+
+// Handler for all types of metadata.
+#ifndef HANDLE_METADATA
+#define HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for leaf nodes in the class hierarchy.
+#ifndef HANDLE_METADATA_LEAF
+#define HANDLE_METADATA_LEAF(CLASS) HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for non-leaf nodes in the class hierarchy.
+#ifndef HANDLE_METADATA_BRANCH
+#define HANDLE_METADATA_BRANCH(CLASS) HANDLE_METADATA(CLASS)
+#endif
+
+// Handler for specialized and uniquable leaf nodes under MDNode. Defers to
+// HANDLE_MDNODE_LEAF_UNIQUABLE if it's defined, otherwise to
+// HANDLE_SPECIALIZED_MDNODE_LEAF.
+#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
+#ifdef HANDLE_MDNODE_LEAF_UNIQUABLE
+#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS)
+#else
+#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS) \
+ HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)
+#endif
+#endif
+
+// Handler for uniquable leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_LEAF_UNIQUABLE
+#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#endif
+
+// Handler for leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_LEAF
+#define HANDLE_MDNODE_LEAF(CLASS) HANDLE_METADATA_LEAF(CLASS)
+#endif
+
+// Handler for non-leaf nodes under MDNode.
+#ifndef HANDLE_MDNODE_BRANCH
+#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_METADATA_BRANCH(CLASS)
+#endif
+
+// Handler for specialized leaf nodes under MDNode.
+#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#endif
+
+// Handler for specialized non-leaf nodes under MDNode.
+#ifndef HANDLE_SPECIALIZED_MDNODE_BRANCH
+#define HANDLE_SPECIALIZED_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_BRANCH(CLASS)
+#endif
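+
+// Example (a sketch): a client defines the handlers it cares about and then
+// includes this file, e.g. to forward-declare every metadata class:
+//
+//   #define HANDLE_METADATA(CLASS) class CLASS;
+//   #include "llvm/IR/Metadata.def"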
+
+HANDLE_METADATA_LEAF(MDString)
+HANDLE_METADATA_BRANCH(ValueAsMetadata)
+HANDLE_METADATA_LEAF(ConstantAsMetadata)
+HANDLE_METADATA_LEAF(LocalAsMetadata)
+HANDLE_MDNODE_BRANCH(MDNode)
+HANDLE_MDNODE_LEAF_UNIQUABLE(MDTuple)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocation)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIExpression)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(GenericDINode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubrange)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIEnumerator)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIScope)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIBasicType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIDerivedType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICompositeType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubroutineType)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIFile)
+HANDLE_SPECIALIZED_MDNODE_LEAF(DICompileUnit)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DILocalScope)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubprogram)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DILexicalBlockBase)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlock)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlockFile)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DINamespace)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIModule)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DITemplateParameter)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateTypeParameter)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateValueParameter)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocalVariable)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIObjCProperty)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIImportedEntity)
+HANDLE_SPECIALIZED_MDNODE_BRANCH(DIMacroNode)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacro)
+HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacroFile)
+
+#undef HANDLE_METADATA
+#undef HANDLE_METADATA_LEAF
+#undef HANDLE_METADATA_BRANCH
+#undef HANDLE_MDNODE_LEAF
+#undef HANDLE_MDNODE_LEAF_UNIQUABLE
+#undef HANDLE_MDNODE_BRANCH
+#undef HANDLE_SPECIALIZED_MDNODE_LEAF
+#undef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
+#undef HANDLE_SPECIALIZED_MDNODE_BRANCH
diff --git a/contrib/llvm/include/llvm/IR/Metadata.h b/contrib/llvm/include/llvm/IR/Metadata.h
new file mode 100644
index 0000000..2ea5913
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Metadata.h
@@ -0,0 +1,1297 @@
+//===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for metadata subclasses.
+/// They represent the different flavors of metadata that live in LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_METADATA_H
+#define LLVM_IR_METADATA_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <type_traits>
+
+namespace llvm {
+
+class LLVMContext;
+class Module;
+class ModuleSlotTracker;
+
+enum LLVMConstants : uint32_t {
+ DEBUG_METADATA_VERSION = 3 // Current debug info version number.
+};
+
+/// \brief Root of the metadata hierarchy.
+///
+/// This is a root class for typeless data in the IR.
+class Metadata {
+ friend class ReplaceableMetadataImpl;
+
+ /// \brief RTTI.
+ const unsigned char SubclassID;
+
+protected:
+ /// \brief Active type of storage.
+ enum StorageType { Uniqued, Distinct, Temporary };
+
+ /// \brief Storage flag for non-uniqued, otherwise unowned, metadata.
+ unsigned Storage : 2;
+ // TODO: expose remaining bits to subclasses.
+
+ unsigned short SubclassData16;
+ unsigned SubclassData32;
+
+public:
+ enum MetadataKind {
+ MDTupleKind,
+ DILocationKind,
+ GenericDINodeKind,
+ DISubrangeKind,
+ DIEnumeratorKind,
+ DIBasicTypeKind,
+ DIDerivedTypeKind,
+ DICompositeTypeKind,
+ DISubroutineTypeKind,
+ DIFileKind,
+ DICompileUnitKind,
+ DISubprogramKind,
+ DILexicalBlockKind,
+ DILexicalBlockFileKind,
+ DINamespaceKind,
+ DIModuleKind,
+ DITemplateTypeParameterKind,
+ DITemplateValueParameterKind,
+ DIGlobalVariableKind,
+ DILocalVariableKind,
+ DIExpressionKind,
+ DIObjCPropertyKind,
+ DIImportedEntityKind,
+ ConstantAsMetadataKind,
+ LocalAsMetadataKind,
+ MDStringKind,
+ DIMacroKind,
+ DIMacroFileKind
+ };
+
+protected:
+ Metadata(unsigned ID, StorageType Storage)
+ : SubclassID(ID), Storage(Storage), SubclassData16(0), SubclassData32(0) {
+ }
+ ~Metadata() = default;
+
+ /// \brief Default handling of a changed operand, which asserts.
+ ///
+ /// If subclasses pass themselves in as owners to a tracking node reference,
+ /// they must provide an implementation of this method.
+ void handleChangedOperand(void *, Metadata *) {
+ llvm_unreachable("Unimplemented in Metadata subclass");
+ }
+
+public:
+ unsigned getMetadataID() const { return SubclassID; }
+
+ /// \brief User-friendly dump.
+ ///
+ /// If \c M is provided, metadata nodes will be numbered canonically;
+ /// otherwise, pointer addresses are substituted.
+ ///
+ /// Note: this uses an explicit overload instead of default arguments so that
+ /// the nullptr version is easy to call from a debugger.
+ ///
+ /// @{
+ void dump() const;
+ void dump(const Module *M) const;
+ /// @}
+
+ /// \brief Print.
+ ///
+ /// Prints definition of \c this.
+ ///
+ /// If \c M is provided, metadata nodes will be numbered canonically;
+ /// otherwise, pointer addresses are substituted.
+ /// @{
+ void print(raw_ostream &OS, const Module *M = nullptr,
+ bool IsForDebug = false) const;
+ void print(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr,
+ bool IsForDebug = false) const;
+ /// @}
+
+ /// \brief Print as operand.
+ ///
+ /// Prints reference of \c this.
+ ///
+ /// If \c M is provided, metadata nodes will be numbered canonically;
+ /// otherwise, pointer addresses are substituted.
+ /// @{
+ void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
+ void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
+ const Module *M = nullptr) const;
+ /// @}
+};
+
+#define HANDLE_METADATA(CLASS) class CLASS;
+#include "llvm/IR/Metadata.def"
+
+// Provide specializations of isa so that we don't need definitions of
+// subclasses to see if the metadata is a subclass.
+#define HANDLE_METADATA_LEAF(CLASS) \
+ template <> struct isa_impl<CLASS, Metadata> { \
+ static inline bool doit(const Metadata &MD) { \
+ return MD.getMetadataID() == Metadata::CLASS##Kind; \
+ } \
+ };
+#include "llvm/IR/Metadata.def"
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
+ MD.print(OS);
+ return OS;
+}
+
+/// \brief Metadata wrapper in the Value hierarchy.
+///
+/// A member of the \a Value hierarchy to represent a reference to metadata.
+/// This allows, e.g., intrinsics to have metadata as operands.
+///
+/// Notably, this is the only thing in either hierarchy that is allowed to
+/// reference \a LocalAsMetadata.
+class MetadataAsValue : public Value {
+ friend class ReplaceableMetadataImpl;
+ friend class LLVMContextImpl;
+
+ Metadata *MD;
+
+ MetadataAsValue(Type *Ty, Metadata *MD);
+ ~MetadataAsValue() override;
+
+ /// \brief Drop use of metadata (during teardown).
+ void dropUse() { MD = nullptr; }
+
+public:
+ static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
+ static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);
+ Metadata *getMetadata() const { return MD; }
+
+ static bool classof(const Value *V) {
+ return V->getValueID() == MetadataAsValueVal;
+ }
+
+private:
+ void handleChangedMetadata(Metadata *MD);
+ void track();
+ void untrack();
+};
+
+/// \brief API for tracking metadata references through RAUW and deletion.
+///
+/// Shared API for updating \a Metadata pointers in subclasses that support
+/// RAUW.
+///
+/// This API is not meant to be used directly. See \a TrackingMDRef for a
+/// user-friendly tracking reference.
+class MetadataTracking {
+public:
+ /// \brief Track the reference to metadata.
+ ///
+ /// Register \c MD with \c *MD, if the subclass supports tracking. If \c *MD
+ /// gets RAUW'ed, \c MD will be updated to the new address. If \c *MD gets
+ /// deleted, \c MD will be set to \c nullptr.
+ ///
+ /// If tracking isn't supported, \c *MD will not change.
+ ///
+ /// \return true iff tracking is supported by \c MD.
+ static bool track(Metadata *&MD) {
+ return track(&MD, *MD, static_cast<Metadata *>(nullptr));
+ }
+
+ /// \brief Track the reference to metadata for \a Metadata.
+ ///
+ /// As \a track(Metadata*&), but with support for calling back to \c Owner to
+ /// tell it that its operand changed. This could trigger \c Owner being
+ /// re-uniqued.
+ static bool track(void *Ref, Metadata &MD, Metadata &Owner) {
+ return track(Ref, MD, &Owner);
+ }
+
+ /// \brief Track the reference to metadata for \a MetadataAsValue.
+ ///
+ /// As \a track(Metadata*&), but with support for calling back to \c Owner to
+ /// tell it that its operand changed. This could trigger \c Owner being
+ /// re-uniqued.
+ static bool track(void *Ref, Metadata &MD, MetadataAsValue &Owner) {
+ return track(Ref, MD, &Owner);
+ }
+
+ /// \brief Stop tracking a reference to metadata.
+ ///
+ /// Stops \c *MD from tracking \c MD.
+ static void untrack(Metadata *&MD) { untrack(&MD, *MD); }
+ static void untrack(void *Ref, Metadata &MD);
+
+ /// \brief Move tracking from one reference to another.
+ ///
+ /// Semantically equivalent to \c untrack(MD) followed by \c track(New),
+ /// except that ownership callbacks are maintained.
+ ///
+ /// Note: it is an error if \c *MD does not equal \c New.
+ ///
+ /// \return true iff tracking is supported by \c MD.
+ static bool retrack(Metadata *&MD, Metadata *&New) {
+ return retrack(&MD, *MD, &New);
+ }
+ static bool retrack(void *Ref, Metadata &MD, void *New);
+
+ /// \brief Check whether metadata is replaceable.
+ static bool isReplaceable(const Metadata &MD);
+
+ typedef PointerUnion<MetadataAsValue *, Metadata *> OwnerTy;
+
+private:
+ /// \brief Track a reference to metadata for an owner.
+ ///
+ /// Generalized version of tracking.
+ static bool track(void *Ref, Metadata &MD, OwnerTy Owner);
+};
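+
+// Example (a sketch): keep a raw Metadata pointer current across RAUW.
+//
+//   Metadata *MD = N; // some tracked node
+//   MetadataTracking::track(MD);   // MD now follows RAUW; nulled on deletion.
+//   ...
+//   MetadataTracking::untrack(MD); // stop tracking before MD goes away.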
+
+/// \brief Shared implementation of use-lists for replaceable metadata.
+///
+/// Most metadata cannot be RAUW'ed. This is a shared implementation of
+/// use-lists and associated API for the two that support it (\a ValueAsMetadata
+/// and \a TempMDNode).
+class ReplaceableMetadataImpl {
+ friend class MetadataTracking;
+
+public:
+ typedef MetadataTracking::OwnerTy OwnerTy;
+
+private:
+ LLVMContext &Context;
+ uint64_t NextIndex;
+ SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;
+
+public:
+ ReplaceableMetadataImpl(LLVMContext &Context)
+ : Context(Context), NextIndex(0) {}
+ ~ReplaceableMetadataImpl() {
+ assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
+ }
+
+ LLVMContext &getContext() const { return Context; }
+
+ /// \brief Replace all uses of this with MD.
+ ///
+ /// Replace all uses of this with \c MD, which is allowed to be null.
+ void replaceAllUsesWith(Metadata *MD);
+
+ /// \brief Resolve all uses of this.
+ ///
+ /// Resolve all uses of this, turning off RAUW permanently. If \c
+ /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
+ /// is resolved.
+ void resolveAllUses(bool ResolveUsers = true);
+
+private:
+ void addRef(void *Ref, OwnerTy Owner);
+ void dropRef(void *Ref);
+ void moveRef(void *Ref, void *New, const Metadata &MD);
+
+ static ReplaceableMetadataImpl *get(Metadata &MD);
+};
+
+/// \brief Value wrapper in the Metadata hierarchy.
+///
+/// This is a custom value handle that allows other metadata to refer to
+/// classes in the Value hierarchy.
+///
+/// Because of full uniquing support, each value is only wrapped by a single \a
+/// ValueAsMetadata object, so the lookup maps are far more efficient than
+/// those using ValueHandleBase.
+class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl {
+ friend class ReplaceableMetadataImpl;
+ friend class LLVMContextImpl;
+
+ Value *V;
+
+ /// \brief Drop users without RAUW (during teardown).
+ void dropUsers() {
+ ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false);
+ }
+
+protected:
+ ValueAsMetadata(unsigned ID, Value *V)
+ : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
+ assert(V && "Expected valid value");
+ }
+ ~ValueAsMetadata() = default;
+
+public:
+ static ValueAsMetadata *get(Value *V);
+ static ConstantAsMetadata *getConstant(Value *C) {
+ return cast<ConstantAsMetadata>(get(C));
+ }
+ static LocalAsMetadata *getLocal(Value *Local) {
+ return cast<LocalAsMetadata>(get(Local));
+ }
+
+ static ValueAsMetadata *getIfExists(Value *V);
+ static ConstantAsMetadata *getConstantIfExists(Value *C) {
+ return cast_or_null<ConstantAsMetadata>(getIfExists(C));
+ }
+ static LocalAsMetadata *getLocalIfExists(Value *Local) {
+ return cast_or_null<LocalAsMetadata>(getIfExists(Local));
+ }
+
+ Value *getValue() const { return V; }
+ Type *getType() const { return V->getType(); }
+ LLVMContext &getContext() const { return V->getContext(); }
+
+ static void handleDeletion(Value *V);
+ static void handleRAUW(Value *From, Value *To);
+
+protected:
+ /// \brief Handle collisions after \a Value::replaceAllUsesWith().
+ ///
+ /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped
+ /// \a Value gets RAUW'ed and the target already exists, this is used to
+ /// merge the two metadata nodes.
+ void replaceAllUsesWith(Metadata *MD) {
+ ReplaceableMetadataImpl::replaceAllUsesWith(MD);
+ }
+
+public:
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == LocalAsMetadataKind ||
+ MD->getMetadataID() == ConstantAsMetadataKind;
+ }
+};
+
+class ConstantAsMetadata : public ValueAsMetadata {
+ friend class ValueAsMetadata;
+
+ ConstantAsMetadata(Constant *C)
+ : ValueAsMetadata(ConstantAsMetadataKind, C) {}
+
+public:
+ static ConstantAsMetadata *get(Constant *C) {
+ return ValueAsMetadata::getConstant(C);
+ }
+ static ConstantAsMetadata *getIfExists(Constant *C) {
+ return ValueAsMetadata::getConstantIfExists(C);
+ }
+
+ Constant *getValue() const {
+ return cast<Constant>(ValueAsMetadata::getValue());
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == ConstantAsMetadataKind;
+ }
+};
+
+class LocalAsMetadata : public ValueAsMetadata {
+ friend class ValueAsMetadata;
+
+ LocalAsMetadata(Value *Local)
+ : ValueAsMetadata(LocalAsMetadataKind, Local) {
+ assert(!isa<Constant>(Local) && "Expected local value");
+ }
+
+public:
+ static LocalAsMetadata *get(Value *Local) {
+ return ValueAsMetadata::getLocal(Local);
+ }
+ static LocalAsMetadata *getIfExists(Value *Local) {
+ return ValueAsMetadata::getLocalIfExists(Local);
+ }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == LocalAsMetadataKind;
+ }
+};
+
+/// \brief Transitional API for extracting constants from Metadata.
+///
+/// This namespace contains transitional functions for metadata that points to
+/// \a Constants.
+///
+/// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode
+/// operands could refer to any \a Value. There was a lot of code like this:
+///
+/// \code
+/// MDNode *N = ...;
+/// auto *CI = dyn_cast<ConstantInt>(N->getOperand(2));
+/// \endcode
+///
+/// Now that \a Value and \a Metadata are in separate hierarchies, maintaining
+/// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three
+/// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and
+/// cast in the \a Value hierarchy. Besides creating boiler-plate, this
+/// requires subtle control flow changes.
+///
+/// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt,
+/// so that metadata can refer to numbers without traversing a bridge to the \a
+/// Value hierarchy. In this final state, the code above would look like this:
+///
+/// \code
+/// MDNode *N = ...;
+/// auto *MI = dyn_cast<MDInt>(N->getOperand(2));
+/// \endcode
+///
+/// The API in this namespace supports the transition. \a MDInt doesn't exist
+/// yet, and even once it does, changing each metadata schema to use it is its
+/// own mini-project. In the meantime this API prevents us from introducing
+/// complex and bug-prone control flow that will disappear in the end. In
+/// particular, the above code looks like this:
+///
+/// \code
+/// MDNode *N = ...;
+/// auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2));
+/// \endcode
+///
+/// The full set of provided functions includes:
+///
+/// mdconst::hasa <=> isa
+/// mdconst::extract <=> cast
+/// mdconst::extract_or_null <=> cast_or_null
+/// mdconst::dyn_extract <=> dyn_cast
+/// mdconst::dyn_extract_or_null <=> dyn_cast_or_null
+///
+/// The target of the cast must be a subclass of \a Constant.
+namespace mdconst {
+
+namespace detail {
+template <class T> T &make();
+template <class T, class Result> struct HasDereference {
+ typedef char Yes[1];
+ typedef char No[2];
+ template <size_t N> struct SFINAE {};
+
+ template <class U, class V>
+ static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
+ template <class U, class V> static No &hasDereference(...);
+
+ static const bool value =
+ sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
+};
+template <class V, class M> struct IsValidPointer {
+ static const bool value = std::is_base_of<Constant, V>::value &&
+ HasDereference<M, const Metadata &>::value;
+};
+template <class V, class M> struct IsValidReference {
+ static const bool value = std::is_base_of<Constant, V>::value &&
+ std::is_convertible<M, const Metadata &>::value;
+};
+} // end namespace detail
+
+/// \brief Check whether Metadata has a Value.
+///
+/// As an analogue to \a isa(), check whether \c MD wraps a \a Value of
+/// type \c X.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, bool>::type
+hasa(Y &&MD) {
+ assert(MD && "Null pointer sent into hasa");
+ if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
+ return isa<X>(V->getValue());
+ return false;
+}
+template <class X, class Y>
+inline
+ typename std::enable_if<detail::IsValidReference<X, Y &>::value, bool>::type
+ hasa(Y &MD) {
+ return hasa(&MD);
+}
+
+/// \brief Extract a Value from Metadata.
+///
+/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+extract(Y &&MD) {
+ return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
+}
+template <class X, class Y>
+inline
+ typename std::enable_if<detail::IsValidReference<X, Y &>::value, X *>::type
+ extract(Y &MD) {
+ return extract(&MD);
+}
+
+/// \brief Extract a Value from Metadata, allowing null.
+///
+/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
+/// from \c MD, allowing \c MD to be null.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+extract_or_null(Y &&MD) {
+ if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
+ return cast<X>(V->getValue());
+ return nullptr;
+}
+
+/// \brief Extract a Value from Metadata, if any.
+///
+/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
+/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
+/// Value it does contain is of the wrong subclass.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+dyn_extract(Y &&MD) {
+ if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
+ return dyn_cast<X>(V->getValue());
+ return nullptr;
+}
+
+/// \brief Extract a Value from Metadata, if any, allowing null.
+///
+/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
+/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
+/// Value it does contain is of the wrong subclass, allowing \c MD to be null.
+template <class X, class Y>
+inline typename std::enable_if<detail::IsValidPointer<X, Y>::value, X *>::type
+dyn_extract_or_null(Y &&MD) {
+ if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
+ return dyn_cast<X>(V->getValue());
+ return nullptr;
+}
+
+} // end namespace mdconst
+
+//===----------------------------------------------------------------------===//
+/// \brief A single uniqued string.
+///
+/// These are used to efficiently contain a byte sequence for metadata.
+/// MDString is always unnamed.
+class MDString : public Metadata {
+ friend class StringMapEntry<MDString>;
+
+ MDString(const MDString &) = delete;
+ MDString &operator=(MDString &&) = delete;
+ MDString &operator=(const MDString &) = delete;
+
+ StringMapEntry<MDString> *Entry;
+ MDString() : Metadata(MDStringKind, Uniqued), Entry(nullptr) {}
+ MDString(MDString &&) : Metadata(MDStringKind, Uniqued) {}
+
+public:
+ static MDString *get(LLVMContext &Context, StringRef Str);
+ static MDString *get(LLVMContext &Context, const char *Str) {
+ return get(Context, Str ? StringRef(Str) : StringRef());
+ }
+
+ StringRef getString() const;
+
+ unsigned getLength() const { return (unsigned)getString().size(); }
+
+ typedef StringRef::iterator iterator;
+
+ /// \brief Pointer to the first byte of the string.
+ iterator begin() const { return getString().begin(); }
+
+ /// \brief Pointer to one byte past the end of the string.
+ iterator end() const { return getString().end(); }
+
+ const unsigned char *bytes_begin() const { return getString().bytes_begin(); }
+ const unsigned char *bytes_end() const { return getString().bytes_end(); }
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == MDStringKind;
+ }
+};
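+
+// Example (a sketch):
+//
+//   MDString *S = MDString::get(Ctx, "some string");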
+
+/// \brief A collection of metadata nodes that might be associated with a
+/// memory access used by the alias-analysis infrastructure.
+struct AAMDNodes {
+ explicit AAMDNodes(MDNode *T = nullptr, MDNode *S = nullptr,
+ MDNode *N = nullptr)
+ : TBAA(T), Scope(S), NoAlias(N) {}
+
+ bool operator==(const AAMDNodes &A) const {
+ return TBAA == A.TBAA && Scope == A.Scope && NoAlias == A.NoAlias;
+ }
+
+ bool operator!=(const AAMDNodes &A) const { return !(*this == A); }
+
+ explicit operator bool() const { return TBAA || Scope || NoAlias; }
+
+ /// \brief The tag for type-based alias analysis.
+ MDNode *TBAA;
+
+ /// \brief The tag for alias scope specification (used with noalias).
+ MDNode *Scope;
+
+ /// \brief The tag specifying the noalias scope.
+ MDNode *NoAlias;
+};
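+
+// Example (a sketch; assumes an Instruction *I and this era's
+// Instruction::getAAMetadata(AAMDNodes &) helper):
+//
+//   AAMDNodes AATags;
+//   I->getAAMetadata(AATags);
+//   if (AATags.TBAA)
+//     ...;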
+
+// Specialize DenseMapInfo for AAMDNodes.
+template<>
+struct DenseMapInfo<AAMDNodes> {
+ static inline AAMDNodes getEmptyKey() {
+ return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
+ nullptr, nullptr);
+ }
+ static inline AAMDNodes getTombstoneKey() {
+ return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
+ nullptr, nullptr);
+ }
+ static unsigned getHashValue(const AAMDNodes &Val) {
+ return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
+ DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
+ DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
+ }
+ static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
+ return LHS == RHS;
+ }
+};
+
+/// \brief Tracking metadata reference owned by Metadata.
+///
+/// Similar to \a TrackingMDRef, but it's expected to be owned by an instance
+/// of \a Metadata, which has the option of registering itself for callbacks to
+/// re-unique itself.
+///
+/// In particular, this is used by \a MDNode.
+class MDOperand {
+ MDOperand(MDOperand &&) = delete;
+ MDOperand(const MDOperand &) = delete;
+ MDOperand &operator=(MDOperand &&) = delete;
+ MDOperand &operator=(const MDOperand &) = delete;
+
+ Metadata *MD;
+
+public:
+ MDOperand() : MD(nullptr) {}
+ ~MDOperand() { untrack(); }
+
+ Metadata *get() const { return MD; }
+ operator Metadata *() const { return get(); }
+ Metadata *operator->() const { return get(); }
+ Metadata &operator*() const { return *get(); }
+
+ void reset() {
+ untrack();
+ MD = nullptr;
+ }
+ void reset(Metadata *MD, Metadata *Owner) {
+ untrack();
+ this->MD = MD;
+ track(Owner);
+ }
+
+private:
+ void track(Metadata *Owner) {
+ if (MD) {
+ if (Owner)
+ MetadataTracking::track(this, *MD, *Owner);
+ else
+ MetadataTracking::track(MD);
+ }
+ }
+ void untrack() {
+ assert(static_cast<void *>(this) == &MD && "Expected same address");
+ if (MD)
+ MetadataTracking::untrack(MD);
+ }
+};
+
+template <> struct simplify_type<MDOperand> {
+ typedef Metadata *SimpleType;
+ static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); }
+};
+
+template <> struct simplify_type<const MDOperand> {
+ typedef Metadata *SimpleType;
+ static SimpleType getSimplifiedValue(const MDOperand &MD) { return MD.get(); }
+};
+
+/// \brief Pointer to the context, with optional RAUW support.
+///
+/// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer
+/// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext).
+class ContextAndReplaceableUses {
+ PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr;
+
+ ContextAndReplaceableUses() = delete;
+ ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
+ ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
+ ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
+ ContextAndReplaceableUses &
+ operator=(const ContextAndReplaceableUses &) = delete;
+
+public:
+ ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {}
+ ContextAndReplaceableUses(
+ std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses)
+ : Ptr(ReplaceableUses.release()) {
+ assert(getReplaceableUses() && "Expected non-null replaceable uses");
+ }
+ ~ContextAndReplaceableUses() { delete getReplaceableUses(); }
+
+ operator LLVMContext &() { return getContext(); }
+
+ /// \brief Whether this contains RAUW support.
+ bool hasReplaceableUses() const {
+ return Ptr.is<ReplaceableMetadataImpl *>();
+ }
+ LLVMContext &getContext() const {
+ if (hasReplaceableUses())
+ return getReplaceableUses()->getContext();
+ return *Ptr.get<LLVMContext *>();
+ }
+ ReplaceableMetadataImpl *getReplaceableUses() const {
+ if (hasReplaceableUses())
+ return Ptr.get<ReplaceableMetadataImpl *>();
+ return nullptr;
+ }
+
+ /// \brief Assign RAUW support to this.
+ ///
+ /// Make this replaceable, taking ownership of \c ReplaceableUses (which must
+ /// not be null).
+ void
+ makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) {
+ assert(ReplaceableUses && "Expected non-null replaceable uses");
+ assert(&ReplaceableUses->getContext() == &getContext() &&
+ "Expected same context");
+ delete getReplaceableUses();
+ Ptr = ReplaceableUses.release();
+ }
+
+ /// \brief Drop RAUW support.
+ ///
+ /// Cede ownership of RAUW support, returning it.
+ std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() {
+ assert(hasReplaceableUses() && "Expected to own replaceable uses");
+ std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses(
+ getReplaceableUses());
+ Ptr = &ReplaceableUses->getContext();
+ return ReplaceableUses;
+ }
+};
+
+struct TempMDNodeDeleter {
+ inline void operator()(MDNode *Node) const;
+};
+
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ typedef std::unique_ptr<CLASS, TempMDNodeDeleter> Temp##CLASS;
+#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS)
+#include "llvm/IR/Metadata.def"
+
+/// \brief Metadata node.
+///
+/// Metadata nodes can be uniqued, like constants, or distinct. Temporary
+/// metadata nodes (with full support for RAUW) can be used to delay uniquing
+/// until forward references are known. The basic metadata node is an \a
+/// MDTuple.
+///
+/// There is limited support for RAUW at construction time. At construction
+/// time, if any operand is a temporary node (or an unresolved uniqued node,
+/// which indicates a transitive temporary operand), the node itself will be
+/// unresolved. As soon as all operands become resolved, it will drop RAUW
+/// support permanently.
+///
+/// If an unresolved node is part of a cycle, \a resolveCycles() needs
+/// to be called on some member of the cycle once all temporary nodes have been
+/// replaced.
+class MDNode : public Metadata {
+ friend class ReplaceableMetadataImpl;
+ friend class LLVMContextImpl;
+
+ MDNode(const MDNode &) = delete;
+ void operator=(const MDNode &) = delete;
+ void *operator new(size_t) = delete;
+
+ unsigned NumOperands;
+ unsigned NumUnresolved;
+
+protected:
+ ContextAndReplaceableUses Context;
+
+ void *operator new(size_t Size, unsigned NumOps);
+ void operator delete(void *Mem);
+
+ /// \brief Required by std, but never called.
+ void operator delete(void *, unsigned) {
+ llvm_unreachable("Constructor throws?");
+ }
+
+ /// \brief Required by std, but never called.
+ void operator delete(void *, unsigned, bool) {
+ llvm_unreachable("Constructor throws?");
+ }
+
+ MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
+ ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = None);
+ ~MDNode() = default;
+
+ void dropAllReferences();
+
+ MDOperand *mutable_begin() { return mutable_end() - NumOperands; }
+ MDOperand *mutable_end() { return reinterpret_cast<MDOperand *>(this); }
+
+ typedef iterator_range<MDOperand *> mutable_op_range;
+ mutable_op_range mutable_operands() {
+ return mutable_op_range(mutable_begin(), mutable_end());
+ }
+
+public:
+ static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
+ static inline MDTuple *getIfExists(LLVMContext &Context,
+ ArrayRef<Metadata *> MDs);
+ static inline MDTuple *getDistinct(LLVMContext &Context,
+ ArrayRef<Metadata *> MDs);
+ static inline TempMDTuple getTemporary(LLVMContext &Context,
+ ArrayRef<Metadata *> MDs);
+
+ /// \brief Create a (temporary) clone of this.
+ TempMDNode clone() const;
+
+ /// \brief Deallocate a node created by getTemporary.
+ ///
+ /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
+ /// references will be reset.
+ static void deleteTemporary(MDNode *N);
+
+ LLVMContext &getContext() const { return Context.getContext(); }
+
+ /// \brief Replace a specific operand.
+ void replaceOperandWith(unsigned I, Metadata *New);
+
+ /// \brief Check if node is fully resolved.
+ ///
+ /// If \a isTemporary(), this always returns \c false; if \a isDistinct(),
+ /// this always returns \c true.
+ ///
+ /// If \a isUniqued(), returns \c true if this has already dropped RAUW
+ /// support (because all operands are resolved).
+ ///
+ /// As forward declarations are resolved, their containers should get
+ /// resolved automatically. However, if this (or one of its operands) is
+ /// involved in a cycle, \a resolveCycles() needs to be called explicitly.
+ bool isResolved() const { return !Context.hasReplaceableUses(); }
+
+ bool isUniqued() const { return Storage == Uniqued; }
+ bool isDistinct() const { return Storage == Distinct; }
+ bool isTemporary() const { return Storage == Temporary; }
+
+ /// \brief RAUW a temporary.
+ ///
+ /// \pre \a isTemporary() must be \c true.
+ void replaceAllUsesWith(Metadata *MD) {
+ assert(isTemporary() && "Expected temporary node");
+ assert(!isResolved() && "Expected RAUW support");
+ Context.getReplaceableUses()->replaceAllUsesWith(MD);
+ }
+
+ /// \brief Resolve cycles.
+ ///
+ /// Once all forward declarations have been resolved, force cycles to be
+ /// resolved. If \p MDMaterialized is true, then any temporary metadata
+ /// is ignored, otherwise it asserts when encountering temporary metadata.
+ ///
+ /// \pre No operands (or operands' operands, etc.) have \a isTemporary().
+ void resolveCycles(bool MDMaterialized = true);
+
+ /// \brief Replace a temporary node with a permanent one.
+ ///
+ /// Try to create a uniqued version of \c N -- in place, if possible -- and
+ /// return it. If \c N cannot be uniqued, return a distinct node instead.
+ template <class T>
+ static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
+ return cast<T>(N.release()->replaceWithPermanentImpl());
+ }
+
+ /// \brief Replace a temporary node with a uniqued one.
+ ///
+ /// Create a uniqued version of \c N -- in place, if possible -- and return
+ /// it. Takes ownership of the temporary node.
+ ///
+ /// \pre N does not self-reference.
+ template <class T>
+ static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
+ return cast<T>(N.release()->replaceWithUniquedImpl());
+ }
+
+ /// \brief Replace a temporary node with a distinct one.
+ ///
+ /// Create a distinct version of \c N -- in place, if possible -- and return
+ /// it. Takes ownership of the temporary node.
+ template <class T>
+ static typename std::enable_if<std::is_base_of<MDNode, T>::value, T *>::type
+ replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
+ return cast<T>(N.release()->replaceWithDistinctImpl());
+ }
+
+private:
+ MDNode *replaceWithPermanentImpl();
+ MDNode *replaceWithUniquedImpl();
+ MDNode *replaceWithDistinctImpl();
+
+protected:
+ /// \brief Set an operand.
+ ///
+ /// Sets the operand directly, without worrying about uniquing.
+ void setOperand(unsigned I, Metadata *New);
+
+ void storeDistinctInContext();
+ template <class T, class StoreT>
+ static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
+ template <class T> static T *storeImpl(T *N, StorageType Storage);
+
+private:
+ void handleChangedOperand(void *Ref, Metadata *New);
+
+ void resolve();
+ void resolveAfterOperandChange(Metadata *Old, Metadata *New);
+ void decrementUnresolvedOperandCount();
+ unsigned countUnresolvedOperands();
+
+ /// \brief Mutate this to be "uniqued".
+ ///
+ /// Mutate this so that \a isUniqued().
+ /// \pre \a isTemporary().
+ /// \pre already added to uniquing set.
+ void makeUniqued();
+
+ /// \brief Mutate this to be "distinct".
+ ///
+ /// Mutate this so that \a isDistinct().
+ /// \pre \a isTemporary().
+ void makeDistinct();
+
+ void deleteAsSubclass();
+ MDNode *uniquify();
+ void eraseFromStore();
+
+ template <class NodeTy> struct HasCachedHash;
+ template <class NodeTy>
+ static void dispatchRecalculateHash(NodeTy *N, std::true_type) {
+ N->recalculateHash();
+ }
+ template <class NodeTy>
+ static void dispatchRecalculateHash(NodeTy *, std::false_type) {}
+ template <class NodeTy>
+ static void dispatchResetHash(NodeTy *N, std::true_type) {
+ N->setHash(0);
+ }
+ template <class NodeTy>
+ static void dispatchResetHash(NodeTy *, std::false_type) {}
+
+public:
+ typedef const MDOperand *op_iterator;
+ typedef iterator_range<op_iterator> op_range;
+
+ op_iterator op_begin() const {
+ return const_cast<MDNode *>(this)->mutable_begin();
+ }
+ op_iterator op_end() const {
+ return const_cast<MDNode *>(this)->mutable_end();
+ }
+ op_range operands() const { return op_range(op_begin(), op_end()); }
+
+ const MDOperand &getOperand(unsigned I) const {
+ assert(I < NumOperands && "Out of range");
+ return op_begin()[I];
+ }
+
+ /// \brief Return number of MDNode operands.
+ unsigned getNumOperands() const { return NumOperands; }
+
+ /// \brief Methods for support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const Metadata *MD) {
+ switch (MD->getMetadataID()) {
+ default:
+ return false;
+#define HANDLE_MDNODE_LEAF(CLASS) \
+ case CLASS##Kind: \
+ return true;
+#include "llvm/IR/Metadata.def"
+ }
+ }
+
+ /// \brief Check whether MDNode is a vtable access.
+ bool isTBAAVtableAccess() const;
+
+ /// \brief Methods for metadata merging.
+ static MDNode *concatenate(MDNode *A, MDNode *B);
+ static MDNode *intersect(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);
+ static MDNode *getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B);
+
+};
+
+/// \brief Tuple of metadata.
+///
+/// This is the simple \a MDNode arbitrary tuple. Nodes are uniqued by
+/// default based on their operands.
+class MDTuple : public MDNode {
+ friend class LLVMContextImpl;
+ friend class MDNode;
+
+ MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash,
+ ArrayRef<Metadata *> Vals)
+ : MDNode(C, MDTupleKind, Storage, Vals) {
+ setHash(Hash);
+ }
+ ~MDTuple() { dropAllReferences(); }
+
+ void setHash(unsigned Hash) { SubclassData32 = Hash; }
+ void recalculateHash();
+
+ static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
+ StorageType Storage, bool ShouldCreate = true);
+
+ TempMDTuple cloneImpl() const {
+ return getTemporary(getContext(),
+ SmallVector<Metadata *, 4>(op_begin(), op_end()));
+ }
+
+public:
+ /// \brief Get the hash, if any.
+ unsigned getHash() const { return SubclassData32; }
+
+ static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return getImpl(Context, MDs, Uniqued);
+ }
+ static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
+ }
+
+ /// \brief Return a distinct node.
+ ///
+ /// Return a distinct node -- i.e., a node that is not uniqued.
+ static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return getImpl(Context, MDs, Distinct);
+ }
+
+ /// \brief Return a temporary node.
+ ///
+ /// For use in constructing cyclic MDNode structures. A temporary MDNode is
+ /// not uniqued, may be RAUW'd, and must be manually deleted with
+ /// deleteTemporary.
+ static TempMDTuple getTemporary(LLVMContext &Context,
+ ArrayRef<Metadata *> MDs) {
+ return TempMDTuple(getImpl(Context, MDs, Temporary));
+ }
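+
+ // Example (a sketch) of using a temporary node to build a cycle:
+ //
+ //   auto Temp = MDTuple::getTemporary(Ctx, None);
+ //   Metadata *Ops[] = {Temp.get()};
+ //   MDNode *Self = MDTuple::get(Ctx, Ops); // forward reference via Temp
+ //   Temp->replaceAllUsesWith(Self);        // Self now refers to itself
+ //   Self->resolveCycles();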
+
+ /// \brief Return a (temporary) clone of this.
+ TempMDTuple clone() const { return cloneImpl(); }
+
+ static bool classof(const Metadata *MD) {
+ return MD->getMetadataID() == MDTupleKind;
+ }
+};
+
+MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return MDTuple::get(Context, MDs);
+}
+MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return MDTuple::getIfExists(Context, MDs);
+}
+MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
+ return MDTuple::getDistinct(Context, MDs);
+}
+TempMDTuple MDNode::getTemporary(LLVMContext &Context,
+ ArrayRef<Metadata *> MDs) {
+ return MDTuple::getTemporary(Context, MDs);
+}
+
+void TempMDNodeDeleter::operator()(MDNode *Node) const {
+ MDNode::deleteTemporary(Node);
+}
+
+/// \brief Typed iterator through MDNode operands.
+///
+/// An iterator that transforms an \a MDNode::iterator into an iterator over a
+/// particular Metadata subclass.
+template <class T>
+class TypedMDOperandIterator
+ : std::iterator<std::input_iterator_tag, T *, std::ptrdiff_t, void, T *> {
+ MDNode::op_iterator I = nullptr;
+
+public:
+ TypedMDOperandIterator() = default;
+ explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {}
+ T *operator*() const { return cast_or_null<T>(*I); }
+ TypedMDOperandIterator &operator++() {
+ ++I;
+ return *this;
+ }
+ TypedMDOperandIterator operator++(int) {
+ TypedMDOperandIterator Temp(*this);
+ ++I;
+ return Temp;
+ }
+ bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; }
+ bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; }
+};
+
+/// \brief Typed, array-like tuple of metadata.
+///
+/// This is a wrapper for \a MDTuple that makes it act like an array holding a
+/// particular type of metadata.
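+///
+/// A minimal sketch (assuming N is an MDTuple whose operands are MDStrings):
+/// \code
+///   MDTupleTypedArrayWrapper<MDString> Names(N);
+///   for (MDString *S : Names)
+///     ...use S (null if the underlying operand was null)...
+/// \endcode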
+template <class T> class MDTupleTypedArrayWrapper {
+ const MDTuple *N = nullptr;
+
+public:
+ MDTupleTypedArrayWrapper() = default;
+ MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {}
+
+ template <class U>
+ MDTupleTypedArrayWrapper(
+ const MDTupleTypedArrayWrapper<U> &Other,
+ typename std::enable_if<std::is_convertible<U *, T *>::value>::type * =
+ nullptr)
+ : N(Other.get()) {}
+
+ template <class U>
+ explicit MDTupleTypedArrayWrapper(
+ const MDTupleTypedArrayWrapper<U> &Other,
+ typename std::enable_if<!std::is_convertible<U *, T *>::value>::type * =
+ nullptr)
+ : N(Other.get()) {}
+
+ explicit operator bool() const { return get(); }
+ explicit operator MDTuple *() const { return get(); }
+
+ MDTuple *get() const { return const_cast<MDTuple *>(N); }
+ MDTuple *operator->() const { return get(); }
+ MDTuple &operator*() const { return *get(); }
+
+ // FIXME: Fix callers and remove condition on N.
+ unsigned size() const { return N ? N->getNumOperands() : 0u; }
+ T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }
+
+ // FIXME: Fix callers and remove condition on N.
+ typedef TypedMDOperandIterator<T> iterator;
+ iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
+ iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
+};
+
+#define HANDLE_METADATA(CLASS) \
+ typedef MDTupleTypedArrayWrapper<CLASS> CLASS##Array;
+#include "llvm/IR/Metadata.def"
+
+//===----------------------------------------------------------------------===//
+/// \brief A tuple of MDNodes.
+///
+/// Despite its name, a NamedMDNode isn't itself an MDNode. NamedMDNodes belong
+/// to modules, have names, and contain lists of MDNodes.
+///
+/// TODO: Inherit from Metadata.
+class NamedMDNode : public ilist_node<NamedMDNode> {
+ friend struct ilist_traits<NamedMDNode>;
+ friend class LLVMContextImpl;
+ friend class Module;
+ NamedMDNode(const NamedMDNode &) = delete;
+
+ std::string Name;
+ Module *Parent;
+ void *Operands; // SmallVector<TrackingMDRef, 4>
+
+ void setParent(Module *M) { Parent = M; }
+
+ explicit NamedMDNode(const Twine &N);
+
+ template<class T1, class T2>
+ class op_iterator_impl :
+ public std::iterator<std::bidirectional_iterator_tag, T2> {
+ const NamedMDNode *Node;
+ unsigned Idx;
+ op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) { }
+
+ friend class NamedMDNode;
+
+ public:
+ op_iterator_impl() : Node(nullptr), Idx(0) { }
+
+ bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
+ bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }
+ op_iterator_impl &operator++() {
+ ++Idx;
+ return *this;
+ }
+ op_iterator_impl operator++(int) {
+ op_iterator_impl tmp(*this);
+ operator++();
+ return tmp;
+ }
+ op_iterator_impl &operator--() {
+ --Idx;
+ return *this;
+ }
+ op_iterator_impl operator--(int) {
+ op_iterator_impl tmp(*this);
+ operator--();
+ return tmp;
+ }
+
+ T1 operator*() const { return Node->getOperand(Idx); }
+ };
+
+public:
+ /// \brief Drop all references and remove the node from parent module.
+ void eraseFromParent();
+
+ /// \brief Remove all uses and clear node vector.
+ void dropAllReferences();
+
+ ~NamedMDNode();
+
+ /// \brief Get the module that holds this named metadata collection.
+ inline Module *getParent() { return Parent; }
+ inline const Module *getParent() const { return Parent; }
+
+ MDNode *getOperand(unsigned i) const;
+ unsigned getNumOperands() const;
+ void addOperand(MDNode *M);
+ void setOperand(unsigned I, MDNode *New);
+ StringRef getName() const;
+ void print(raw_ostream &ROS, bool IsForDebug = false) const;
+ void dump() const;
+
+ // ---------------------------------------------------------------------------
+ // Operand Iterator interface...
+ //
+ typedef op_iterator_impl<MDNode *, MDNode> op_iterator;
+ op_iterator op_begin() { return op_iterator(this, 0); }
+ op_iterator op_end() { return op_iterator(this, getNumOperands()); }
+
+ typedef op_iterator_impl<const MDNode *, MDNode> const_op_iterator;
+ const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
+ const_op_iterator op_end() const { return const_op_iterator(this, getNumOperands()); }
+
+ inline iterator_range<op_iterator> operands() {
+ return make_range(op_begin(), op_end());
+ }
+ inline iterator_range<const_op_iterator> operands() const {
+ return make_range(op_begin(), op_end());
+ }
+};
+
+} // end llvm namespace
+
+#endif // LLVM_IR_METADATA_H
diff --git a/contrib/llvm/include/llvm/IR/Module.h b/contrib/llvm/include/llvm/IR/Module.h
new file mode 100644
index 0000000..942f685
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Module.h
@@ -0,0 +1,660 @@
+//===-- llvm/Module.h - C++ class to represent a VM module ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file contains the declarations for the Module class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULE_H
+#define LLVM_IR_MODULE_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/DataTypes.h"
+#include <system_error>
+
+namespace llvm {
+class FunctionType;
+class GVMaterializer;
+class LLVMContext;
+class RandomNumberGenerator;
+class StructType;
+
+template<> struct ilist_traits<NamedMDNode>
+ : public ilist_default_traits<NamedMDNode> {
+ // createSentinel is used to get hold of a node that marks the end of
+ // the list...
+ NamedMDNode *createSentinel() const {
+ return static_cast<NamedMDNode*>(&Sentinel);
+ }
+ static void destroySentinel(NamedMDNode*) {}
+
+ NamedMDNode *provideInitialHead() const { return createSentinel(); }
+ NamedMDNode *ensureHead(NamedMDNode*) const { return createSentinel(); }
+ static void noteHead(NamedMDNode*, NamedMDNode*) {}
+ void addNodeToList(NamedMDNode *) {}
+ void removeNodeFromList(NamedMDNode *) {}
+
+private:
+ mutable ilist_node<NamedMDNode> Sentinel;
+};
+
+/// A Module instance is used to store all the information related to an
+/// LLVM module. Modules are the top level container of all other LLVM
+/// Intermediate Representation (IR) objects. Each module directly contains a
+/// list of globals variables, a list of functions, a list of libraries (or
+/// other modules) this module depends on, a symbol table, and various data
+/// about the target's characteristics.
+///
+/// A module maintains a GlobalValRefMap object that is used to hold all
+/// constant references to global variables in the module. When a global
+/// variable is destroyed, it should have no entries in the GlobalValueRefMap.
+/// @brief The main container class for the LLVM Intermediate Representation.
+class Module {
+/// @name Types And Enumerations
+/// @{
+public:
+ /// The type for the list of global variables.
+ typedef SymbolTableList<GlobalVariable> GlobalListType;
+ /// The type for the list of functions.
+ typedef SymbolTableList<Function> FunctionListType;
+ /// The type for the list of aliases.
+ typedef SymbolTableList<GlobalAlias> AliasListType;
+ /// The type for the list of named metadata.
+ typedef ilist<NamedMDNode> NamedMDListType;
+ /// The type of the comdat "symbol" table.
+ typedef StringMap<Comdat> ComdatSymTabType;
+
+ /// The Global Variable iterator.
+ typedef GlobalListType::iterator global_iterator;
+ /// The Global Variable constant iterator.
+ typedef GlobalListType::const_iterator const_global_iterator;
+
+ /// The Function iterators.
+ typedef FunctionListType::iterator iterator;
+ /// The Function constant iterator
+ typedef FunctionListType::const_iterator const_iterator;
+
+ /// The Function reverse iterator.
+ typedef FunctionListType::reverse_iterator reverse_iterator;
+ /// The Function constant reverse iterator.
+ typedef FunctionListType::const_reverse_iterator const_reverse_iterator;
+
+ /// The Global Alias iterators.
+ typedef AliasListType::iterator alias_iterator;
+ /// The Global Alias constant iterator
+ typedef AliasListType::const_iterator const_alias_iterator;
+
+ /// The named metadata iterators.
+ typedef NamedMDListType::iterator named_metadata_iterator;
+ /// The named metadata constant iterators.
+ typedef NamedMDListType::const_iterator const_named_metadata_iterator;
+
+ /// This enumeration defines the supported behaviors of module flags.
+ enum ModFlagBehavior {
+ /// Emits an error if two values disagree, otherwise the resulting value is
+ /// that of the operands.
+ Error = 1,
+
+ /// Emits a warning if two values disagree. The result value will be the
+ /// operand for the flag from the first module being linked.
+ Warning = 2,
+
+ /// Adds a requirement that another module flag be present and have a
+ /// specified value after linking is performed. The value must be a metadata
+ /// pair, where the first element of the pair is the ID of the module flag
+ /// to be restricted, and the second element of the pair is the value the
+ /// module flag should be restricted to. This behavior can be used to
+ /// restrict the allowable results (via triggering of an error) of linking
+ /// IDs with the **Override** behavior.
+ Require = 3,
+
+ /// Uses the specified value, regardless of the behavior or value of the
+ /// other module. If both modules specify **Override**, but the values
+ /// differ, an error will be emitted.
+ Override = 4,
+
+ /// Appends the two values, which are required to be metadata nodes.
+ Append = 5,
+
+ /// Appends the two values, which are required to be metadata
+ /// nodes. However, duplicate entries in the second list are dropped
+ /// during the append operation.
+ AppendUnique = 6,
+
+ // Markers:
+ ModFlagBehaviorFirstVal = Error,
+ ModFlagBehaviorLastVal = AppendUnique
+ };
+
+ /// Checks if Metadata represents a valid ModFlagBehavior, and stores the
+ /// converted result in MFB.
+ static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);
+
+ struct ModuleFlagEntry {
+ ModFlagBehavior Behavior;
+ MDString *Key;
+ Metadata *Val;
+ ModuleFlagEntry(ModFlagBehavior B, MDString *K, Metadata *V)
+ : Behavior(B), Key(K), Val(V) {}
+ };
+
+/// @}
+/// @name Member Variables
+/// @{
+private:
+ LLVMContext &Context; ///< The LLVMContext from which types and
+ ///< constants are allocated.
+ GlobalListType GlobalList; ///< The Global Variables in the module
+ FunctionListType FunctionList; ///< The Functions in the module
+ AliasListType AliasList; ///< The Aliases in the module
+ NamedMDListType NamedMDList; ///< The named metadata in the module
+ std::string GlobalScopeAsm; ///< Inline Asm at global scope.
+ ValueSymbolTable *ValSymTab; ///< Symbol table for values
+ ComdatSymTabType ComdatSymTab; ///< Symbol table for COMDATs
+ std::unique_ptr<GVMaterializer>
+ Materializer; ///< Used to materialize GlobalValues
+ std::string ModuleID; ///< Human readable identifier for the module
+ std::string TargetTriple; ///< Platform target triple the module was compiled for.
+ ///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
+ void *NamedMDSymTab; ///< NamedMDNode names.
+ DataLayout DL; ///< DataLayout associated with the module
+
+ friend class Constant;
+
+/// @}
+/// @name Constructors
+/// @{
+public:
+ /// The Module constructor. Note that there is no default constructor. You
+ /// must provide a name for the module upon construction.
+ explicit Module(StringRef ModuleID, LLVMContext& C);
+ /// The module destructor. This will dropAllReferences.
+ ~Module();
+
+/// @}
+/// @name Module Level Accessors
+/// @{
+
+ /// Get the module identifier which is, essentially, the name of the module.
+ /// @returns the module identifier as a string
+ const std::string &getModuleIdentifier() const { return ModuleID; }
+
+ /// \brief Get a short "name" for the module.
+ ///
+ /// This is useful for debugging or logging. It is essentially a convenience
+ /// wrapper around getModuleIdentifier().
+ StringRef getName() const { return ModuleID; }
+
+ /// Get the data layout string for the module's target platform. This is
+ /// equivalent to getDataLayout().getStringRepresentation().
+ const std::string &getDataLayoutStr() const {
+ return DL.getStringRepresentation();
+ }
+
+ /// Get the data layout for the module's target platform.
+ const DataLayout &getDataLayout() const;
+
+ /// Get the target triple which is a string describing the target host.
+ /// @returns a string containing the target triple.
+ const std::string &getTargetTriple() const { return TargetTriple; }
+
+ /// Get the global data context.
+ /// @returns LLVMContext - a container for LLVM's global information
+ LLVMContext &getContext() const { return Context; }
+
+ /// Get any module-scope inline assembly blocks.
+ /// @returns a string containing the module-scope inline assembly blocks.
+ const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
+
+ /// Get a RandomNumberGenerator salted for use with this module. The
+ /// RNG can be seeded via -rng-seed=<uint64> and is salted with the
+ /// ModuleID and the provided pass salt. The returned RNG should not
+ /// be shared across threads or passes.
+ ///
+ /// A unique RNG per pass ensures a reproducible random stream even
+ /// when other randomness consuming passes are added or removed. In
+ /// addition, the random stream will be reproducible across LLVM
+ /// versions when the pass does not change.
+ RandomNumberGenerator *createRNG(const Pass* P) const;
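+ //
+ // A minimal sketch (P is the requesting pass; the returned RNG is assumed
+ // to be owned by the caller):
+ //
+ //   std::unique_ptr<RandomNumberGenerator> RNG(M.createRNG(P));
+ //   uint64_t R = (*RNG)(); // assuming the RNG exposes operator()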
+
+/// @}
+/// @name Module Level Mutators
+/// @{
+
+ /// Set the module identifier.
+ void setModuleIdentifier(StringRef ID) { ModuleID = ID; }
+
+ /// Set the data layout
+ void setDataLayout(StringRef Desc);
+ void setDataLayout(const DataLayout &Other);
+
+ /// Set the target triple.
+ void setTargetTriple(StringRef T) { TargetTriple = T; }
+
+ /// Set the module-scope inline assembly blocks.
+ /// A trailing newline is added if the input doesn't have one.
+ void setModuleInlineAsm(StringRef Asm) {
+ GlobalScopeAsm = Asm;
+ if (!GlobalScopeAsm.empty() &&
+ GlobalScopeAsm[GlobalScopeAsm.size()-1] != '\n')
+ GlobalScopeAsm += '\n';
+ }
+
+ /// Append to the module-scope inline assembly blocks.
+ /// A trailing newline is added if the input doesn't have one.
+ void appendModuleInlineAsm(StringRef Asm) {
+ GlobalScopeAsm += Asm;
+ if (!GlobalScopeAsm.empty() &&
+ GlobalScopeAsm[GlobalScopeAsm.size()-1] != '\n')
+ GlobalScopeAsm += '\n';
+ }
+
+/// @}
+/// @name Generic Value Accessors
+/// @{
+
+ /// Return the global value in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
+ GlobalValue *getNamedValue(StringRef Name) const;
+
+ /// Return a unique non-zero ID for the specified metadata kind. This ID is
+ /// uniqued across modules in the current LLVMContext.
+ unsigned getMDKindID(StringRef Name) const;
+
+ /// Populate the client-supplied SmallVector with the names of the custom
+ /// metadata IDs registered in this LLVMContext.
+ void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
+
+ /// Populate the client-supplied SmallVector with the bundle tags registered in
+ /// this LLVMContext. The bundle tags are ordered by increasing bundle IDs.
+ /// \see LLVMContext::getOperandBundleTagID
+ void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
+
+ /// Return the type with the specified name, or null if there is none by that
+ /// name.
+ StructType *getTypeByName(StringRef Name) const;
+
+ std::vector<StructType *> getIdentifiedStructTypes() const;
+
+/// @}
+/// @name Function Accessors
+/// @{
+
+ /// Look up the specified function in the module symbol table. Four
+ /// possibilities:
+ /// 1. If it does not exist, add a prototype for the function and return it.
+ /// 2. If it exists, and has a local linkage, the existing function is
+ /// renamed and a new one is inserted.
+ /// 3. Otherwise, if the existing function has the correct prototype, return
+ /// the existing function.
+ /// 4. Finally, the function exists but has the wrong prototype: return the
+ /// function with a constantexpr cast to the right prototype.
+ Constant *getOrInsertFunction(StringRef Name, FunctionType *T,
+ AttributeSet AttributeList);
+
+ Constant *getOrInsertFunction(StringRef Name, FunctionType *T);
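+
+ // A minimal sketch (assuming a module M and its context):
+ //
+ //   LLVMContext &Ctx = M.getContext();
+ //   FunctionType *FT = FunctionType::get(Type::getInt32Ty(Ctx),
+ //                                        Type::getInt8PtrTy(Ctx),
+ //                                        /*isVarArg=*/false);
+ //   Constant *Puts = M.getOrInsertFunction("puts", FT);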
+
+ /// Look up the specified function in the module symbol table. If it does not
+ /// exist, add a prototype for the function and return it. This function
+ /// guarantees to return a constant pointer to the specified function type,
+ /// or a ConstantExpr BitCast of that type if the named function has a
+ /// different type. This version of the method takes a null-terminated list of
+ /// function arguments, which makes it easier for clients to use.
+ Constant *getOrInsertFunction(StringRef Name,
+ AttributeSet AttributeList,
+ Type *RetTy, ...) LLVM_END_WITH_NULL;
+
+ /// Same as above, but without the attributes.
+ Constant *getOrInsertFunction(StringRef Name, Type *RetTy, ...)
+ LLVM_END_WITH_NULL;
+
+ /// Look up the specified function in the module symbol table. If it does not
+ /// exist, return null.
+ Function *getFunction(StringRef Name) const;
+
+/// @}
+/// @name Global Variable Accessors
+/// @{
+
+ /// Look up the specified global variable in the module symbol table. If it
+ /// does not exist, return null. If AllowInternal is set to true, this
+ /// function will also return globals that have InternalLinkage. By default,
+ /// such globals are not returned.
+ GlobalVariable *getGlobalVariable(StringRef Name) const {
+ return getGlobalVariable(Name, false);
+ }
+
+ GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal) const {
+ return const_cast<Module *>(this)->getGlobalVariable(Name, AllowInternal);
+ }
+
+ GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal = false);
+
+ /// Return the global variable in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
+ GlobalVariable *getNamedGlobal(StringRef Name) {
+ return getGlobalVariable(Name, true);
+ }
+ const GlobalVariable *getNamedGlobal(StringRef Name) const {
+ return const_cast<Module *>(this)->getNamedGlobal(Name);
+ }
+
+ /// Look up the specified global in the module symbol table.
+ /// 1. If it does not exist, add a declaration of the global and return it.
+ /// 2. Else, the global exists but has the wrong type: return the global
+ /// with a constantexpr cast to the right type.
+ /// 3. Finally, if the existing global is the correct declaration, return
+ /// the existing global.
+ Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
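+
+ // E.g., a minimal sketch ("counter" is a hypothetical name):
+ //
+ //   Constant *G = M.getOrInsertGlobal("counter", Type::getInt64Ty(Ctx));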
+
+/// @}
+/// @name Global Alias Accessors
+/// @{
+
+ /// Return the global alias in the module with the specified name, of
+ /// arbitrary type. This method returns null if a global with the specified
+ /// name is not found.
+ GlobalAlias *getNamedAlias(StringRef Name) const;
+
+/// @}
+/// @name Named Metadata Accessors
+/// @{
+
+ /// Return the first NamedMDNode in the module with the specified name. This
+ /// method returns null if a NamedMDNode with the specified name is not found.
+ NamedMDNode *getNamedMetadata(const Twine &Name) const;
+
+ /// Return the named MDNode in the module with the specified name. This method
+ /// returns a new NamedMDNode if a NamedMDNode with the specified name is not
+ /// found.
+ NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
+
+ /// Remove the given NamedMDNode from this module and delete it.
+ void eraseNamedMetadata(NamedMDNode *NMD);
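+
+ // A minimal sketch ("my.metadata" is a hypothetical name):
+ //
+ //   NamedMDNode *NMD = M.getOrInsertNamedMetadata("my.metadata");
+ //   NMD->addOperand(MDNode::get(M.getContext(), None));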
+
+/// @}
+/// @name Comdat Accessors
+/// @{
+
+ /// Return the Comdat in the module with the specified name. It is created
+ /// if it didn't already exist.
+ Comdat *getOrInsertComdat(StringRef Name);
+
+/// @}
+/// @name Module Flags Accessors
+/// @{
+
+ /// Returns the module flags in the provided vector.
+ void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;
+
+ /// Return the corresponding value if Key appears in module flags, otherwise
+ /// return null.
+ Metadata *getModuleFlag(StringRef Key) const;
+
+ /// Returns the NamedMDNode in the module that represents module-level flags.
+ /// This method returns null if there are no module-level flags.
+ NamedMDNode *getModuleFlagsMetadata() const;
+
+ /// Returns the NamedMDNode in the module that represents module-level flags.
+ /// If module-level flags aren't found, it creates the named metadata that
+ /// contains them.
+ NamedMDNode *getOrInsertModuleFlagsMetadata();
+
+ /// Add a module-level flag to the module-level flags metadata. It will create
+ /// the module-level flags named metadata if it doesn't already exist.
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val);
+ void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
+ void addModuleFlag(MDNode *Node);
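+
+ // A minimal sketch of typical flag usage ("Dwarf Version" is one well-known
+ // key):
+ //
+ //   M.addModuleFlag(Module::Warning, "Dwarf Version", 4);
+ //   if (Metadata *MD = M.getModuleFlag("Dwarf Version"))
+ //     ...inspect the ConstantAsMetadata value...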
+
+/// @}
+/// @name Materialization
+/// @{
+
+ /// Sets the GVMaterializer to GVM. This module must not yet have a
+ /// Materializer. To reset the materializer for a module that already has one,
+ /// call materializeAll first. Destroying this module will destroy its
+ /// materializer without materializing any more GlobalValues. Short of
+ /// destroying the Module, the only way to detach or destroy the materializer
+ /// is to materialize all the GVs it controls; this avoids leaving orphan
+ /// unmaterialized GVs.
+ void setMaterializer(GVMaterializer *GVM);
+ /// Retrieves the GVMaterializer, if any, for this Module.
+ GVMaterializer *getMaterializer() const { return Materializer.get(); }
+ bool isMaterialized() const { return !getMaterializer(); }
+
+ /// Make sure the GlobalValue is fully read. If the module is corrupt, this
+ /// returns an error_code describing the problem. On success it returns the
+ /// zero (success) error_code.
+ std::error_code materialize(GlobalValue *GV);
+
+ /// Make sure all GlobalValues in this Module are fully read and clear the
+ /// Materializer.
+ std::error_code materializeAll();
+
+ std::error_code materializeMetadata();
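+
+ // A minimal sketch of the lazy-loading pattern (GVM is a hypothetical
+ // materializer; the module takes ownership):
+ //
+ //   M.setMaterializer(GVM);
+ //   if (std::error_code EC = M.materializeAll())
+ //     errs() << "materialization failed: " << EC.message() << "\n";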
+
+/// @}
+/// @name Direct access to the globals list, functions list, and symbol table
+/// @{
+
+ /// Get the Module's list of global variables (constant).
+ const GlobalListType &getGlobalList() const { return GlobalList; }
+ /// Get the Module's list of global variables.
+ GlobalListType &getGlobalList() { return GlobalList; }
+ static GlobalListType Module::*getSublistAccess(GlobalVariable*) {
+ return &Module::GlobalList;
+ }
+ /// Get the Module's list of functions (constant).
+ const FunctionListType &getFunctionList() const { return FunctionList; }
+ /// Get the Module's list of functions.
+ FunctionListType &getFunctionList() { return FunctionList; }
+ static FunctionListType Module::*getSublistAccess(Function*) {
+ return &Module::FunctionList;
+ }
+ /// Get the Module's list of aliases (constant).
+ const AliasListType &getAliasList() const { return AliasList; }
+ /// Get the Module's list of aliases.
+ AliasListType &getAliasList() { return AliasList; }
+ static AliasListType Module::*getSublistAccess(GlobalAlias*) {
+ return &Module::AliasList;
+ }
+ /// Get the Module's list of named metadata (constant).
+ const NamedMDListType &getNamedMDList() const { return NamedMDList; }
+ /// Get the Module's list of named metadata.
+ NamedMDListType &getNamedMDList() { return NamedMDList; }
+ static NamedMDListType Module::*getSublistAccess(NamedMDNode*) {
+ return &Module::NamedMDList;
+ }
+ /// Get the symbol table of global variable and function identifiers
+ const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
+ /// Get the Module's symbol table of global variable and function identifiers.
+ ValueSymbolTable &getValueSymbolTable() { return *ValSymTab; }
+ /// Get the Module's symbol table for COMDATs (constant).
+ const ComdatSymTabType &getComdatSymbolTable() const { return ComdatSymTab; }
+ /// Get the Module's symbol table for COMDATs.
+ ComdatSymTabType &getComdatSymbolTable() { return ComdatSymTab; }
+
+/// @}
+/// @name Global Variable Iteration
+/// @{
+
+ global_iterator global_begin() { return GlobalList.begin(); }
+ const_global_iterator global_begin() const { return GlobalList.begin(); }
+ global_iterator global_end () { return GlobalList.end(); }
+ const_global_iterator global_end () const { return GlobalList.end(); }
+ bool global_empty() const { return GlobalList.empty(); }
+
+ iterator_range<global_iterator> globals() {
+ return make_range(global_begin(), global_end());
+ }
+ iterator_range<const_global_iterator> globals() const {
+ return make_range(global_begin(), global_end());
+ }
+
+/// @}
+/// @name Function Iteration
+/// @{
+
+ iterator begin() { return FunctionList.begin(); }
+ const_iterator begin() const { return FunctionList.begin(); }
+ iterator end () { return FunctionList.end(); }
+ const_iterator end () const { return FunctionList.end(); }
+ reverse_iterator rbegin() { return FunctionList.rbegin(); }
+ const_reverse_iterator rbegin() const{ return FunctionList.rbegin(); }
+ reverse_iterator rend() { return FunctionList.rend(); }
+ const_reverse_iterator rend() const { return FunctionList.rend(); }
+ size_t size() const { return FunctionList.size(); }
+ bool empty() const { return FunctionList.empty(); }
+
+ iterator_range<iterator> functions() {
+ return make_range(begin(), end());
+ }
+ iterator_range<const_iterator> functions() const {
+ return make_range(begin(), end());
+ }
+
+/// @}
+/// @name Alias Iteration
+/// @{
+
+ alias_iterator alias_begin() { return AliasList.begin(); }
+ const_alias_iterator alias_begin() const { return AliasList.begin(); }
+ alias_iterator alias_end () { return AliasList.end(); }
+ const_alias_iterator alias_end () const { return AliasList.end(); }
+ size_t alias_size () const { return AliasList.size(); }
+ bool alias_empty() const { return AliasList.empty(); }
+
+ iterator_range<alias_iterator> aliases() {
+ return make_range(alias_begin(), alias_end());
+ }
+ iterator_range<const_alias_iterator> aliases() const {
+ return make_range(alias_begin(), alias_end());
+ }
+
+/// @}
+/// @name Named Metadata Iteration
+/// @{
+
+ named_metadata_iterator named_metadata_begin() { return NamedMDList.begin(); }
+ const_named_metadata_iterator named_metadata_begin() const {
+ return NamedMDList.begin();
+ }
+
+ named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
+ const_named_metadata_iterator named_metadata_end() const {
+ return NamedMDList.end();
+ }
+
+ size_t named_metadata_size() const { return NamedMDList.size(); }
+ bool named_metadata_empty() const { return NamedMDList.empty(); }
+
+ iterator_range<named_metadata_iterator> named_metadata() {
+ return make_range(named_metadata_begin(), named_metadata_end());
+ }
+ iterator_range<const_named_metadata_iterator> named_metadata() const {
+ return make_range(named_metadata_begin(), named_metadata_end());
+ }
+
+ /// Destroy ConstantArrays in LLVMContext if they are not used.
+ /// ConstantArrays constructed during linking can cause quadratic memory
+ /// explosion. Releasing all unused constants can cause a 20% LTO compile-time
+ /// slowdown for a large application.
+ ///
+ /// NOTE: Constants are currently owned by LLVMContext. This can thus only
+ /// be called where all uses of the LLVMContext are understood.
+ void dropTriviallyDeadConstantArrays();
+
+/// @}
+/// @name Utility functions for printing and dumping Module objects
+/// @{
+
+ /// Print the module to an output stream with an optional
+ /// AssemblyAnnotationWriter. If \c ShouldPreserveUseListOrder, then include
+ /// uselistorder directives so that use-lists can be recreated when reading
+ /// the assembly.
+ void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW,
+ bool ShouldPreserveUseListOrder = false,
+ bool IsForDebug = false) const;
+
+ /// Dump the module to stderr (for debugging).
+ void dump() const;
+
+ /// This function causes all the subinstructions to "let go" of all references
+ /// that they are maintaining. This allows one to 'delete' a whole class at
+ /// a time, even though there may be circular references... first all
+ /// references are dropped, and all use counts go to zero. Then everything
+ /// is delete'd for real. Note that no operations are valid on an object
+ /// that has "dropped all references", except operator delete.
+ void dropAllReferences();
+
+/// @}
+/// @name Utility functions for querying Debug information.
+/// @{
+
+ /// \brief Returns the Dwarf Version by checking module flags.
+ unsigned getDwarfVersion() const;
+
+ /// \brief Returns the CodeView Version by checking module flags.
+ /// Returns zero if not present in module.
+ unsigned getCodeViewFlag() const;
+
+/// @}
+/// @name Utility functions for querying and setting PIC level
+/// @{
+
+ /// \brief Returns the PIC level (small or large model)
+ PICLevel::Level getPICLevel() const;
+
+ /// \brief Set the PIC level (small or large model)
+ void setPICLevel(PICLevel::Level PL);
+/// @}
+
+ /// @name Utility functions for querying and setting PGO counts
+ /// @{
+
+ /// \brief Set maximum function count in PGO mode
+ void setMaximumFunctionCount(uint64_t);
+
+ /// \brief Returns maximum function count in PGO mode
+ Optional<uint64_t> getMaximumFunctionCount();
+ /// @}
+};
+
+/// A raw_ostream inserter for modules.
+inline raw_ostream &operator<<(raw_ostream &O, const Module &M) {
+ M.print(O, nullptr);
+ return O;
+}
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef)
+
+/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
+ * Module.
+ */
+inline Module *unwrap(LLVMModuleProviderRef MP) {
+ return reinterpret_cast<Module*>(MP);
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/ModuleSlotTracker.h b/contrib/llvm/include/llvm/IR/ModuleSlotTracker.h
new file mode 100644
index 0000000..49730a6
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ModuleSlotTracker.h
@@ -0,0 +1,76 @@
+//===-- llvm/IR/ModuleSlotTracker.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_MODULESLOTTRACKER_H
+#define LLVM_IR_MODULESLOTTRACKER_H
+
+#include <memory>
+
+namespace llvm {
+
+class Module;
+class Function;
+class SlotTracker;
+class Value;
+
+/// Manage lifetime of a slot tracker for printing IR.
+///
+/// Wrapper around the \a SlotTracker used internally by \a AsmWriter. This
+/// class allows callers to share the cost of incorporating the metadata in a
+/// module or a function.
+///
+/// If the IR changes from underneath \a ModuleSlotTracker, strings like
+/// "<badref>" will be printed, or, worse, the wrong slots entirely.
+class ModuleSlotTracker {
+ /// Storage for a slot tracker.
+ std::unique_ptr<SlotTracker> MachineStorage;
+
+ const Module *M = nullptr;
+ const Function *F = nullptr;
+ SlotTracker *Machine = nullptr;
+
+public:
+ /// Wrap a preinitialized SlotTracker.
+ ModuleSlotTracker(SlotTracker &Machine, const Module *M,
+ const Function *F = nullptr);
+
+ /// Construct a slot tracker from a module.
+ ///
+ /// If \a M is \c nullptr, uses a null slot tracker. Otherwise, initializes
+ /// a slot tracker, and initializes all metadata slots. \c
+ /// ShouldInitializeAllMetadata defaults to true because this is expected to
+ /// be shared between multiple callers, and otherwise MDNode references will
+ /// not match up.
+ explicit ModuleSlotTracker(const Module *M,
+ bool ShouldInitializeAllMetadata = true);
+
+ /// Destructor to clean up storage.
+ ~ModuleSlotTracker();
+
+ SlotTracker *getMachine() const { return Machine; }
+ const Module *getModule() const { return M; }
+ const Function *getCurrentFunction() const { return F; }
+
+ /// Incorporate the given function.
+ ///
+ /// Purge the currently incorporated function and incorporate \c F. If \c F
+ /// is currently incorporated, this is a no-op.
+ void incorporateFunction(const Function &F);
+
+ /// Return the slot number of the specified local value.
+ ///
+ /// A function that defines this value should be incorporated prior to calling
+ /// this method.
+ /// Return -1 if the value is not in the function's SlotTracker.
+ int getLocalSlot(const Value *V);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/NoFolder.h b/contrib/llvm/include/llvm/IR/NoFolder.h
new file mode 100644
index 0000000..61f4817
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/NoFolder.h
@@ -0,0 +1,299 @@
+//===- NoFolder.h - Constant folding helper ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NoFolder class, a helper for IRBuilder. It provides
+// IRBuilder with a set of methods for creating unfolded constants. This is
+// useful for learners trying to understand how LLVM IR works, and who don't
+// want details to be hidden by the constant folder. For general constant
+// creation and folding, use ConstantExpr and the routines in
+// llvm/Analysis/ConstantFolding.h.
+//
+// Note: since it is not actually possible to create unfolded constants, this
+// class returns instructions rather than constants.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_NOFOLDER_H
+#define LLVM_IR_NOFOLDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+
+/// NoFolder - Create "constants" (actually, instructions) with no folding.
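+///
+/// A minimal sketch of use with IRBuilder of this era (assuming a
+/// BasicBlock *BB and Constant operands L and R):
+/// \code
+///   IRBuilder<true, NoFolder> Builder(BB);
+///   Value *Sum = Builder.CreateAdd(L, R); // emits an add instruction even
+///                                         // though both operands are constant
+/// \endcode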
+class NoFolder {
+public:
+ explicit NoFolder() {}
+
+ //===--------------------------------------------------------------------===//
+ // Binary Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateAdd(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateAdd(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWAdd(LHS, RHS);
+ }
+ Instruction *CreateNUWAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWAdd(LHS, RHS);
+ }
+ Instruction *CreateFAdd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFAdd(LHS, RHS);
+ }
+ Instruction *CreateSub(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateSub(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWSub(LHS, RHS);
+ }
+ Instruction *CreateNUWSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWSub(LHS, RHS);
+ }
+ Instruction *CreateFSub(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFSub(LHS, RHS);
+ }
+ Instruction *CreateMul(Constant *LHS, Constant *RHS,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateMul(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNSWMul(LHS, RHS);
+ }
+ Instruction *CreateNUWMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateNUWMul(LHS, RHS);
+ }
+ Instruction *CreateFMul(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFMul(LHS, RHS);
+ }
+ Instruction *CreateUDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateUDiv(LHS, RHS);
+ return BinaryOperator::CreateExactUDiv(LHS, RHS);
+ }
+ Instruction *CreateExactUDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateExactUDiv(LHS, RHS);
+ }
+ Instruction *CreateSDiv(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateSDiv(LHS, RHS);
+ return BinaryOperator::CreateExactSDiv(LHS, RHS);
+ }
+ Instruction *CreateExactSDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateExactSDiv(LHS, RHS);
+ }
+ Instruction *CreateFDiv(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFDiv(LHS, RHS);
+ }
+ Instruction *CreateURem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateURem(LHS, RHS);
+ }
+ Instruction *CreateSRem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateSRem(LHS, RHS);
+ }
+ Instruction *CreateFRem(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateFRem(LHS, RHS);
+ }
+ Instruction *CreateShl(Constant *LHS, Constant *RHS, bool HasNUW = false,
+ bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateShl(LHS, RHS);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateLShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateLShr(LHS, RHS);
+ return BinaryOperator::CreateExactLShr(LHS, RHS);
+ }
+ Instruction *CreateAShr(Constant *LHS, Constant *RHS,
+ bool isExact = false) const {
+ if (!isExact)
+ return BinaryOperator::CreateAShr(LHS, RHS);
+ return BinaryOperator::CreateExactAShr(LHS, RHS);
+ }
+ Instruction *CreateAnd(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateAnd(LHS, RHS);
+ }
+ Instruction *CreateOr(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateOr(LHS, RHS);
+ }
+ Instruction *CreateXor(Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::CreateXor(LHS, RHS);
+ }
+
+ Instruction *CreateBinOp(Instruction::BinaryOps Opc,
+ Constant *LHS, Constant *RHS) const {
+ return BinaryOperator::Create(Opc, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Unary Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateNeg(Constant *C,
+ bool HasNUW = false, bool HasNSW = false) const {
+ BinaryOperator *BO = BinaryOperator::CreateNeg(C);
+ if (HasNUW) BO->setHasNoUnsignedWrap();
+ if (HasNSW) BO->setHasNoSignedWrap();
+ return BO;
+ }
+ Instruction *CreateNSWNeg(Constant *C) const {
+ return BinaryOperator::CreateNSWNeg(C);
+ }
+ Instruction *CreateNUWNeg(Constant *C) const {
+ return BinaryOperator::CreateNUWNeg(C);
+ }
+ Instruction *CreateFNeg(Constant *C) const {
+ return BinaryOperator::CreateFNeg(C);
+ }
+ Instruction *CreateNot(Constant *C) const {
+ return BinaryOperator::CreateNot(C);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Memory Instructions
+ //===--------------------------------------------------------------------===//
+
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getGetElementPtr(Ty, C, IdxList);
+ }
+ Constant *CreateGetElementPtr(Type *Ty, Constant *C, Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getGetElementPtr(Ty, C, Idx);
+ }
+ Instruction *CreateGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return GetElementPtrInst::Create(Ty, C, IdxList);
+ }
+
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Constant *> IdxList) const {
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, C, IdxList);
+ }
+ Constant *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ Constant *Idx) const {
+ // This form of the function only exists to avoid ambiguous overload
+ // warnings about whether to convert Idx to ArrayRef<Constant *> or
+ // ArrayRef<Value *>.
+ return ConstantExpr::getInBoundsGetElementPtr(Ty, C, Idx);
+ }
+ Instruction *CreateInBoundsGetElementPtr(Type *Ty, Constant *C,
+ ArrayRef<Value *> IdxList) const {
+ return GetElementPtrInst::CreateInBounds(Ty, C, IdxList);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Cast/Conversion Operators
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateCast(Instruction::CastOps Op, Constant *C,
+ Type *DestTy) const {
+ return CastInst::Create(Op, C, DestTy);
+ }
+ Instruction *CreatePointerCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreatePointerCast(C, DestTy);
+ }
+ Instruction *CreateIntCast(Constant *C, Type *DestTy,
+ bool isSigned) const {
+ return CastInst::CreateIntegerCast(C, DestTy, isSigned);
+ }
+ Instruction *CreateFPCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateFPCast(C, DestTy);
+ }
+
+ Instruction *CreateBitCast(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::BitCast, C, DestTy);
+ }
+ Instruction *CreateIntToPtr(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::IntToPtr, C, DestTy);
+ }
+ Instruction *CreatePtrToInt(Constant *C, Type *DestTy) const {
+ return CreateCast(Instruction::PtrToInt, C, DestTy);
+ }
+ Instruction *CreateZExtOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateZExtOrBitCast(C, DestTy);
+ }
+ Instruction *CreateSExtOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateSExtOrBitCast(C, DestTy);
+ }
+
+ Instruction *CreateTruncOrBitCast(Constant *C, Type *DestTy) const {
+ return CastInst::CreateTruncOrBitCast(C, DestTy);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Compare Instructions
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateICmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
+ return new ICmpInst(P, LHS, RHS);
+ }
+ Instruction *CreateFCmp(CmpInst::Predicate P,
+ Constant *LHS, Constant *RHS) const {
+ return new FCmpInst(P, LHS, RHS);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other Instructions
+ //===--------------------------------------------------------------------===//
+
+ Instruction *CreateSelect(Constant *C,
+ Constant *True, Constant *False) const {
+ return SelectInst::Create(C, True, False);
+ }
+
+ Instruction *CreateExtractElement(Constant *Vec, Constant *Idx) const {
+ return ExtractElementInst::Create(Vec, Idx);
+ }
+
+ Instruction *CreateInsertElement(Constant *Vec, Constant *NewElt,
+ Constant *Idx) const {
+ return InsertElementInst::Create(Vec, NewElt, Idx);
+ }
+
+ Instruction *CreateShuffleVector(Constant *V1, Constant *V2,
+ Constant *Mask) const {
+ return new ShuffleVectorInst(V1, V2, Mask);
+ }
+
+ Instruction *CreateExtractValue(Constant *Agg,
+ ArrayRef<unsigned> IdxList) const {
+ return ExtractValueInst::Create(Agg, IdxList);
+ }
+
+ Instruction *CreateInsertValue(Constant *Agg, Constant *Val,
+ ArrayRef<unsigned> IdxList) const {
+ return InsertValueInst::Create(Agg, Val, IdxList);
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/OperandTraits.h b/contrib/llvm/include/llvm/IR/OperandTraits.h
new file mode 100644
index 0000000..e97a800
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/OperandTraits.h
@@ -0,0 +1,160 @@
+//===-- llvm/OperandTraits.h - OperandTraits class definition ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the traits classes that are handy for enforcing the correct
+// layout of various User subclasses. It also provides the means for accessing
+// the operands in the most efficient manner.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPERANDTRAITS_H
+#define LLVM_IR_OPERANDTRAITS_H
+
+#include "llvm/IR/User.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// FixedNumOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// FixedNumOperandTraits - determine the allocation regime of the Use array
+/// when it is a prefix to the User object, and the number of Use objects is
+/// known at compile time.
+
+template <typename SubClass, unsigned ARITY>
+struct FixedNumOperandTraits {
+ static Use *op_begin(SubClass* U) {
+ return reinterpret_cast<Use*>(U) - ARITY;
+ }
+ static Use *op_end(SubClass* U) {
+ return reinterpret_cast<Use*>(U);
+ }
+ static unsigned operands(const User*) {
+ return ARITY;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// OptionalOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// OptionalOperandTraits - when the number of operands may change at runtime.
+/// Naturally it may only decrease, because the allocation cannot change.
+
+template <typename SubClass, unsigned ARITY = 1>
+struct OptionalOperandTraits : public FixedNumOperandTraits<SubClass, ARITY> {
+ static unsigned operands(const User *U) {
+ return U->getNumOperands();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// VariadicOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// VariadicOperandTraits - determine the allocation regime of the Use array
+/// when it is a prefix to the User object, and the number of Use objects is
+/// only known at allocation time.
+
+template <typename SubClass, unsigned MINARITY = 0>
+struct VariadicOperandTraits {
+ static Use *op_begin(SubClass* U) {
+ return reinterpret_cast<Use*>(U) - static_cast<User*>(U)->getNumOperands();
+ }
+ static Use *op_end(SubClass* U) {
+ return reinterpret_cast<Use*>(U);
+ }
+ static unsigned operands(const User *U) {
+ return U->getNumOperands();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// HungoffOperand Trait Class
+//===----------------------------------------------------------------------===//
+
+/// HungoffOperandTraits - determine the allocation regime of the Use array
+/// when it is not a prefix to the User object, but allocated at an unrelated
+/// heap address.
+/// Assumes that the User subclass that is determined by this traits class
+/// has an OperandList member of type User::op_iterator. [Note: this is now
+/// trivially satisfied, because User has that member for historic reasons.]
+///
+/// This is the traits class that is needed when the Use array must be
+/// resizable.
+
+template <unsigned MINARITY = 1>
+struct HungoffOperandTraits {
+ static Use *op_begin(User* U) {
+ return U->getOperandList();
+ }
+ static Use *op_end(User* U) {
+ return U->getOperandList() + U->getNumOperands();
+ }
+ static unsigned operands(const User *U) {
+ return U->getNumOperands();
+ }
+};
+
+/// Macro for generating in-class operand accessor declarations.
+/// It should only be called in the public section of the interface.
+///
+#define DECLARE_TRANSPARENT_OPERAND_ACCESSORS(VALUECLASS) \
+ public: \
+ inline VALUECLASS *getOperand(unsigned) const; \
+ inline void setOperand(unsigned, VALUECLASS*); \
+ inline op_iterator op_begin(); \
+ inline const_op_iterator op_begin() const; \
+ inline op_iterator op_end(); \
+ inline const_op_iterator op_end() const; \
+ protected: \
+ template <int> inline Use &Op(); \
+ template <int> inline const Use &Op() const; \
+ public: \
+ inline unsigned getNumOperands() const
+
+/// Macro for generating out-of-class operand accessor definitions
+#define DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CLASS, VALUECLASS) \
+CLASS::op_iterator CLASS::op_begin() { \
+ return OperandTraits<CLASS>::op_begin(this); \
+} \
+CLASS::const_op_iterator CLASS::op_begin() const { \
+ return OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this)); \
+} \
+CLASS::op_iterator CLASS::op_end() { \
+ return OperandTraits<CLASS>::op_end(this); \
+} \
+CLASS::const_op_iterator CLASS::op_end() const { \
+ return OperandTraits<CLASS>::op_end(const_cast<CLASS*>(this)); \
+} \
+VALUECLASS *CLASS::getOperand(unsigned i_nocapture) const { \
+ assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
+ && "getOperand() out of range!"); \
+ return cast_or_null<VALUECLASS>( \
+ OperandTraits<CLASS>::op_begin(const_cast<CLASS*>(this))[i_nocapture].get()); \
+} \
+void CLASS::setOperand(unsigned i_nocapture, VALUECLASS *Val_nocapture) { \
+ assert(i_nocapture < OperandTraits<CLASS>::operands(this) \
+ && "setOperand() out of range!"); \
+ OperandTraits<CLASS>::op_begin(this)[i_nocapture] = Val_nocapture; \
+} \
+unsigned CLASS::getNumOperands() const { \
+ return OperandTraits<CLASS>::operands(this); \
+} \
+template <int Idx_nocapture> Use &CLASS::Op() { \
+ return this->OpFrom<Idx_nocapture>(this); \
+} \
+template <int Idx_nocapture> const Use &CLASS::Op() const { \
+ return this->OpFrom<Idx_nocapture>(this); \
+}
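+
+// A sketch of how a User subclass typically wires these traits up
+// (MyInst is a hypothetical two-operand instruction):
+//
+//   class MyInst : public Instruction {
+//   public:
+//     DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+//     ...
+//   };
+//
+//   template <>
+//   struct OperandTraits<MyInst> : public FixedNumOperandTraits<MyInst, 2> {};
+//
+//   DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MyInst, Value)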
+
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Operator.h b/contrib/llvm/include/llvm/IR/Operator.h
new file mode 100644
index 0000000..372b254
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Operator.h
@@ -0,0 +1,497 @@
+//===-- llvm/Operator.h - Operator utility subclass -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various classes for working with Instructions and
+// ConstantExprs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_OPERATOR_H
+#define LLVM_IR_OPERATOR_H
+
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Type.h"
+
+namespace llvm {
+
+class GetElementPtrInst;
+class BinaryOperator;
+class ConstantExpr;
+
+/// This is a utility class that provides an abstraction for the common
+/// functionality between Instructions and ConstantExprs.
+class Operator : public User {
+private:
+ // The Operator class is intended to be used as a utility, and is never itself
+ // instantiated.
+ void *operator new(size_t, unsigned) = delete;
+ void *operator new(size_t s) = delete;
+ Operator() = delete;
+
+protected:
+ // NOTE: Cannot use = delete because it's not legal to delete
+ // an overridden method that's not deleted in the base class. Cannot leave
+ // this unimplemented because that leads to an ODR-violation.
+ ~Operator() override;
+
+public:
+ /// Return the opcode for this Instruction or ConstantExpr.
+ unsigned getOpcode() const {
+ if (const Instruction *I = dyn_cast<Instruction>(this))
+ return I->getOpcode();
+ return cast<ConstantExpr>(this)->getOpcode();
+ }
+
+ /// If V is an Instruction or ConstantExpr, return its opcode.
+ /// Otherwise return UserOp1.
+ static unsigned getOpcode(const Value *V) {
+ if (const Instruction *I = dyn_cast<Instruction>(V))
+ return I->getOpcode();
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+ return CE->getOpcode();
+ return Instruction::UserOp1;
+ }
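+
+ // E.g., a sketch of the usual query:
+ //
+ //   if (Operator::getOpcode(V) == Instruction::Add)
+ //     ...V is an add instruction or an add constant expression...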
+
+ static inline bool classof(const Instruction *) { return true; }
+ static inline bool classof(const ConstantExpr *) { return true; }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) || isa<ConstantExpr>(V);
+ }
+};
+
+/// Utility class for integer arithmetic operators which may exhibit overflow -
+/// Add, Sub, Mul, and Shl. It does not include SDiv, despite that operator
+/// having the potential for overflow.
+class OverflowingBinaryOperator : public Operator {
+public:
+ enum {
+ NoUnsignedWrap = (1 << 0),
+ NoSignedWrap = (1 << 1)
+ };
+
+private:
+ friend class BinaryOperator;
+ friend class ConstantExpr;
+ void setHasNoUnsignedWrap(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~NoUnsignedWrap) | (B * NoUnsignedWrap);
+ }
+ void setHasNoSignedWrap(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~NoSignedWrap) | (B * NoSignedWrap);
+ }
+
+public:
+ /// Test whether this operation is known to never
+ /// undergo unsigned overflow, aka the nuw property.
+ bool hasNoUnsignedWrap() const {
+ return SubclassOptionalData & NoUnsignedWrap;
+ }
+
+ /// Test whether this operation is known to never
+ /// undergo signed overflow, aka the nsw property.
+ bool hasNoSignedWrap() const {
+ return (SubclassOptionalData & NoSignedWrap) != 0;
+ }
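+
+ // A sketch of the usual query pattern:
+ //
+ //   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V))
+ //     if (OBO->hasNoSignedWrap())
+ //       ...the add/sub/mul/shl is marked nsw...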
+
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Instruction::Add ||
+ I->getOpcode() == Instruction::Sub ||
+ I->getOpcode() == Instruction::Mul ||
+ I->getOpcode() == Instruction::Shl;
+ }
+ static inline bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Instruction::Add ||
+ CE->getOpcode() == Instruction::Sub ||
+ CE->getOpcode() == Instruction::Mul ||
+ CE->getOpcode() == Instruction::Shl;
+ }
+ static inline bool classof(const Value *V) {
+ return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+ (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+ }
+};
+
+/// A udiv or sdiv instruction, which can be marked as "exact",
+/// indicating that no bits are destroyed.
+class PossiblyExactOperator : public Operator {
+public:
+ enum {
+ IsExact = (1 << 0)
+ };
+
+private:
+ friend class BinaryOperator;
+ friend class ConstantExpr;
+ void setIsExact(bool B) {
+ SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
+ }
+
+public:
+ /// Test whether this division is known to be exact, with zero remainder.
+ bool isExact() const {
+ return SubclassOptionalData & IsExact;
+ }
+
+ static bool isPossiblyExactOpcode(unsigned OpC) {
+ return OpC == Instruction::SDiv ||
+ OpC == Instruction::UDiv ||
+ OpC == Instruction::AShr ||
+ OpC == Instruction::LShr;
+ }
+ static inline bool classof(const ConstantExpr *CE) {
+ return isPossiblyExactOpcode(CE->getOpcode());
+ }
+ static inline bool classof(const Instruction *I) {
+ return isPossiblyExactOpcode(I->getOpcode());
+ }
+ static inline bool classof(const Value *V) {
+ return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+ (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+ }
+};
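+
+// A brief sketch (assumed): an "exact" operation drops no bits, e.g. an exact
+// udiv has a zero remainder and an exact lshr shifts out no 1 bits.
+//
+//   if (const PossiblyExactOperator *PEO =
+//           dyn_cast<PossiblyExactOperator>(V))
+//     if (PEO->isExact()) {
+//       // Safe to reason as if the inverse operation recovers the input,
+//       // e.g. (X udiv C) mul C == X for an exact udiv.
+//     }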
+
+/// Convenience struct for specifying and reasoning about fast-math flags.
+class FastMathFlags {
+private:
+ friend class FPMathOperator;
+ unsigned Flags;
+ FastMathFlags(unsigned F) : Flags(F) { }
+
+public:
+ enum {
+ UnsafeAlgebra = (1 << 0),
+ NoNaNs = (1 << 1),
+ NoInfs = (1 << 2),
+ NoSignedZeros = (1 << 3),
+ AllowReciprocal = (1 << 4)
+ };
+
+ FastMathFlags() : Flags(0)
+ { }
+
+ /// Whether any flag is set
+ bool any() const { return Flags != 0; }
+
+ /// Set all the flags to false
+ void clear() { Flags = 0; }
+
+ /// Flag queries
+ bool noNaNs() const { return 0 != (Flags & NoNaNs); }
+ bool noInfs() const { return 0 != (Flags & NoInfs); }
+ bool noSignedZeros() const { return 0 != (Flags & NoSignedZeros); }
+ bool allowReciprocal() const { return 0 != (Flags & AllowReciprocal); }
+ bool unsafeAlgebra() const { return 0 != (Flags & UnsafeAlgebra); }
+
+ /// Flag setters
+ void setNoNaNs() { Flags |= NoNaNs; }
+ void setNoInfs() { Flags |= NoInfs; }
+ void setNoSignedZeros() { Flags |= NoSignedZeros; }
+ void setAllowReciprocal() { Flags |= AllowReciprocal; }
+ void setUnsafeAlgebra() {
+ Flags |= UnsafeAlgebra;
+ setNoNaNs();
+ setNoInfs();
+ setNoSignedZeros();
+ setAllowReciprocal();
+ }
+
+ void operator&=(const FastMathFlags &OtherFlags) {
+ Flags &= OtherFlags.Flags;
+ }
+};
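+
+// A minimal sketch (assumed) of composing flags; operator&= keeps only the
+// flags common to both sets, which is useful when merging two operations.
+//
+//   FastMathFlags FMF;
+//   FMF.setNoNaNs();
+//   FMF.setAllowReciprocal();
+//   FastMathFlags Other;
+//   Other.setNoNaNs();
+//   FMF &= Other; // FMF now has only NoNaNs set.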
+
+
+/// Utility class for floating point operations which can have
+/// information about relaxed accuracy requirements attached to them.
+class FPMathOperator : public Operator {
+private:
+ friend class Instruction;
+
+ void setHasUnsafeAlgebra(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~FastMathFlags::UnsafeAlgebra) |
+ (B * FastMathFlags::UnsafeAlgebra);
+
+ // Unsafe algebra implies all the others
+ if (B) {
+ setHasNoNaNs(true);
+ setHasNoInfs(true);
+ setHasNoSignedZeros(true);
+ setHasAllowReciprocal(true);
+ }
+ }
+ void setHasNoNaNs(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~FastMathFlags::NoNaNs) |
+ (B * FastMathFlags::NoNaNs);
+ }
+ void setHasNoInfs(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~FastMathFlags::NoInfs) |
+ (B * FastMathFlags::NoInfs);
+ }
+ void setHasNoSignedZeros(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~FastMathFlags::NoSignedZeros) |
+ (B * FastMathFlags::NoSignedZeros);
+ }
+ void setHasAllowReciprocal(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~FastMathFlags::AllowReciprocal) |
+ (B * FastMathFlags::AllowReciprocal);
+ }
+
+ /// Convenience function for setting multiple fast-math flags.
+ /// FMF is a mask of the bits to set.
+ void setFastMathFlags(FastMathFlags FMF) {
+ SubclassOptionalData |= FMF.Flags;
+ }
+
+ /// Convenience function for copying all fast-math flags.
+ /// All values in FMF are transferred to this operator.
+ void copyFastMathFlags(FastMathFlags FMF) {
+ SubclassOptionalData = FMF.Flags;
+ }
+
+public:
+ /// Test whether this operation is permitted to be
+ /// algebraically transformed, aka the 'A' fast-math property.
+ bool hasUnsafeAlgebra() const {
+ return (SubclassOptionalData & FastMathFlags::UnsafeAlgebra) != 0;
+ }
+
+ /// Test whether this operation's arguments and results are to be
+ /// treated as non-NaN, aka the 'N' fast-math property.
+ bool hasNoNaNs() const {
+ return (SubclassOptionalData & FastMathFlags::NoNaNs) != 0;
+ }
+
+ /// Test whether this operation's arguments and results are to be
+ /// treated as non-Inf, aka the 'I' fast-math property.
+ bool hasNoInfs() const {
+ return (SubclassOptionalData & FastMathFlags::NoInfs) != 0;
+ }
+
+ /// Test whether this operation can treat the sign of zero
+ /// as insignificant, aka the 'S' fast-math property.
+ bool hasNoSignedZeros() const {
+ return (SubclassOptionalData & FastMathFlags::NoSignedZeros) != 0;
+ }
+
+ /// Test whether this operation is permitted to use
+ /// reciprocal instead of division, aka the 'R' fast-math property.
+ bool hasAllowReciprocal() const {
+ return (SubclassOptionalData & FastMathFlags::AllowReciprocal) != 0;
+ }
+
+ /// Convenience function for getting all the fast-math flags
+ FastMathFlags getFastMathFlags() const {
+ return FastMathFlags(SubclassOptionalData);
+ }
+
+ /// \brief Get the maximum error permitted by this operation in ULPs. An
+ /// accuracy of 0.0 means that the operation should be performed with the
+ /// default precision.
+ float getFPAccuracy() const;
+
+ static inline bool classof(const Instruction *I) {
+ return I->getType()->isFPOrFPVectorTy() ||
+ I->getOpcode() == Instruction::FCmp;
+ }
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) && classof(cast<Instruction>(V));
+ }
+};
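+
+// A short sketch (assumed): checking a fast-math property before a transform.
+// `I` is assumed to be some Instruction* of floating-point type.
+//
+//   if (const FPMathOperator *FPO = dyn_cast<FPMathOperator>(I))
+//     if (FPO->hasNoSignedZeros()) {
+//       // For example, folding `fadd X, 0.0` to `X` is now legal, which
+//       // would otherwise be wrong for X == -0.0.
+//     }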
+
+
+/// A helper template for defining operators for individual opcodes.
+template<typename SuperClass, unsigned Opc>
+class ConcreteOperator : public SuperClass {
+public:
+ static inline bool classof(const Instruction *I) {
+ return I->getOpcode() == Opc;
+ }
+ static inline bool classof(const ConstantExpr *CE) {
+ return CE->getOpcode() == Opc;
+ }
+ static inline bool classof(const Value *V) {
+ return (isa<Instruction>(V) && classof(cast<Instruction>(V))) ||
+ (isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
+ }
+};
+
+class AddOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
+};
+class SubOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
+};
+class MulOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
+};
+class ShlOperator
+ : public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
+};
+
+
+class SDivOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
+};
+class UDivOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
+};
+class AShrOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
+};
+class LShrOperator
+ : public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
+};
+
+
+class ZExtOperator : public ConcreteOperator<Operator, Instruction::ZExt> {};
+
+
+class GEPOperator
+ : public ConcreteOperator<Operator, Instruction::GetElementPtr> {
+ enum {
+ IsInBounds = (1 << 0)
+ };
+
+ friend class GetElementPtrInst;
+ friend class ConstantExpr;
+ void setIsInBounds(bool B) {
+ SubclassOptionalData =
+ (SubclassOptionalData & ~IsInBounds) | (B * IsInBounds);
+ }
+
+public:
+ /// Test whether this is an inbounds GEP, as defined by LangRef.html.
+ bool isInBounds() const {
+ return SubclassOptionalData & IsInBounds;
+ }
+
+ inline op_iterator idx_begin() { return op_begin()+1; }
+ inline const_op_iterator idx_begin() const { return op_begin()+1; }
+ inline op_iterator idx_end() { return op_end(); }
+ inline const_op_iterator idx_end() const { return op_end(); }
+
+ Value *getPointerOperand() {
+ return getOperand(0);
+ }
+ const Value *getPointerOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getPointerOperandIndex() {
+ return 0U; // Index of the pointer operand, for use when modifying it.
+ }
+
+ /// Method to return the pointer operand as a PointerType.
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
+ }
+
+ Type *getSourceElementType() const;
+
+ /// Method to return the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return getPointerOperandType()->getPointerAddressSpace();
+ }
+
+ unsigned getNumIndices() const { // Note: always non-negative
+ return getNumOperands() - 1;
+ }
+
+ bool hasIndices() const {
+ return getNumOperands() > 1;
+ }
+
+ /// Return true if all of the indices of this GEP are zeros.
+ /// If so, the result pointer and the first operand have the same
+ /// value, just potentially different types.
+ bool hasAllZeroIndices() const {
+ for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
+ if (ConstantInt *C = dyn_cast<ConstantInt>(I))
+ if (C->isZero())
+ continue;
+ return false;
+ }
+ return true;
+ }
+
+ /// Return true if all of the indices of this GEP are constant integers.
+ /// If so, the result pointer and the first operand have
+ /// a constant offset between them.
+ bool hasAllConstantIndices() const {
+ for (const_op_iterator I = idx_begin(), E = idx_end(); I != E; ++I) {
+ if (!isa<ConstantInt>(I))
+ return false;
+ }
+ return true;
+ }
+
+ /// \brief Accumulate the constant address offset of this GEP if possible.
+ ///
+ /// This routine accepts an APInt into which it will accumulate the constant
+ /// offset of this GEP if the GEP is in fact constant. If the GEP is not
+ /// all-constant, it returns false and the value of the offset APInt is
+ /// undefined (it is *not* preserved!). The APInt passed into this routine
+/// must be exactly as wide as the IntPtr type for the address space of the
+ /// base GEP pointer.
+ bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
+};
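+
+// A usage sketch (assumed) for accumulateConstantOffset; per the contract
+// above, the APInt is sized to the pointer width. `M` and `GEP` are assumed
+// to be a Module and a GEPOperator in scope.
+//
+//   const DataLayout &DL = M.getDataLayout();
+//   unsigned Width = DL.getPointerSizeInBits(GEP->getPointerAddressSpace());
+//   APInt Offset(Width, 0);
+//   if (GEP->accumulateConstantOffset(DL, Offset)) {
+//     // Offset now holds the constant byte offset of the GEP.
+//   }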
+
+class PtrToIntOperator
+ : public ConcreteOperator<Operator, Instruction::PtrToInt> {
+ friend class PtrToInt;
+ friend class ConstantExpr;
+
+public:
+ Value *getPointerOperand() {
+ return getOperand(0);
+ }
+ const Value *getPointerOperand() const {
+ return getOperand(0);
+ }
+ static unsigned getPointerOperandIndex() {
+ return 0U; // Index of the pointer operand, for use when modifying it.
+ }
+
+ /// Method to return the pointer operand as a PointerType.
+ Type *getPointerOperandType() const {
+ return getPointerOperand()->getType();
+ }
+
+ /// Method to return the address space of the pointer operand.
+ unsigned getPointerAddressSpace() const {
+ return cast<PointerType>(getPointerOperandType())->getAddressSpace();
+ }
+};
+
+class BitCastOperator
+ : public ConcreteOperator<Operator, Instruction::BitCast> {
+ friend class BitCastInst;
+ friend class ConstantExpr;
+
+public:
+ Type *getSrcTy() const {
+ return getOperand(0)->getType();
+ }
+
+ Type *getDestTy() const {
+ return getType();
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PassManager.h b/contrib/llvm/include/llvm/IR/PassManager.h
new file mode 100644
index 0000000..2ceb53d
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PassManager.h
@@ -0,0 +1,896 @@
+//===- PassManager.h - Pass management infrastructure -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header defines various interfaces for pass management in LLVM. There
+/// is no "pass" interface in LLVM per se. Instead, an instance of any class
+/// which supports a method to 'run' it over a unit of IR can be used as
+/// a pass. A pass manager is generally a tool to collect a sequence of passes
+/// which run over a particular IR construct, and run each of them in sequence
+/// over each such construct in the containing IR construct. As there is no
+/// containing IR construct for a Module, a manager for passes over modules
+/// forms the base case which runs its managed passes in sequence over the
+/// single module provided.
+///
+/// The core IR library provides managers for running passes over
+/// modules and functions.
+///
+/// * FunctionPassManager can run over a Module, running each pass over
+/// a Function.
+/// * ModulePassManager must be run directly, running each pass over the Module.
+///
+/// Note that the implementations of the pass managers use concept-based
+/// polymorphism as outlined in the "Value Semantics and Concept-based
+/// Polymorphism" talk (or its abbreviated sibling "Inheritance Is The Base
+/// Class of Evil") by Sean Parent:
+/// * http://github.com/sean-parent/sean-parent.github.com/wiki/Papers-and-Presentations
+/// * http://www.youtube.com/watch?v=_BpMYeUFXv8
+/// * http://channel9.msdn.com/Events/GoingNative/2013/Inheritance-Is-The-Base-Class-of-Evil
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSMANAGER_H
+#define LLVM_IR_PASSMANAGER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManagerInternal.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/type_traits.h"
+#include <list>
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+class Module;
+class Function;
+
+/// \brief An abstract set of preserved analyses following a transformation pass
+/// run.
+///
+/// When a transformation pass is run, it can return a set of analyses whose
+/// results were preserved by that transformation. The default set is "none",
+/// and preserving analyses must be done explicitly.
+///
+/// There is also an explicit "all" state which can be used, for example, when
+/// the IR is not mutated at all.
+class PreservedAnalyses {
+public:
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PreservedAnalyses() {}
+ PreservedAnalyses(const PreservedAnalyses &Arg)
+ : PreservedPassIDs(Arg.PreservedPassIDs) {}
+ PreservedAnalyses(PreservedAnalyses &&Arg)
+ : PreservedPassIDs(std::move(Arg.PreservedPassIDs)) {}
+ friend void swap(PreservedAnalyses &LHS, PreservedAnalyses &RHS) {
+ using std::swap;
+ swap(LHS.PreservedPassIDs, RHS.PreservedPassIDs);
+ }
+ PreservedAnalyses &operator=(PreservedAnalyses RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Convenience factory function for the empty preserved set.
+ static PreservedAnalyses none() { return PreservedAnalyses(); }
+
+ /// \brief Construct a special preserved set that preserves all passes.
+ static PreservedAnalyses all() {
+ PreservedAnalyses PA;
+ PA.PreservedPassIDs.insert((void *)AllPassesID);
+ return PA;
+ }
+
+ /// \brief Mark a particular pass as preserved, adding it to the set.
+ template <typename PassT> void preserve() { preserve(PassT::ID()); }
+
+ /// \brief Mark an abstract PassID as preserved, adding it to the set.
+ void preserve(void *PassID) {
+ if (!areAllPreserved())
+ PreservedPassIDs.insert(PassID);
+ }
+
+ /// \brief Intersect this set with another in place.
+ ///
+ /// This is a mutating operation on this preserved set, removing all
+ /// preserved passes which are not also preserved in the argument.
+ void intersect(const PreservedAnalyses &Arg) {
+ if (Arg.areAllPreserved())
+ return;
+ if (areAllPreserved()) {
+ PreservedPassIDs = Arg.PreservedPassIDs;
+ return;
+ }
+ for (void *P : PreservedPassIDs)
+ if (!Arg.PreservedPassIDs.count(P))
+ PreservedPassIDs.erase(P);
+ }
+
+ /// \brief Intersect this set with a temporary other set in place.
+ ///
+ /// This is a mutating operation on this preserved set, removing all
+ /// preserved passes which are not also preserved in the argument.
+ void intersect(PreservedAnalyses &&Arg) {
+ if (Arg.areAllPreserved())
+ return;
+ if (areAllPreserved()) {
+ PreservedPassIDs = std::move(Arg.PreservedPassIDs);
+ return;
+ }
+ for (void *P : PreservedPassIDs)
+ if (!Arg.PreservedPassIDs.count(P))
+ PreservedPassIDs.erase(P);
+ }
+
+ /// \brief Query whether a pass is marked as preserved by this set.
+ template <typename PassT> bool preserved() const {
+ return preserved(PassT::ID());
+ }
+
+ /// \brief Query whether an abstract pass ID is marked as preserved by this
+ /// set.
+ bool preserved(void *PassID) const {
+ return PreservedPassIDs.count((void *)AllPassesID) ||
+ PreservedPassIDs.count(PassID);
+ }
+
+ /// \brief Test whether all passes are preserved.
+ ///
+ /// This is used primarily to optimize for the case of no changes, which is
+ /// common in many scenarios.
+ bool areAllPreserved() const {
+ return PreservedPassIDs.count((void *)AllPassesID);
+ }
+
+private:
+ // Note that this must not be -1 or -2 as those are already used by the
+ // SmallPtrSet.
+ static const uintptr_t AllPassesID = (uintptr_t)(-3);
+
+ SmallPtrSet<void *, 2> PreservedPassIDs;
+};
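+
+// A minimal sketch (assumed) of how a transformation pass reports what it
+// preserved; `SomeAnalysis` and `trySimplify` are hypothetical.
+//
+//   PreservedAnalyses run(Function &F) {
+//     bool Changed = trySimplify(F); // hypothetical helper
+//     if (!Changed)
+//       return PreservedAnalyses::all();
+//     PreservedAnalyses PA = PreservedAnalyses::none();
+//     PA.preserve<SomeAnalysis>(); // we took care to keep this one valid
+//     return PA;
+//   }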
+
+// Forward declare the analysis manager template.
+template <typename IRUnitT> class AnalysisManager;
+
+/// \brief Manages a sequence of passes over units of IR.
+///
+/// A pass manager contains a sequence of passes to run over units of IR. It is
+/// itself a valid pass over that unit of IR, and when over some given IR will
+/// run each pass in sequence. This is the primary and most basic building
+/// block of a pass pipeline.
+///
+/// If it is run with an \c AnalysisManager<IRUnitT> argument, it will propagate
+/// that analysis manager to each pass it runs, as well as calling the analysis
+/// manager's invalidation routine with the PreservedAnalyses of each pass it
+/// runs.
+template <typename IRUnitT> class PassManager {
+public:
+ /// \brief Construct a pass manager.
+ ///
+ /// It can be passed a flag to get debug logging as the passes are run.
+ PassManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PassManager(PassManager &&Arg)
+ : Passes(std::move(Arg.Passes)),
+ DebugLogging(std::move(Arg.DebugLogging)) {}
+ PassManager &operator=(PassManager &&RHS) {
+ Passes = std::move(RHS.Passes);
+ DebugLogging = std::move(RHS.DebugLogging);
+ return *this;
+ }
+
+ /// \brief Run all of the passes in this manager over the IR.
+ PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM = nullptr) {
+ PreservedAnalyses PA = PreservedAnalyses::all();
+
+ if (DebugLogging)
+ dbgs() << "Starting pass manager run.\n";
+
+ for (unsigned Idx = 0, Size = Passes.size(); Idx != Size; ++Idx) {
+ if (DebugLogging)
+ dbgs() << "Running pass: " << Passes[Idx]->name() << " on "
+ << IR.getName() << "\n";
+
+ PreservedAnalyses PassPA = Passes[Idx]->run(IR, AM);
+
+ // If we have an active analysis manager at this level we want to ensure
+ // we update it as each pass runs and potentially invalidates analyses.
+ // We also update the preserved set of analyses based on what analyses we
+ // have already handled the invalidation for here and don't need to
+ // invalidate when finished.
+ if (AM)
+ PassPA = AM->invalidate(IR, std::move(PassPA));
+
+ // Finally, we intersect the final preserved analyses to compute the
+ // aggregate preserved set for this pass manager.
+ PA.intersect(std::move(PassPA));
+
+ // FIXME: Historically, the pass managers all called the LLVM context's
+ // yield function here. We don't have a generic way to acquire the
+ // context and it isn't yet clear what the right pattern is for yielding
+ // in the new pass manager so it is currently omitted.
+ //IR.getContext().yield();
+ }
+
+ if (DebugLogging)
+ dbgs() << "Finished pass manager run.\n";
+
+ return PA;
+ }
+
+ template <typename PassT> void addPass(PassT Pass) {
+ typedef detail::PassModel<IRUnitT, PassT> PassModelT;
+ Passes.emplace_back(new PassModelT(std::move(Pass)));
+ }
+
+ static StringRef name() { return "PassManager"; }
+
+private:
+ typedef detail::PassConcept<IRUnitT> PassConceptT;
+
+ PassManager(const PassManager &) = delete;
+ PassManager &operator=(const PassManager &) = delete;
+
+ std::vector<std::unique_ptr<PassConceptT>> Passes;
+
+ /// \brief Flag indicating whether we should do debug logging.
+ bool DebugLogging;
+};
+
+/// \brief Convenience typedef for a pass manager over modules.
+typedef PassManager<Module> ModulePassManager;
+
+/// \brief Convenience typedef for a pass manager over functions.
+typedef PassManager<Function> FunctionPassManager;
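+
+// A small end-to-end sketch (assumed names): `SomeTransform` is a hypothetical
+// pass exposing `PreservedAnalyses run(Function &, AnalysisManager<Function> *)`
+// and a static `name()` method; `F` is some llvm::Function.
+//
+//   AnalysisManager<Function> FAM;
+//   FunctionPassManager FPM(/*DebugLogging=*/true);
+//   FPM.addPass(SomeTransform());
+//   PreservedAnalyses PA = FPM.run(F, &FAM);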
+
+namespace detail {
+
+/// \brief A CRTP base used to implement analysis managers.
+///
+/// This class template serves as the boilerplate of an analysis manager. Any
+/// analysis manager can be implemented on top of this base class. Any
+/// implementation will be required to provide specific hooks:
+///
+/// - getResultImpl
+/// - getCachedResultImpl
+/// - invalidateImpl
+///
+/// The details of the call pattern are within.
+///
+/// Note that there is also a generic analysis manager template which implements
+/// the above required functions along with common data structures used for
+/// managing analyses. This base class is factored so that if you need to
+/// customize the handling of a specific IR unit, you can do so without
+/// replicating *all* of the boilerplate.
+template <typename DerivedT, typename IRUnitT> class AnalysisManagerBase {
+ DerivedT *derived_this() { return static_cast<DerivedT *>(this); }
+ const DerivedT *derived_this() const {
+ return static_cast<const DerivedT *>(this);
+ }
+
+ AnalysisManagerBase(const AnalysisManagerBase &) = delete;
+ AnalysisManagerBase &
+ operator=(const AnalysisManagerBase &) = delete;
+
+protected:
+ typedef detail::AnalysisResultConcept<IRUnitT> ResultConceptT;
+ typedef detail::AnalysisPassConcept<IRUnitT> PassConceptT;
+
+ // FIXME: Provide template aliases for the models when we're using C++11 in
+ // a mode supporting them.
+
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisManagerBase() {}
+ AnalysisManagerBase(AnalysisManagerBase &&Arg)
+ : AnalysisPasses(std::move(Arg.AnalysisPasses)) {}
+ AnalysisManagerBase &operator=(AnalysisManagerBase &&RHS) {
+ AnalysisPasses = std::move(RHS.AnalysisPasses);
+ return *this;
+ }
+
+public:
+ /// \brief Get the result of an analysis pass for this module.
+ ///
+ /// If there is not a valid cached result in the manager already, this will
+ /// re-run the analysis to produce a valid result.
+ template <typename PassT> typename PassT::Result &getResult(IRUnitT &IR) {
+ assert(AnalysisPasses.count(PassT::ID()) &&
+ "This analysis pass was not registered prior to being queried");
+
+ ResultConceptT &ResultConcept =
+ derived_this()->getResultImpl(PassT::ID(), IR);
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+ return static_cast<ResultModelT &>(ResultConcept).Result;
+ }
+
+ /// \brief Get the cached result of an analysis pass for this module.
+ ///
+ /// This method never runs the analysis.
+ ///
+ /// \returns null if there is no cached result.
+ template <typename PassT>
+ typename PassT::Result *getCachedResult(IRUnitT &IR) const {
+ assert(AnalysisPasses.count(PassT::ID()) &&
+ "This analysis pass was not registered prior to being queried");
+
+ ResultConceptT *ResultConcept =
+ derived_this()->getCachedResultImpl(PassT::ID(), IR);
+ if (!ResultConcept)
+ return nullptr;
+
+ typedef detail::AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+ return &static_cast<ResultModelT *>(ResultConcept)->Result;
+ }
+
+ /// \brief Register an analysis pass with the manager.
+ ///
+ /// This provides an initialized and set-up analysis pass to the analysis
+ /// manager. Whoever is setting up analysis passes must use this to populate
+ /// the manager with all of the analysis passes available.
+ template <typename PassT> void registerPass(PassT Pass) {
+ assert(!AnalysisPasses.count(PassT::ID()) &&
+ "Registered the same analysis pass twice!");
+ typedef detail::AnalysisPassModel<IRUnitT, PassT> PassModelT;
+ AnalysisPasses[PassT::ID()].reset(new PassModelT(std::move(Pass)));
+ }
+
+ /// \brief Invalidate a specific analysis pass for an IR module.
+ ///
+ /// Note that the analysis result can disregard invalidation.
+ template <typename PassT> void invalidate(IRUnitT &IR) {
+ assert(AnalysisPasses.count(PassT::ID()) &&
+ "This analysis pass was not registered prior to being invalidated");
+ derived_this()->invalidateImpl(PassT::ID(), IR);
+ }
+
+ /// \brief Invalidate analyses cached for an IR unit.
+ ///
+ /// Walk through all of the analyses pertaining to this unit of IR and
+ /// invalidate them unless they are preserved by the PreservedAnalyses set.
+ /// We accept the PreservedAnalyses set by value and update it with each
+ /// analysis pass which has been successfully invalidated and thus can be
+ /// preserved going forward. The updated set is returned.
+ PreservedAnalyses invalidate(IRUnitT &IR, PreservedAnalyses PA) {
+ return derived_this()->invalidateImpl(IR, std::move(PA));
+ }
+
+protected:
+ /// \brief Lookup a registered analysis pass.
+ PassConceptT &lookupPass(void *PassID) {
+ typename AnalysisPassMapT::iterator PI = AnalysisPasses.find(PassID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
+ }
+
+ /// \brief Lookup a registered analysis pass.
+ const PassConceptT &lookupPass(void *PassID) const {
+ typename AnalysisPassMapT::const_iterator PI = AnalysisPasses.find(PassID);
+ assert(PI != AnalysisPasses.end() &&
+ "Analysis passes must be registered prior to being queried!");
+ return *PI->second;
+ }
+
+private:
+ /// \brief Map type from module analysis pass ID to pass concept pointer.
+ typedef DenseMap<void *, std::unique_ptr<PassConceptT>> AnalysisPassMapT;
+
+ /// \brief Collection of module analysis passes, indexed by ID.
+ AnalysisPassMapT AnalysisPasses;
+};
+
+} // End namespace detail
+
+/// \brief A generic analysis pass manager with lazy running and caching of
+/// results.
+///
+/// This analysis manager can be used for any IR unit where the address of the
+/// IR unit suffices as its identity. It keys its cache on the address of each
+/// cached unit of IR.
+template <typename IRUnitT>
+class AnalysisManager
+ : public detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> {
+ friend class detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT>;
+ typedef detail::AnalysisManagerBase<AnalysisManager<IRUnitT>, IRUnitT> BaseT;
+ typedef typename BaseT::ResultConceptT ResultConceptT;
+ typedef typename BaseT::PassConceptT PassConceptT;
+
+public:
+ // Most public APIs are inherited from the CRTP base class.
+
+ /// \brief Construct an empty analysis manager.
+ ///
+ /// A flag can be passed to indicate that the manager should perform debug
+ /// logging.
+ AnalysisManager(bool DebugLogging = false) : DebugLogging(DebugLogging) {}
+
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisManager(AnalysisManager &&Arg)
+ : BaseT(std::move(static_cast<BaseT &>(Arg))),
+ AnalysisResults(std::move(Arg.AnalysisResults)),
+ DebugLogging(std::move(Arg.DebugLogging)) {}
+ AnalysisManager &operator=(AnalysisManager &&RHS) {
+ BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
+ AnalysisResults = std::move(RHS.AnalysisResults);
+ DebugLogging = std::move(RHS.DebugLogging);
+ return *this;
+ }
+
+ /// \brief Returns true if the analysis manager has an empty results cache.
+ bool empty() const {
+ assert(AnalysisResults.empty() == AnalysisResultLists.empty() &&
+ "The storage and index of analysis results disagree on how many "
+ "there are!");
+ return AnalysisResults.empty();
+ }
+
+ /// \brief Clear the analysis result cache.
+ ///
+ /// This routine allows cleaning up when the set of IR units itself has
+ /// potentially changed, and thus we can't even look up a result and
+ /// invalidate it directly. Notably, this does *not* call invalidate functions
+ /// as there is nothing to be done for them.
+ void clear() {
+ AnalysisResults.clear();
+ AnalysisResultLists.clear();
+ }
+
+private:
+ AnalysisManager(const AnalysisManager &) = delete;
+ AnalysisManager &operator=(const AnalysisManager &) = delete;
+
+ /// \brief Get an analysis result, running the pass if necessary.
+ ResultConceptT &getResultImpl(void *PassID, IRUnitT &IR) {
+ typename AnalysisResultMapT::iterator RI;
+ bool Inserted;
+ std::tie(RI, Inserted) = AnalysisResults.insert(std::make_pair(
+ std::make_pair(PassID, &IR), typename AnalysisResultListT::iterator()));
+
+ // If we don't have a cached result for this function, look up the pass and
+ // run it to produce a result, which we then add to the cache.
+ if (Inserted) {
+ auto &P = this->lookupPass(PassID);
+ if (DebugLogging)
+ dbgs() << "Running analysis: " << P.name() << "\n";
+ AnalysisResultListT &ResultList = AnalysisResultLists[&IR];
+ ResultList.emplace_back(PassID, P.run(IR, this));
+
+ // P.run may have inserted elements into AnalysisResults and invalidated
+ // RI.
+ RI = AnalysisResults.find(std::make_pair(PassID, &IR));
+ assert(RI != AnalysisResults.end() && "we just inserted it!");
+
+ RI->second = std::prev(ResultList.end());
+ }
+
+ return *RI->second->second;
+ }
+
+ /// \brief Get a cached analysis result or return null.
+ ResultConceptT *getCachedResultImpl(void *PassID, IRUnitT &IR) const {
+ typename AnalysisResultMapT::const_iterator RI =
+ AnalysisResults.find(std::make_pair(PassID, &IR));
+ return RI == AnalysisResults.end() ? nullptr : &*RI->second->second;
+ }
+
+ /// \brief Invalidate a function pass result.
+ void invalidateImpl(void *PassID, IRUnitT &IR) {
+ typename AnalysisResultMapT::iterator RI =
+ AnalysisResults.find(std::make_pair(PassID, &IR));
+ if (RI == AnalysisResults.end())
+ return;
+
+ if (DebugLogging)
+ dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
+ << "\n";
+ AnalysisResultLists[&IR].erase(RI->second);
+ AnalysisResults.erase(RI);
+ }
+
+ /// \brief Invalidate the results for a function.
+ PreservedAnalyses invalidateImpl(IRUnitT &IR, PreservedAnalyses PA) {
+ // Short circuit for a common case of all analyses being preserved.
+ if (PA.areAllPreserved())
+ return PA;
+
+ if (DebugLogging)
+ dbgs() << "Invalidating all non-preserved analyses for: "
+ << IR.getName() << "\n";
+
+ // Clear all the invalidated results associated specifically with this
+ // function.
+ SmallVector<void *, 8> InvalidatedPassIDs;
+ AnalysisResultListT &ResultsList = AnalysisResultLists[&IR];
+ for (typename AnalysisResultListT::iterator I = ResultsList.begin(),
+ E = ResultsList.end();
+ I != E;) {
+ void *PassID = I->first;
+
+ // Pass the invalidation down to the pass itself to see if it thinks it is
+ // necessary. The analysis pass can return false if no action on the part
+ // of the analysis manager is required for this invalidation event.
+ if (I->second->invalidate(IR, PA)) {
+ if (DebugLogging)
+ dbgs() << "Invalidating analysis: " << this->lookupPass(PassID).name()
+ << "\n";
+
+ InvalidatedPassIDs.push_back(I->first);
+ I = ResultsList.erase(I);
+ } else {
+ ++I;
+ }
+
+ // After handling each pass, we mark it as preserved. Once we've
+ // invalidated any stale results, the rest of the system is allowed to
+ // start preserving this analysis again.
+ PA.preserve(PassID);
+ }
+ while (!InvalidatedPassIDs.empty())
+ AnalysisResults.erase(
+ std::make_pair(InvalidatedPassIDs.pop_back_val(), &IR));
+ if (ResultsList.empty())
+ AnalysisResultLists.erase(&IR);
+
+ return PA;
+ }
+
+ /// \brief List of function analysis pass IDs and associated concept pointers.
+ ///
+ /// Requires iterators to be valid across appending new entries and arbitrary
+ /// erases. Provides both the pass ID and concept pointer such that it is
+ /// half of a bijection and provides storage for the actual result concept.
+ typedef std::list<std::pair<
+ void *, std::unique_ptr<detail::AnalysisResultConcept<IRUnitT>>>>
+ AnalysisResultListT;
+
+ /// \brief Map type from function pointer to our custom list type.
+ typedef DenseMap<IRUnitT *, AnalysisResultListT> AnalysisResultListMapT;
+
+ /// \brief Map from function to a list of function analysis results.
+ ///
+ /// Provides linear time removal of all analysis results for a function and
+ /// the ultimate storage for a particular cached analysis result.
+ AnalysisResultListMapT AnalysisResultLists;
+
+ /// \brief Map type from a pair of analysis ID and function pointer to an
+ /// iterator into a particular result list.
+ typedef DenseMap<std::pair<void *, IRUnitT *>,
+ typename AnalysisResultListT::iterator> AnalysisResultMapT;
+
+ /// \brief Map from an analysis ID and function to a particular cached
+ /// analysis result.
+ AnalysisResultMapT AnalysisResults;
+
+ /// \brief A flag indicating whether debug logging is enabled.
+ bool DebugLogging;
+};
+
+/// \brief Convenience typedef for the Module analysis manager.
+typedef AnalysisManager<Module> ModuleAnalysisManager;
+
+/// \brief Convenience typedef for the Function analysis manager.
+typedef AnalysisManager<Function> FunctionAnalysisManager;
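+
+// A short sketch (assumed): analyses must be registered before being queried.
+// `SomeAnalysis` is a hypothetical analysis with a nested `Result` type and
+// static `ID()` and `name()` methods; `F` is some llvm::Function.
+//
+//   FunctionAnalysisManager FAM;
+//   FAM.registerPass(SomeAnalysis());
+//   SomeAnalysis::Result &R = FAM.getResult<SomeAnalysis>(F);  // runs if needed
+//   auto *Cached = FAM.getCachedResult<SomeAnalysis>(F);       // never runs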
+
+/// \brief A module analysis which acts as a proxy for a function analysis
+/// manager.
+///
+/// This primarily proxies invalidation information from the module analysis
+/// manager and module pass manager to a function analysis manager. You should
+/// never use a function analysis manager from within (transitively) a module
+/// pass manager unless your parent module pass has received a proxy result
+/// object for it.
+class FunctionAnalysisManagerModuleProxy {
+public:
+ class Result;
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "FunctionAnalysisManagerModuleProxy"; }
+
+ explicit FunctionAnalysisManagerModuleProxy(FunctionAnalysisManager &FAM)
+ : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ FunctionAnalysisManagerModuleProxy(
+ const FunctionAnalysisManagerModuleProxy &Arg)
+ : FAM(Arg.FAM) {}
+ FunctionAnalysisManagerModuleProxy(FunctionAnalysisManagerModuleProxy &&Arg)
+ : FAM(std::move(Arg.FAM)) {}
+ FunctionAnalysisManagerModuleProxy &
+ operator=(FunctionAnalysisManagerModuleProxy RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ ///
+ /// This doesn't do any interesting work; it is primarily used to insert our
+ /// proxy result object into the module analysis cache so that we can proxy
+ /// invalidation to the function analysis manager.
+ ///
+ /// In debug builds, it will also assert that the analysis manager is empty
+ /// as no queries should arrive at the function analysis manager prior to
+ /// this analysis being requested.
+ Result run(Module &M);
+
+private:
+ static char PassID;
+
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief The result proxy object for the
+/// \c FunctionAnalysisManagerModuleProxy.
+///
+/// See its documentation for more information.
+class FunctionAnalysisManagerModuleProxy::Result {
+public:
+ explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ Result(const Result &Arg) : FAM(Arg.FAM) {}
+ Result(Result &&Arg) : FAM(std::move(Arg.FAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(FAM, RHS.FAM);
+ return *this;
+ }
+ ~Result();
+
+ /// \brief Accessor for the \c FunctionAnalysisManager.
+ FunctionAnalysisManager &getManager() { return *FAM; }
+
+ /// \brief Handler for invalidation of the module.
+ ///
+ /// If this analysis itself is preserved, then we assume that the set of \c
+ /// Function objects in the \c Module hasn't changed and thus we don't need
+ /// to invalidate *all* cached data associated with a \c Function* in the \c
+ /// FunctionAnalysisManager.
+ ///
+ /// Regardless of whether this analysis is marked as preserved, all of the
+ /// analyses in the \c FunctionAnalysisManager are potentially invalidated
+ /// based on the set of preserved analyses.
+ bool invalidate(Module &M, const PreservedAnalyses &PA);
+
+private:
+ FunctionAnalysisManager *FAM;
+};
+
+/// \brief A function analysis which acts as a proxy for a module analysis
+/// manager.
+///
+/// This primarily provides an accessor to a parent module analysis manager to
+/// function passes. Only the const interface of the module analysis manager is
+/// provided to indicate that once inside of a function analysis pass you
+/// cannot request a module analysis to actually run. Instead, the user must
+/// rely on the \c getCachedResult API.
+///
+/// This proxy *doesn't* manage the invalidation in any way. That is handled by
+/// the recursive return path of each layer of the pass manager and the
+/// returned PreservedAnalysis set.
+class ModuleAnalysisManagerFunctionProxy {
+public:
+ /// \brief Result proxy object for \c ModuleAnalysisManagerFunctionProxy.
+ class Result {
+ public:
+ explicit Result(const ModuleAnalysisManager &MAM) : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because
+ // MSVC refuses to generate them.
+ Result(const Result &Arg) : MAM(Arg.MAM) {}
+ Result(Result &&Arg) : MAM(std::move(Arg.MAM)) {}
+ Result &operator=(Result RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ const ModuleAnalysisManager &getManager() const { return *MAM; }
+
+ /// \brief Handle invalidation by ignoring it, this pass is immutable.
+ bool invalidate(Function &) { return false; }
+
+ private:
+ const ModuleAnalysisManager *MAM;
+ };
+
+ static void *ID() { return (void *)&PassID; }
+
+ static StringRef name() { return "ModuleAnalysisManagerFunctionProxy"; }
+
+ ModuleAnalysisManagerFunctionProxy(const ModuleAnalysisManager &MAM)
+ : MAM(&MAM) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleAnalysisManagerFunctionProxy(
+ const ModuleAnalysisManagerFunctionProxy &Arg)
+ : MAM(Arg.MAM) {}
+ ModuleAnalysisManagerFunctionProxy(ModuleAnalysisManagerFunctionProxy &&Arg)
+ : MAM(std::move(Arg.MAM)) {}
+ ModuleAnalysisManagerFunctionProxy &
+ operator=(ModuleAnalysisManagerFunctionProxy RHS) {
+ std::swap(MAM, RHS.MAM);
+ return *this;
+ }
+
+ /// \brief Run the analysis pass and create our proxy result object.
+ /// Nothing to see here; it just forwards the \c MAM reference into the
+ /// result.
+ Result run(Function &) { return Result(*MAM); }
+
+private:
+ static char PassID;
+
+ const ModuleAnalysisManager *MAM;
+};
+
+/// \brief Trivial adaptor that maps from a module to its functions.
+///
+/// Designed to allow composition of a FunctionPass(Manager) and
+/// a ModulePassManager. Note that if this pass is constructed with a pointer
+/// to a \c ModuleAnalysisManager it will run the
+/// \c FunctionAnalysisManagerModuleProxy analysis prior to running the function
+/// pass over the module to enable a \c FunctionAnalysisManager to be used
+/// within this run safely.
+///
+/// Function passes run within this adaptor can rely on having exclusive access
+/// to the function they are run over. They should not read or modify any other
+/// functions! Other threads or systems may be manipulating other functions in
+/// the module, and so their state should never be relied on.
+/// FIXME: Make the above true for all of LLVM's actual passes, some still
+/// violate this principle.
+///
+/// Function passes can also read the module containing the function, but they
+/// should not modify that module outside of the use lists of various globals.
+/// For example, a function pass is not permitted to add functions to the
+/// module.
+/// FIXME: Make the above true for all of LLVM's actual passes, some still
+/// violate this principle.
+template <typename FunctionPassT> class ModuleToFunctionPassAdaptor {
+public:
+ explicit ModuleToFunctionPassAdaptor(FunctionPassT Pass)
+ : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ ModuleToFunctionPassAdaptor(const ModuleToFunctionPassAdaptor &Arg)
+ : Pass(Arg.Pass) {}
+ ModuleToFunctionPassAdaptor(ModuleToFunctionPassAdaptor &&Arg)
+ : Pass(std::move(Arg.Pass)) {}
+ friend void swap(ModuleToFunctionPassAdaptor &LHS,
+ ModuleToFunctionPassAdaptor &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ ModuleToFunctionPassAdaptor &operator=(ModuleToFunctionPassAdaptor RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief Runs the function pass across every function in the module.
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager *AM) {
+ FunctionAnalysisManager *FAM = nullptr;
+ if (AM)
+ // Setup the function analysis manager from its proxy.
+ FAM = &AM->getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+
+ PreservedAnalyses PA = PreservedAnalyses::all();
+ for (Function &F : M) {
+ if (F.isDeclaration())
+ continue;
+
+ PreservedAnalyses PassPA = Pass.run(F, FAM);
+
+ // We know that the function pass couldn't have invalidated any other
+ // function's analyses (that's the contract of a function pass), so
+ // directly handle the function analysis manager's invalidation here and
+ // update our preserved set to reflect that these have already been
+ // handled.
+ if (FAM)
+ PassPA = FAM->invalidate(F, std::move(PassPA));
+
+ // Then intersect the preserved set so that invalidation of module
+ // analyses will eventually occur when the module pass completes.
+ PA.intersect(std::move(PassPA));
+ }
+
+ // By definition we preserve the proxy. This precludes *any* invalidation
+ // of function analyses by the proxy, but that's OK because we've taken
+ // care to invalidate analyses in the function analysis manager
+ // incrementally above.
+ PA.preserve<FunctionAnalysisManagerModuleProxy>();
+ return PA;
+ }
+
+ static StringRef name() { return "ModuleToFunctionPassAdaptor"; }
+
+private:
+ FunctionPassT Pass;
+};
+
+/// \brief A function to deduce a function pass type and wrap it in the
+/// templated adaptor.
+template <typename FunctionPassT>
+ModuleToFunctionPassAdaptor<FunctionPassT>
+createModuleToFunctionPassAdaptor(FunctionPassT Pass) {
+ return ModuleToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
+}
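+
+// A usage sketch (assumed): composing a hypothetical function pass into a
+// module pipeline. The proxy analysis is registered so the adaptor can reach
+// the function analysis manager through it; `M` is some llvm::Module.
+//
+//   FunctionAnalysisManager FAM;
+//   ModuleAnalysisManager MAM;
+//   MAM.registerPass(FunctionAnalysisManagerModuleProxy(FAM));
+//   ModulePassManager MPM;
+//   MPM.addPass(createModuleToFunctionPassAdaptor(SomeFunctionPass()));
+//   MPM.run(M, &MAM);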
+
+/// \brief A template utility pass to force an analysis result to be available.
+///
+/// This is a no-op pass which simply forces a specific analysis pass's result
+/// to be available when it is run.
+template <typename AnalysisT> struct RequireAnalysisPass {
+ /// \brief Run this pass over some unit of IR.
+ ///
+ /// This pass can be run over any unit of IR and use any analysis manager
+ /// provided they satisfy the basic API requirements. When this pass is
+ /// created, these methods can be instantiated to satisfy whatever the
+ /// context requires.
+ template <typename IRUnitT>
+ PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> *AM) {
+ if (AM)
+ (void)AM->template getResult<AnalysisT>(Arg);
+
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "RequireAnalysisPass"; }
+};
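+
+// A brief sketch (assumed): forcing a hypothetical analysis to be computed at
+// a specific point in a function pipeline.
+//
+//   FPM.addPass(RequireAnalysisPass<SomeAnalysis>());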
+
+/// \brief A template utility pass to force an analysis result to be
+/// invalidated.
+///
+/// This is a no-op pass which simply forces a specific analysis result to be
+/// invalidated when it is run.
+template <typename AnalysisT> struct InvalidateAnalysisPass {
+ /// \brief Run this pass over some unit of IR.
+ ///
+ /// This pass can be run over any unit of IR and use any analysis manager
+ /// provided they satisfy the basic API requirements. When this pass is
+ /// created, these methods can be instantiated to satisfy whatever the
+ /// context requires.
+ template <typename IRUnitT>
+ PreservedAnalyses run(IRUnitT &Arg, AnalysisManager<IRUnitT> *AM) {
+ if (AM)
+ // We have to directly invalidate the analysis result as we can't
+ // enumerate all other analyses and use the preserved set to control it.
+ (void)AM->template invalidate<AnalysisT>(Arg);
+
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "InvalidateAnalysisPass"; }
+};
+
+/// \brief A utility pass that does nothing but preserves no analyses.
+///
+/// As a consequence of not preserving any analyses, this pass will force all
+/// analysis passes to be re-run to produce fresh results if any are needed.
+struct InvalidateAllAnalysesPass {
+ /// \brief Run this pass over some unit of IR.
+ template <typename IRUnitT> PreservedAnalyses run(IRUnitT &Arg) {
+ return PreservedAnalyses::none();
+ }
+
+ static StringRef name() { return "InvalidateAllAnalysesPass"; }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PassManagerInternal.h b/contrib/llvm/include/llvm/IR/PassManagerInternal.h
new file mode 100644
index 0000000..92de10b
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PassManagerInternal.h
@@ -0,0 +1,350 @@
+//===- PassManager internal APIs and implementation details -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This header provides internal APIs and implementation details used by the
+/// pass management interfaces exposed in PassManager.h. To understand more
+/// context of why these particular interfaces are needed, see that header
+/// file. None of these APIs should be used elsewhere.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PASSMANAGERINTERNAL_H
+#define LLVM_IR_PASSMANAGERINTERNAL_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+template <typename IRUnitT> class AnalysisManager;
+class PreservedAnalyses;
+
+/// \brief Implementation details of the pass manager interfaces.
+namespace detail {
+
+/// \brief Template for the abstract base class used to dispatch
+/// polymorphically over pass objects.
+template <typename IRUnitT> struct PassConcept {
+ // Boilerplate necessary for the container of derived classes.
+ virtual ~PassConcept() {}
+
+ /// \brief The polymorphic API which runs the pass over a given IR entity.
+ ///
+ /// Note that the actual pass object can omit the analysis manager argument if
+ /// desired. Note also that the analysis manager may be null if there is no
+ /// analysis manager in the pass pipeline.
+ virtual PreservedAnalyses run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) = 0;
+
+ /// \brief Polymorphic method to access the name of a pass.
+ virtual StringRef name() = 0;
+};
+
+/// \brief SFINAE metafunction for computing whether \c PassT has a run method
+/// accepting an \c AnalysisManager<IRUnitT>.
+template <typename IRUnitT, typename PassT, typename ResultT>
+class PassRunAcceptsAnalysisManager {
+ typedef char SmallType;
+ struct BigType {
+ char a, b;
+ };
+
+ template <typename T, ResultT (T::*)(IRUnitT &, AnalysisManager<IRUnitT> *)>
+ struct Checker;
+
+ template <typename T> static SmallType f(Checker<T, &T::run> *);
+ template <typename T> static BigType f(...);
+
+public:
+ enum { Value = sizeof(f<PassT>(nullptr)) == sizeof(SmallType) };
+};
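+
+// For illustration (assumed names): given these two hypothetical passes, the
+// metafunction yields Value == 1 for PassA and Value == 0 for PassB, which
+// selects the matching PassModel specialization below.
+//
+//   struct PassA {
+//     PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+//   };
+//   struct PassB {
+//     PreservedAnalyses run(Function &F);
+//   };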
+
+/// \brief A template wrapper used to implement the polymorphic API.
+///
+/// Can be instantiated for any object which provides a \c run method accepting
+/// an \c IRUnitT. It requires the pass to be a copyable object. When the
+/// \c run method also accepts an \c AnalysisManager<IRUnitT>*, we pass it
+/// along.
+template <typename IRUnitT, typename PassT,
+ typename PreservedAnalysesT = PreservedAnalyses,
+ bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
+ IRUnitT, PassT, PreservedAnalysesT>::Value>
+struct PassModel;
+
+/// \brief Specialization of \c PassModel for passes that accept an analysis
+/// manager.
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT>
+struct PassModel<IRUnitT, PassT, PreservedAnalysesT, true>
+ : PassConcept<IRUnitT> {
+ explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
+ PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(PassModel &LHS, PassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ PassModel &operator=(PassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ PreservedAnalysesT run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
+ return Pass.run(IR, AM);
+ }
+ StringRef name() override { return PassT::name(); }
+ PassT Pass;
+};
+
+/// \brief Specialization of \c PassModel for passes that do not accept an
+/// analysis manager.
+template <typename IRUnitT, typename PassT, typename PreservedAnalysesT>
+struct PassModel<IRUnitT, PassT, PreservedAnalysesT, false>
+ : PassConcept<IRUnitT> {
+ explicit PassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ PassModel(const PassModel &Arg) : Pass(Arg.Pass) {}
+ PassModel(PassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(PassModel &LHS, PassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ PassModel &operator=(PassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ PreservedAnalysesT run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
+ return Pass.run(IR);
+ }
+ StringRef name() override { return PassT::name(); }
+ PassT Pass;
+};
+
+/// \brief Abstract concept of an analysis result.
+///
+/// This concept is parameterized over the IR unit that this result pertains
+/// to.
+template <typename IRUnitT> struct AnalysisResultConcept {
+ virtual ~AnalysisResultConcept() {}
+
+ /// \brief Method to try and mark a result as invalid.
+ ///
+ /// When the outer analysis manager detects a change in some underlying
+ /// unit of the IR, it will call this method on all of the results cached.
+ ///
+ /// This method also receives a set of preserved analyses which can be used
+ /// to avoid invalidation because the pass which changed the underlying IR
+ /// took care to update or preserve the analysis result in some way.
+ ///
+ /// \returns true if the result is indeed invalid (the default).
+ virtual bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA) = 0;
+};
+
+/// \brief SFINAE metafunction for computing whether \c ResultT provides an
+/// \c invalidate member function.
+template <typename IRUnitT, typename ResultT> class ResultHasInvalidateMethod {
+ typedef char SmallType;
+ struct BigType {
+ char a, b;
+ };
+
+ template <typename T, bool (T::*)(IRUnitT &, const PreservedAnalyses &)>
+ struct Checker;
+
+ template <typename T> static SmallType f(Checker<T, &T::invalidate> *);
+ template <typename T> static BigType f(...);
+
+public:
+ enum { Value = sizeof(f<ResultT>(nullptr)) == sizeof(SmallType) };
+};
+
+/// \brief Wrapper to model the analysis result concept.
+///
+/// By default, this will implement the invalidate method with a trivial
+/// implementation so that the actual analysis result doesn't need to provide
+/// an invalidation handler. It is only selected when the invalidation handler
+/// is not part of the ResultT's interface.
+template <typename IRUnitT, typename PassT, typename ResultT,
+ typename PreservedAnalysesT = PreservedAnalyses,
+ bool HasInvalidateHandler =
+ ResultHasInvalidateMethod<IRUnitT, ResultT>::Value>
+struct AnalysisResultModel;
+
+/// \brief Specialization of \c AnalysisResultModel which provides the default
+/// invalidate functionality.
+template <typename IRUnitT, typename PassT, typename ResultT,
+ typename PreservedAnalysesT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, false>
+ : AnalysisResultConcept<IRUnitT> {
+ explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+ AnalysisResultModel(AnalysisResultModel &&Arg)
+ : Result(std::move(Arg.Result)) {}
+ friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+ using std::swap;
+ swap(LHS.Result, RHS.Result);
+ }
+ AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief The model bases invalidation solely on being in the preserved set.
+ //
+ // FIXME: We should actually use two different concepts for analysis results
+ // rather than two different models, and avoid the indirect function call for
+ // ones that use the trivial behavior.
+ bool invalidate(IRUnitT &, const PreservedAnalysesT &PA) override {
+ return !PA.preserved(PassT::ID());
+ }
+
+ ResultT Result;
+};
+
+/// \brief Specialization of \c AnalysisResultModel which delegates invalidate
+/// handling to \c ResultT.
+template <typename IRUnitT, typename PassT, typename ResultT,
+ typename PreservedAnalysesT>
+struct AnalysisResultModel<IRUnitT, PassT, ResultT, PreservedAnalysesT, true>
+ : AnalysisResultConcept<IRUnitT> {
+ explicit AnalysisResultModel(ResultT Result) : Result(std::move(Result)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisResultModel(const AnalysisResultModel &Arg) : Result(Arg.Result) {}
+ AnalysisResultModel(AnalysisResultModel &&Arg)
+ : Result(std::move(Arg.Result)) {}
+ friend void swap(AnalysisResultModel &LHS, AnalysisResultModel &RHS) {
+ using std::swap;
+ swap(LHS.Result, RHS.Result);
+ }
+ AnalysisResultModel &operator=(AnalysisResultModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ /// \brief The model delegates to the \c ResultT method.
+ bool invalidate(IRUnitT &IR, const PreservedAnalysesT &PA) override {
+ return Result.invalidate(IR, PA);
+ }
+
+ ResultT Result;
+};
+
+/// \brief Abstract concept of an analysis pass.
+///
+/// This concept is parameterized over the IR unit that it runs over and for
+/// which it produces an analysis result.
+template <typename IRUnitT> struct AnalysisPassConcept {
+ virtual ~AnalysisPassConcept() {}
+
+ /// \brief Method to run this analysis over a unit of IR.
+ /// \returns A unique_ptr to the analysis result object to be queried by
+ /// users.
+ virtual std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) = 0;
+
+ /// \brief Polymorphic method to access the name of a pass.
+ virtual StringRef name() = 0;
+};
+
+/// \brief Wrapper to model the analysis pass concept.
+///
+/// Can wrap any type which implements a suitable \c run method. The method
+/// must accept the IRUnitT as an argument and produce an object which can be
+/// wrapped in a \c AnalysisResultModel.
+template <typename IRUnitT, typename PassT,
+ bool AcceptsAnalysisManager = PassRunAcceptsAnalysisManager<
+ IRUnitT, PassT, typename PassT::Result>::Value>
+struct AnalysisPassModel;
+
+/// \brief Specialization of \c AnalysisPassModel which passes an
+/// \c AnalysisManager to PassT's run method.
+template <typename IRUnitT, typename PassT>
+struct AnalysisPassModel<IRUnitT, PassT, true> : AnalysisPassConcept<IRUnitT> {
+ explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
+ AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ AnalysisPassModel &operator=(AnalysisPassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ // FIXME: Replace PassT::Result with type traits when we use C++11.
+ typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+
+ /// \brief The model delegates to the \c PassT::run method.
+ ///
+ /// The return is wrapped in an \c AnalysisResultModel.
+ std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT &IR, AnalysisManager<IRUnitT> *AM) override {
+ return make_unique<ResultModelT>(Pass.run(IR, AM));
+ }
+
+ /// \brief The model delegates to a static \c PassT::name method.
+ ///
+ /// The returned string ref must point to constant immutable data!
+ StringRef name() override { return PassT::name(); }
+
+ PassT Pass;
+};
+
+/// \brief Specialization of \c AnalysisPassModel which does not pass an
+/// \c AnalysisManager to PassT's run method.
+template <typename IRUnitT, typename PassT>
+struct AnalysisPassModel<IRUnitT, PassT, false> : AnalysisPassConcept<IRUnitT> {
+ explicit AnalysisPassModel(PassT Pass) : Pass(std::move(Pass)) {}
+ // We have to explicitly define all the special member functions because MSVC
+ // refuses to generate them.
+ AnalysisPassModel(const AnalysisPassModel &Arg) : Pass(Arg.Pass) {}
+ AnalysisPassModel(AnalysisPassModel &&Arg) : Pass(std::move(Arg.Pass)) {}
+ friend void swap(AnalysisPassModel &LHS, AnalysisPassModel &RHS) {
+ using std::swap;
+ swap(LHS.Pass, RHS.Pass);
+ }
+ AnalysisPassModel &operator=(AnalysisPassModel RHS) {
+ swap(*this, RHS);
+ return *this;
+ }
+
+ // FIXME: Replace PassT::Result with type traits when we use C++11.
+ typedef AnalysisResultModel<IRUnitT, PassT, typename PassT::Result>
+ ResultModelT;
+
+ /// \brief The model delegates to the \c PassT::run method.
+ ///
+ /// The return is wrapped in an \c AnalysisResultModel.
+ std::unique_ptr<AnalysisResultConcept<IRUnitT>>
+ run(IRUnitT &IR, AnalysisManager<IRUnitT> *) override {
+ return make_unique<ResultModelT>(Pass.run(IR));
+ }
+
+ /// \brief The model delegates to a static \c PassT::name method.
+ ///
+ /// The returned string ref must point to constant immutable data!
+ StringRef name() override { return PassT::name(); }
+
+ PassT Pass;
+};
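+
+// For example, a minimal analysis pass shape that these models can wrap (a
+// hedged sketch; HypotheticalBlockCounter is illustrative, not part of
+// LLVM):
+//
+//   struct HypotheticalBlockCounter {
+//     struct Result { unsigned Count; };
+//     static StringRef name() { return "HypotheticalBlockCounter"; }
+//     Result run(Function &F) {
+//       return Result{static_cast<unsigned>(F.size())};
+//     }
+//   };
+//
+// Because this run() does not accept an AnalysisManager, the
+// AnalysisPassModel<Function, HypotheticalBlockCounter, false>
+// specialization above is selected.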
+
+} // End namespace detail
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PatternMatch.h b/contrib/llvm/include/llvm/IR/PatternMatch.h
new file mode 100644
index 0000000..f4d7d8c
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PatternMatch.h
@@ -0,0 +1,1318 @@
+//===- PatternMatch.h - Match on the LLVM IR --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a simple and efficient mechanism for performing general
+// tree-based pattern matches on the LLVM IR. The power of these routines is
+// that it allows you to write concise patterns that are expressive and easy to
+// understand. The other major advantage of this is that it allows you to
+// trivially capture/bind elements in the pattern to variables. For example,
+// you can do something like this:
+//
+// Value *Exp = ...
+// Value *X, *Y; ConstantInt *C1, *C2; // (X & C1) | (Y & C2)
+// if (match(Exp, m_Or(m_And(m_Value(X), m_ConstantInt(C1)),
+// m_And(m_Value(Y), m_ConstantInt(C2))))) {
+// ... Pattern is matched and variables are bound ...
+// }
+//
+// This is primarily useful for things like the instruction combiner, but can
+// also be useful for static analysis tools or code generators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PATTERNMATCH_H
+#define LLVM_IR_PATTERNMATCH_H
+
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+namespace PatternMatch {
+
+template <typename Val, typename Pattern> bool match(Val *V, const Pattern &P) {
+ return const_cast<Pattern &>(P).match(V);
+}
+
+template <typename SubPattern_t> struct OneUse_match {
+ SubPattern_t SubPattern;
+
+ OneUse_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ return V->hasOneUse() && SubPattern.match(V);
+ }
+};
+
+template <typename T> inline OneUse_match<T> m_OneUse(const T &SubPattern) {
+ return SubPattern;
+}
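+
+// For example (a minimal usage sketch; V is a hypothetical Value*):
+//
+//   Value *X;
+//   if (match(V, m_OneUse(m_Value(X))))
+//     ... V has exactly one use and is bound to X ...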
+
+template <typename Class> struct class_match {
+ template <typename ITy> bool match(ITy *V) { return isa<Class>(V); }
+};
+
+/// \brief Match an arbitrary value and ignore it.
+inline class_match<Value> m_Value() { return class_match<Value>(); }
+
+/// \brief Match an arbitrary binary operation and ignore it.
+inline class_match<BinaryOperator> m_BinOp() {
+ return class_match<BinaryOperator>();
+}
+
+/// \brief Match an arbitrary compare instruction and ignore it.
+inline class_match<CmpInst> m_Cmp() { return class_match<CmpInst>(); }
+
+/// \brief Match an arbitrary ConstantInt and ignore it.
+inline class_match<ConstantInt> m_ConstantInt() {
+ return class_match<ConstantInt>();
+}
+
+/// \brief Match an arbitrary undef constant.
+inline class_match<UndefValue> m_Undef() { return class_match<UndefValue>(); }
+
+/// \brief Match an arbitrary Constant and ignore it.
+inline class_match<Constant> m_Constant() { return class_match<Constant>(); }
+
+/// Matching combinators
+template <typename LTy, typename RTy> struct match_combine_or {
+ LTy L;
+ RTy R;
+
+ match_combine_or(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (L.match(V))
+ return true;
+ if (R.match(V))
+ return true;
+ return false;
+ }
+};
+
+template <typename LTy, typename RTy> struct match_combine_and {
+ LTy L;
+ RTy R;
+
+ match_combine_and(const LTy &Left, const RTy &Right) : L(Left), R(Right) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (L.match(V))
+ if (R.match(V))
+ return true;
+ return false;
+ }
+};
+
+/// Combine two pattern matchers matching L || R
+template <typename LTy, typename RTy>
+inline match_combine_or<LTy, RTy> m_CombineOr(const LTy &L, const RTy &R) {
+ return match_combine_or<LTy, RTy>(L, R);
+}
+
+/// Combine two pattern matchers matching L && R
+template <typename LTy, typename RTy>
+inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {
+ return match_combine_and<LTy, RTy>(L, R);
+}
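+
+// For example, the combinators compose with any other matchers (a sketch;
+// V is a hypothetical Value*):
+//
+//   if (match(V, m_CombineOr(m_Zero(), m_One())))
+//     ... V is a zero/null constant or a constant 1 ...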
+
+struct match_zero {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isNullValue();
+ return false;
+ }
+};
+
+/// \brief Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers.
+inline match_zero m_Zero() { return match_zero(); }
+
+struct match_neg_zero {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *C = dyn_cast<Constant>(V))
+ return C->isNegativeZeroValue();
+ return false;
+ }
+};
+
+/// \brief Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers. For
+/// floating point constants, this will match negative zero but not positive
+/// zero.
+inline match_neg_zero m_NegZero() { return match_neg_zero(); }
+
+/// \brief Match an arbitrary zero/null constant. This includes
+/// zero_initializer for vectors and ConstantPointerNull for pointers. For
+/// floating point constants, this will match negative zero and positive zero
+inline match_combine_or<match_zero, match_neg_zero> m_AnyZero() {
+ return m_CombineOr(m_Zero(), m_NegZero());
+}
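+
+// To illustrate the distinction between the zero matchers (a sketch; C is a
+// hypothetical Value*):
+//
+//   match(C, m_Zero())    // integer 0, null pointers, and FP +0.0
+//   match(C, m_NegZero()) // integer 0, null pointers, and FP -0.0
+//   match(C, m_AnyZero()) // any of the above, i.e. both FP zeros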
+
+struct apint_match {
+ const APInt *&Res;
+ apint_match(const APInt *&R) : Res(R) {}
+ template <typename ITy> bool match(ITy *V) {
+ if (auto *CI = dyn_cast<ConstantInt>(V)) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ return false;
+ }
+};
+
+/// \brief Match a ConstantInt or splatted ConstantVector, binding the
+/// specified pointer to the contained APInt.
+inline apint_match m_APInt(const APInt *&Res) { return Res; }
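+
+// For example, the binding works uniformly for scalar ConstantInts and for
+// splat constant vectors (a sketch; V is a hypothetical Value*):
+//
+//   const APInt *C;
+//   if (match(V, m_APInt(C)))
+//     ... *C is the (possibly splatted) integer constant ...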
+
+template <int64_t Val> struct constantint_match {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *CI = dyn_cast<ConstantInt>(V)) {
+ const APInt &CIV = CI->getValue();
+ if (Val >= 0)
+ return CIV == static_cast<uint64_t>(Val);
+ // If Val is negative, and CI is shorter than it, truncate to the right
+ // number of bits. If it is larger, then we have to sign extend. Just
+ // compare their negated values.
+ return -CIV == -Val;
+ }
+ return false;
+ }
+};
+
+/// \brief Match a ConstantInt with a specific value.
+template <int64_t Val> inline constantint_match<Val> m_ConstantInt() {
+ return constantint_match<Val>();
+}
+
+/// \brief This helper class is used to match scalar and vector constants that
+/// satisfy a specified predicate.
+template <typename Predicate> struct cst_pred_ty : public Predicate {
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *CI = dyn_cast<ConstantInt>(V))
+ return this->isValue(CI->getValue());
+ if (V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ if (const auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+ return this->isValue(CI->getValue());
+ return false;
+ }
+};
+
+/// \brief This helper class is used to match scalar and vector constants that
+/// satisfy a specified predicate, and bind them to an APInt.
+template <typename Predicate> struct api_pred_ty : public Predicate {
+ const APInt *&Res;
+ api_pred_ty(const APInt *&R) : Res(R) {}
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *CI = dyn_cast<ConstantInt>(V))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+ if (V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ if (auto *CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue()))
+ if (this->isValue(CI->getValue())) {
+ Res = &CI->getValue();
+ return true;
+ }
+
+ return false;
+ }
+};
+
+struct is_one {
+ bool isValue(const APInt &C) { return C == 1; }
+};
+
+/// \brief Match an integer 1 or a vector with all elements equal to 1.
+inline cst_pred_ty<is_one> m_One() { return cst_pred_ty<is_one>(); }
+inline api_pred_ty<is_one> m_One(const APInt *&V) { return V; }
+
+struct is_all_ones {
+ bool isValue(const APInt &C) { return C.isAllOnesValue(); }
+};
+
+/// \brief Match an integer or vector with all bits set to true.
+inline cst_pred_ty<is_all_ones> m_AllOnes() {
+ return cst_pred_ty<is_all_ones>();
+}
+inline api_pred_ty<is_all_ones> m_AllOnes(const APInt *&V) { return V; }
+
+struct is_sign_bit {
+ bool isValue(const APInt &C) { return C.isSignBit(); }
+};
+
+/// \brief Match an integer or vector with only the sign bit(s) set.
+inline cst_pred_ty<is_sign_bit> m_SignBit() {
+ return cst_pred_ty<is_sign_bit>();
+}
+inline api_pred_ty<is_sign_bit> m_SignBit(const APInt *&V) { return V; }
+
+struct is_power2 {
+ bool isValue(const APInt &C) { return C.isPowerOf2(); }
+};
+
+/// \brief Match an integer or vector power of 2.
+inline cst_pred_ty<is_power2> m_Power2() { return cst_pred_ty<is_power2>(); }
+inline api_pred_ty<is_power2> m_Power2(const APInt *&V) { return V; }
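+
+// For example (a sketch; V is a hypothetical Value*):
+//
+//   const APInt *C;
+//   if (match(V, m_Power2(C)))
+//     unsigned Log = C->logBase2(); // safe: *C is a power of two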
+
+struct is_maxsignedvalue {
+ bool isValue(const APInt &C) { return C.isMaxSignedValue(); }
+};
+
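+/// \brief Match an integer or vector with the maximum signed value.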
+inline cst_pred_ty<is_maxsignedvalue> m_MaxSignedValue() {
+ return cst_pred_ty<is_maxsignedvalue>();
+}
+inline api_pred_ty<is_maxsignedvalue> m_MaxSignedValue(const APInt *&V) {
+ return V;
+}
+
+template <typename Class> struct bind_ty {
+ Class *&VR;
+ bind_ty(Class *&V) : VR(V) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (auto *CV = dyn_cast<Class>(V)) {
+ VR = CV;
+ return true;
+ }
+ return false;
+ }
+};
+
+/// \brief Match a value, capturing it if we match.
+inline bind_ty<Value> m_Value(Value *&V) { return V; }
+
+/// \brief Match an instruction, capturing it if we match.
+inline bind_ty<Instruction> m_Instruction(Instruction *&I) { return I; }
+
+/// \brief Match a binary operator, capturing it if we match.
+inline bind_ty<BinaryOperator> m_BinOp(BinaryOperator *&I) { return I; }
+
+/// \brief Match a ConstantInt, capturing the value if we match.
+inline bind_ty<ConstantInt> m_ConstantInt(ConstantInt *&CI) { return CI; }
+
+/// \brief Match a Constant, capturing the value if we match.
+inline bind_ty<Constant> m_Constant(Constant *&C) { return C; }
+
+/// \brief Match a ConstantFP, capturing the value if we match.
+inline bind_ty<ConstantFP> m_ConstantFP(ConstantFP *&C) { return C; }
+
+/// \brief Match a specified Value*.
+struct specificval_ty {
+ const Value *Val;
+ specificval_ty(const Value *V) : Val(V) {}
+
+ template <typename ITy> bool match(ITy *V) { return V == Val; }
+};
+
+/// \brief Match if we have a specific specified value.
+inline specificval_ty m_Specific(const Value *V) { return V; }
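+
+// For example, given an already-known Value *X, this checks whether V is
+// exactly 'add X, 1' (a sketch; V and X are hypothetical):
+//
+//   if (match(V, m_Add(m_Specific(X), m_One())))
+//     ...
+//
+// Note that m_Specific captures X when the pattern is constructed, so X
+// must already be set before match() is called.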
+
+/// \brief Match a specified floating point value or a vector with all
+/// elements equal to that value.
+struct specific_fpval {
+ double Val;
+ specific_fpval(double V) : Val(V) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *CFP = dyn_cast<ConstantFP>(V))
+ return CFP->isExactlyValue(Val);
+ if (V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ if (auto *CFP = dyn_cast_or_null<ConstantFP>(C->getSplatValue()))
+ return CFP->isExactlyValue(Val);
+ return false;
+ }
+};
+
+/// \brief Match a specific floating point value or vector with all elements
+/// equal to the value.
+inline specific_fpval m_SpecificFP(double V) { return specific_fpval(V); }
+
+/// \brief Match a float 1.0 or vector with all elements equal to 1.0.
+inline specific_fpval m_FPOne() { return m_SpecificFP(1.0); }
+
+struct bind_const_intval_ty {
+ uint64_t &VR;
+ bind_const_intval_ty(uint64_t &V) : VR(V) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ if (const auto *CV = dyn_cast<ConstantInt>(V))
+ if (CV->getBitWidth() <= 64) {
+ VR = CV->getZExtValue();
+ return true;
+ }
+ return false;
+ }
+};
+
+/// \brief Match a specified integer value or a vector with all elements
+/// equal to that value.
+struct specific_intval {
+ uint64_t Val;
+ specific_intval(uint64_t V) : Val(V) {}
+
+ template <typename ITy> bool match(ITy *V) {
+ const auto *CI = dyn_cast<ConstantInt>(V);
+ if (!CI && V->getType()->isVectorTy())
+ if (const auto *C = dyn_cast<Constant>(V))
+ CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue());
+
+ if (CI && CI->getBitWidth() <= 64)
+ return CI->getZExtValue() == Val;
+
+ return false;
+ }
+};
+
+/// \brief Match a specific integer value or vector with all elements equal to
+/// the value.
+inline specific_intval m_SpecificInt(uint64_t V) { return specific_intval(V); }
+
+/// \brief Match a ConstantInt and bind to its value. This does not match
+/// ConstantInts wider than 64-bits.
+inline bind_const_intval_ty m_ConstantInt(uint64_t &V) { return V; }
+
+//===----------------------------------------------------------------------===//
+// Matcher for any binary operator.
+//
+template <typename LHS_t, typename RHS_t> struct AnyBinaryOp_match {
+ LHS_t L;
+ RHS_t R;
+
+ AnyBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *I = dyn_cast<BinaryOperator>(V))
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline AnyBinaryOp_match<LHS, RHS> m_BinOp(const LHS &L, const RHS &R) {
+ return AnyBinaryOp_match<LHS, RHS>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for specific binary operators.
+//
+
+template <typename LHS_t, typename RHS_t, unsigned Opcode>
+struct BinaryOp_match {
+ LHS_t L;
+ RHS_t R;
+
+ BinaryOp_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opcode) {
+ auto *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
+ }
+ if (auto *CE = dyn_cast<ConstantExpr>(V))
+ return CE->getOpcode() == Opcode && L.match(CE->getOperand(0)) &&
+ R.match(CE->getOperand(1));
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Add> m_Add(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Add>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FAdd> m_FAdd(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FAdd>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Sub> m_Sub(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Sub>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FSub> m_FSub(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FSub>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Mul> m_Mul(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Mul>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FMul> m_FMul(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FMul>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::UDiv> m_UDiv(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::UDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SDiv> m_SDiv(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::SDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FDiv> m_FDiv(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FDiv>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::URem> m_URem(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::URem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::SRem> m_SRem(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::SRem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::FRem> m_FRem(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::FRem>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::And> m_And(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::And>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Or> m_Or(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Or>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Xor> m_Xor(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Xor>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::Shl> m_Shl(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::Shl>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::LShr> m_LShr(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::LShr>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline BinaryOp_match<LHS, RHS, Instruction::AShr> m_AShr(const LHS &L,
+ const RHS &R) {
+ return BinaryOp_match<LHS, RHS, Instruction::AShr>(L, R);
+}
+
+template <typename LHS_t, typename RHS_t, unsigned Opcode,
+ unsigned WrapFlags = 0>
+struct OverflowingBinaryOp_match {
+ LHS_t L;
+ RHS_t R;
+
+ OverflowingBinaryOp_match(const LHS_t &LHS, const RHS_t &RHS)
+ : L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *Op = dyn_cast<OverflowingBinaryOperator>(V)) {
+ if (Op->getOpcode() != Opcode)
+ return false;
+ if (WrapFlags & OverflowingBinaryOperator::NoUnsignedWrap &&
+ !Op->hasNoUnsignedWrap())
+ return false;
+ if (WrapFlags & OverflowingBinaryOperator::NoSignedWrap &&
+ !Op->hasNoSignedWrap())
+ return false;
+ return L.match(Op->getOperand(0)) && R.match(Op->getOperand(1));
+ }
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWAdd(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWSub(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWMul(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoSignedWrap>
+m_NSWShl(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoSignedWrap>(
+ L, R);
+}
+
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWAdd(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWSub(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWMul(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Mul,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
+template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoUnsignedWrap>
+m_NUWShl(const LHS &L, const RHS &R) {
+ return OverflowingBinaryOp_match<LHS, RHS, Instruction::Shl,
+ OverflowingBinaryOperator::NoUnsignedWrap>(
+ L, R);
+}
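+
+// For example (a sketch; V is a hypothetical Value*):
+//
+//   Value *X, *Y;
+//   if (match(V, m_NSWAdd(m_Value(X), m_Value(Y))))
+//     ... V is an 'add nsw X, Y' ...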
+
+//===----------------------------------------------------------------------===//
+// Class that matches two different binary ops.
+//
+template <typename LHS_t, typename RHS_t, unsigned Opc1, unsigned Opc2>
+struct BinOp2_match {
+ LHS_t L;
+ RHS_t R;
+
+ BinOp2_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (V->getValueID() == Value::InstructionVal + Opc1 ||
+ V->getValueID() == Value::InstructionVal + Opc2) {
+ auto *I = cast<BinaryOperator>(V);
+ return L.match(I->getOperand(0)) && R.match(I->getOperand(1));
+ }
+ if (auto *CE = dyn_cast<ConstantExpr>(V))
+ return (CE->getOpcode() == Opc1 || CE->getOpcode() == Opc2) &&
+ L.match(CE->getOperand(0)) && R.match(CE->getOperand(1));
+ return false;
+ }
+};
+
+/// \brief Matches LShr or AShr.
+template <typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>
+m_Shr(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::AShr>(L, R);
+}
+
+/// \brief Matches LShr or Shl.
+template <typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>
+m_LogicalShift(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::LShr, Instruction::Shl>(L, R);
+}
+
+/// \brief Matches UDiv or SDiv.
+template <typename LHS, typename RHS>
+inline BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>
+m_IDiv(const LHS &L, const RHS &R) {
+ return BinOp2_match<LHS, RHS, Instruction::SDiv, Instruction::UDiv>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Class that matches exact binary ops.
+//
+template <typename SubPattern_t> struct Exact_match {
+ SubPattern_t SubPattern;
+
+ Exact_match(const SubPattern_t &SP) : SubPattern(SP) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(V))
+ return PEO->isExact() && SubPattern.match(V);
+ return false;
+ }
+};
+
+template <typename T> inline Exact_match<T> m_Exact(const T &SubPattern) {
+ return SubPattern;
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for CmpInst classes
+//
+
+template <typename LHS_t, typename RHS_t, typename Class, typename PredicateTy>
+struct CmpClass_match {
+ PredicateTy &Predicate;
+ LHS_t L;
+ RHS_t R;
+
+ CmpClass_match(PredicateTy &Pred, const LHS_t &LHS, const RHS_t &RHS)
+ : Predicate(Pred), L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (Class *I = dyn_cast<Class>(V))
+ if (L.match(I->getOperand(0)) && R.match(I->getOperand(1))) {
+ Predicate = I->getPredicate();
+ return true;
+ }
+ return false;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>
+m_Cmp(CmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+ return CmpClass_match<LHS, RHS, CmpInst, CmpInst::Predicate>(Pred, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>
+m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+ return CmpClass_match<LHS, RHS, ICmpInst, ICmpInst::Predicate>(Pred, L, R);
+}
+
+template <typename LHS, typename RHS>
+inline CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>
+m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R) {
+ return CmpClass_match<LHS, RHS, FCmpInst, FCmpInst::Predicate>(Pred, L, R);
+}
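+
+// For example, the comparison predicate is captured along with the operands
+// (a sketch; V is a hypothetical Value*):
+//
+//   ICmpInst::Predicate Pred;
+//   Value *X, *Y;
+//   if (match(V, m_ICmp(Pred, m_Value(X), m_Value(Y))) &&
+//       Pred == ICmpInst::ICMP_EQ)
+//     ... V is 'icmp eq X, Y' ...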
+
+//===----------------------------------------------------------------------===//
+// Matchers for SelectInst classes
+//
+
+template <typename Cond_t, typename LHS_t, typename RHS_t>
+struct SelectClass_match {
+ Cond_t C;
+ LHS_t L;
+ RHS_t R;
+
+ SelectClass_match(const Cond_t &Cond, const LHS_t &LHS, const RHS_t &RHS)
+ : C(Cond), L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *I = dyn_cast<SelectInst>(V))
+ return C.match(I->getOperand(0)) && L.match(I->getOperand(1)) &&
+ R.match(I->getOperand(2));
+ return false;
+ }
+};
+
+template <typename Cond, typename LHS, typename RHS>
+inline SelectClass_match<Cond, LHS, RHS> m_Select(const Cond &C, const LHS &L,
+ const RHS &R) {
+ return SelectClass_match<Cond, LHS, RHS>(C, L, R);
+}
+
+/// \brief This matches a select of two constants, e.g.:
+/// m_SelectCst<-1, 0>(m_Value(V))
+template <int64_t L, int64_t R, typename Cond>
+inline SelectClass_match<Cond, constantint_match<L>, constantint_match<R>>
+m_SelectCst(const Cond &C) {
+ return m_Select(C, m_ConstantInt<L>(), m_ConstantInt<R>());
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for CastInst classes
+//
+
+template <typename Op_t, unsigned Opcode> struct CastClass_match {
+ Op_t Op;
+
+ CastClass_match(const Op_t &OpMatch) : Op(OpMatch) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *O = dyn_cast<Operator>(V))
+ return O->getOpcode() == Opcode && Op.match(O->getOperand(0));
+ return false;
+ }
+};
+
+/// \brief Matches BitCast.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::BitCast> m_BitCast(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::BitCast>(Op);
+}
+
+/// \brief Matches PtrToInt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::PtrToInt> m_PtrToInt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::PtrToInt>(Op);
+}
+
+/// \brief Matches Trunc.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::Trunc> m_Trunc(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::Trunc>(Op);
+}
+
+/// \brief Matches SExt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::SExt> m_SExt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::SExt>(Op);
+}
+
+/// \brief Matches ZExt.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::ZExt> m_ZExt(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::ZExt>(Op);
+}
+
+/// \brief Matches UIToFP.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::UIToFP> m_UIToFP(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::UIToFP>(Op);
+}
+
+/// \brief Matches SIToFP.
+template <typename OpTy>
+inline CastClass_match<OpTy, Instruction::SIToFP> m_SIToFP(const OpTy &Op) {
+ return CastClass_match<OpTy, Instruction::SIToFP>(Op);
+}
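+
+// Cast matchers nest like any other pattern. For example (a sketch; V is a
+// hypothetical Value*):
+//
+//   Value *X;
+//   if (match(V, m_ZExt(m_Trunc(m_Value(X)))))
+//     ... V is 'zext (trunc X)' and X is bound to the original value ...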
+
+//===----------------------------------------------------------------------===//
+// Matchers for unary operators
+//
+
+template <typename LHS_t> struct not_match {
+ LHS_t L;
+
+ not_match(const LHS_t &LHS) : L(LHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Xor)
+ return matchIfNot(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+
+private:
+ bool matchIfNot(Value *LHS, Value *RHS) {
+ return (isa<ConstantInt>(RHS) || isa<ConstantDataVector>(RHS) ||
+ // FIXME: Remove CV.
+ isa<ConstantVector>(RHS)) &&
+ cast<Constant>(RHS)->isAllOnesValue() && L.match(LHS);
+ }
+};
+
+template <typename LHS> inline not_match<LHS> m_Not(const LHS &L) { return L; }
+
+template <typename LHS_t> struct neg_match {
+ LHS_t L;
+
+ neg_match(const LHS_t &LHS) : L(LHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::Sub)
+ return matchIfNeg(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+
+private:
+ bool matchIfNeg(Value *LHS, Value *RHS) {
+ return ((isa<ConstantInt>(LHS) && cast<ConstantInt>(LHS)->isZero()) ||
+ isa<ConstantAggregateZero>(LHS)) &&
+ L.match(RHS);
+ }
+};
+
+/// \brief Match an integer negate.
+template <typename LHS> inline neg_match<LHS> m_Neg(const LHS &L) { return L; }
+
+template <typename LHS_t> struct fneg_match {
+ LHS_t L;
+
+ fneg_match(const LHS_t &LHS) : L(LHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *O = dyn_cast<Operator>(V))
+ if (O->getOpcode() == Instruction::FSub)
+ return matchIfFNeg(O->getOperand(0), O->getOperand(1));
+ return false;
+ }
+
+private:
+ bool matchIfFNeg(Value *LHS, Value *RHS) {
+ if (const auto *C = dyn_cast<ConstantFP>(LHS))
+ return C->isNegativeZeroValue() && L.match(RHS);
+ return false;
+ }
+};
+
+/// \brief Match a floating point negate.
+template <typename LHS> inline fneg_match<LHS> m_FNeg(const LHS &L) {
+ return L;
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for control flow.
+//
+
+struct br_match {
+ BasicBlock *&Succ;
+ br_match(BasicBlock *&Succ) : Succ(Succ) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *BI = dyn_cast<BranchInst>(V))
+ if (BI->isUnconditional()) {
+ Succ = BI->getSuccessor(0);
+ return true;
+ }
+ return false;
+ }
+};
+
+inline br_match m_UnconditionalBr(BasicBlock *&Succ) { return br_match(Succ); }
+
+template <typename Cond_t> struct brc_match {
+ Cond_t Cond;
+ BasicBlock *&T, *&F;
+ brc_match(const Cond_t &C, BasicBlock *&t, BasicBlock *&f)
+ : Cond(C), T(t), F(f) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (auto *BI = dyn_cast<BranchInst>(V))
+ if (BI->isConditional() && Cond.match(BI->getCondition())) {
+ T = BI->getSuccessor(0);
+ F = BI->getSuccessor(1);
+ return true;
+ }
+ return false;
+ }
+};
+
+template <typename Cond_t>
+inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
+ return brc_match<Cond_t>(C, T, F);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x, y).
+//
+
+template <typename CmpInst_t, typename LHS_t, typename RHS_t, typename Pred_t>
+struct MaxMin_match {
+ LHS_t L;
+ RHS_t R;
+
+ MaxMin_match(const LHS_t &LHS, const RHS_t &RHS) : L(LHS), R(RHS) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ // Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
+ auto *SI = dyn_cast<SelectInst>(V);
+ if (!SI)
+ return false;
+ auto *Cmp = dyn_cast<CmpInst_t>(SI->getCondition());
+ if (!Cmp)
+ return false;
+ // At this point we have a select conditioned on a comparison. Check that
+ // it is the values returned by the select that are being compared.
+ Value *TrueVal = SI->getTrueValue();
+ Value *FalseVal = SI->getFalseValue();
+ Value *LHS = Cmp->getOperand(0);
+ Value *RHS = Cmp->getOperand(1);
+ if ((TrueVal != LHS || FalseVal != RHS) &&
+ (TrueVal != RHS || FalseVal != LHS))
+ return false;
+ typename CmpInst_t::Predicate Pred =
+ LHS == TrueVal ? Cmp->getPredicate() : Cmp->getSwappedPredicate();
+ // Does "(x pred y) ? x : y" represent the desired max/min operation?
+ if (!Pred_t::match(Pred))
+ return false;
+ // It does! Bind the operands.
+ return L.match(LHS) && R.match(RHS);
+ }
+};
+
+/// \brief Helper class for identifying signed max predicates.
+struct smax_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
+ }
+};
+
+/// \brief Helper class for identifying signed min predicates.
+struct smin_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
+ }
+};
+
+/// \brief Helper class for identifying unsigned max predicates.
+struct umax_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
+ }
+};
+
+/// \brief Helper class for identifying unsigned min predicates.
+struct umin_pred_ty {
+ static bool match(ICmpInst::Predicate Pred) {
+ return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
+ }
+};
+
+/// \brief Helper class for identifying ordered max predicates.
+struct ofmax_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_OGT || Pred == CmpInst::FCMP_OGE;
+ }
+};
+
+/// \brief Helper class for identifying ordered min predicates.
+struct ofmin_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE;
+ }
+};
+
+/// \brief Helper class for identifying unordered max predicates.
+struct ufmax_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_UGT || Pred == CmpInst::FCMP_UGE;
+ }
+};
+
+/// \brief Helper class for identifying unordered min predicates.
+struct ufmin_pred_ty {
+ static bool match(FCmpInst::Predicate Pred) {
+ return Pred == CmpInst::FCMP_ULT || Pred == CmpInst::FCMP_ULE;
+ }
+};
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty> m_SMax(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, smax_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty> m_SMin(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, smin_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty> m_UMax(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, umax_pred_ty>(L, R);
+}
+
+template <typename LHS, typename RHS>
+inline MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty> m_UMin(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<ICmpInst, LHS, RHS, umin_pred_ty>(L, R);
+}
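+
+// For example (a sketch; V is a hypothetical Value*):
+//
+//   Value *X, *Y;
+//   if (match(V, m_SMax(m_Value(X), m_Value(Y))))
+//     ... V is a select/icmp idiom computing smax(X, Y) ...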
+
+/// \brief Match an 'ordered' floating point maximum function.
+/// Floating point has one special value, 'NaN'; therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of
+/// a 'no-nans-fp-math' flag), a combination of an fcmp and a select has
+/// 'maximum' semantics. In the presence of 'NaN' we have to preserve the
+/// original select(fcmp(ogt/ge, L, R), L, R) semantics matched by this
+/// predicate.
+///
+///   m_OrdFMax(L, R) = max(L, R) iff L and R are not NaN
+///   m_OrdFMax(L, R) = R         iff L or R is NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty>(L, R);
+}
+
+/// \brief Match an 'ordered' floating point minimum function.
+/// Floating point has one special value, 'NaN'; therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of
+/// a 'no-nans-fp-math' flag), a combination of an fcmp and a select has
+/// 'minimum' semantics. In the presence of 'NaN' we have to preserve the
+/// original select(fcmp(olt/le, L, R), L, R) semantics matched by this
+/// predicate.
+///
+///   m_OrdFMin(L, R) = min(L, R) iff L and R are not NaN
+///   m_OrdFMin(L, R) = R         iff L or R is NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
+ const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty>(L, R);
+}
+
+/// \brief Match an 'unordered' floating point maximum function.
+/// Floating point has one special value, 'NaN'; therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of
+/// a 'no-nans-fp-math' flag), a combination of an fcmp and a select has
+/// 'maximum' semantics. In the presence of 'NaN' we have to preserve the
+/// original select(fcmp(ugt/ge, L, R), L, R) semantics matched by this
+/// predicate.
+///
+///   m_UnordFMax(L, R) = max(L, R) iff L and R are not NaN
+///   m_UnordFMax(L, R) = L         iff L or R is NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
+m_UnordFMax(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
+}
+
+//===----------------------------------------------------------------------===//
+// Matchers for overflow check patterns: e.g. (a + b) u< a
+//
+
+template <typename LHS_t, typename RHS_t, typename Sum_t>
+struct UAddWithOverflow_match {
+ LHS_t L;
+ RHS_t R;
+ Sum_t S;
+
+ UAddWithOverflow_match(const LHS_t &L, const RHS_t &R, const Sum_t &S)
+ : L(L), R(R), S(S) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ Value *ICmpLHS, *ICmpRHS;
+ ICmpInst::Predicate Pred;
+ if (!m_ICmp(Pred, m_Value(ICmpLHS), m_Value(ICmpRHS)).match(V))
+ return false;
+
+ Value *AddLHS, *AddRHS;
+ auto AddExpr = m_Add(m_Value(AddLHS), m_Value(AddRHS));
+
+ // (a + b) u< a, (a + b) u< b
+ if (Pred == ICmpInst::ICMP_ULT)
+ if (AddExpr.match(ICmpLHS) && (ICmpRHS == AddLHS || ICmpRHS == AddRHS))
+ return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpLHS);
+
+ // a >u (a + b), b >u (a + b)
+ if (Pred == ICmpInst::ICMP_UGT)
+ if (AddExpr.match(ICmpRHS) && (ICmpLHS == AddLHS || ICmpLHS == AddRHS))
+ return L.match(AddLHS) && R.match(AddRHS) && S.match(ICmpRHS);
+
+ return false;
+ }
+};
+
+/// \brief Match an icmp instruction checking for unsigned overflow on addition.
+///
+/// S is matched to the addition whose result is being checked for overflow, and
+/// L and R are matched to the LHS and RHS of S.
+template <typename LHS_t, typename RHS_t, typename Sum_t>
+UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>
+m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
+ return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
+}
+
+/// \brief Match an 'unordered' floating point minimum function.
+/// Floating point has one special value, 'NaN'; therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of
+/// a 'no-nans-fp-math' flag), a combination of an fcmp and a select has
+/// 'minimum' semantics. In the presence of 'NaN' we have to preserve the
+/// original select(fcmp(ult/le, L, R), L, R) semantics matched by this
+/// predicate.
+///
+///   m_UnordFMin(L, R) = min(L, R) iff L and R are not NaN
+///   m_UnordFMin(L, R) = L         iff L or R is NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
+m_UnordFMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
+}
+
+template <typename Opnd_t> struct Argument_match {
+ unsigned OpI;
+ Opnd_t Val;
+ Argument_match(unsigned OpIdx, const Opnd_t &V) : OpI(OpIdx), Val(V) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ CallSite CS(V);
+ return CS.isCall() && Val.match(CS.getArgument(OpI));
+ }
+};
+
+/// \brief Match an argument.
+template <unsigned OpI, typename Opnd_t>
+inline Argument_match<Opnd_t> m_Argument(const Opnd_t &Op) {
+ return Argument_match<Opnd_t>(OpI, Op);
+}
+
+/// \brief Intrinsic matchers.
+struct IntrinsicID_match {
+ unsigned ID;
+ IntrinsicID_match(Intrinsic::ID IntrID) : ID(IntrID) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ if (const auto *CI = dyn_cast<CallInst>(V))
+ if (const auto *F = CI->getCalledFunction())
+ return F->getIntrinsicID() == ID;
+ return false;
+ }
+};
+
+/// Intrinsic matchers are combinations of ID matchers and argument matchers.
+/// Higher-arity matchers are defined recursively in terms of AND-ing them
+/// with lower-arity matchers. Here are some convenient typedefs for up to
+/// several arguments; more can be added as needed.
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void, typename T10 = void>
+struct m_Intrinsic_Ty;
+template <typename T0> struct m_Intrinsic_Ty<T0> {
+ typedef match_combine_and<IntrinsicID_match, Argument_match<T0>> Ty;
+};
+template <typename T0, typename T1> struct m_Intrinsic_Ty<T0, T1> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0>::Ty, Argument_match<T1>>
+ Ty;
+};
+template <typename T0, typename T1, typename T2>
+struct m_Intrinsic_Ty<T0, T1, T2> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1>::Ty,
+ Argument_match<T2>> Ty;
+};
+template <typename T0, typename T1, typename T2, typename T3>
+struct m_Intrinsic_Ty<T0, T1, T2, T3> {
+ typedef match_combine_and<typename m_Intrinsic_Ty<T0, T1, T2>::Ty,
+ Argument_match<T3>> Ty;
+};
+
+/// \brief Match intrinsic calls like this:
+/// m_Intrinsic<Intrinsic::fabs>(m_Value(X))
+template <Intrinsic::ID IntrID> inline IntrinsicID_match m_Intrinsic() {
+ return IntrinsicID_match(IntrID);
+}
+
+template <Intrinsic::ID IntrID, typename T0>
+inline typename m_Intrinsic_Ty<T0>::Ty m_Intrinsic(const T0 &Op0) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(), m_Argument<0>(Op0));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1>
+inline typename m_Intrinsic_Ty<T0, T1>::Ty m_Intrinsic(const T0 &Op0,
+ const T1 &Op1) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0), m_Argument<1>(Op1));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2>
+inline typename m_Intrinsic_Ty<T0, T1, T2>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1), m_Argument<2>(Op2));
+}
+
+template <Intrinsic::ID IntrID, typename T0, typename T1, typename T2,
+ typename T3>
+inline typename m_Intrinsic_Ty<T0, T1, T2, T3>::Ty
+m_Intrinsic(const T0 &Op0, const T1 &Op1, const T2 &Op2, const T3 &Op3) {
+ return m_CombineAnd(m_Intrinsic<IntrID>(Op0, Op1, Op2), m_Argument<3>(Op3));
+}
+
+// Helper intrinsic matching specializations.
+template <typename Opnd0>
+inline typename m_Intrinsic_Ty<Opnd0>::Ty m_BSwap(const Opnd0 &Op0) {
+ return m_Intrinsic<Intrinsic::bswap>(Op0);
+}
+
+template <typename Opnd0, typename Opnd1>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMin(const Opnd0 &Op0,
+ const Opnd1 &Op1) {
+ return m_Intrinsic<Intrinsic::minnum>(Op0, Op1);
+}
+
+template <typename Opnd0, typename Opnd1>
+inline typename m_Intrinsic_Ty<Opnd0, Opnd1>::Ty m_FMax(const Opnd0 &Op0,
+ const Opnd1 &Op1) {
+ return m_Intrinsic<Intrinsic::maxnum>(Op0, Op1);
+}
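+
+// For example, an intrinsic matcher combines the intrinsic ID with
+// per-argument matchers (a sketch; V is a hypothetical Value*):
+//
+//   Value *X;
+//   if (match(V, m_Intrinsic<Intrinsic::fabs>(m_Value(X))))
+//     ... V is a call to llvm.fabs with X as its argument ...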
+
+template <typename Opnd_t> struct Signum_match {
+ Opnd_t Val;
+ Signum_match(const Opnd_t &V) : Val(V) {}
+
+ template <typename OpTy> bool match(OpTy *V) {
+ unsigned TypeSize = V->getType()->getScalarSizeInBits();
+ if (TypeSize == 0)
+ return false;
+
+ unsigned ShiftWidth = TypeSize - 1;
+ Value *OpL = nullptr, *OpR = nullptr;
+
+ // This is the representation of signum we match:
+ //
+ // signum(x) == (x >> 63) | (-x >>u 63)
+ //
+ // An i1 value is its own signum, so it's correct to match
+ //
+ // signum(x) == (x >> 0) | (-x >>u 0)
+ //
+ // for i1 values.
+
+ auto LHS = m_AShr(m_Value(OpL), m_SpecificInt(ShiftWidth));
+ auto RHS = m_LShr(m_Neg(m_Value(OpR)), m_SpecificInt(ShiftWidth));
+ auto Signum = m_Or(LHS, RHS);
+
+ return Signum.match(V) && OpL == OpR && Val.match(OpL);
+ }
+};
+
+/// \brief Matches a signum pattern.
+///
+/// signum(x) =
+/// x > 0 -> 1
+/// x == 0 -> 0
+/// x < 0 -> -1
+template <typename Val_t> inline Signum_match<Val_t> m_Signum(const Val_t &V) {
+ return Signum_match<Val_t>(V);
+}
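+
+// For example (a sketch; V is a hypothetical i64 Value*):
+//
+//   Value *X;
+//   if (match(V, m_Signum(m_Value(X))))
+//     ... V computes (X >> 63) | (-X >>u 63), i.e. signum(X) ...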
+
+} // end namespace PatternMatch
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/PredIteratorCache.h b/contrib/llvm/include/llvm/IR/PredIteratorCache.h
new file mode 100644
index 0000000..118310a
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/PredIteratorCache.h
@@ -0,0 +1,79 @@
+//===- PredIteratorCache.h - pred_iterator Cache ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PredIteratorCache class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_PREDITERATORCACHE_H
+#define LLVM_IR_PREDITERATORCACHE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// PredIteratorCache - This class is an extremely trivial cache for
+/// predecessor iterator queries. This is useful for code that repeatedly
+/// wants the predecessor list for the same blocks.
+class PredIteratorCache {
+ /// BlockToPredsMap - Pointer to null-terminated list.
+ DenseMap<BasicBlock *, BasicBlock **> BlockToPredsMap;
+ DenseMap<BasicBlock *, unsigned> BlockToPredCountMap;
+
+ /// Memory - This is the space that holds cached preds.
+ BumpPtrAllocator Memory;
+
+private:
+ /// GetPreds - Get a cached list for the null-terminated predecessor list of
+ /// the specified block. This can be used in a loop like this:
+ /// for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI)
+ /// use(*PI);
+ /// instead of:
+ /// for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
+ BasicBlock **GetPreds(BasicBlock *BB) {
+ BasicBlock **&Entry = BlockToPredsMap[BB];
+ if (Entry)
+ return Entry;
+
+ SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB));
+ PredCache.push_back(nullptr); // null terminator.
+
+ BlockToPredCountMap[BB] = PredCache.size() - 1;
+
+ Entry = Memory.Allocate<BasicBlock *>(PredCache.size());
+ std::copy(PredCache.begin(), PredCache.end(), Entry);
+ return Entry;
+ }
+
+ unsigned GetNumPreds(BasicBlock *BB) {
+ GetPreds(BB);
+ return BlockToPredCountMap[BB];
+ }
+
+public:
+ size_t size(BasicBlock *BB) { return GetNumPreds(BB); }
+ ArrayRef<BasicBlock *> get(BasicBlock *BB) {
+ return makeArrayRef(GetPreds(BB), GetNumPreds(BB));
+ }
+
+ /// clear - Remove all information.
+ void clear() {
+ BlockToPredsMap.clear();
+ BlockToPredCountMap.clear();
+ Memory.Reset();
+ }
+};
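+
+// For example (a sketch; BB is a hypothetical BasicBlock*):
+//
+//   PredIteratorCache PredCache;
+//   for (BasicBlock *Pred : PredCache.get(BB))
+//     ... visit each predecessor; repeated queries for BB hit the cache ...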
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Statepoint.h b/contrib/llvm/include/llvm/IR/Statepoint.h
new file mode 100644
index 0000000..7310c56
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Statepoint.h
@@ -0,0 +1,411 @@
+//===-- llvm/IR/Statepoint.h - gc.statepoint utilities ------ --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains utility functions and a wrapper class analogous to
+// CallSite for accessing the fields of gc.statepoint, gc.relocate, and
+// gc.result intrinsics
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_STATEPOINT_H
+#define LLVM_IR_STATEPOINT_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+/// The statepoint intrinsic accepts a set of flags as its third argument.
+/// Valid values come out of this set.
+enum class StatepointFlags {
+ None = 0,
+ GCTransition = 1, ///< Indicates that this statepoint is a transition from
+ ///< GC-aware code to code that is not GC-aware.
+
+ MaskAll = GCTransition ///< A bitmask that includes all valid flags.
+};
+
+class GCRelocateOperands;
+class ImmutableStatepoint;
+
+bool isStatepoint(const ImmutableCallSite &CS);
+bool isStatepoint(const Value *V);
+bool isStatepoint(const Value &V);
+
+bool isGCRelocate(const Value *V);
+bool isGCRelocate(const ImmutableCallSite &CS);
+
+bool isGCResult(const Value *V);
+bool isGCResult(const ImmutableCallSite &CS);
+
+/// Analogous to CallSiteBase, this provides most of the actual
+/// functionality for Statepoint and ImmutableStatepoint. It is
+/// templatized to allow easy specialization of const and non-const
+/// concrete subtypes. This is structured analogously to CallSite
+/// rather than the IntrinsicInst.h helpers since we want to support
+/// invokable statepoints in the near future.
+template <typename FunTy, typename InstructionTy, typename ValueTy,
+ typename CallSiteTy>
+class StatepointBase {
+ CallSiteTy StatepointCS;
+ void *operator new(size_t, unsigned) = delete;
+ void *operator new(size_t s) = delete;
+
+protected:
+ explicit StatepointBase(InstructionTy *I) {
+ if (isStatepoint(I)) {
+ StatepointCS = CallSiteTy(I);
+ assert(StatepointCS && "isStatepoint implies CallSite");
+ }
+ }
+ explicit StatepointBase(CallSiteTy CS) {
+ if (isStatepoint(CS))
+ StatepointCS = CS;
+ }
+
+public:
+ typedef typename CallSiteTy::arg_iterator arg_iterator;
+
+ enum {
+ IDPos = 0,
+ NumPatchBytesPos = 1,
+ CalledFunctionPos = 2,
+ NumCallArgsPos = 3,
+ FlagsPos = 4,
+ CallArgsBeginPos = 5,
+ };
+
+ explicit operator bool() const {
+ // We do not assign non-statepoint CallSites to StatepointCS.
+ return (bool)StatepointCS;
+ }
+
+ /// Return the underlying CallSite.
+ CallSiteTy getCallSite() const {
+ assert(*this && "check validity first!");
+ return StatepointCS;
+ }
+
+ uint64_t getFlags() const {
+ return cast<ConstantInt>(getCallSite().getArgument(FlagsPos))
+ ->getZExtValue();
+ }
+
+ /// Return the ID associated with this statepoint.
+ uint64_t getID() const {
+ const Value *IDVal = getCallSite().getArgument(IDPos);
+ return cast<ConstantInt>(IDVal)->getZExtValue();
+ }
+
+ /// Return the number of patchable bytes associated with this statepoint.
+ uint32_t getNumPatchBytes() const {
+ const Value *NumPatchBytesVal = getCallSite().getArgument(NumPatchBytesPos);
+ uint64_t NumPatchBytes =
+ cast<ConstantInt>(NumPatchBytesVal)->getZExtValue();
+ assert(isInt<32>(NumPatchBytes) && "should fit in 32 bits!");
+ return NumPatchBytes;
+ }
+
+ /// Return the value actually being called or invoked.
+ ValueTy *getCalledValue() const {
+ return getCallSite().getArgument(CalledFunctionPos);
+ }
+
+ InstructionTy *getInstruction() const {
+ return getCallSite().getInstruction();
+ }
+
+ /// Return the function being called if this is a direct call, otherwise
+ /// return null (if it's an indirect call).
+ FunTy *getCalledFunction() const {
+ return dyn_cast<Function>(getCalledValue());
+ }
+
+ /// Return the caller function for this statepoint.
+ FunTy *getCaller() const { return getCallSite().getCaller(); }
+
+ /// Determine if the statepoint cannot unwind.
+ bool doesNotThrow() const {
+ Function *F = getCalledFunction();
+ return getCallSite().doesNotThrow() || (F ? F->doesNotThrow() : false);
+ }
+
+ /// Return the type of the value returned by the call underlying the
+ /// statepoint.
+ Type *getActualReturnType() const {
+ auto *FTy = cast<FunctionType>(
+ cast<PointerType>(getCalledValue()->getType())->getElementType());
+ return FTy->getReturnType();
+ }
+
+ /// Number of arguments to be passed to the actual callee.
+ int getNumCallArgs() const {
+ const Value *NumCallArgsVal = getCallSite().getArgument(NumCallArgsPos);
+ return cast<ConstantInt>(NumCallArgsVal)->getZExtValue();
+ }
+
+ size_t arg_size() const { return getNumCallArgs(); }
+ typename CallSiteTy::arg_iterator arg_begin() const {
+ assert(CallArgsBeginPos <= (int)getCallSite().arg_size());
+ return getCallSite().arg_begin() + CallArgsBeginPos;
+ }
+ typename CallSiteTy::arg_iterator arg_end() const {
+ auto I = arg_begin() + arg_size();
+ assert((getCallSite().arg_end() - I) >= 0);
+ return I;
+ }
+
+ ValueTy *getArgument(unsigned Index) {
+ assert(Index < arg_size() && "out of bounds!");
+ return *(arg_begin() + Index);
+ }
+
+ /// range adapter for call arguments
+ iterator_range<arg_iterator> call_args() const {
+ return make_range(arg_begin(), arg_end());
+ }
+
+ /// \brief Return true if the call or the callee has the given attribute.
+ bool paramHasAttr(unsigned i, Attribute::AttrKind A) const {
+ Function *F = getCalledFunction();
+ return getCallSite().paramHasAttr(i + CallArgsBeginPos, A) ||
+ (F ? F->getAttributes().hasAttribute(i, A) : false);
+ }
+
+ /// Number of GC transition args.
+ int getNumTotalGCTransitionArgs() const {
+ const Value *NumGCTransitionArgs = *arg_end();
+ return cast<ConstantInt>(NumGCTransitionArgs)->getZExtValue();
+ }
+ typename CallSiteTy::arg_iterator gc_transition_args_begin() const {
+ auto I = arg_end() + 1;
+ assert((getCallSite().arg_end() - I) >= 0);
+ return I;
+ }
+ typename CallSiteTy::arg_iterator gc_transition_args_end() const {
+ auto I = gc_transition_args_begin() + getNumTotalGCTransitionArgs();
+ assert((getCallSite().arg_end() - I) >= 0);
+ return I;
+ }
+
+ /// range adapter for GC transition arguments
+ iterator_range<arg_iterator> gc_transition_args() const {
+ return make_range(gc_transition_args_begin(), gc_transition_args_end());
+ }
+
+ /// Number of additional arguments excluding those intended
+ /// for garbage collection.
+ int getNumTotalVMSArgs() const {
+ const Value *NumVMSArgs = *gc_transition_args_end();
+ return cast<ConstantInt>(NumVMSArgs)->getZExtValue();
+ }
+
+ typename CallSiteTy::arg_iterator vm_state_begin() const {
+ auto I = gc_transition_args_end() + 1;
+ assert((getCallSite().arg_end() - I) >= 0);
+ return I;
+ }
+ typename CallSiteTy::arg_iterator vm_state_end() const {
+ auto I = vm_state_begin() + getNumTotalVMSArgs();
+ assert((getCallSite().arg_end() - I) >= 0);
+ return I;
+ }
+
+ /// range adapter for vm state arguments
+ iterator_range<arg_iterator> vm_state_args() const {
+ return make_range(vm_state_begin(), vm_state_end());
+ }
+
+ typename CallSiteTy::arg_iterator gc_args_begin() const {
+ return vm_state_end();
+ }
+ typename CallSiteTy::arg_iterator gc_args_end() const {
+ return getCallSite().arg_end();
+ }
+
+ unsigned gcArgsStartIdx() const {
+ return gc_args_begin() - getInstruction()->op_begin();
+ }
+
+ /// range adapter for gc arguments
+ iterator_range<arg_iterator> gc_args() const {
+ return make_range(gc_args_begin(), gc_args_end());
+ }
+
+ /// Get a list of all gc.relocates linked to this statepoint.
+ /// The list may contain several relocations for the same base/derived
+ /// pair; for example, this can happen due to relocations on the unwinding
+ /// path of an invoke.
+ std::vector<GCRelocateOperands> getRelocates() const;
+
+ /// Get the experimental_gc_result call tied to this statepoint. Can be
+ /// nullptr if there isn't a gc_result tied to this statepoint. Guaranteed to
+ /// be a CallInst if non-null.
+ InstructionTy *getGCResult() const {
+ for (auto *U : getInstruction()->users())
+ if (isGCResult(U))
+ return cast<CallInst>(U);
+
+ return nullptr;
+ }
+
+#ifndef NDEBUG
+ /// Asserts if this statepoint is malformed. Common cases for failure
+ /// include incorrect length prefixes for variable length sections or
+ /// illegal values for parameters.
+ void verify() {
+ assert(getNumCallArgs() >= 0 &&
+ "number of arguments to actually callee can't be negative");
+
+ // The internal asserts in the iterator accessors do the rest.
+ (void)arg_begin();
+ (void)arg_end();
+ (void)gc_transition_args_begin();
+ (void)gc_transition_args_end();
+ (void)vm_state_begin();
+ (void)vm_state_end();
+ (void)gc_args_begin();
+ (void)gc_args_end();
+ }
+#endif
+};
+
+/// A specialization of its base class for read-only access
+/// to a gc.statepoint.
+class ImmutableStatepoint
+ : public StatepointBase<const Function, const Instruction, const Value,
+ ImmutableCallSite> {
+ typedef StatepointBase<const Function, const Instruction, const Value,
+ ImmutableCallSite> Base;
+
+public:
+ explicit ImmutableStatepoint(const Instruction *I) : Base(I) {}
+ explicit ImmutableStatepoint(ImmutableCallSite CS) : Base(CS) {}
+};
+
+/// A specialization of its base class for read-write access
+/// to a gc.statepoint.
+class Statepoint
+ : public StatepointBase<Function, Instruction, Value, CallSite> {
+ typedef StatepointBase<Function, Instruction, Value, CallSite> Base;
+
+public:
+ explicit Statepoint(Instruction *I) : Base(I) {}
+ explicit Statepoint(CallSite CS) : Base(CS) {}
+};
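+
+// For example (a sketch; I is a hypothetical Instruction*):
+//
+//   if (Statepoint SP = Statepoint(I))
+//     for (GCRelocateOperands Relocate : SP.getRelocates())
+//       ... visit each gc.relocate tied to this statepoint ...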
+
+/// Wraps a call to a gc.relocate and provides access to its operands.
+/// TODO: This should likely be refactored to resemble the wrappers in
+/// IntrinsicInst.h.
+class GCRelocateOperands {
+ ImmutableCallSite RelocateCS;
+
+public:
+ GCRelocateOperands(const User *U) : RelocateCS(U) { assert(isGCRelocate(U)); }
+ GCRelocateOperands(const Instruction *inst) : RelocateCS(inst) {
+ assert(isGCRelocate(inst));
+ }
+ GCRelocateOperands(CallSite CS) : RelocateCS(CS) { assert(isGCRelocate(CS)); }
+
+ /// Return true if this relocate is tied to the invoke statepoint.
+ /// This includes relocates which are on the unwinding path.
+ bool isTiedToInvoke() const {
+ const Value *Token = RelocateCS.getArgument(0);
+
+ return isa<LandingPadInst>(Token) || isa<InvokeInst>(Token);
+ }
+
+  /// Get the enclosed relocate intrinsic.
+ ImmutableCallSite getUnderlyingCallSite() { return RelocateCS; }
+
+ /// The statepoint with which this gc.relocate is associated.
+ const Instruction *getStatepoint() {
+ const Value *Token = RelocateCS.getArgument(0);
+
+    // This handles both relocates for call statepoints and relocates on the
+    // normal path of an invoke statepoint.
+ if (!isa<LandingPadInst>(Token)) {
+ return cast<Instruction>(Token);
+ }
+
+    // This relocate is on the exceptional path of an invoke statepoint.
+ const BasicBlock *InvokeBB =
+ cast<Instruction>(Token)->getParent()->getUniquePredecessor();
+
+ assert(InvokeBB && "safepoints should have unique landingpads");
+ assert(InvokeBB->getTerminator() &&
+ "safepoint block should be well formed");
+ assert(isStatepoint(InvokeBB->getTerminator()));
+
+ return InvokeBB->getTerminator();
+ }
+
+  /// The index into the associated statepoint's argument list
+ /// which contains the base pointer of the pointer whose
+ /// relocation this gc.relocate describes.
+ unsigned getBasePtrIndex() {
+ return cast<ConstantInt>(RelocateCS.getArgument(1))->getZExtValue();
+ }
+
+  /// The index into the associated statepoint's argument list which
+ /// contains the pointer whose relocation this gc.relocate describes.
+ unsigned getDerivedPtrIndex() {
+ return cast<ConstantInt>(RelocateCS.getArgument(2))->getZExtValue();
+ }
+
+ Value *getBasePtr() {
+ ImmutableCallSite CS(getStatepoint());
+ return *(CS.arg_begin() + getBasePtrIndex());
+ }
+
+ Value *getDerivedPtr() {
+ ImmutableCallSite CS(getStatepoint());
+ return *(CS.arg_begin() + getDerivedPtrIndex());
+ }
+};
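+
+// Sketch (illustrative; `U` is an assumed User* for which isGCRelocate(U)
+// holds):
+//
+//   GCRelocateOperands Reloc(U);
+//   Value *Base = Reloc.getBasePtr();
+//   Value *Derived = Reloc.getDerivedPtr();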
+
+template <typename FunTy, typename InstructionTy, typename ValueTy,
+ typename CallSiteTy>
+std::vector<GCRelocateOperands>
+StatepointBase<FunTy, InstructionTy, ValueTy, CallSiteTy>::getRelocates()
+ const {
+
+ std::vector<GCRelocateOperands> Result;
+
+ CallSiteTy StatepointCS = getCallSite();
+
+ // Search for relocated pointers. Note that working backwards from the
+ // gc_relocates ensures that we only get pairs which are actually relocated
+ // and used after the statepoint.
+ for (const User *U : getInstruction()->users())
+ if (isGCRelocate(U))
+ Result.push_back(GCRelocateOperands(U));
+
+ if (!StatepointCS.isInvoke())
+ return Result;
+
+  // We need to scan through exceptional relocations if this is an invoke
+  // statepoint.
+ LandingPadInst *LandingPad =
+ cast<InvokeInst>(getInstruction())->getLandingPadInst();
+
+ // Search for gc relocates that are attached to this landingpad.
+ for (const User *LandingPadUser : LandingPad->users()) {
+ if (isGCRelocate(LandingPadUser))
+ Result.push_back(GCRelocateOperands(LandingPadUser));
+ }
+ return Result;
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/SymbolTableListTraits.h b/contrib/llvm/include/llvm/IR/SymbolTableListTraits.h
new file mode 100644
index 0000000..5fc48d1
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/SymbolTableListTraits.h
@@ -0,0 +1,122 @@
+//===-- llvm/SymbolTableListTraits.h - Traits for iplist --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic class that is used to implement the automatic
+// symbol table manipulation that occurs when you put (for example) a named
+// instruction into a basic block.
+//
+// The way that this is implemented is by using a special traits class with the
+// intrusive list that makes up the list of instructions in a basic block. When
+// a new element is added to the list of instructions, the traits class is
+// notified, allowing the symbol table to be updated.
+//
+// This generic class implements the traits class. It must be generic so that
+// it can work for everything that uses it, which includes lists of
+// instructions, basic blocks, arguments, functions, global variables, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_SYMBOLTABLELISTTRAITS_H
+#define LLVM_IR_SYMBOLTABLELISTTRAITS_H
+
+#include "llvm/ADT/ilist.h"
+
+namespace llvm {
+class ValueSymbolTable;
+
+template <typename NodeTy> class ilist_iterator;
+template <typename NodeTy, typename Traits> class iplist;
+template <typename Ty> struct ilist_traits;
+
+template <typename NodeTy>
+struct SymbolTableListSentinelTraits
+ : public ilist_embedded_sentinel_traits<NodeTy> {};
+
+/// Template metafunction to get the parent type for a symbol table list.
+///
+/// Implementations create a typedef called \c type so that we only need a
+/// single template parameter for the list and traits.
+template <typename NodeTy> struct SymbolTableListParentType {};
+class Argument;
+class BasicBlock;
+class Function;
+class Instruction;
+class GlobalVariable;
+class GlobalAlias;
+class Module;
+#define DEFINE_SYMBOL_TABLE_PARENT_TYPE(NODE, PARENT) \
+ template <> struct SymbolTableListParentType<NODE> { typedef PARENT type; };
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Instruction, BasicBlock)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(BasicBlock, Function)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Argument, Function)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(Function, Module)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalVariable, Module)
+DEFINE_SYMBOL_TABLE_PARENT_TYPE(GlobalAlias, Module)
+#undef DEFINE_SYMBOL_TABLE_PARENT_TYPE
+
+template <typename NodeTy> class SymbolTableList;
+
+// ValueSubClass - The type of objects that I hold, e.g. Instruction.
+// ItemParentClass - The type of object that owns the list, e.g. BasicBlock.
+//
+template <typename ValueSubClass>
+class SymbolTableListTraits
+ : public ilist_nextprev_traits<ValueSubClass>,
+ public SymbolTableListSentinelTraits<ValueSubClass>,
+ public ilist_node_traits<ValueSubClass> {
+ typedef SymbolTableList<ValueSubClass> ListTy;
+ typedef
+ typename SymbolTableListParentType<ValueSubClass>::type ItemParentClass;
+
+public:
+ SymbolTableListTraits() {}
+
+private:
+ /// getListOwner - Return the object that owns this list. If this is a list
+ /// of instructions, it returns the BasicBlock that owns them.
+  ItemParentClass *getListOwner() {
+    // Compute the byte offset of this sublist within its owner using the
+    // pointer-to-member returned by getSublistAccess, then step back from
+    // the list anchor (this) to recover the owning object.
+    size_t Offset(size_t(&((ItemParentClass*)nullptr->*ItemParentClass::
+                           getSublistAccess(static_cast<ValueSubClass*>(nullptr)))));
+    ListTy *Anchor(static_cast<ListTy *>(this));
+    return reinterpret_cast<ItemParentClass*>(reinterpret_cast<char*>(Anchor)-
+                                              Offset);
+  }
+
+ static ListTy &getList(ItemParentClass *Par) {
+ return Par->*(Par->getSublistAccess((ValueSubClass*)nullptr));
+ }
+
+ static ValueSymbolTable *getSymTab(ItemParentClass *Par) {
+ return Par ? toPtr(Par->getValueSymbolTable()) : nullptr;
+ }
+
+public:
+ void addNodeToList(ValueSubClass *V);
+ void removeNodeFromList(ValueSubClass *V);
+ void transferNodesFromList(SymbolTableListTraits &L2,
+ ilist_iterator<ValueSubClass> first,
+ ilist_iterator<ValueSubClass> last);
+//private:
+ template<typename TPtr>
+ void setSymTabObject(TPtr *, TPtr);
+ static ValueSymbolTable *toPtr(ValueSymbolTable *P) { return P; }
+ static ValueSymbolTable *toPtr(ValueSymbolTable &R) { return &R; }
+};
+
+/// List that automatically updates parent links and symbol tables.
+///
+/// When nodes are inserted into and removed from this list, the associated
+/// symbol table will be automatically updated. Similarly, parent links get
+/// updated automatically.
+template <typename NodeTy>
+class SymbolTableList : public iplist<NodeTy, SymbolTableListTraits<NodeTy>> {};
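+
+// Illustrative sketch: pushing a named instruction onto a basic block's
+// SymbolTableList registers its name in the enclosing function's symbol
+// table (`BB` and `Inst` are assumed to be a BasicBlock* and Instruction*):
+//
+//   BB->getInstList().push_back(Inst);
+//   // Inst's name is now visible in BB->getParent()'s ValueSymbolTable.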
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/TrackingMDRef.h b/contrib/llvm/include/llvm/IR/TrackingMDRef.h
new file mode 100644
index 0000000..97efaff
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/TrackingMDRef.h
@@ -0,0 +1,166 @@
+//===- llvm/IR/TrackingMDRef.h - Tracking Metadata references ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// References to metadata that track RAUW.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TRACKINGMDREF_H
+#define LLVM_IR_TRACKINGMDREF_H
+
+#include "llvm/IR/Metadata.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+
+/// \brief Tracking metadata reference.
+///
+/// This class behaves like \a TrackingVH, but for metadata.
+class TrackingMDRef {
+ Metadata *MD;
+
+public:
+ TrackingMDRef() : MD(nullptr) {}
+ explicit TrackingMDRef(Metadata *MD) : MD(MD) { track(); }
+
+ TrackingMDRef(TrackingMDRef &&X) : MD(X.MD) { retrack(X); }
+ TrackingMDRef(const TrackingMDRef &X) : MD(X.MD) { track(); }
+ TrackingMDRef &operator=(TrackingMDRef &&X) {
+ if (&X == this)
+ return *this;
+
+ untrack();
+ MD = X.MD;
+ retrack(X);
+ return *this;
+ }
+ TrackingMDRef &operator=(const TrackingMDRef &X) {
+ if (&X == this)
+ return *this;
+
+ untrack();
+ MD = X.MD;
+ track();
+ return *this;
+ }
+ ~TrackingMDRef() { untrack(); }
+
+ Metadata *get() const { return MD; }
+ operator Metadata *() const { return get(); }
+ Metadata *operator->() const { return get(); }
+ Metadata &operator*() const { return *get(); }
+
+ void reset() {
+ untrack();
+ MD = nullptr;
+ }
+ void reset(Metadata *MD) {
+ untrack();
+ this->MD = MD;
+ track();
+ }
+
+ /// \brief Check whether this has a trivial destructor.
+ ///
+ /// If \c MD isn't replaceable, the destructor will be a no-op.
+ bool hasTrivialDestructor() const {
+ return !MD || !MetadataTracking::isReplaceable(*MD);
+ }
+
+ bool operator==(const TrackingMDRef &X) const { return MD == X.MD; }
+ bool operator!=(const TrackingMDRef &X) const { return MD != X.MD; }
+
+private:
+ void track() {
+ if (MD)
+ MetadataTracking::track(MD);
+ }
+ void untrack() {
+ if (MD)
+ MetadataTracking::untrack(MD);
+ }
+ void retrack(TrackingMDRef &X) {
+ assert(MD == X.MD && "Expected values to match");
+ if (X.MD) {
+ MetadataTracking::retrack(X.MD, MD);
+ X.MD = nullptr;
+ }
+ }
+};
+
+/// \brief Typed tracking ref.
+///
+/// Tracks references of a particular type. This is useful for \a
+/// MDNode and \a ValueAsMetadata.
+template <class T> class TypedTrackingMDRef {
+ TrackingMDRef Ref;
+
+public:
+ TypedTrackingMDRef() {}
+ explicit TypedTrackingMDRef(T *MD) : Ref(static_cast<Metadata *>(MD)) {}
+
+ TypedTrackingMDRef(TypedTrackingMDRef &&X) : Ref(std::move(X.Ref)) {}
+ TypedTrackingMDRef(const TypedTrackingMDRef &X) : Ref(X.Ref) {}
+ TypedTrackingMDRef &operator=(TypedTrackingMDRef &&X) {
+ Ref = std::move(X.Ref);
+ return *this;
+ }
+ TypedTrackingMDRef &operator=(const TypedTrackingMDRef &X) {
+ Ref = X.Ref;
+ return *this;
+ }
+
+ T *get() const { return (T *)Ref.get(); }
+ operator T *() const { return get(); }
+ T *operator->() const { return get(); }
+ T &operator*() const { return *get(); }
+
+ bool operator==(const TypedTrackingMDRef &X) const { return Ref == X.Ref; }
+ bool operator!=(const TypedTrackingMDRef &X) const { return Ref != X.Ref; }
+
+ void reset() { Ref.reset(); }
+ void reset(T *MD) { Ref.reset(static_cast<Metadata *>(MD)); }
+
+ /// \brief Check whether this has a trivial destructor.
+ bool hasTrivialDestructor() const { return Ref.hasTrivialDestructor(); }
+};
+
+typedef TypedTrackingMDRef<MDNode> TrackingMDNodeRef;
+typedef TypedTrackingMDRef<ValueAsMetadata> TrackingValueAsMetadataRef;
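+
+// A sketch of the intended tracking behavior (illustrative; `TempNode` and
+// `Resolved` are assumed MDNode* values, TempNode being a temporary node):
+//
+//   TrackingMDNodeRef Ref(TempNode);
+//   TempNode->replaceAllUsesWith(Resolved);
+//   // Ref.get() now returns Resolved rather than the stale TempNode.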
+
+// Expose the underlying metadata to casting.
+template <> struct simplify_type<TrackingMDRef> {
+ typedef Metadata *SimpleType;
+ static SimpleType getSimplifiedValue(TrackingMDRef &MD) { return MD.get(); }
+};
+
+template <> struct simplify_type<const TrackingMDRef> {
+ typedef Metadata *SimpleType;
+ static SimpleType getSimplifiedValue(const TrackingMDRef &MD) {
+ return MD.get();
+ }
+};
+
+template <class T> struct simplify_type<TypedTrackingMDRef<T>> {
+ typedef T *SimpleType;
+ static SimpleType getSimplifiedValue(TypedTrackingMDRef<T> &MD) {
+ return MD.get();
+ }
+};
+
+template <class T> struct simplify_type<const TypedTrackingMDRef<T>> {
+ typedef T *SimpleType;
+ static SimpleType getSimplifiedValue(const TypedTrackingMDRef<T> &MD) {
+ return MD.get();
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Type.h b/contrib/llvm/include/llvm/IR/Type.h
new file mode 100644
index 0000000..b2920dd
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Type.h
@@ -0,0 +1,480 @@
+//===-- llvm/Type.h - Classes for handling data types -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the Type class. For more "Type"
+// stuff, look in DerivedTypes.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPE_H
+#define LLVM_IR_TYPE_H
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+class PointerType;
+class IntegerType;
+class raw_ostream;
+class Module;
+class LLVMContext;
+class LLVMContextImpl;
+class StringRef;
+template<class GraphType> struct GraphTraits;
+
+/// The instances of the Type class are immutable: once they are created,
+/// they are never changed. Also note that only one instance of a particular
+/// type is ever created. Thus seeing if two types are equal is a matter of
+/// doing a trivial pointer comparison. To enforce that no two equal instances
+/// are created, Type instances can only be created via static factory methods
+/// in class Type and in derived classes. Once allocated, Types are never
+/// free'd.
+///
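+/// Because types are uniqued per LLVMContext, checking type equality is a
+/// pointer comparison; for example (an illustrative sketch, with Ctx an
+/// assumed LLVMContext):
+///
+/// \code{.cpp}
+///   Type *A = Type::getInt32Ty(Ctx);
+///   Type *B = Type::getInt32Ty(Ctx);
+///   assert(A == B && "i32 is uniqued within a context");
+/// \endcode
+///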
+class Type {
+public:
+ //===--------------------------------------------------------------------===//
+ /// Definitions of all of the base types for the Type system. Based on this
+ /// value, you can cast to a class defined in DerivedTypes.h.
+ /// Note: If you add an element to this, you need to add an element to the
+ /// Type::getPrimitiveType function, or else things will break!
+  /// Also update LLVMTypeKind and LLVMGetTypeKind() in the C binding.
+ ///
+ enum TypeID {
+ // PrimitiveTypes - make sure LastPrimitiveTyID stays up to date.
+ VoidTyID = 0, ///< 0: type with no size
+ HalfTyID, ///< 1: 16-bit floating point type
+ FloatTyID, ///< 2: 32-bit floating point type
+ DoubleTyID, ///< 3: 64-bit floating point type
+ X86_FP80TyID, ///< 4: 80-bit floating point type (X87)
+ FP128TyID, ///< 5: 128-bit floating point type (112-bit mantissa)
+ PPC_FP128TyID, ///< 6: 128-bit floating point type (two 64-bits, PowerPC)
+ LabelTyID, ///< 7: Labels
+ MetadataTyID, ///< 8: Metadata
+ X86_MMXTyID, ///< 9: MMX vectors (64 bits, X86 specific)
+ TokenTyID, ///< 10: Tokens
+
+ // Derived types... see DerivedTypes.h file.
+ // Make sure FirstDerivedTyID stays up to date!
+ IntegerTyID, ///< 11: Arbitrary bit width integers
+ FunctionTyID, ///< 12: Functions
+ StructTyID, ///< 13: Structures
+ ArrayTyID, ///< 14: Arrays
+ PointerTyID, ///< 15: Pointers
+ VectorTyID ///< 16: SIMD 'packed' format, or other vector type
+ };
+
+private:
+ /// Context - This refers to the LLVMContext in which this type was uniqued.
+ LLVMContext &Context;
+
+ TypeID ID : 8; // The current base type of this type.
+ unsigned SubclassData : 24; // Space for subclasses to store data.
+
+protected:
+ friend class LLVMContextImpl;
+ explicit Type(LLVMContext &C, TypeID tid)
+ : Context(C), ID(tid), SubclassData(0),
+ NumContainedTys(0), ContainedTys(nullptr) {}
+ ~Type() = default;
+
+ unsigned getSubclassData() const { return SubclassData; }
+
+ void setSubclassData(unsigned val) {
+ SubclassData = val;
+ // Ensure we don't have any accidental truncation.
+ assert(getSubclassData() == val && "Subclass data too large for field");
+ }
+
+ /// NumContainedTys - Keeps track of how many Type*'s there are in the
+ /// ContainedTys list.
+ unsigned NumContainedTys;
+
+ /// ContainedTys - A pointer to the array of Types contained by this Type.
+ /// For example, this includes the arguments of a function type, the elements
+ /// of a structure, the pointee of a pointer, the element type of an array,
+ /// etc. This pointer may be 0 for types that don't contain other types
+ /// (Integer, Double, Float).
+ Type * const *ContainedTys;
+
+public:
+ void print(raw_ostream &O, bool IsForDebug = false) const;
+ void dump() const;
+
+ /// getContext - Return the LLVMContext in which this type was uniqued.
+ LLVMContext &getContext() const { return Context; }
+
+ //===--------------------------------------------------------------------===//
+ // Accessors for working with types.
+ //
+
+ /// getTypeID - Return the type id for the type. This will return one
+ /// of the TypeID enum elements defined above.
+ ///
+ TypeID getTypeID() const { return ID; }
+
+ /// isVoidTy - Return true if this is 'void'.
+ bool isVoidTy() const { return getTypeID() == VoidTyID; }
+
+ /// isHalfTy - Return true if this is 'half', a 16-bit IEEE fp type.
+ bool isHalfTy() const { return getTypeID() == HalfTyID; }
+
+ /// isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
+ bool isFloatTy() const { return getTypeID() == FloatTyID; }
+
+ /// isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
+ bool isDoubleTy() const { return getTypeID() == DoubleTyID; }
+
+ /// isX86_FP80Ty - Return true if this is x86 long double.
+ bool isX86_FP80Ty() const { return getTypeID() == X86_FP80TyID; }
+
+ /// isFP128Ty - Return true if this is 'fp128'.
+ bool isFP128Ty() const { return getTypeID() == FP128TyID; }
+
+ /// isPPC_FP128Ty - Return true if this is powerpc long double.
+ bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; }
+
+ /// isFloatingPointTy - Return true if this is one of the six floating point
+ /// types
+ bool isFloatingPointTy() const {
+ return getTypeID() == HalfTyID || getTypeID() == FloatTyID ||
+ getTypeID() == DoubleTyID ||
+ getTypeID() == X86_FP80TyID || getTypeID() == FP128TyID ||
+ getTypeID() == PPC_FP128TyID;
+ }
+
+ const fltSemantics &getFltSemantics() const {
+ switch (getTypeID()) {
+ case HalfTyID: return APFloat::IEEEhalf;
+ case FloatTyID: return APFloat::IEEEsingle;
+ case DoubleTyID: return APFloat::IEEEdouble;
+ case X86_FP80TyID: return APFloat::x87DoubleExtended;
+ case FP128TyID: return APFloat::IEEEquad;
+ case PPC_FP128TyID: return APFloat::PPCDoubleDouble;
+ default: llvm_unreachable("Invalid floating type");
+ }
+ }
+
+ /// isX86_MMXTy - Return true if this is X86 MMX.
+ bool isX86_MMXTy() const { return getTypeID() == X86_MMXTyID; }
+
+ /// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
+ ///
+ bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); }
+
+ /// isLabelTy - Return true if this is 'label'.
+ bool isLabelTy() const { return getTypeID() == LabelTyID; }
+
+ /// isMetadataTy - Return true if this is 'metadata'.
+ bool isMetadataTy() const { return getTypeID() == MetadataTyID; }
+
+ /// isTokenTy - Return true if this is 'token'.
+ bool isTokenTy() const { return getTypeID() == TokenTyID; }
+
+ /// isIntegerTy - True if this is an instance of IntegerType.
+ ///
+ bool isIntegerTy() const { return getTypeID() == IntegerTyID; }
+
+ /// isIntegerTy - Return true if this is an IntegerType of the given width.
+ bool isIntegerTy(unsigned Bitwidth) const;
+
+ /// isIntOrIntVectorTy - Return true if this is an integer type or a vector of
+ /// integer types.
+ ///
+ bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); }
+
+ /// isFunctionTy - True if this is an instance of FunctionType.
+ ///
+ bool isFunctionTy() const { return getTypeID() == FunctionTyID; }
+
+ /// isStructTy - True if this is an instance of StructType.
+ ///
+ bool isStructTy() const { return getTypeID() == StructTyID; }
+
+ /// isArrayTy - True if this is an instance of ArrayType.
+ ///
+ bool isArrayTy() const { return getTypeID() == ArrayTyID; }
+
+ /// isPointerTy - True if this is an instance of PointerType.
+ ///
+ bool isPointerTy() const { return getTypeID() == PointerTyID; }
+
+ /// isPtrOrPtrVectorTy - Return true if this is a pointer type or a vector of
+ /// pointer types.
+ ///
+ bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); }
+
+ /// isVectorTy - True if this is an instance of VectorType.
+ ///
+ bool isVectorTy() const { return getTypeID() == VectorTyID; }
+
+ /// canLosslesslyBitCastTo - Return true if this type could be converted
+ /// with a lossless BitCast to type 'Ty'. For example, i8* to i32*. BitCasts
+ /// are valid for types of the same size only where no re-interpretation of
+ /// the bits is done.
+ /// @brief Determine if this type could be losslessly bitcast to Ty
+ bool canLosslesslyBitCastTo(Type *Ty) const;
+
+ /// isEmptyTy - Return true if this type is empty, that is, it has no
+ /// elements or all its elements are empty.
+ bool isEmptyTy() const;
+
+ /// isFirstClassType - Return true if the type is "first class", meaning it
+ /// is a valid type for a Value.
+ ///
+ bool isFirstClassType() const {
+ return getTypeID() != FunctionTyID && getTypeID() != VoidTyID;
+ }
+
+ /// isSingleValueType - Return true if the type is a valid type for a
+ /// register in codegen. This includes all first-class types except struct
+ /// and array types.
+ ///
+ bool isSingleValueType() const {
+ return isFloatingPointTy() || isX86_MMXTy() || isIntegerTy() ||
+ isPointerTy() || isVectorTy();
+ }
+
+ /// isAggregateType - Return true if the type is an aggregate type. This
+ /// means it is valid as the first operand of an insertvalue or
+ /// extractvalue instruction. This includes struct and array types, but
+ /// does not include vector types.
+ ///
+ bool isAggregateType() const {
+ return getTypeID() == StructTyID || getTypeID() == ArrayTyID;
+ }
+
+ /// isSized - Return true if it makes sense to take the size of this type. To
+ /// get the actual size for a particular target, it is reasonable to use the
+ /// DataLayout subsystem to do this.
+ ///
+ bool isSized(SmallPtrSetImpl<Type*> *Visited = nullptr) const {
+ // If it's a primitive, it is always sized.
+ if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
+ getTypeID() == PointerTyID ||
+ getTypeID() == X86_MMXTyID)
+ return true;
+ // If it is not something that can have a size (e.g. a function or label),
+ // it doesn't have a size.
+ if (getTypeID() != StructTyID && getTypeID() != ArrayTyID &&
+ getTypeID() != VectorTyID)
+ return false;
+ // Otherwise we have to try harder to decide.
+ return isSizedDerivedType(Visited);
+ }
+
+ /// getPrimitiveSizeInBits - Return the basic size of this type if it is a
+ /// primitive type. These are fixed by LLVM and are not target dependent.
+ /// This will return zero if the type does not have a size or is not a
+ /// primitive type.
+ ///
+ /// Note that this may not reflect the size of memory allocated for an
+ /// instance of the type or the number of bytes that are written when an
+ /// instance of the type is stored to memory. The DataLayout class provides
+ /// additional query functions to provide this information.
+ ///
+ unsigned getPrimitiveSizeInBits() const LLVM_READONLY;
+
+ /// getScalarSizeInBits - If this is a vector type, return the
+ /// getPrimitiveSizeInBits value for the element type. Otherwise return the
+ /// getPrimitiveSizeInBits value for this type.
+ unsigned getScalarSizeInBits() const LLVM_READONLY;
+
+ /// getFPMantissaWidth - Return the width of the mantissa of this type. This
+ /// is only valid on floating point types. If the FP type does not
+ /// have a stable mantissa (e.g. ppc long double), this method returns -1.
+ int getFPMantissaWidth() const;
+
+ /// getScalarType - If this is a vector type, return the element type,
+ /// otherwise return 'this'.
+ Type *getScalarType() const LLVM_READONLY;
+
+ //===--------------------------------------------------------------------===//
+ // Type Iteration support.
+ //
+ typedef Type * const *subtype_iterator;
+ subtype_iterator subtype_begin() const { return ContainedTys; }
+ subtype_iterator subtype_end() const { return &ContainedTys[NumContainedTys];}
+ ArrayRef<Type*> subtypes() const {
+ return makeArrayRef(subtype_begin(), subtype_end());
+ }
+
+ typedef std::reverse_iterator<subtype_iterator> subtype_reverse_iterator;
+ subtype_reverse_iterator subtype_rbegin() const {
+ return subtype_reverse_iterator(subtype_end());
+ }
+ subtype_reverse_iterator subtype_rend() const {
+ return subtype_reverse_iterator(subtype_begin());
+ }
+
+ /// getContainedType - This method is used to implement the type iterator
+ /// (defined at the end of the file). For derived types, this returns the
+ /// types 'contained' in the derived type.
+ ///
+ Type *getContainedType(unsigned i) const {
+ assert(i < NumContainedTys && "Index out of range!");
+ return ContainedTys[i];
+ }
+
+ /// getNumContainedTypes - Return the number of types in the derived type.
+ ///
+ unsigned getNumContainedTypes() const { return NumContainedTys; }
+
+ //===--------------------------------------------------------------------===//
+ // Helper methods corresponding to subclass methods. This forces a cast to
+ // the specified subclass and calls its accessor. "getVectorNumElements" (for
+ // example) is shorthand for cast<VectorType>(Ty)->getNumElements(). This is
+ // only intended to cover the core methods that are frequently used, helper
+ // methods should not be added here.
+
+ inline unsigned getIntegerBitWidth() const;
+
+ inline Type *getFunctionParamType(unsigned i) const;
+ inline unsigned getFunctionNumParams() const;
+ inline bool isFunctionVarArg() const;
+
+ inline StringRef getStructName() const;
+ inline unsigned getStructNumElements() const;
+ inline Type *getStructElementType(unsigned N) const;
+
+ inline Type *getSequentialElementType() const;
+
+ inline uint64_t getArrayNumElements() const;
+ Type *getArrayElementType() const { return getSequentialElementType(); }
+
+ inline unsigned getVectorNumElements() const;
+ Type *getVectorElementType() const { return getSequentialElementType(); }
+
+ Type *getPointerElementType() const { return getSequentialElementType(); }
+
+ /// \brief Get the address space of this pointer or pointer vector type.
+ inline unsigned getPointerAddressSpace() const;
+
+ //===--------------------------------------------------------------------===//
+ // Static members exported by the Type class itself. Useful for getting
+ // instances of Type.
+ //
+
+ /// getPrimitiveType - Return a type based on an identifier.
+ static Type *getPrimitiveType(LLVMContext &C, TypeID IDNumber);
+
+ //===--------------------------------------------------------------------===//
+ // These are the builtin types that are always available.
+ //
+ static Type *getVoidTy(LLVMContext &C);
+ static Type *getLabelTy(LLVMContext &C);
+ static Type *getHalfTy(LLVMContext &C);
+ static Type *getFloatTy(LLVMContext &C);
+ static Type *getDoubleTy(LLVMContext &C);
+ static Type *getMetadataTy(LLVMContext &C);
+ static Type *getX86_FP80Ty(LLVMContext &C);
+ static Type *getFP128Ty(LLVMContext &C);
+ static Type *getPPC_FP128Ty(LLVMContext &C);
+ static Type *getX86_MMXTy(LLVMContext &C);
+ static Type *getTokenTy(LLVMContext &C);
+ static IntegerType *getIntNTy(LLVMContext &C, unsigned N);
+ static IntegerType *getInt1Ty(LLVMContext &C);
+ static IntegerType *getInt8Ty(LLVMContext &C);
+ static IntegerType *getInt16Ty(LLVMContext &C);
+ static IntegerType *getInt32Ty(LLVMContext &C);
+ static IntegerType *getInt64Ty(LLVMContext &C);
+ static IntegerType *getInt128Ty(LLVMContext &C);
+
+ //===--------------------------------------------------------------------===//
+ // Convenience methods for getting pointer types with one of the above builtin
+ // types as pointee.
+ //
+ static PointerType *getHalfPtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getFloatPtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getDoublePtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getX86_FP80PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getFP128PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getPPC_FP128PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getX86_MMXPtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getIntNPtrTy(LLVMContext &C, unsigned N, unsigned AS = 0);
+ static PointerType *getInt1PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getInt8PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getInt16PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0);
+ static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0);
+
+ /// getPointerTo - Return a pointer to the current type. This is equivalent
+ /// to PointerType::get(Foo, AddrSpace).
+ PointerType *getPointerTo(unsigned AddrSpace = 0) const;
+
+private:
+ /// isSizedDerivedType - Derived types like structures and arrays are sized
+ /// iff all of the members of the type are sized as well. Since asking for
+ /// their size is relatively uncommon, move this operation out of line.
+ bool isSizedDerivedType(SmallPtrSetImpl<Type*> *Visited = nullptr) const;
+};
+
+// Printing of types.
+static inline raw_ostream &operator<<(raw_ostream &OS, Type &T) {
+ T.print(OS);
+ return OS;
+}
+
+// allow isa<PointerType>(x) to work without DerivedTypes.h included.
+template <> struct isa_impl<PointerType, Type> {
+ static inline bool doit(const Type &Ty) {
+ return Ty.getTypeID() == Type::PointerTyID;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Provide specializations of GraphTraits to be able to treat a type as a
+// graph of sub types.
+
+template <> struct GraphTraits<Type *> {
+ typedef Type NodeType;
+ typedef Type::subtype_iterator ChildIteratorType;
+
+ static inline NodeType *getEntryNode(Type *T) { return T; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->subtype_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->subtype_end();
+ }
+};
+
+template <> struct GraphTraits<const Type*> {
+ typedef const Type NodeType;
+ typedef Type::subtype_iterator ChildIteratorType;
+
+ static inline NodeType *getEntryNode(NodeType *T) { return T; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->subtype_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->subtype_end();
+ }
+};
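+
+// With these specializations, generic graph algorithms can traverse a type's
+// subtypes. A sketch (illustrative; requires llvm/ADT/DepthFirstIterator.h,
+// and `SomeTy` is an assumed Type*):
+//
+//   for (Type *SubTy : depth_first(SomeTy))
+//     SubTy->dump();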
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Type, LLVMTypeRef)
+
+/* Specialized opaque type conversions.
+ */
+inline Type **unwrap(LLVMTypeRef* Tys) {
+ return reinterpret_cast<Type**>(Tys);
+}
+
+inline LLVMTypeRef *wrap(Type **Tys) {
+ return reinterpret_cast<LLVMTypeRef*>(const_cast<Type**>(Tys));
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/TypeBuilder.h b/contrib/llvm/include/llvm/IR/TypeBuilder.h
new file mode 100644
index 0000000..d2c6f00
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/TypeBuilder.h
@@ -0,0 +1,407 @@
+//===---- llvm/TypeBuilder.h - Builder for LLVM types -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeBuilder class, which is used as a convenient way to
+// create LLVM types with a consistent and simplified interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPEBUILDER_H
+#define LLVM_IR_TYPEBUILDER_H
+
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/LLVMContext.h"
+#include <climits>
+
+namespace llvm {
+
+/// TypeBuilder - This provides a uniform API for looking up types
+/// known at compile time. To support cross-compilation, we define a
+/// series of tag types in the llvm::types namespace, like i<N>,
+/// ieee_float, ppc_fp128, etc. TypeBuilder<T, false> allows T to be
+/// any of these, a native C type (whose size may depend on the host
+/// compiler), or a pointer, function, or struct type built out of
+/// these. TypeBuilder<T, true> removes native C types from this set
+/// to guarantee that its result is suitable for cross-compilation.
+/// We define the primitive types, pointer types, and functions up to
+/// 5 arguments here, but to use this class with your own types,
+/// you'll need to specialize it. For example, say you want to call a
+/// function defined externally as:
+///
+/// \code{.cpp}
+///
+/// struct MyType {
+/// int32 a;
+/// int32 *b;
+/// void *array[1]; // Intended as a flexible array.
+/// };
+/// int8 AFunction(struct MyType *value);
+///
+/// \endcode
+///
+/// You'll want to use
+/// Function::Create(TypeBuilder<types::i<8>(MyType*), true>::get(), ...)
+/// to declare the function, but when you first try this, your compiler will
+/// complain that TypeBuilder<MyType, true>::get() doesn't exist. To fix this,
+/// write:
+///
+/// \code{.cpp}
+///
+/// namespace llvm {
+/// template<bool xcompile> class TypeBuilder<MyType, xcompile> {
+/// public:
+/// static StructType *get(LLVMContext &Context) {
+/// // If you cache this result, be sure to cache it separately
+/// // for each LLVMContext.
+/// return StructType::get(
+/// TypeBuilder<types::i<32>, xcompile>::get(Context),
+/// TypeBuilder<types::i<32>*, xcompile>::get(Context),
+/// TypeBuilder<types::i<8>*[], xcompile>::get(Context),
+/// nullptr);
+/// }
+///
+/// // You may find this a convenient place to put some constants
+/// // to help with getelementptr. They don't have any effect on
+/// // the operation of TypeBuilder.
+/// enum Fields {
+/// FIELD_A,
+/// FIELD_B,
+/// FIELD_ARRAY
+/// };
+/// }
+/// } // namespace llvm
+///
+/// \endcode
+///
+/// TypeBuilder cannot handle recursive types or types you only know at runtime.
+/// If you try to give it a recursive type, it will deadlock, infinitely
+/// recurse, or do something similarly undesirable.
+template<typename T, bool cross_compilable> class TypeBuilder {};
+
+// Types for use with cross-compilable TypeBuilders. These correspond
+// exactly with an LLVM-native type.
+namespace types {
+/// i<N> corresponds to the LLVM IntegerType with N bits.
+template<uint32_t num_bits> class i {};
+
+// The following classes represent the LLVM floating types.
+class ieee_float {};
+class ieee_double {};
+class x86_fp80 {};
+class fp128 {};
+class ppc_fp128 {};
+// X86 MMX.
+class x86_mmx {};
+} // namespace types
+
+// LLVM doesn't have const or volatile types.
+template<typename T, bool cross> class TypeBuilder<const T, cross>
+ : public TypeBuilder<T, cross> {};
+template<typename T, bool cross> class TypeBuilder<volatile T, cross>
+ : public TypeBuilder<T, cross> {};
+template<typename T, bool cross> class TypeBuilder<const volatile T, cross>
+ : public TypeBuilder<T, cross> {};
+
+// Pointers
+template<typename T, bool cross> class TypeBuilder<T*, cross> {
+public:
+ static PointerType *get(LLVMContext &Context) {
+ return PointerType::getUnqual(TypeBuilder<T,cross>::get(Context));
+ }
+};
+
+/// There is no support for references.
+template<typename T, bool cross> class TypeBuilder<T&, cross> {};
+
+// Arrays
+template<typename T, size_t N, bool cross> class TypeBuilder<T[N], cross> {
+public:
+ static ArrayType *get(LLVMContext &Context) {
+ return ArrayType::get(TypeBuilder<T, cross>::get(Context), N);
+ }
+};
+/// LLVM uses an array of length 0 to represent an unknown-length array.
+template<typename T, bool cross> class TypeBuilder<T[], cross> {
+public:
+ static ArrayType *get(LLVMContext &Context) {
+ return ArrayType::get(TypeBuilder<T, cross>::get(Context), 0);
+ }
+};
+
+// Define the C integral types only for TypeBuilder<T, false>.
+//
+// C integral types do not have a defined size. It would be nice to use the
+// stdint.h-defined typedefs that do have defined sizes, but we'd run into the
+// following problem:
+//
+// On an ILP32 machine, stdint.h might define:
+//
+// typedef int int32_t;
+// typedef long long int64_t;
+// typedef long size_t;
+//
+// If we defined TypeBuilder<int32_t> and TypeBuilder<int64_t>, then any use of
+// TypeBuilder<size_t> would fail. We couldn't define TypeBuilder<size_t> in
+// addition to the defined-size types because we'd get duplicate definitions on
+// platforms where stdint.h instead defines:
+//
+// typedef int int32_t;
+// typedef long long int64_t;
+// typedef int size_t;
+//
+// So we define all the primitive C types and nothing else.
+#define DEFINE_INTEGRAL_TYPEBUILDER(T) \
+template<> class TypeBuilder<T, false> { \
+public: \
+ static IntegerType *get(LLVMContext &Context) { \
+ return IntegerType::get(Context, sizeof(T) * CHAR_BIT); \
+ } \
+}; \
+template<> class TypeBuilder<T, true> { \
+ /* We provide a definition here so users don't accidentally */ \
+ /* define these types to work. */ \
+}
+DEFINE_INTEGRAL_TYPEBUILDER(char);
+DEFINE_INTEGRAL_TYPEBUILDER(signed char);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned char);
+DEFINE_INTEGRAL_TYPEBUILDER(short);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned short);
+DEFINE_INTEGRAL_TYPEBUILDER(int);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned int);
+DEFINE_INTEGRAL_TYPEBUILDER(long);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned long);
+#ifdef _MSC_VER
+DEFINE_INTEGRAL_TYPEBUILDER(__int64);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned __int64);
+#else /* _MSC_VER */
+DEFINE_INTEGRAL_TYPEBUILDER(long long);
+DEFINE_INTEGRAL_TYPEBUILDER(unsigned long long);
+#endif /* _MSC_VER */
+#undef DEFINE_INTEGRAL_TYPEBUILDER
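+
+// For example (a sketch; the first line assumes a host where int is 32 bits):
+//
+//   IntegerType *HostInt = TypeBuilder<int, false>::get(Context);
+//   // Host-independent equivalent, safe for cross-compilation:
+//   IntegerType *I32 = TypeBuilder<types::i<32>, true>::get(Context);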
+
+template<uint32_t num_bits, bool cross>
+class TypeBuilder<types::i<num_bits>, cross> {
+public:
+ static IntegerType *get(LLVMContext &C) {
+ return IntegerType::get(C, num_bits);
+ }
+};
+
+template<> class TypeBuilder<float, false> {
+public:
+ static Type *get(LLVMContext& C) {
+ return Type::getFloatTy(C);
+ }
+};
+template<> class TypeBuilder<float, true> {};
+
+template<> class TypeBuilder<double, false> {
+public:
+ static Type *get(LLVMContext& C) {
+ return Type::getDoubleTy(C);
+ }
+};
+template<> class TypeBuilder<double, true> {};
+
+template<bool cross> class TypeBuilder<types::ieee_float, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getFloatTy(C); }
+};
+template<bool cross> class TypeBuilder<types::ieee_double, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getDoubleTy(C); }
+};
+template<bool cross> class TypeBuilder<types::x86_fp80, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getX86_FP80Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::fp128, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getFP128Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::ppc_fp128, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getPPC_FP128Ty(C); }
+};
+template<bool cross> class TypeBuilder<types::x86_mmx, cross> {
+public:
+ static Type *get(LLVMContext& C) { return Type::getX86_MMXTy(C); }
+};
+
+template<bool cross> class TypeBuilder<void, cross> {
+public:
+ static Type *get(LLVMContext &C) {
+ return Type::getVoidTy(C);
+ }
+};
+
+/// void* is disallowed in LLVM types, but it occurs often enough in C code that
+/// we special case it.
+template<> class TypeBuilder<void*, false>
+ : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<const void*, false>
+ : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<volatile void*, false>
+ : public TypeBuilder<types::i<8>*, false> {};
+template<> class TypeBuilder<const volatile void*, false>
+ : public TypeBuilder<types::i<8>*, false> {};
+
+template<typename R, bool cross> class TypeBuilder<R(), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context), false);
+ }
+};
+template<typename R, typename A1, bool cross> class TypeBuilder<R(A1), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, false);
+ }
+};
+template<typename R, typename A1, typename A2, bool cross>
+class TypeBuilder<R(A1, A2), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, false);
+ }
+};
+template<typename R, typename A1, typename A2, typename A3, bool cross>
+class TypeBuilder<R(A1, A2, A3), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, false);
+ }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+ bool cross>
+class TypeBuilder<R(A1, A2, A3, A4), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ TypeBuilder<A4, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, false);
+ }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, A5), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ TypeBuilder<A4, cross>::get(Context),
+ TypeBuilder<A5, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, false);
+ }
+};
+
+template<typename R, bool cross> class TypeBuilder<R(...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context), true);
+ }
+};
+template<typename R, typename A1, bool cross>
+class TypeBuilder<R(A1, ...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context), params, true);
+ }
+};
+template<typename R, typename A1, typename A2, bool cross>
+class TypeBuilder<R(A1, A2, ...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, true);
+ }
+};
+template<typename R, typename A1, typename A2, typename A3, bool cross>
+class TypeBuilder<R(A1, A2, A3, ...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, true);
+ }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+ bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, ...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ TypeBuilder<A4, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, true);
+ }
+};
+
+template<typename R, typename A1, typename A2, typename A3, typename A4,
+ typename A5, bool cross>
+class TypeBuilder<R(A1, A2, A3, A4, A5, ...), cross> {
+public:
+ static FunctionType *get(LLVMContext &Context) {
+ Type *params[] = {
+ TypeBuilder<A1, cross>::get(Context),
+ TypeBuilder<A2, cross>::get(Context),
+ TypeBuilder<A3, cross>::get(Context),
+ TypeBuilder<A4, cross>::get(Context),
+ TypeBuilder<A5, cross>::get(Context),
+ };
+ return FunctionType::get(TypeBuilder<R, cross>::get(Context),
+ params, true);
+ }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/TypeFinder.h b/contrib/llvm/include/llvm/IR/TypeFinder.h
new file mode 100644
index 0000000..5f38543
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/TypeFinder.h
@@ -0,0 +1,79 @@
+//===-- llvm/IR/TypeFinder.h - Class to find used struct types --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the TypeFinder class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_TYPEFINDER_H
+#define LLVM_IR_TYPEFINDER_H
+
+#include "llvm/ADT/DenseSet.h"
+#include <vector>
+
+namespace llvm {
+
+class MDNode;
+class Module;
+class StructType;
+class Type;
+class Value;
+
+/// TypeFinder - Walk over a module, identifying all of the types that are
+/// used by the module.
+class TypeFinder {
+ // To avoid walking constant expressions multiple times and other IR
+ // objects, we keep several helper maps.
+ DenseSet<const Value*> VisitedConstants;
+ DenseSet<const MDNode *> VisitedMetadata;
+ DenseSet<Type*> VisitedTypes;
+
+ std::vector<StructType*> StructTypes;
+ bool OnlyNamed;
+
+public:
+ TypeFinder() : OnlyNamed(false) {}
+
+ void run(const Module &M, bool onlyNamed);
+ void clear();
+
+ typedef std::vector<StructType*>::iterator iterator;
+ typedef std::vector<StructType*>::const_iterator const_iterator;
+
+ iterator begin() { return StructTypes.begin(); }
+ iterator end() { return StructTypes.end(); }
+
+ const_iterator begin() const { return StructTypes.begin(); }
+ const_iterator end() const { return StructTypes.end(); }
+
+ bool empty() const { return StructTypes.empty(); }
+ size_t size() const { return StructTypes.size(); }
+ iterator erase(iterator I, iterator E) { return StructTypes.erase(I, E); }
+
+ StructType *&operator[](unsigned Idx) { return StructTypes[Idx]; }
+
+private:
+ /// incorporateType - This method adds the type to the list of used
+ /// structures if it's not in there already.
+ void incorporateType(Type *Ty);
+
+ /// incorporateValue - This method is used to walk operand lists finding types
+ /// hiding in constant expressions and other operands that won't be walked in
+ /// other ways. GlobalValues, basic blocks, instructions, and inst operands
+ /// are all explicitly enumerated.
+ void incorporateValue(const Value *V);
+
+ /// incorporateMDNode - This method is used to walk the operands of an MDNode
+ /// to find types hiding within.
+ void incorporateMDNode(const MDNode *V);
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Use.h b/contrib/llvm/include/llvm/IR/Use.h
new file mode 100644
index 0000000..a738677
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Use.h
@@ -0,0 +1,172 @@
+//===-- llvm/Use.h - Definition of the Use class ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This defines the Use class. The Use class represents the operand of an
+/// instruction or some other User instance which refers to a Value. The Use
+/// class keeps the "use list" of the referenced value up to date.
+///
+/// Pointer tagging is used to efficiently find the User corresponding to a Use
+/// without having to store a User pointer in every Use. A User is preceded in
+/// memory by all the Uses corresponding to its operands, and the low bits of
+/// one of the fields (Prev) of the Use class are used to encode offsets to be
+/// able to find that User given a pointer to any Use. For details, see:
+///
+/// http://www.llvm.org/docs/ProgrammersManual.html#UserLayout
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USE_H
+#define LLVM_IR_USE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+#include <iterator>
+
+namespace llvm {
+
+class Value;
+class User;
+class Use;
+template <typename> struct simplify_type;
+
+// Use** is only 4-byte aligned.
+template <> class PointerLikeTypeTraits<Use **> {
+public:
+ static inline void *getAsVoidPointer(Use **P) { return P; }
+ static inline Use **getFromVoidPointer(void *P) {
+ return static_cast<Use **>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+
+/// \brief A Use represents the edge between a Value definition and its users.
+///
+/// This is notionally a two-dimensional linked list. It supports traversing
+/// all of the uses for a particular value definition. It also supports jumping
+/// directly to the used value when we arrive from the User's operands, and
+/// jumping directly to the User when we arrive from the Value's uses.
+///
+/// The pointer to the used Value is explicit, and the pointer to the User is
+/// implicit. The implicit pointer is found via a waymarking algorithm
+/// described in the programmer's manual:
+///
+/// http://www.llvm.org/docs/ProgrammersManual.html#the-waymarking-algorithm
+///
+/// This is essentially the single most memory intensive object in LLVM because
+/// of the number of uses in the system. At the same time, the constant time
+/// operations it allows are essential to many optimizations having reasonable
+/// time complexity.
+class Use {
+public:
+  /// \brief Provide a fast substitute for std::swap<Use>
+  /// that also works with less standard-compliant compilers.
+ void swap(Use &RHS);
+
+ // A type for the word following an array of hung-off Uses in memory, which is
+ // a pointer back to their User with the bottom bit set.
+ typedef PointerIntPair<User *, 1, unsigned> UserRef;
+
+private:
+ Use(const Use &U) = delete;
+
+ /// Destructor - Only for zap()
+ ~Use() {
+ if (Val)
+ removeFromList();
+ }
+
+ enum PrevPtrTag { zeroDigitTag, oneDigitTag, stopTag, fullStopTag };
+
+ /// Constructor
+ Use(PrevPtrTag tag) : Val(nullptr) { Prev.setInt(tag); }
+
+public:
+ operator Value *() const { return Val; }
+ Value *get() const { return Val; }
+
+ /// \brief Returns the User that contains this Use.
+ ///
+ /// For an instruction operand, for example, this will return the
+ /// instruction.
+ User *getUser() const;
+
+ inline void set(Value *Val);
+
+ Value *operator=(Value *RHS) {
+ set(RHS);
+ return RHS;
+ }
+ const Use &operator=(const Use &RHS) {
+ set(RHS.Val);
+ return *this;
+ }
+
+ Value *operator->() { return Val; }
+ const Value *operator->() const { return Val; }
+
+ Use *getNext() const { return Next; }
+
+ /// \brief Return the operand # of this use in its User.
+ unsigned getOperandNo() const;
+
+ /// \brief Initializes the waymarking tags on an array of Uses.
+ ///
+ /// This sets up the array of Uses such that getUser() can find the User from
+ /// any of those Uses.
+ static Use *initTags(Use *Start, Use *Stop);
+
+ /// \brief Destroys Use operands when the number of operands of
+ /// a User changes.
+ static void zap(Use *Start, const Use *Stop, bool del = false);
+
+private:
+ const Use *getImpliedUser() const;
+
+ Value *Val;
+ Use *Next;
+ PointerIntPair<Use **, 2, PrevPtrTag> Prev;
+
+ void setPrev(Use **NewPrev) { Prev.setPointer(NewPrev); }
+  void addToList(Use **List) {
+    // Splice this Use onto the front of the use list.
+    Next = *List;
+    if (Next)
+      Next->setPrev(&Next);
+    setPrev(List);
+    *List = this;
+  }
+  void removeFromList() {
+    // Unlink this Use, repairing the neighbors' Prev/Next pointers.
+    Use **StrippedPrev = Prev.getPointer();
+    *StrippedPrev = Next;
+    if (Next)
+      Next->setPrev(StrippedPrev);
+  }
+
+ friend class Value;
+};
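+
+// Sketch (illustrative; `V` is an assumed Value*): each element of a value's
+// use list is a Use, from which the using User and operand slot can be
+// recovered:
+//
+//   for (Use &U : V->uses()) {
+//     User *Usr = U.getUser();
+//     unsigned OpNo = U.getOperandNo();
+//   }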
+
+/// \brief Allow clients to treat uses just like values when using
+/// casting operators.
+template <> struct simplify_type<Use> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(Use &Val) { return Val.get(); }
+};
+template <> struct simplify_type<const Use> {
+ typedef /*const*/ Value *SimpleType;
+ static SimpleType getSimplifiedValue(const Use &Val) { return Val.get(); }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Use, LLVMUseRef)
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/UseListOrder.h b/contrib/llvm/include/llvm/IR/UseListOrder.h
new file mode 100644
index 0000000..1cabf03
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/UseListOrder.h
@@ -0,0 +1,56 @@
+//===- llvm/IR/UseListOrder.h - LLVM Use List Order -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has structures and command-line options for preserving use-list
+// order.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USELISTORDER_H
+#define LLVM_IR_USELISTORDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace llvm {
+
+class Module;
+class Function;
+class Value;
+
+/// \brief Structure to hold a use-list order.
+struct UseListOrder {
+ const Value *V;
+ const Function *F;
+ std::vector<unsigned> Shuffle;
+
+ UseListOrder(const Value *V, const Function *F, size_t ShuffleSize)
+ : V(V), F(F), Shuffle(ShuffleSize) {}
+
+ UseListOrder() : V(nullptr), F(nullptr) {}
+ UseListOrder(UseListOrder &&X)
+ : V(X.V), F(X.F), Shuffle(std::move(X.Shuffle)) {}
+ UseListOrder &operator=(UseListOrder &&X) {
+ V = X.V;
+ F = X.F;
+ Shuffle = std::move(X.Shuffle);
+ return *this;
+ }
+
+private:
+ UseListOrder(const UseListOrder &X) = delete;
+ UseListOrder &operator=(const UseListOrder &X) = delete;
+};
+
+typedef std::vector<UseListOrder> UseListOrderStack;
+
+} // end namespace llvm
+
+#endif // LLVM_IR_USELISTORDER_H
diff --git a/contrib/llvm/include/llvm/IR/User.h b/contrib/llvm/include/llvm/IR/User.h
new file mode 100644
index 0000000..885ae19
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/User.h
@@ -0,0 +1,271 @@
+//===-- llvm/User.h - User class definition ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class defines the interface that one who uses a Value must implement.
+// Each instance of the Value class keeps track of which Users have handles
+// to it.
+//
+// * Instructions are the largest class of Users.
+// * Constants may be users of other constants (think arrays and stuff)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_USER_H
+#define LLVM_IR_USER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+/// \brief Compile-time customization of User operands.
+///
+/// Customizes operand-related allocators and accessors.
+template <class>
+struct OperandTraits;
+
+class User : public Value {
+ User(const User &) = delete;
+ template <unsigned>
+ friend struct HungoffOperandTraits;
+ virtual void anchor();
+
+ LLVM_ATTRIBUTE_ALWAYS_INLINE inline static void *
+ allocateFixedOperandUser(size_t, unsigned, unsigned);
+
+protected:
+ /// Allocate a User with an operand pointer co-allocated.
+ ///
+ /// This is used for subclasses which need to allocate a variable number
+ /// of operands, i.e., 'hung off uses'.
+ void *operator new(size_t Size);
+
+ /// Allocate a User with the operands co-allocated.
+ ///
+ /// This is used for subclasses which have a fixed number of operands.
+ void *operator new(size_t Size, unsigned Us);
+
+ /// Allocate a User with the operands co-allocated. If DescBytes is non-zero
+ /// then allocate an additional DescBytes bytes before the operands. These
+ /// bytes can be accessed by calling getDescriptor.
+ ///
+ /// DescBytes needs to be divisible by sizeof(void *). The allocated
+ /// descriptor, if any, is aligned to sizeof(void *) bytes.
+ ///
+ /// This is used for subclasses which have a fixed number of operands.
+ void *operator new(size_t Size, unsigned Us, unsigned DescBytes);
+
+ User(Type *ty, unsigned vty, Use *, unsigned NumOps)
+ : Value(ty, vty) {
+ assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
+ NumUserOperands = NumOps;
+ // If we have hung off uses, then the operand list should initially be
+ // null.
+ assert((!HasHungOffUses || !getOperandList()) &&
+ "Error in initializing hung off uses for User");
+ }
+
+ /// \brief Allocate the array of Uses, followed by a pointer
+ /// (with bottom bit set) to the User.
+ /// \param IsPhi identifies callers which are phi nodes and which need
+ /// N BasicBlock* allocated along with the N Uses.
+ void allocHungoffUses(unsigned N, bool IsPhi = false);
+
+ /// \brief Grow the number of hung off uses. Note that allocHungoffUses
+ /// should be called instead if there are currently no uses.
+ void growHungoffUses(unsigned N, bool IsPhi = false);
+
+public:
+ ~User() override {
+ }
+ /// \brief Free memory allocated for User and Use objects.
+ void operator delete(void *Usr);
+ /// \brief Placement delete - required by std, but never called.
+ void operator delete(void*, unsigned) {
+ llvm_unreachable("Constructor throws?");
+ }
+ /// \brief Placement delete - required by std, but never called.
+ void operator delete(void*, unsigned, bool) {
+ llvm_unreachable("Constructor throws?");
+ }
+protected:
+ template <int Idx, typename U> static Use &OpFrom(const U *that) {
+ return Idx < 0
+ ? OperandTraits<U>::op_end(const_cast<U*>(that))[Idx]
+ : OperandTraits<U>::op_begin(const_cast<U*>(that))[Idx];
+ }
+ template <int Idx> Use &Op() {
+ return OpFrom<Idx>(this);
+ }
+ template <int Idx> const Use &Op() const {
+ return OpFrom<Idx>(this);
+ }
+private:
+ Use *&getHungOffOperands() { return *(reinterpret_cast<Use **>(this) - 1); }
+
+ Use *getIntrusiveOperands() {
+ return reinterpret_cast<Use *>(this) - NumUserOperands;
+ }
+
+ void setOperandList(Use *NewList) {
+ assert(HasHungOffUses &&
+ "Setting operand list only required for hung off uses");
+ getHungOffOperands() = NewList;
+ }
+public:
+ Use *getOperandList() {
+ return HasHungOffUses ? getHungOffOperands() : getIntrusiveOperands();
+ }
+ const Use *getOperandList() const {
+ return const_cast<User *>(this)->getOperandList();
+ }
+ Value *getOperand(unsigned i) const {
+ assert(i < NumUserOperands && "getOperand() out of range!");
+ return getOperandList()[i];
+ }
+ void setOperand(unsigned i, Value *Val) {
+ assert(i < NumUserOperands && "setOperand() out of range!");
+ assert((!isa<Constant>((const Value*)this) ||
+ isa<GlobalValue>((const Value*)this)) &&
+ "Cannot mutate a constant with setOperand!");
+ getOperandList()[i] = Val;
+ }
+ const Use &getOperandUse(unsigned i) const {
+ assert(i < NumUserOperands && "getOperandUse() out of range!");
+ return getOperandList()[i];
+ }
+ Use &getOperandUse(unsigned i) {
+ assert(i < NumUserOperands && "getOperandUse() out of range!");
+ return getOperandList()[i];
+ }
+
+ unsigned getNumOperands() const { return NumUserOperands; }
+
+ /// Returns the descriptor co-allocated with this User instance.
+ ArrayRef<const uint8_t> getDescriptor() const;
+
+ /// Returns the descriptor co-allocated with this User instance.
+ MutableArrayRef<uint8_t> getDescriptor();
+
+ /// Set the number of operands on a GlobalVariable.
+ ///
+ /// GlobalVariable always allocates space for a single operand, but
+ /// doesn't always use it.
+ ///
+ /// FIXME: Because the number of operands is used to find the start of the
+ /// allocated memory in operator delete, we always need to report 1 operand
+ /// before deletion.
+ void setGlobalVariableNumOperands(unsigned NumOps) {
+ assert(NumOps <= 1 && "GlobalVariable can only have 0 or 1 operands");
+ NumUserOperands = NumOps;
+ }
+
+ /// \brief Subclasses with hung off uses need to manage the operand count
+ /// themselves. In these instances, the operand count isn't used to find the
+ /// OperandList, so there's no issue in having the operand count change.
+ void setNumHungOffUseOperands(unsigned NumOps) {
+ assert(HasHungOffUses && "Must have hung off uses to use this method");
+ assert(NumOps < (1u << NumUserOperandsBits) && "Too many operands");
+ NumUserOperands = NumOps;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Operand Iterator interface...
+ //
+ typedef Use* op_iterator;
+ typedef const Use* const_op_iterator;
+ typedef iterator_range<op_iterator> op_range;
+ typedef iterator_range<const_op_iterator> const_op_range;
+
+ op_iterator op_begin() { return getOperandList(); }
+ const_op_iterator op_begin() const { return getOperandList(); }
+ op_iterator op_end() {
+ return getOperandList() + NumUserOperands;
+ }
+ const_op_iterator op_end() const {
+ return getOperandList() + NumUserOperands;
+ }
+ op_range operands() {
+ return op_range(op_begin(), op_end());
+ }
+ const_op_range operands() const {
+ return const_op_range(op_begin(), op_end());
+ }
+
+ /// \brief Iterator for directly iterating over the operand Values.
+ struct value_op_iterator
+ : iterator_adaptor_base<value_op_iterator, op_iterator,
+ std::random_access_iterator_tag, Value *,
+ ptrdiff_t, Value *, Value *> {
+ explicit value_op_iterator(Use *U = nullptr) : iterator_adaptor_base(U) {}
+
+ Value *operator*() const { return *I; }
+ Value *operator->() const { return operator*(); }
+ };
+
+ value_op_iterator value_op_begin() {
+ return value_op_iterator(op_begin());
+ }
+ value_op_iterator value_op_end() {
+ return value_op_iterator(op_end());
+ }
+ iterator_range<value_op_iterator> operand_values() {
+ return make_range(value_op_begin(), value_op_end());
+ }
+
+ /// \brief Drop all references to operands.
+ ///
+ /// This function is in charge of "letting go" of all objects that this User
+ /// refers to. This allows one to 'delete' a whole class at a time, even
+ /// though there may be circular references... First all references are
+ /// dropped, and all use counts go to zero. Then everything is deleted for
+ /// real. Note that no operations are valid on an object that has "dropped
+ /// all references", except operator delete.
+ void dropAllReferences() {
+ for (Use &U : operands())
+ U.set(nullptr);
+ }
+
+ /// \brief Replace uses of one Value with another.
+ ///
+ /// Replaces all references to the "From" definition with references to the
+ /// "To" definition.
+ void replaceUsesOfWith(Value *From, Value *To);
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static inline bool classof(const Value *V) {
+ return isa<Instruction>(V) || isa<Constant>(V);
+ }
+};
+// Either Use objects, or a Use pointer can be prepended to User.
+static_assert(AlignOf<Use>::Alignment >= AlignOf<User>::Alignment,
+ "Alignment is insufficient after objects prepended to User");
+static_assert(AlignOf<Use *>::Alignment >= AlignOf<User>::Alignment,
+ "Alignment is insufficient after objects prepended to User");
+
+template<> struct simplify_type<User::op_iterator> {
+ typedef Value* SimpleType;
+ static SimpleType getSimplifiedValue(User::op_iterator &Val) {
+ return Val->get();
+ }
+};
+template<> struct simplify_type<User::const_op_iterator> {
+ typedef /*const*/ Value* SimpleType;
+ static SimpleType getSimplifiedValue(User::const_op_iterator &Val) {
+ return Val->get();
+ }
+};
+
+} // End llvm namespace
+
+#endif
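
To make the operand interface above concrete, here is a small sketch (the helper names are invented) that walks a User's operands as raw Values via operand_values(), and mutates them through the higher-level helper:

    #include "llvm/IR/User.h"

    // Count how many operands of U are exactly the value Target.
    unsigned countOperandsEqualTo(llvm::User &U, const llvm::Value *Target) {
      unsigned N = 0;
      for (llvm::Value *Op : U.operand_values())
        if (Op == Target)
          ++N;
      return N;
    }

    // Redirect every matching operand; replaceUsesOfWith updates each Use
    // in place, keeping the use-lists of From and To consistent.
    void redirectOperands(llvm::User &U, llvm::Value *From, llvm::Value *To) {
      U.replaceUsesOfWith(From, To);
    }
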
diff --git a/contrib/llvm/include/llvm/IR/Value.def b/contrib/llvm/include/llvm/IR/Value.def
new file mode 100644
index 0000000..4c5d452
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Value.def
@@ -0,0 +1,91 @@
+//===-------- llvm/IR/Value.def - File that describes Values ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains descriptions of the various LLVM values. This is
+// used as a central place for enumerating the different values.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+// Provide definitions of macros so that users of this file do not have to
+// define everything to use it...
+//
+#if !(defined HANDLE_GLOBAL_VALUE || defined HANDLE_CONSTANT || \
+ defined HANDLE_INSTRUCTION || defined HANDLE_INLINE_ASM_VALUE || \
+ defined HANDLE_METADATA_VALUE || defined HANDLE_VALUE || \
+ defined HANDLE_CONSTANT_MARKER)
+#error "Missing macro definition of HANDLE_VALUE*"
+#endif
+
+#ifndef HANDLE_GLOBAL_VALUE
+#define HANDLE_GLOBAL_VALUE(ValueName) HANDLE_CONSTANT(ValueName)
+#endif
+
+#ifndef HANDLE_CONSTANT
+#define HANDLE_CONSTANT(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_INSTRUCTION
+#define HANDLE_INSTRUCTION(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_INLINE_ASM_VALUE
+#define HANDLE_INLINE_ASM_VALUE(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_METADATA_VALUE
+#define HANDLE_METADATA_VALUE(ValueName) HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_VALUE
+#define HANDLE_VALUE(ValueName)
+#endif
+
+#ifndef HANDLE_CONSTANT_MARKER
+#define HANDLE_CONSTANT_MARKER(MarkerName, ValueName)
+#endif
+
+HANDLE_VALUE(Argument)
+HANDLE_VALUE(BasicBlock)
+
+HANDLE_GLOBAL_VALUE(Function)
+HANDLE_GLOBAL_VALUE(GlobalAlias)
+HANDLE_GLOBAL_VALUE(GlobalVariable)
+HANDLE_CONSTANT(UndefValue)
+HANDLE_CONSTANT(BlockAddress)
+HANDLE_CONSTANT(ConstantExpr)
+HANDLE_CONSTANT(ConstantAggregateZero)
+HANDLE_CONSTANT(ConstantDataArray)
+HANDLE_CONSTANT(ConstantDataVector)
+HANDLE_CONSTANT(ConstantInt)
+HANDLE_CONSTANT(ConstantFP)
+HANDLE_CONSTANT(ConstantArray)
+HANDLE_CONSTANT(ConstantStruct)
+HANDLE_CONSTANT(ConstantVector)
+HANDLE_CONSTANT(ConstantPointerNull)
+HANDLE_CONSTANT(ConstantTokenNone)
+
+HANDLE_METADATA_VALUE(MetadataAsValue)
+HANDLE_INLINE_ASM_VALUE(InlineAsm)
+
+HANDLE_INSTRUCTION(Instruction)
+// Enum values starting at InstructionVal are used for Instructions;
+// don't add new values here!
+
+HANDLE_CONSTANT_MARKER(ConstantFirstVal, Function)
+HANDLE_CONSTANT_MARKER(ConstantLastVal, ConstantTokenNone)
+
+#undef HANDLE_GLOBAL_VALUE
+#undef HANDLE_CONSTANT
+#undef HANDLE_INSTRUCTION
+#undef HANDLE_METADATA_VALUE
+#undef HANDLE_INLINE_ASM_VALUE
+#undef HANDLE_VALUE
+#undef HANDLE_CONSTANT_MARKER
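
Value.def is consumed by defining one or more of the HANDLE_* macros before including it; any handler left undefined falls back through the chain of defaults above, and the file #undefs everything when it is done. A minimal sketch that prints the name of every concrete value kind (the function itself is invented for illustration):

    #include <cstdio>

    void printValueKinds() {
    #define HANDLE_VALUE(Name) std::printf("%s\n", #Name);
    #include "llvm/IR/Value.def"
      // No cleanup needed: Value.def #undefs its own macros.
    }
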
diff --git a/contrib/llvm/include/llvm/IR/Value.h b/contrib/llvm/include/llvm/IR/Value.h
new file mode 100644
index 0000000..bb7ff27
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Value.h
@@ -0,0 +1,767 @@
+//===-- llvm/IR/Value.h - Definition of the Value class ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Value class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUE_H
+#define LLVM_IR_VALUE_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/IR/Use.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+class APInt;
+class Argument;
+class AssemblyAnnotationWriter;
+class BasicBlock;
+class Constant;
+class DataLayout;
+class Function;
+class GlobalAlias;
+class GlobalObject;
+class GlobalValue;
+class GlobalVariable;
+class InlineAsm;
+class Instruction;
+class LLVMContext;
+class Module;
+class ModuleSlotTracker;
+class StringRef;
+class Twine;
+class Type;
+class ValueHandleBase;
+class ValueSymbolTable;
+class raw_ostream;
+
+template<typename ValueTy> class StringMapEntry;
+typedef StringMapEntry<Value*> ValueName;
+
+//===----------------------------------------------------------------------===//
+// Value Class
+//===----------------------------------------------------------------------===//
+
+/// \brief LLVM Value Representation
+///
+/// This is a very important LLVM class. It is the base class of all values
+/// computed by a program that may be used as operands to other values. Value is
+/// the super class of other important classes such as Instruction and Function.
+/// All Values have a Type. Type is not a subclass of Value. Some values can
+/// have a name and they belong to some Module. Setting the name on the Value
+/// automatically updates the module's symbol table.
+///
+/// Every value has a "use list" that keeps track of which other Values are
+/// using this Value. A Value can also have an arbitrary number of ValueHandle
+/// objects that watch it and listen to RAUW and Destroy events. See
+/// llvm/IR/ValueHandle.h for details.
+class Value {
+ Type *VTy;
+ Use *UseList;
+
+ friend class ValueAsMetadata; // Allow access to IsUsedByMD.
+ friend class ValueHandleBase;
+
+ const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
+ unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
+protected:
+ /// \brief Hold subclass data that can be dropped.
+ ///
+ /// This member is similar to SubclassData, however it is for holding
+ /// information which may be used to aid optimization, but which may be
+ /// cleared to zero without affecting conservative interpretation.
+ unsigned char SubclassOptionalData : 7;
+
+private:
+ /// \brief Hold arbitrary subclass data.
+ ///
+ /// This member is defined by this class, but is not used for anything.
+ /// Subclasses can use it to hold whatever state they find useful. This
+ /// field is initialized to zero by the ctor.
+ unsigned short SubclassData;
+
+protected:
+ /// \brief The number of operands in the subclass.
+ ///
+ /// This member is defined by this class, but not used for anything.
+ /// Subclasses can use it to store their number of operands, if they have
+ /// any.
+ ///
+ /// This is stored here to save space in User on 64-bit hosts. Since most
+ /// instances of Value have operands, 32-bit hosts aren't significantly
+ /// affected.
+ ///
+ /// Note, this should *NOT* be used directly by any class other than User.
+ /// User uses this value to find the Use list.
+ enum : unsigned { NumUserOperandsBits = 28 };
+ unsigned NumUserOperands : NumUserOperandsBits;
+
+ bool IsUsedByMD : 1;
+ bool HasName : 1;
+ bool HasHungOffUses : 1;
+ bool HasDescriptor : 1;
+
+private:
+ template <typename UseT> // UseT == 'Use' or 'const Use'
+ class use_iterator_impl
+ : public std::iterator<std::forward_iterator_tag, UseT *> {
+ UseT *U;
+ explicit use_iterator_impl(UseT *u) : U(u) {}
+ friend class Value;
+
+ public:
+ use_iterator_impl() : U() {}
+
+ bool operator==(const use_iterator_impl &x) const { return U == x.U; }
+ bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
+
+ use_iterator_impl &operator++() { // Preincrement
+ assert(U && "Cannot increment end iterator!");
+ U = U->getNext();
+ return *this;
+ }
+ use_iterator_impl operator++(int) { // Postincrement
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ UseT &operator*() const {
+ assert(U && "Cannot dereference end iterator!");
+ return *U;
+ }
+
+ UseT *operator->() const { return &operator*(); }
+
+ operator use_iterator_impl<const UseT>() const {
+ return use_iterator_impl<const UseT>(U);
+ }
+ };
+
+ template <typename UserTy> // UserTy == 'User' or 'const User'
+ class user_iterator_impl
+ : public std::iterator<std::forward_iterator_tag, UserTy *> {
+ use_iterator_impl<Use> UI;
+ explicit user_iterator_impl(Use *U) : UI(U) {}
+ friend class Value;
+
+ public:
+ user_iterator_impl() {}
+
+ bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
+ bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
+
+ /// \brief Returns true if this iterator is equal to user_end() on the value.
+ bool atEnd() const { return *this == user_iterator_impl(); }
+
+ user_iterator_impl &operator++() { // Preincrement
+ ++UI;
+ return *this;
+ }
+ user_iterator_impl operator++(int) { // Postincrement
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ // Retrieve a pointer to the current User.
+ UserTy *operator*() const {
+ return UI->getUser();
+ }
+
+ UserTy *operator->() const { return operator*(); }
+
+ operator user_iterator_impl<const UserTy>() const {
+ return user_iterator_impl<const UserTy>(*UI);
+ }
+
+ Use &getUse() const { return *UI; }
+ };
+
+ void operator=(const Value &) = delete;
+ Value(const Value &) = delete;
+
+protected:
+ Value(Type *Ty, unsigned scid);
+public:
+ virtual ~Value();
+
+ /// \brief Support for debugging, callable in GDB: V->dump()
+ void dump() const;
+
+ /// \brief Implement operator<< on Value.
+ /// @{
+ void print(raw_ostream &O, bool IsForDebug = false) const;
+ void print(raw_ostream &O, ModuleSlotTracker &MST,
+ bool IsForDebug = false) const;
+ /// @}
+
+ /// \brief Print the name of this Value out to the specified raw_ostream.
+ ///
+ /// This is useful when you just want to print 'int %reg126', not the
+ /// instruction that generated it. If you specify a Module for context, then
+ /// even constants get pretty-printed; for example, the type of a null
+ /// pointer is printed symbolically.
+ /// @{
+ void printAsOperand(raw_ostream &O, bool PrintType = true,
+ const Module *M = nullptr) const;
+ void printAsOperand(raw_ostream &O, bool PrintType,
+ ModuleSlotTracker &MST) const;
+ /// @}
+
+ /// \brief All values are typed, get the type of this value.
+ Type *getType() const { return VTy; }
+
+ /// \brief All values hold a context through their type.
+ LLVMContext &getContext() const;
+
+ /// \brief All values can potentially be named.
+ bool hasName() const { return HasName; }
+ ValueName *getValueName() const;
+ void setValueName(ValueName *VN);
+
+private:
+ void destroyValueName();
+ void setNameImpl(const Twine &Name);
+
+public:
+ /// \brief Return a constant reference to the value's name.
+ ///
+ /// This is cheap and guaranteed to return the same reference as long as the
+ /// value is not modified.
+ StringRef getName() const;
+
+ /// \brief Change the name of the value.
+ ///
+ /// Choose a new unique name if the provided name is taken.
+ ///
+ /// \param Name The new name; or "" if the value's name should be removed.
+ void setName(const Twine &Name);
+
+
+ /// \brief Transfer the name from V to this value.
+ ///
+ /// After taking V's name, sets V's name to empty.
+ ///
+ /// \note It is an error to call V->takeName(V).
+ void takeName(Value *V);
+
+ /// \brief Change all uses of this to point to a new Value.
+ ///
+ /// Go through the uses list for this definition and make each use point to
+ /// "V" instead of "this". After this completes, 'this's use list is
+ /// guaranteed to be empty.
+ void replaceAllUsesWith(Value *V);
+
+ /// \brief Change all uses of this outside a given block to point to a new
+ /// Value.
+ ///
+ /// Go through the uses list for this definition and make each use point to
+ /// "V" instead of "this" when the use is outside the given block. This
+ /// value's use list is expected to have at least one element. Unlike
+ /// replaceAllUsesWith, this function does not support basic block values
+ /// or constant users.
+ void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
+
+ //----------------------------------------------------------------------
+ // Methods for handling the chain of uses of this Value.
+ //
+ // Materializing a function can introduce new uses, so these methods come in
+ // two variants:
+ // The methods that start with materialized_ check the uses that are
+ // currently known given which functions are materialized. Be very careful
+ // when using them since you might not get all uses.
+ // The methods that don't start with materialized_ assert that the module
+ // is fully materialized.
+#ifdef NDEBUG
+ void assertModuleIsMaterialized() const {}
+#else
+ void assertModuleIsMaterialized() const;
+#endif
+
+ bool use_empty() const {
+ assertModuleIsMaterialized();
+ return UseList == nullptr;
+ }
+
+ typedef use_iterator_impl<Use> use_iterator;
+ typedef use_iterator_impl<const Use> const_use_iterator;
+ use_iterator materialized_use_begin() { return use_iterator(UseList); }
+ const_use_iterator materialized_use_begin() const {
+ return const_use_iterator(UseList);
+ }
+ use_iterator use_begin() {
+ assertModuleIsMaterialized();
+ return materialized_use_begin();
+ }
+ const_use_iterator use_begin() const {
+ assertModuleIsMaterialized();
+ return materialized_use_begin();
+ }
+ use_iterator use_end() { return use_iterator(); }
+ const_use_iterator use_end() const { return const_use_iterator(); }
+ iterator_range<use_iterator> materialized_uses() {
+ return make_range(materialized_use_begin(), use_end());
+ }
+ iterator_range<const_use_iterator> materialized_uses() const {
+ return make_range(materialized_use_begin(), use_end());
+ }
+ iterator_range<use_iterator> uses() {
+ assertModuleIsMaterialized();
+ return materialized_uses();
+ }
+ iterator_range<const_use_iterator> uses() const {
+ assertModuleIsMaterialized();
+ return materialized_uses();
+ }
+
+ bool user_empty() const {
+ assertModuleIsMaterialized();
+ return UseList == nullptr;
+ }
+
+ typedef user_iterator_impl<User> user_iterator;
+ typedef user_iterator_impl<const User> const_user_iterator;
+ user_iterator materialized_user_begin() { return user_iterator(UseList); }
+ const_user_iterator materialized_user_begin() const {
+ return const_user_iterator(UseList);
+ }
+ user_iterator user_begin() {
+ assertModuleIsMaterialized();
+ return materialized_user_begin();
+ }
+ const_user_iterator user_begin() const {
+ assertModuleIsMaterialized();
+ return materialized_user_begin();
+ }
+ user_iterator user_end() { return user_iterator(); }
+ const_user_iterator user_end() const { return const_user_iterator(); }
+ User *user_back() {
+ assertModuleIsMaterialized();
+ return *materialized_user_begin();
+ }
+ const User *user_back() const {
+ assertModuleIsMaterialized();
+ return *materialized_user_begin();
+ }
+ iterator_range<user_iterator> users() {
+ assertModuleIsMaterialized();
+ return make_range(materialized_user_begin(), user_end());
+ }
+ iterator_range<const_user_iterator> users() const {
+ assertModuleIsMaterialized();
+ return make_range(materialized_user_begin(), user_end());
+ }
+
+ /// \brief Return true if there is exactly one user of this value.
+ ///
+ /// This is specialized because it is a common request and does not require
+ /// traversing the whole use list.
+ bool hasOneUse() const {
+ const_use_iterator I = use_begin(), E = use_end();
+ if (I == E) return false;
+ return ++I == E;
+ }
+
+ /// \brief Return true if this Value has exactly N users.
+ bool hasNUses(unsigned N) const;
+
+ /// \brief Return true if this value has N users or more.
+ ///
+ /// This is logically equivalent to getNumUses() >= N.
+ bool hasNUsesOrMore(unsigned N) const;
+
+ /// \brief Check if this value is used in the specified basic block.
+ bool isUsedInBasicBlock(const BasicBlock *BB) const;
+
+ /// \brief This method computes the number of uses of this Value.
+ ///
+ /// This is a linear time operation. Use hasOneUse, hasNUses, or
+ /// hasNUsesOrMore to check for specific values.
+ unsigned getNumUses() const;
+
+ /// \brief This method should only be used by the Use class.
+ void addUse(Use &U) { U.addToList(&UseList); }
+
+ /// \brief Concrete subclass of this.
+ ///
+ /// An enumeration for keeping track of the concrete subclass of Value that
+ /// is actually instantiated. Values of this enumeration are kept in the
+ /// Value class's SubclassID field. They are used for concrete type
+ /// identification.
+ enum ValueTy {
+#define HANDLE_VALUE(Name) Name##Val,
+#include "llvm/IR/Value.def"
+
+ // Markers:
+#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
+#include "llvm/IR/Value.def"
+ };
+
+ /// \brief Return an ID for the concrete type of this object.
+ ///
+ /// This is used to implement the classof checks. This should not be used
+ /// for any other purpose, as the values may change as LLVM evolves. Also,
+ /// note that for instructions, the Instruction's opcode is added to
+ /// InstructionVal. So this means three things:
+ /// # there is no value with code InstructionVal (no opcode==0).
+ /// # there are more possible values for the value type than in ValueTy enum.
+ /// # the InstructionVal enumerator must be the highest valued enumerator in
+ /// the ValueTy enum.
+ unsigned getValueID() const {
+ return SubclassID;
+ }
+
+ /// \brief Return the raw optional flags value contained in this value.
+ ///
+ /// This should only be used when testing two Values for equivalence.
+ unsigned getRawSubclassOptionalData() const {
+ return SubclassOptionalData;
+ }
+
+ /// \brief Clear the optional flags contained in this value.
+ void clearSubclassOptionalData() {
+ SubclassOptionalData = 0;
+ }
+
+ /// \brief Check the optional flags for equality.
+ bool hasSameSubclassOptionalData(const Value *V) const {
+ return SubclassOptionalData == V->SubclassOptionalData;
+ }
+
+ /// \brief Clear any optional flags not set in the given Value.
+ void intersectOptionalDataWith(const Value *V) {
+ SubclassOptionalData &= V->SubclassOptionalData;
+ }
+
+ /// \brief Return true if there is a value handle associated with this value.
+ bool hasValueHandle() const { return HasValueHandle; }
+
+ /// \brief Return true if there is metadata referencing this value.
+ bool isUsedByMetadata() const { return IsUsedByMD; }
+
+ /// \brief Strip off pointer casts, all-zero GEPs, and aliases.
+ ///
+ /// Returns the original uncasted value. If this is called on a non-pointer
+ /// value, it returns 'this'.
+ Value *stripPointerCasts();
+ const Value *stripPointerCasts() const {
+ return const_cast<Value*>(this)->stripPointerCasts();
+ }
+
+ /// \brief Strip off pointer casts and all-zero GEPs.
+ ///
+ /// Returns the original uncasted value. If this is called on a non-pointer
+ /// value, it returns 'this'.
+ Value *stripPointerCastsNoFollowAliases();
+ const Value *stripPointerCastsNoFollowAliases() const {
+ return const_cast<Value*>(this)->stripPointerCastsNoFollowAliases();
+ }
+
+ /// \brief Strip off pointer casts and all-constant inbounds GEPs.
+ ///
+ /// Returns the original pointer value. If this is called on a non-pointer
+ /// value, it returns 'this'.
+ Value *stripInBoundsConstantOffsets();
+ const Value *stripInBoundsConstantOffsets() const {
+ return const_cast<Value*>(this)->stripInBoundsConstantOffsets();
+ }
+
+ /// \brief Accumulate offsets from \a stripInBoundsConstantOffsets().
+ ///
+ /// Stores the resulting constant offset stripped into the APInt provided.
+ /// The provided APInt will be extended or truncated as needed to be the
+ /// correct bitwidth for an offset of this pointer type.
+ ///
+ /// If this is called on a non-pointer value, it returns 'this'.
+ Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+ APInt &Offset);
+ const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
+ APInt &Offset) const {
+ return const_cast<Value *>(this)
+ ->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+ }
+
+ /// \brief Strip off pointer casts and inbounds GEPs.
+ ///
+ /// Returns the original pointer value. If this is called on a non-pointer
+ /// value, it returns 'this'.
+ Value *stripInBoundsOffsets();
+ const Value *stripInBoundsOffsets() const {
+ return const_cast<Value*>(this)->stripInBoundsOffsets();
+ }
+
+ /// \brief Translate PHI node to its predecessor from the given basic block.
+ ///
+ /// If this value is a PHI node with CurBB as its parent, return the value in
+ /// the PHI node corresponding to PredBB. If not, return this value. This is
+ /// useful if you want to know the value something has in a predecessor
+ /// block.
+ Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB);
+
+ const Value *DoPHITranslation(const BasicBlock *CurBB,
+ const BasicBlock *PredBB) const{
+ return const_cast<Value*>(this)->DoPHITranslation(CurBB, PredBB);
+ }
+
+ /// \brief The maximum alignment for instructions.
+ ///
+ /// This is the greatest alignment value supported by load, store, and alloca
+ /// instructions, and global values.
+ static const unsigned MaxAlignmentExponent = 29;
+ static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
+
+ /// \brief Mutate the type of this Value to be of the specified type.
+ ///
+ /// Note that this is an extremely dangerous operation which can create
+ /// completely invalid IR very easily. It is strongly recommended that you
+ /// recreate IR objects with the right types instead of mutating them in
+ /// place.
+ void mutateType(Type *Ty) {
+ VTy = Ty;
+ }
+
+ /// \brief Sort the use-list.
+ ///
+ /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
+ /// expected to compare two \a Use references.
+ template <class Compare> void sortUseList(Compare Cmp);
+
+ /// \brief Reverse the use-list.
+ void reverseUseList();
+
+private:
+ /// \brief Merge two lists together.
+ ///
+ /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
+ /// "equal" items from L before items from R.
+ ///
+ /// \return the first element in the list.
+ ///
+ /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
+ template <class Compare>
+ static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
+ Use *Merged;
+ Use **Next = &Merged;
+
+ for (;;) {
+ if (!L) {
+ *Next = R;
+ break;
+ }
+ if (!R) {
+ *Next = L;
+ break;
+ }
+ if (Cmp(*R, *L)) {
+ *Next = R;
+ Next = &R->Next;
+ R = R->Next;
+ } else {
+ *Next = L;
+ Next = &L->Next;
+ L = L->Next;
+ }
+ }
+
+ return Merged;
+ }
+
+ /// \brief Tail-recursive helper for \a mergeUseLists().
+ ///
+ /// \param[out] Next the first element in the list.
+ template <class Compare>
+ static void mergeUseListsImpl(Use *L, Use *R, Use **Next, Compare Cmp);
+
+protected:
+ unsigned short getSubclassDataFromValue() const { return SubclassData; }
+ void setValueSubclassData(unsigned short D) { SubclassData = D; }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
+ V.print(OS);
+ return OS;
+}
+
+void Use::set(Value *V) {
+ if (Val) removeFromList();
+ Val = V;
+ if (V) V->addUse(*this);
+}
+
+template <class Compare> void Value::sortUseList(Compare Cmp) {
+ if (!UseList || !UseList->Next)
+ // No need to sort 0 or 1 uses.
+ return;
+
+ // Note: this function completely ignores Prev pointers until the end when
+ // they're fixed en masse.
+
+ // Create a binomial vector of sorted lists, visiting uses one at a time and
+ // merging lists as necessary.
+ const unsigned MaxSlots = 32;
+ Use *Slots[MaxSlots];
+
+ // Collect the first use, turning it into a single-item list.
+ Use *Next = UseList->Next;
+ UseList->Next = nullptr;
+ unsigned NumSlots = 1;
+ Slots[0] = UseList;
+
+ // Collect all but the last use.
+ while (Next->Next) {
+ Use *Current = Next;
+ Next = Current->Next;
+
+ // Turn Current into a single-item list.
+ Current->Next = nullptr;
+
+ // Save Current in the first available slot, merging on collisions.
+ unsigned I;
+ for (I = 0; I < NumSlots; ++I) {
+ if (!Slots[I])
+ break;
+
+ // Merge two lists, doubling the size of Current and emptying slot I.
+ //
+ // Since the uses in Slots[I] originally preceded those in Current, send
+ // Slots[I] in as the left parameter to maintain a stable sort.
+ Current = mergeUseLists(Slots[I], Current, Cmp);
+ Slots[I] = nullptr;
+ }
+ // Check if this is a new slot.
+ if (I == NumSlots) {
+ ++NumSlots;
+ assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
+ }
+
+ // Found an open slot.
+ Slots[I] = Current;
+ }
+
+ // Merge all the lists together.
+ assert(Next && "Expected one more Use");
+ assert(!Next->Next && "Expected only one Use");
+ UseList = Next;
+ for (unsigned I = 0; I < NumSlots; ++I)
+ if (Slots[I])
+ // Since the uses in Slots[I] originally preceded those in UseList, send
+ // Slots[I] in as the left parameter to maintain a stable sort.
+ UseList = mergeUseLists(Slots[I], UseList, Cmp);
+
+ // Fix the Prev pointers.
+ for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
+ I->setPrev(Prev);
+ Prev = &I->Next;
+ }
+}
+
+// isa - Provide some specializations of isa so that we don't have to include
+// the subtype header files to test whether the value is a subclass...
+//
+template <> struct isa_impl<Constant, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() >= Value::ConstantFirstVal &&
+ Val.getValueID() <= Value::ConstantLastVal;
+ }
+};
+
+template <> struct isa_impl<Argument, Value> {
+ static inline bool doit (const Value &Val) {
+ return Val.getValueID() == Value::ArgumentVal;
+ }
+};
+
+template <> struct isa_impl<InlineAsm, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::InlineAsmVal;
+ }
+};
+
+template <> struct isa_impl<Instruction, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() >= Value::InstructionVal;
+ }
+};
+
+template <> struct isa_impl<BasicBlock, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::BasicBlockVal;
+ }
+};
+
+template <> struct isa_impl<Function, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::FunctionVal;
+ }
+};
+
+template <> struct isa_impl<GlobalVariable, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::GlobalVariableVal;
+ }
+};
+
+template <> struct isa_impl<GlobalAlias, Value> {
+ static inline bool doit(const Value &Val) {
+ return Val.getValueID() == Value::GlobalAliasVal;
+ }
+};
+
+template <> struct isa_impl<GlobalValue, Value> {
+ static inline bool doit(const Value &Val) {
+ return isa<GlobalObject>(Val) || isa<GlobalAlias>(Val);
+ }
+};
+
+template <> struct isa_impl<GlobalObject, Value> {
+ static inline bool doit(const Value &Val) {
+ return isa<GlobalVariable>(Val) || isa<Function>(Val);
+ }
+};
+
+// Value* is only 4-byte aligned.
+template<>
+class PointerLikeTypeTraits<Value*> {
+ typedef Value* PT;
+public:
+ static inline void *getAsVoidPointer(PT P) { return P; }
+ static inline PT getFromVoidPointer(void *P) {
+ return static_cast<PT>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
+
+/* Specialized opaque value conversions.
+ */
+inline Value **unwrap(LLVMValueRef *Vals) {
+ return reinterpret_cast<Value**>(Vals);
+}
+
+template<typename T>
+inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
+#ifdef DEBUG
+ for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
+ cast<T>(*I);
+#endif
+ (void)Length;
+ return reinterpret_cast<T**>(Vals);
+}
+
+inline LLVMValueRef *wrap(const Value **Vals) {
+ return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
+}
+
+} // End llvm namespace
+
+#endif
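
sortUseList(), defined inline above, accepts any comparator over two Use references and runs the binomial merge shown in mergeUseLists(). A small sketch (the wrapper is invented, and it assumes Use::getOperandNo() from Use.h) that stable-sorts a value's uses by the operand slot each use occupies in its user:

    #include "llvm/IR/Value.h"

    void sortUsesByOperandNo(llvm::Value &V) {
      V.sortUseList([](const llvm::Use &L, const llvm::Use &R) {
        return L.getOperandNo() < R.getOperandNo();
      });
    }
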
diff --git a/contrib/llvm/include/llvm/IR/ValueHandle.h b/contrib/llvm/include/llvm/IR/ValueHandle.h
new file mode 100644
index 0000000..3c28059
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ValueHandle.h
@@ -0,0 +1,386 @@
+//===- ValueHandle.h - Value Smart Pointer classes --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ValueHandle class and its sub-classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEHANDLE_H
+#define LLVM_IR_VALUEHANDLE_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/IR/Value.h"
+
+namespace llvm {
+class ValueHandleBase;
+template<typename From> struct simplify_type;
+
+// ValueHandleBase** is only 4-byte aligned.
+template<>
+class PointerLikeTypeTraits<ValueHandleBase**> {
+public:
+ static inline void *getAsVoidPointer(ValueHandleBase** P) { return P; }
+ static inline ValueHandleBase **getFromVoidPointer(void *P) {
+ return static_cast<ValueHandleBase**>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+
+/// \brief This is the common base class of value handles.
+///
+/// ValueHandles are smart pointers to Values that have special behavior when
+/// the value is deleted or ReplaceAllUsesWith'd. See the specific handles
+/// below for details.
+class ValueHandleBase {
+ friend class Value;
+protected:
+ /// \brief This indicates what sub class the handle actually is.
+ ///
+ /// This is to avoid having a vtable for the light-weight handle pointers. The
+ /// fully general Callback version does have a vtable.
+ enum HandleBaseKind {
+ Assert,
+ Callback,
+ Tracking,
+ Weak
+ };
+
+ ValueHandleBase(const ValueHandleBase &RHS)
+ : ValueHandleBase(RHS.PrevPair.getInt(), RHS) {}
+
+ ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
+ : PrevPair(nullptr, Kind), Next(nullptr), V(RHS.V) {
+ if (isValid(V))
+ AddToExistingUseList(RHS.getPrevPtr());
+ }
+
+private:
+ PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
+ ValueHandleBase *Next;
+
+ Value* V;
+
+public:
+ explicit ValueHandleBase(HandleBaseKind Kind)
+ : PrevPair(nullptr, Kind), Next(nullptr), V(nullptr) {}
+ ValueHandleBase(HandleBaseKind Kind, Value *V)
+ : PrevPair(nullptr, Kind), Next(nullptr), V(V) {
+ if (isValid(V))
+ AddToUseList();
+ }
+
+ ~ValueHandleBase() {
+ if (isValid(V))
+ RemoveFromUseList();
+ }
+
+ Value *operator=(Value *RHS) {
+ if (V == RHS) return RHS;
+ if (isValid(V)) RemoveFromUseList();
+ V = RHS;
+ if (isValid(V)) AddToUseList();
+ return RHS;
+ }
+
+ Value *operator=(const ValueHandleBase &RHS) {
+ if (V == RHS.V) return RHS.V;
+ if (isValid(V)) RemoveFromUseList();
+ V = RHS.V;
+ if (isValid(V)) AddToExistingUseList(RHS.getPrevPtr());
+ return V;
+ }
+
+ Value *operator->() const { return V; }
+ Value &operator*() const { return *V; }
+
+protected:
+ Value *getValPtr() const { return V; }
+
+ static bool isValid(Value *V) {
+ return V &&
+ V != DenseMapInfo<Value *>::getEmptyKey() &&
+ V != DenseMapInfo<Value *>::getTombstoneKey();
+ }
+
+public:
+ // Callbacks made from Value.
+ static void ValueIsDeleted(Value *V);
+ static void ValueIsRAUWd(Value *Old, Value *New);
+
+private:
+ // Internal implementation details.
+ ValueHandleBase **getPrevPtr() const { return PrevPair.getPointer(); }
+ HandleBaseKind getKind() const { return PrevPair.getInt(); }
+ void setPrevPtr(ValueHandleBase **Ptr) { PrevPair.setPointer(Ptr); }
+
+ /// \brief Add this ValueHandle to the use list for V.
+ ///
+ /// List is the address of either the head of the list or a Next node within
+ /// the existing use list.
+ void AddToExistingUseList(ValueHandleBase **List);
+
+ /// \brief Add this ValueHandle to the use list after Node.
+ void AddToExistingUseListAfter(ValueHandleBase *Node);
+
+ /// \brief Add this ValueHandle to the use list for V.
+ void AddToUseList();
+ /// \brief Remove this ValueHandle from its current use list.
+ void RemoveFromUseList();
+};
+
+/// \brief Value handle that is nullable, but tries to track the Value.
+///
+/// This is a value handle that tries hard to point to a Value, even across
+/// RAUW operations, but will null itself out if the value is destroyed. This
+/// is useful for advisory sorts of information, but should not be used as the
+/// key of a map (since the map would have to rearrange itself when the pointer
+/// changes).
+class WeakVH : public ValueHandleBase {
+public:
+ WeakVH() : ValueHandleBase(Weak) {}
+ WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
+ WeakVH(const WeakVH &RHS)
+ : ValueHandleBase(Weak, RHS) {}
+
+ WeakVH &operator=(const WeakVH &RHS) = default;
+
+ Value *operator=(Value *RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+ Value *operator=(const ValueHandleBase &RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+
+ operator Value*() const {
+ return getValPtr();
+ }
+};
+
+// Specialize simplify_type to allow WeakVH to participate in
+// dyn_cast, isa, etc.
+template <> struct simplify_type<WeakVH> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(WeakVH &WVH) { return WVH; }
+};
+template <> struct simplify_type<const WeakVH> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; }
+};
+
+/// \brief Value handle that asserts if the Value is deleted.
+///
+/// This is a Value Handle that points to a value and asserts out if the value
+/// is destroyed while the handle is still live. This is very useful for
+/// catching dangling pointer bugs and other things which can be non-obvious.
+/// One particularly useful place to use this is as the Key of a map. Dangling
+/// pointers often lead to really subtle bugs that only occur if another
+/// object happens to get allocated to the same address as the old one. Using
+/// an AssertingVH ensures that an assert is triggered as soon as the bad
+/// delete occurs.
+///
+/// Note that an AssertingVH handle does *not* follow values across RAUW
+/// operations. This means that RAUW operations need to explicitly update
+/// the AssertingVHs as the value moves. This is required because in
+/// non-assert mode this class turns into a trivial wrapper around a pointer.
+template <typename ValueTy>
+class AssertingVH
+#ifndef NDEBUG
+ : public ValueHandleBase
+#endif
+ {
+ friend struct DenseMapInfo<AssertingVH<ValueTy> >;
+
+#ifndef NDEBUG
+ Value *getRawValPtr() const { return ValueHandleBase::getValPtr(); }
+ void setRawValPtr(Value *P) { ValueHandleBase::operator=(P); }
+#else
+ Value *ThePtr;
+ Value *getRawValPtr() const { return ThePtr; }
+ void setRawValPtr(Value *P) { ThePtr = P; }
+#endif
+ // Convert a ValueTy*, which may be const, to the raw Value*.
+ static Value *GetAsValue(Value *V) { return V; }
+ static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+ ValueTy *getValPtr() const { return static_cast<ValueTy *>(getRawValPtr()); }
+ void setValPtr(ValueTy *P) { setRawValPtr(GetAsValue(P)); }
+
+public:
+#ifndef NDEBUG
+ AssertingVH() : ValueHandleBase(Assert) {}
+ AssertingVH(ValueTy *P) : ValueHandleBase(Assert, GetAsValue(P)) {}
+ AssertingVH(const AssertingVH &RHS) : ValueHandleBase(Assert, RHS) {}
+#else
+ AssertingVH() : ThePtr(nullptr) {}
+ AssertingVH(ValueTy *P) : ThePtr(GetAsValue(P)) {}
+#endif
+
+ operator ValueTy*() const {
+ return getValPtr();
+ }
+
+ ValueTy *operator=(ValueTy *RHS) {
+ setValPtr(RHS);
+ return getValPtr();
+ }
+ ValueTy *operator=(const AssertingVH<ValueTy> &RHS) {
+ setValPtr(RHS.getValPtr());
+ return getValPtr();
+ }
+
+ ValueTy *operator->() const { return getValPtr(); }
+ ValueTy &operator*() const { return *getValPtr(); }
+};
+
+// Specialize DenseMapInfo to allow AssertingVH to participate in DenseMap.
+template<typename T>
+struct DenseMapInfo<AssertingVH<T> > {
+ static inline AssertingVH<T> getEmptyKey() {
+ AssertingVH<T> Res;
+ Res.setRawValPtr(DenseMapInfo<Value *>::getEmptyKey());
+ return Res;
+ }
+ static inline AssertingVH<T> getTombstoneKey() {
+ AssertingVH<T> Res;
+ Res.setRawValPtr(DenseMapInfo<Value *>::getTombstoneKey());
+ return Res;
+ }
+ static unsigned getHashValue(const AssertingVH<T> &Val) {
+ return DenseMapInfo<Value *>::getHashValue(Val.getRawValPtr());
+ }
+ static bool isEqual(const AssertingVH<T> &LHS, const AssertingVH<T> &RHS) {
+ return DenseMapInfo<Value *>::isEqual(LHS.getRawValPtr(),
+ RHS.getRawValPtr());
+ }
+};
+
+template <typename T>
+struct isPodLike<AssertingVH<T> > {
+#ifdef NDEBUG
+ static const bool value = true;
+#else
+ static const bool value = false;
+#endif
+};
+
+
+/// \brief Value handle that tracks a Value across RAUW.
+///
+/// TrackingVH is designed for situations where a client needs to hold a handle
+/// to a Value (or subclass) across some operations which may move that value,
+/// but should never destroy it or replace it with some unacceptable type.
+///
+/// It is an error to do anything with a TrackingVH whose value has been
+/// destroyed, except to destruct it.
+///
+/// It is an error to attempt to replace a value with one of a type which is
+/// incompatible with any of its outstanding TrackingVHs.
+template<typename ValueTy>
+class TrackingVH : public ValueHandleBase {
+ void CheckValidity() const {
+ Value *VP = ValueHandleBase::getValPtr();
+
+ // Null is always ok.
+ if (!VP) return;
+
+ // Check that this value is valid (i.e., it hasn't been deleted). We
+ // explicitly delay this check until access to avoid requiring clients to be
+ // unnecessarily careful w.r.t. destruction.
+ assert(ValueHandleBase::isValid(VP) && "Tracked Value was deleted!");
+
+ // Check that the value is a member of the correct subclass. We would like
+ // to check this property on assignment for better debugging, but we don't
+ // want to require a virtual interface on this VH. Instead we allow RAUW to
+ // replace this value with a value of an invalid type, and check it here.
+ assert(isa<ValueTy>(VP) &&
+ "Tracked Value was replaced by one with an invalid type!");
+ }
+
+ ValueTy *getValPtr() const {
+ CheckValidity();
+ return (ValueTy*)ValueHandleBase::getValPtr();
+ }
+ void setValPtr(ValueTy *P) {
+ CheckValidity();
+ ValueHandleBase::operator=(GetAsValue(P));
+ }
+
+ // Convert a ValueTy*, which may be const, to the type the base
+ // class expects.
+ static Value *GetAsValue(Value *V) { return V; }
+ static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
+
+public:
+ TrackingVH() : ValueHandleBase(Tracking) {}
+ TrackingVH(ValueTy *P) : ValueHandleBase(Tracking, GetAsValue(P)) {}
+
+ operator ValueTy*() const {
+ return getValPtr();
+ }
+
+ ValueTy *operator=(ValueTy *RHS) {
+ setValPtr(RHS);
+ return getValPtr();
+ }
+
+ ValueTy *operator->() const { return getValPtr(); }
+ ValueTy &operator*() const { return *getValPtr(); }
+};
+
+/// \brief Value handle with callbacks on RAUW and destruction.
+///
+/// This is a value handle that allows subclasses to define callbacks that run
+/// when the underlying Value has RAUW called on it or is destroyed. This
+/// class can be used as the key of a map, as long as the user takes it out of
+/// the map before calling setValPtr() (since the map has to rearrange itself
+/// when the pointer changes). Unlike ValueHandleBase, this class has a vtable.
+class CallbackVH : public ValueHandleBase {
+ virtual void anchor();
+protected:
+ ~CallbackVH() = default;
+ CallbackVH(const CallbackVH &) = default;
+ CallbackVH &operator=(const CallbackVH &) = default;
+
+ void setValPtr(Value *P) {
+ ValueHandleBase::operator=(P);
+ }
+
+public:
+ CallbackVH() : ValueHandleBase(Callback) {}
+ CallbackVH(Value *P) : ValueHandleBase(Callback, P) {}
+
+ operator Value*() const {
+ return getValPtr();
+ }
+
+ /// \brief Callback for Value destruction.
+ ///
+ /// Called when this->getValPtr() is destroyed, inside ~Value(), so you
+ /// may call any non-virtual Value method on getValPtr(), but no subclass
+ /// methods. If WeakVH were implemented as a CallbackVH, it would use this
+ /// method to call setValPtr(NULL). AssertingVH would use this method to
+ /// cause an assertion failure.
+ ///
+ /// All implementations must remove the reference from this object to the
+ /// Value that's being destroyed.
+ virtual void deleted() { setValPtr(nullptr); }
+
+ /// \brief Callback for Value RAUW.
+ ///
+ /// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
+ /// _before_ any of the uses have actually been replaced. If WeakVH were
+ /// implemented as a CallbackVH, it would use this method to call
+ /// setValPtr(new_value). AssertingVH would do nothing in this method.
+ virtual void allUsesReplacedWith(Value *) {}
+};
+
+} // End llvm namespace
+
+#endif
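
As a rough guide to choosing among the handles above: WeakVH nulls itself out when the value dies, AssertingVH turns a dangling handle into an assertion failure in +Asserts builds, TrackingVH follows the value across RAUW, and CallbackVH lets a subclass define both behaviors. A minimal sketch of the WeakVH behavior (the function is invented for illustration):

    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/ValueHandle.h"

    bool handleClearsOnErase(llvm::Instruction *I) {
      llvm::WeakVH Handle(I);
      I->eraseFromParent();      // destroys the instruction...
      return Handle == nullptr;  // ...and the handle has nulled itself out
    }
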
diff --git a/contrib/llvm/include/llvm/IR/ValueMap.h b/contrib/llvm/include/llvm/IR/ValueMap.h
new file mode 100644
index 0000000..ad518ac
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ValueMap.h
@@ -0,0 +1,398 @@
+//===- ValueMap.h - Safe map from Values to data ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ValueMap class. ValueMap maps Value* or any subclass
+// to an arbitrary other type. It provides the DenseMap interface but updates
+// itself to remain safe when keys are RAUWed or deleted. By default, when a
+// key is RAUWed from V1 to V2, the old mapping V1->target is removed, and a new
+// mapping V2->target is added. If V2 already existed, its old target is
+// overwritten. When a key is deleted, its mapping is removed.
+//
+// You can override a ValueMap's Config parameter to control exactly what
+// happens on RAUW and destruction and to get called back on each event. It's
+// legal to call back into the ValueMap from a Config's callbacks. Config
+// parameters should inherit from ValueMapConfig<KeyT> to get default
+// implementations of all the methods ValueMap uses. See ValueMapConfig for
+// documentation of the functions you can override.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUEMAP_H
+#define LLVM_IR_VALUEMAP_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/UniqueLock.h"
+#include "llvm/Support/type_traits.h"
+#include <iterator>
+#include <memory>
+
+namespace llvm {
+
+template<typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH;
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator;
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator;
+
+/// This class defines the default behavior for configurable aspects of
+/// ValueMap<>. User Configs should inherit from this class to be as compatible
+/// as possible with future versions of ValueMap.
+template<typename KeyT, typename MutexT = sys::Mutex>
+struct ValueMapConfig {
+ typedef MutexT mutex_type;
+
+ /// If FollowRAUW is true, the ValueMap will update mappings on RAUW. If it's
+ /// false, the ValueMap will leave the original mapping in place.
+ enum { FollowRAUW = true };
+
+ // All methods will be called with a first argument of type ExtraData. The
+ // default implementations in this class take a templated first argument so
+ // that users' subclasses can use any type they want without having to
+ // override all the defaults.
+ struct ExtraData {};
+
+ template<typename ExtraDataT>
+ static void onRAUW(const ExtraDataT & /*Data*/, KeyT /*Old*/, KeyT /*New*/) {}
+ template<typename ExtraDataT>
+ static void onDelete(const ExtraDataT &/*Data*/, KeyT /*Old*/) {}
+
+ /// Returns a mutex that should be acquired around any changes to the map.
+ /// This is only acquired from the CallbackVH (and held around calls to onRAUW
+ /// and onDelete) and not inside other ValueMap methods. NULL means that no
+ /// mutex is necessary.
+ template<typename ExtraDataT>
+ static mutex_type *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
+};
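+
+// As an illustrative sketch of the customization described in the file
+// comment (purely hypothetical, not part of this header): a Config that
+// keeps stale mappings on RAUW and observes deletions could look like
+//
+//   struct NoFollowConfig : ValueMapConfig<Value *> {
+//     enum { FollowRAUW = false }; // leave the old mapping in place on RAUW
+//     template <typename ExtraDataT>
+//     static void onDelete(const ExtraDataT &, Value *Old) { /* observe */ }
+//   };
+//
+//   // Usage: ValueMap<Value *, unsigned, NoFollowConfig> VM;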
+
+/// See the file comment.
+template<typename KeyT, typename ValueT, typename Config = ValueMapConfig<KeyT> >
+class ValueMap {
+ friend class ValueMapCallbackVH<KeyT, ValueT, Config>;
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> ValueMapCVH;
+ typedef DenseMap<ValueMapCVH, ValueT, DenseMapInfo<ValueMapCVH> > MapT;
+ typedef DenseMap<const Metadata *, TrackingMDRef> MDMapT;
+ typedef typename Config::ExtraData ExtraData;
+ MapT Map;
+ std::unique_ptr<MDMapT> MDMap;
+ ExtraData Data;
+ ValueMap(const ValueMap&) = delete;
+ ValueMap& operator=(const ValueMap&) = delete;
+public:
+ typedef KeyT key_type;
+ typedef ValueT mapped_type;
+ typedef std::pair<KeyT, ValueT> value_type;
+ typedef unsigned size_type;
+
+ explicit ValueMap(unsigned NumInitBuckets = 64)
+ : Map(NumInitBuckets), Data() {}
+ explicit ValueMap(const ExtraData &Data, unsigned NumInitBuckets = 64)
+ : Map(NumInitBuckets), Data(Data) {}
+
+ bool hasMD() const { return MDMap; }
+ MDMapT &MD() {
+ if (!MDMap)
+ MDMap.reset(new MDMapT);
+ return *MDMap;
+ }
+
+ typedef ValueMapIterator<MapT, KeyT> iterator;
+ typedef ValueMapConstIterator<MapT, KeyT> const_iterator;
+ inline iterator begin() { return iterator(Map.begin()); }
+ inline iterator end() { return iterator(Map.end()); }
+ inline const_iterator begin() const { return const_iterator(Map.begin()); }
+ inline const_iterator end() const { return const_iterator(Map.end()); }
+
+ bool empty() const { return Map.empty(); }
+ size_type size() const { return Map.size(); }
+
+ /// Grow the map so that it has at least Size buckets. Does not shrink.
+ void resize(size_t Size) { Map.resize(Size); }
+
+ void clear() {
+ Map.clear();
+ MDMap.reset();
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const KeyT &Val) const {
+ return Map.find_as(Val) == Map.end() ? 0 : 1;
+ }
+
+ iterator find(const KeyT &Val) {
+ return iterator(Map.find_as(Val));
+ }
+ const_iterator find(const KeyT &Val) const {
+ return const_iterator(Map.find_as(Val));
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Val) const {
+ typename MapT::const_iterator I = Map.find_as(Val);
+ return I != Map.end() ? I->second : ValueT();
+ }
+
+ // Inserts a (key, value) pair into the map if the key isn't already in the
+ // map. If the key is already in the map, it returns false and doesn't
+ // update the value.
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
+ auto MapResult = Map.insert(std::make_pair(Wrap(KV.first), KV.second));
+ return std::make_pair(iterator(MapResult.first), MapResult.second);
+ }
+
+ std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
+ auto MapResult =
+ Map.insert(std::make_pair(Wrap(KV.first), std::move(KV.second)));
+ return std::make_pair(iterator(MapResult.first), MapResult.second);
+ }
+
+ /// insert - Range insertion of pairs.
+ template<typename InputIt>
+ void insert(InputIt I, InputIt E) {
+ for (; I != E; ++I)
+ insert(*I);
+ }
+
+
+ bool erase(const KeyT &Val) {
+ typename MapT::iterator I = Map.find_as(Val);
+ if (I == Map.end())
+ return false;
+
+ Map.erase(I);
+ return true;
+ }
+ void erase(iterator I) {
+ return Map.erase(I.base());
+ }
+
+ value_type& FindAndConstruct(const KeyT &Key) {
+ return Map.FindAndConstruct(Wrap(Key));
+ }
+
+ ValueT &operator[](const KeyT &Key) {
+ return Map[Wrap(Key)];
+ }
+
+ /// isPointerIntoBucketsArray - Return true if the specified pointer points
+ /// somewhere into the ValueMap's array of buckets (i.e. either to a key or
+ /// value in the ValueMap).
+ bool isPointerIntoBucketsArray(const void *Ptr) const {
+ return Map.isPointerIntoBucketsArray(Ptr);
+ }
+
+ /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
+ /// array. In conjunction with the previous method, this can be used to
+ /// determine whether an insertion caused the ValueMap to reallocate.
+ const void *getPointerIntoBucketsArray() const {
+ return Map.getPointerIntoBucketsArray();
+ }
+
+private:
+ // Takes a key being looked up in the map and wraps it into a
+ // ValueMapCallbackVH, the actual key type of the map. We use a helper
+ // function because ValueMapCVH is constructed with a second parameter.
+ ValueMapCVH Wrap(KeyT key) const {
+ // The only way the resulting CallbackVH could try to modify *this (making
+ // the const_cast incorrect) is if it gets inserted into the map. But then
+ // this function must have been called from a non-const method, making the
+ // const_cast ok.
+ return ValueMapCVH(key, const_cast<ValueMap*>(this));
+ }
+};
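+
+// Illustrative usage (a sketch, not part of the original header): with the
+// default ValueMapConfig, entries are erased when their key Value is deleted
+// and follow the key across replaceAllUsesWith (FollowRAUW is true):
+//
+//   ValueMap<Value *, unsigned> VM;
+//   VM[V] = 42;                   // insert under key V
+//   V->replaceAllUsesWith(NewV);  // the mapping migrates from V to NewV
+//   assert(VM.count(NewV) == 1);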
+
+// This CallbackVH updates its ValueMap when the contained Value changes,
+// according to the user's preferences expressed through the Config object.
+template <typename KeyT, typename ValueT, typename Config>
+class ValueMapCallbackVH final : public CallbackVH {
+ friend class ValueMap<KeyT, ValueT, Config>;
+ friend struct DenseMapInfo<ValueMapCallbackVH>;
+ typedef ValueMap<KeyT, ValueT, Config> ValueMapT;
+ typedef typename std::remove_pointer<KeyT>::type KeySansPointerT;
+
+ ValueMapT *Map;
+
+ ValueMapCallbackVH(KeyT Key, ValueMapT *Map)
+ : CallbackVH(const_cast<Value*>(static_cast<const Value*>(Key))),
+ Map(Map) {}
+
+ // Private constructor used to create empty/tombstone DenseMap keys.
+ ValueMapCallbackVH(Value *V) : CallbackVH(V), Map(nullptr) {}
+
+public:
+ KeyT Unwrap() const { return cast_or_null<KeySansPointerT>(getValPtr()); }
+
+ void deleted() override {
+ // Make a copy that won't get changed even when *this is destroyed.
+ ValueMapCallbackVH Copy(*this);
+ typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
+ std::unique_lock<typename Config::mutex_type> Guard;
+ if (M)
+ Guard = std::unique_lock<typename Config::mutex_type>(*M);
+ Config::onDelete(Copy.Map->Data, Copy.Unwrap()); // May destroy *this.
+ Copy.Map->Map.erase(Copy); // Definitely destroys *this.
+ }
+ void allUsesReplacedWith(Value *new_key) override {
+ assert(isa<KeySansPointerT>(new_key) &&
+ "Invalid RAUW on key of ValueMap<>");
+ // Make a copy that won't get changed even when *this is destroyed.
+ ValueMapCallbackVH Copy(*this);
+ typename Config::mutex_type *M = Config::getMutex(Copy.Map->Data);
+ std::unique_lock<typename Config::mutex_type> Guard;
+ if (M)
+ Guard = std::unique_lock<typename Config::mutex_type>(*M);
+
+ KeyT typed_new_key = cast<KeySansPointerT>(new_key);
+ // Can destroy *this:
+ Config::onRAUW(Copy.Map->Data, Copy.Unwrap(), typed_new_key);
+ if (Config::FollowRAUW) {
+ typename ValueMapT::MapT::iterator I = Copy.Map->Map.find(Copy);
+ // I could == Copy.Map->Map.end() if the onRAUW callback already
+ // removed the old mapping.
+ if (I != Copy.Map->Map.end()) {
+ ValueT Target(std::move(I->second));
+ Copy.Map->Map.erase(I); // Definitely destroys *this.
+ Copy.Map->insert(std::make_pair(typed_new_key, std::move(Target)));
+ }
+ }
+ }
+};
+
+template<typename KeyT, typename ValueT, typename Config>
+struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
+ typedef ValueMapCallbackVH<KeyT, ValueT, Config> VH;
+
+ static inline VH getEmptyKey() {
+ return VH(DenseMapInfo<Value *>::getEmptyKey());
+ }
+ static inline VH getTombstoneKey() {
+ return VH(DenseMapInfo<Value *>::getTombstoneKey());
+ }
+ static unsigned getHashValue(const VH &Val) {
+ return DenseMapInfo<KeyT>::getHashValue(Val.Unwrap());
+ }
+ static unsigned getHashValue(const KeyT &Val) {
+ return DenseMapInfo<KeyT>::getHashValue(Val);
+ }
+ static bool isEqual(const VH &LHS, const VH &RHS) {
+ return LHS == RHS;
+ }
+ static bool isEqual(const KeyT &LHS, const VH &RHS) {
+ return LHS == RHS.getValPtr();
+ }
+};
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapIterator :
+ public std::iterator<std::forward_iterator_tag,
+ std::pair<KeyT, typename DenseMapT::mapped_type>,
+ ptrdiff_t> {
+ typedef typename DenseMapT::iterator BaseT;
+ typedef typename DenseMapT::mapped_type ValueT;
+ BaseT I;
+public:
+ ValueMapIterator() : I() {}
+
+ ValueMapIterator(BaseT I) : I(I) {}
+
+ BaseT base() const { return I; }
+
+ struct ValueTypeProxy {
+ const KeyT first;
+ ValueT& second;
+ ValueTypeProxy *operator->() { return this; }
+ operator std::pair<KeyT, ValueT>() const {
+ return std::make_pair(first, second);
+ }
+ };
+
+ ValueTypeProxy operator*() const {
+ ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+ return Result;
+ }
+
+ ValueTypeProxy operator->() const {
+ return operator*();
+ }
+
+ bool operator==(const ValueMapIterator &RHS) const {
+ return I == RHS.I;
+ }
+ bool operator!=(const ValueMapIterator &RHS) const {
+ return I != RHS.I;
+ }
+
+ inline ValueMapIterator& operator++() { // Preincrement
+ ++I;
+ return *this;
+ }
+ ValueMapIterator operator++(int) { // Postincrement
+ ValueMapIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+template<typename DenseMapT, typename KeyT>
+class ValueMapConstIterator :
+ public std::iterator<std::forward_iterator_tag,
+ std::pair<KeyT, typename DenseMapT::mapped_type>,
+ ptrdiff_t> {
+ typedef typename DenseMapT::const_iterator BaseT;
+ typedef typename DenseMapT::mapped_type ValueT;
+ BaseT I;
+public:
+ ValueMapConstIterator() : I() {}
+ ValueMapConstIterator(BaseT I) : I(I) {}
+ ValueMapConstIterator(ValueMapIterator<DenseMapT, KeyT> Other)
+ : I(Other.base()) {}
+
+ BaseT base() const { return I; }
+
+ struct ValueTypeProxy {
+ const KeyT first;
+ const ValueT& second;
+ ValueTypeProxy *operator->() { return this; }
+ operator std::pair<KeyT, ValueT>() const {
+ return std::make_pair(first, second);
+ }
+ };
+
+ ValueTypeProxy operator*() const {
+ ValueTypeProxy Result = {I->first.Unwrap(), I->second};
+ return Result;
+ }
+
+ ValueTypeProxy operator->() const {
+ return operator*();
+ }
+
+ bool operator==(const ValueMapConstIterator &RHS) const {
+ return I == RHS.I;
+ }
+ bool operator!=(const ValueMapConstIterator &RHS) const {
+ return I != RHS.I;
+ }
+
+ inline ValueMapConstIterator& operator++() { // Preincrement
+ ++I;
+ return *this;
+ }
+ ValueMapConstIterator operator++(int) { // Postincrement
+ ValueMapConstIterator tmp = *this; ++*this; return tmp;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/ValueSymbolTable.h b/contrib/llvm/include/llvm/IR/ValueSymbolTable.h
new file mode 100644
index 0000000..65bd7fc
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/ValueSymbolTable.h
@@ -0,0 +1,133 @@
+//===-- llvm/ValueSymbolTable.h - Implement a Value Symtab ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the name/Value symbol table for LLVM.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VALUESYMBOLTABLE_H
+#define LLVM_IR_VALUESYMBOLTABLE_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ template <typename ValueSubClass> class SymbolTableListTraits;
+ class BasicBlock;
+ class Function;
+ class NamedMDNode;
+ class Module;
+ class StringRef;
+
+/// This class provides a symbol table of name/value pairs. It is essentially
+/// a std::map<std::string, Value*>, but with a controlled interface provided
+/// by LLVM and a guarantee that names are unique.
+///
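+/// A minimal usage sketch (illustrative, not part of the original header):
+/// \code
+///   ValueSymbolTable &ST = F.getValueSymbolTable(); // F: an llvm::Function
+///   if (Value *V = ST.lookup("x"))
+///     V->dump();
+/// \endcode
+///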
+class ValueSymbolTable {
+ friend class Value;
+ friend class SymbolTableListTraits<Argument>;
+ friend class SymbolTableListTraits<BasicBlock>;
+ friend class SymbolTableListTraits<Instruction>;
+ friend class SymbolTableListTraits<Function>;
+ friend class SymbolTableListTraits<GlobalVariable>;
+ friend class SymbolTableListTraits<GlobalAlias>;
+/// @name Types
+/// @{
+public:
+ /// @brief A mapping of names to values.
+ typedef StringMap<Value*> ValueMap;
+
+ /// @brief An iterator over a ValueMap.
+ typedef ValueMap::iterator iterator;
+
+ /// @brief A const_iterator over a ValueMap.
+ typedef ValueMap::const_iterator const_iterator;
+
+/// @}
+/// @name Constructors
+/// @{
+public:
+ ValueSymbolTable() : vmap(0), LastUnique(0) {}
+ ~ValueSymbolTable();
+
+/// @}
+/// @name Accessors
+/// @{
+public:
+ /// This method finds the value with the given \p Name in the
+ /// symbol table.
+ /// @returns the value associated with the \p Name
+ /// @brief Lookup a named Value.
+ Value *lookup(StringRef Name) const { return vmap.lookup(Name); }
+
+ /// @returns true iff the symbol table is empty
+ /// @brief Determine if the symbol table is empty
+ inline bool empty() const { return vmap.empty(); }
+
+ /// @brief Return the number of name/value pairs in the symbol table.
+ inline unsigned size() const { return unsigned(vmap.size()); }
+
+ /// This function can be used from the debugger to display the
+ /// content of the symbol table while debugging.
+ /// @brief Print out symbol table on stderr
+ void dump() const;
+
+/// @}
+/// @name Iteration
+/// @{
+public:
+ /// @brief Get an iterator to the beginning of the symbol table.
+ inline iterator begin() { return vmap.begin(); }
+
+ /// @brief Get a const_iterator to the beginning of the symbol table.
+ inline const_iterator begin() const { return vmap.begin(); }
+
+ /// @brief Get an iterator to the end of the symbol table.
+ inline iterator end() { return vmap.end(); }
+
+ /// @brief Get a const_iterator to the end of the symbol table.
+ inline const_iterator end() const { return vmap.end(); }
+
+ /// @}
+ /// @name Mutators
+ /// @{
+private:
+ ValueName *makeUniqueName(Value *V, SmallString<256> &UniqueName);
+
+ /// This method adds the provided value \p V to the symbol table. The Value
+ /// must have a name which is used to place the value in the symbol table.
+ /// If the inserted name conflicts, this renames the value.
+ /// @brief Add a named value to the symbol table
+ void reinsertValue(Value *V);
+
+ /// createValueName - This method attempts to create a value name and insert
+ /// it into the symbol table with the specified name. If it conflicts, it
+ /// auto-renames the name and returns that instead.
+ ValueName *createValueName(StringRef Name, Value *V);
+
+ /// This method removes a value from the symbol table. It leaves the
+ /// ValueName attached to the value, but it is no longer inserted in the
+ /// symtab.
+ void removeValueName(ValueName *V);
+
+ /// @}
+ /// @name Internal Data
+ /// @{
+private:
+ ValueMap vmap; ///< The map that holds the symbol table.
+ mutable uint32_t LastUnique; ///< Counter for tracking unique names
+
+/// @}
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IR/Verifier.h b/contrib/llvm/include/llvm/IR/Verifier.h
new file mode 100644
index 0000000..89039d2
--- /dev/null
+++ b/contrib/llvm/include/llvm/IR/Verifier.h
@@ -0,0 +1,77 @@
+//===- Verifier.h - LLVM IR Verifier ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function verifier interface, which can be used for
+// sanity checking of input to the system and for checking that transformations
+// haven't done something bad.
+//
+// Note that this does not provide full 'Java-style' security and verification;
+// instead it just tries to ensure that code is well formed.
+//
+// To see what specifically is checked, look at the top of Verifier.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IR_VERIFIER_H
+#define LLVM_IR_VERIFIER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace llvm {
+
+class Function;
+class FunctionPass;
+class ModulePass;
+class Module;
+class PreservedAnalyses;
+class raw_ostream;
+
+/// \brief Check a function for errors, useful for use when debugging a
+/// pass.
+///
+/// If there are no errors, the function returns false. If an error is found,
+/// a message describing the error is written to OS (if non-null) and true is
+/// returned.
+bool verifyFunction(const Function &F, raw_ostream *OS = nullptr);
+
+/// \brief Check a module for errors.
+///
+/// If there are no errors, the function returns false. If an error is found,
+/// a message describing the error is written to OS (if non-null) and true is
+/// returned.
+bool verifyModule(const Module &M, raw_ostream *OS = nullptr);
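+
+// Typical debugging use (an illustrative sketch, not part of the original
+// header): run the verifier after a transformation and abort on breakage.
+//
+//   if (verifyModule(M, &errs()))
+//     report_fatal_error("Broken module found, compilation aborted!");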
+
+/// \brief Create a verifier pass.
+///
+/// Check a module or function for validity. This is essentially a pass wrapped
+/// around the above verifyFunction and verifyModule routines and
+/// functionality. When the pass detects a verification error, the error is
+/// printed to stderr, and by default it is fatal. You can override that by
+/// passing \c false to \p FatalErrors.
+///
+/// Note that this creates a pass suitable for the legacy pass manager. It has
+/// nothing to do with \c VerifierPass.
+FunctionPass *createVerifierPass(bool FatalErrors = true);
+
+class VerifierPass {
+ bool FatalErrors;
+
+public:
+ explicit VerifierPass(bool FatalErrors = true) : FatalErrors(FatalErrors) {}
+
+ PreservedAnalyses run(Module &M);
+ PreservedAnalyses run(Function &F);
+
+ static StringRef name() { return "VerifierPass"; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/IRReader/IRReader.h b/contrib/llvm/include/llvm/IRReader/IRReader.h
new file mode 100644
index 0000000..523cd3d
--- /dev/null
+++ b/contrib/llvm/include/llvm/IRReader/IRReader.h
@@ -0,0 +1,49 @@
+//===---- llvm/IRReader/IRReader.h - Reader for LLVM IR files ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions for reading LLVM IR. They support both
+// Bitcode and Assembly, automatically detecting the input format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_IRREADER_IRREADER_H
+#define LLVM_IRREADER_IRREADER_H
+
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+
+namespace llvm {
+
+class Module;
+class SMDiagnostic;
+class LLVMContext;
+
+/// If the given file holds a bitcode image, return a Module
+/// for it which does lazy deserialization of function bodies. Otherwise,
+/// attempt to parse it as LLVM Assembly and return a fully populated
+/// Module. The ShouldLazyLoadMetadata flag is passed down to the bitcode
+/// reader to optionally enable lazy metadata loading.
+std::unique_ptr<Module>
+getLazyIRFileModule(StringRef Filename, SMDiagnostic &Err, LLVMContext &Context,
+ bool ShouldLazyLoadMetadata = false);
+
+/// If the given MemoryBuffer holds a bitcode image, return a Module
+/// for it. Otherwise, attempt to parse it as LLVM Assembly and return
+/// a Module for it.
+std::unique_ptr<Module> parseIR(MemoryBufferRef Buffer, SMDiagnostic &Err,
+ LLVMContext &Context);
+
+/// If the given file holds a bitcode image, return a Module for it.
+/// Otherwise, attempt to parse it as LLVM Assembly and return a Module
+/// for it.
+std::unique_ptr<Module> parseIRFile(StringRef Filename, SMDiagnostic &Err,
+ LLVMContext &Context);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/InitializePasses.h b/contrib/llvm/include/llvm/InitializePasses.h
new file mode 100644
index 0000000..cb2b139
--- /dev/null
+++ b/contrib/llvm/include/llvm/InitializePasses.h
@@ -0,0 +1,314 @@
+//===- llvm/InitializePasses.h -------- Initialize All Passes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations for the pass initialization routines
+// for the entire LLVM project.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_INITIALIZEPASSES_H
+#define LLVM_INITIALIZEPASSES_H
+
+namespace llvm {
+
+class PassRegistry;
+
+/// initializeCore - Initialize all passes linked into the
+/// Core library.
+void initializeCore(PassRegistry&);
+
+/// initializeTransformUtils - Initialize all passes linked into the
+/// TransformUtils library.
+void initializeTransformUtils(PassRegistry&);
+
+/// initializeScalarOpts - Initialize all passes linked into the
+/// ScalarOpts library.
+void initializeScalarOpts(PassRegistry&);
+
+/// initializeObjCARCOpts - Initialize all passes linked into the ObjCARCOpts
+/// library.
+void initializeObjCARCOpts(PassRegistry&);
+
+/// initializeVectorization - Initialize all passes linked into the
+/// Vectorize library.
+void initializeVectorization(PassRegistry&);
+
+/// initializeInstCombine - Initialize all passes linked into the
+/// InstCombine library.
+void initializeInstCombine(PassRegistry&);
+
+/// initializeIPO - Initialize all passes linked into the IPO library.
+void initializeIPO(PassRegistry&);
+
+/// initializeInstrumentation - Initialize all passes linked into the
+/// Instrumentation library.
+void initializeInstrumentation(PassRegistry&);
+
+/// initializeAnalysis - Initialize all passes linked into the Analysis library.
+void initializeAnalysis(PassRegistry&);
+
+/// initializeCodeGen - Initialize all passes linked into the CodeGen library.
+void initializeCodeGen(PassRegistry&);
+
+/// initializeTarget - Initialize all passes linked into the Target library.
+void initializeTarget(PassRegistry&);
+
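+// Tools normally call the library-level initializers above once at startup,
+// before constructing passes. An illustrative sketch (not part of the
+// original header):
+//
+//   PassRegistry &Registry = *PassRegistry::getPassRegistry();
+//   initializeCore(Registry);
+//   initializeScalarOpts(Registry);
+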
+void initializeAAEvalPass(PassRegistry&);
+void initializeAddDiscriminatorsPass(PassRegistry&);
+void initializeADCELegacyPassPass(PassRegistry&);
+void initializeBDCEPass(PassRegistry&);
+void initializeAliasSetPrinterPass(PassRegistry&);
+void initializeAlwaysInlinerPass(PassRegistry&);
+void initializeArgPromotionPass(PassRegistry&);
+void initializeAtomicExpandPass(PassRegistry&);
+void initializeSampleProfileLoaderPass(PassRegistry&);
+void initializeAlignmentFromAssumptionsPass(PassRegistry&);
+void initializeBarrierNoopPass(PassRegistry&);
+void initializeBasicAAWrapperPassPass(PassRegistry&);
+void initializeCallGraphWrapperPassPass(PassRegistry &);
+void initializeBlockExtractorPassPass(PassRegistry&);
+void initializeBlockFrequencyInfoWrapperPassPass(PassRegistry&);
+void initializeBoundsCheckingPass(PassRegistry&);
+void initializeBranchFolderPassPass(PassRegistry&);
+void initializeBranchProbabilityInfoWrapperPassPass(PassRegistry&);
+void initializeBreakCriticalEdgesPass(PassRegistry&);
+void initializeCallGraphPrinterPass(PassRegistry&);
+void initializeCallGraphViewerPass(PassRegistry&);
+void initializeCFGOnlyPrinterPass(PassRegistry&);
+void initializeCFGOnlyViewerPass(PassRegistry&);
+void initializeCFGPrinterPass(PassRegistry&);
+void initializeCFGSimplifyPassPass(PassRegistry&);
+void initializeCFLAAWrapperPassPass(PassRegistry&);
+void initializeExternalAAWrapperPassPass(PassRegistry&);
+void initializeForwardControlFlowIntegrityPass(PassRegistry&);
+void initializeFlattenCFGPassPass(PassRegistry&);
+void initializeStructurizeCFGPass(PassRegistry&);
+void initializeCFGViewerPass(PassRegistry&);
+void initializeConstantHoistingPass(PassRegistry&);
+void initializeCodeGenPreparePass(PassRegistry&);
+void initializeConstantMergePass(PassRegistry&);
+void initializeConstantPropagationPass(PassRegistry&);
+void initializeMachineCopyPropagationPass(PassRegistry&);
+void initializeCostModelAnalysisPass(PassRegistry&);
+void initializeCorrelatedValuePropagationPass(PassRegistry&);
+void initializeCrossDSOCFIPass(PassRegistry&);
+void initializeDAEPass(PassRegistry&);
+void initializeDAHPass(PassRegistry&);
+void initializeDCEPass(PassRegistry&);
+void initializeDSEPass(PassRegistry&);
+void initializeDeadInstEliminationPass(PassRegistry&);
+void initializeDeadMachineInstructionElimPass(PassRegistry&);
+void initializeDelinearizationPass(PassRegistry &);
+void initializeDependenceAnalysisPass(PassRegistry&);
+void initializeDivergenceAnalysisPass(PassRegistry&);
+void initializeDomOnlyPrinterPass(PassRegistry&);
+void initializeDomOnlyViewerPass(PassRegistry&);
+void initializeDomPrinterPass(PassRegistry&);
+void initializeDomViewerPass(PassRegistry&);
+void initializeDominanceFrontierPass(PassRegistry&);
+void initializeDominatorTreeWrapperPassPass(PassRegistry&);
+void initializeEarlyIfConverterPass(PassRegistry&);
+void initializeEdgeBundlesPass(PassRegistry&);
+void initializeExpandPostRAPass(PassRegistry&);
+void initializeAAResultsWrapperPassPass(PassRegistry &);
+void initializeGCOVProfilerPass(PassRegistry&);
+void initializePGOInstrumentationGenPass(PassRegistry&);
+void initializePGOInstrumentationUsePass(PassRegistry&);
+void initializeInstrProfilingPass(PassRegistry&);
+void initializeAddressSanitizerPass(PassRegistry&);
+void initializeAddressSanitizerModulePass(PassRegistry&);
+void initializeMemorySanitizerPass(PassRegistry&);
+void initializeThreadSanitizerPass(PassRegistry&);
+void initializeSanitizerCoverageModulePass(PassRegistry&);
+void initializeDataFlowSanitizerPass(PassRegistry&);
+void initializeScalarizerPass(PassRegistry&);
+void initializeEarlyCSELegacyPassPass(PassRegistry &);
+void initializeEliminateAvailableExternallyPass(PassRegistry&);
+void initializeExpandISelPseudosPass(PassRegistry&);
+void initializeForceFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializeFunctionAttrsPass(PassRegistry&);
+void initializeGCMachineCodeAnalysisPass(PassRegistry&);
+void initializeGCModuleInfoPass(PassRegistry&);
+void initializeGVNPass(PassRegistry&);
+void initializeGlobalDCEPass(PassRegistry&);
+void initializeGlobalOptPass(PassRegistry&);
+void initializeGlobalsAAWrapperPassPass(PassRegistry&);
+void initializeIPCPPass(PassRegistry&);
+void initializeIPSCCPPass(PassRegistry&);
+void initializeIVUsersPass(PassRegistry&);
+void initializeIfConverterPass(PassRegistry&);
+void initializeInductiveRangeCheckEliminationPass(PassRegistry&);
+void initializeIndVarSimplifyPass(PassRegistry&);
+void initializeInferFunctionAttrsLegacyPassPass(PassRegistry&);
+void initializeInlineCostAnalysisPass(PassRegistry&);
+void initializeInstructionCombiningPassPass(PassRegistry&);
+void initializeInstCountPass(PassRegistry&);
+void initializeInstNamerPass(PassRegistry&);
+void initializeInternalizePassPass(PassRegistry&);
+void initializeIntervalPartitionPass(PassRegistry&);
+void initializeJumpThreadingPass(PassRegistry&);
+void initializeLCSSAPass(PassRegistry&);
+void initializeLICMPass(PassRegistry&);
+void initializeLazyValueInfoPass(PassRegistry&);
+void initializeLintPass(PassRegistry&);
+void initializeLiveDebugVariablesPass(PassRegistry&);
+void initializeLiveIntervalsPass(PassRegistry&);
+void initializeLiveRegMatrixPass(PassRegistry&);
+void initializeLiveStacksPass(PassRegistry&);
+void initializeLiveVariablesPass(PassRegistry&);
+void initializeLoaderPassPass(PassRegistry&);
+void initializeLocalStackSlotPassPass(PassRegistry&);
+void initializeLoopDeletionPass(PassRegistry&);
+void initializeLoopExtractorPass(PassRegistry&);
+void initializeLoopInfoWrapperPassPass(PassRegistry&);
+void initializeLoopInterchangePass(PassRegistry &);
+void initializeLoopInstSimplifyPass(PassRegistry&);
+void initializeLoopRotatePass(PassRegistry&);
+void initializeLoopSimplifyPass(PassRegistry&);
+void initializeLoopStrengthReducePass(PassRegistry&);
+void initializeGlobalMergePass(PassRegistry&);
+void initializeLoopRerollPass(PassRegistry&);
+void initializeLoopUnrollPass(PassRegistry&);
+void initializeLoopUnswitchPass(PassRegistry&);
+void initializeLoopIdiomRecognizePass(PassRegistry&);
+void initializeLowerAtomicPass(PassRegistry&);
+void initializeLowerBitSetsPass(PassRegistry&);
+void initializeLowerExpectIntrinsicPass(PassRegistry&);
+void initializeLowerIntrinsicsPass(PassRegistry&);
+void initializeLowerInvokePass(PassRegistry&);
+void initializeLowerSwitchPass(PassRegistry&);
+void initializeMachineBlockFrequencyInfoPass(PassRegistry&);
+void initializeMachineBlockPlacementPass(PassRegistry&);
+void initializeMachineBlockPlacementStatsPass(PassRegistry&);
+void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
+void initializeMachineCSEPass(PassRegistry&);
+void initializeImplicitNullChecksPass(PassRegistry&);
+void initializeMachineDominatorTreePass(PassRegistry&);
+void initializeMachineDominanceFrontierPass(PassRegistry&);
+void initializeMachinePostDominatorTreePass(PassRegistry&);
+void initializeMachineLICMPass(PassRegistry&);
+void initializeMachineLoopInfoPass(PassRegistry&);
+void initializeMachineModuleInfoPass(PassRegistry&);
+void initializeMachineRegionInfoPassPass(PassRegistry&);
+void initializeMachineSchedulerPass(PassRegistry&);
+void initializeMachineSinkingPass(PassRegistry&);
+void initializeMachineTraceMetricsPass(PassRegistry&);
+void initializeMachineVerifierPassPass(PassRegistry&);
+void initializeMemCpyOptPass(PassRegistry&);
+void initializeMemDepPrinterPass(PassRegistry&);
+void initializeMemDerefPrinterPass(PassRegistry&);
+void initializeMemoryDependenceAnalysisPass(PassRegistry&);
+void initializeMergedLoadStoreMotionPass(PassRegistry &);
+void initializeMetaRenamerPass(PassRegistry&);
+void initializeMergeFunctionsPass(PassRegistry&);
+void initializeModuleDebugInfoPrinterPass(PassRegistry&);
+void initializeNaryReassociatePass(PassRegistry&);
+void initializeNoAAPass(PassRegistry&);
+void initializeObjCARCAAWrapperPassPass(PassRegistry&);
+void initializeObjCARCAPElimPass(PassRegistry&);
+void initializeObjCARCExpandPass(PassRegistry&);
+void initializeObjCARCContractPass(PassRegistry&);
+void initializeObjCARCOptPass(PassRegistry&);
+void initializePAEvalPass(PassRegistry &);
+void initializeOptimizePHIsPass(PassRegistry&);
+void initializePartiallyInlineLibCallsPass(PassRegistry&);
+void initializePEIPass(PassRegistry&);
+void initializePHIEliminationPass(PassRegistry&);
+void initializePartialInlinerPass(PassRegistry&);
+void initializePeepholeOptimizerPass(PassRegistry&);
+void initializePostDomOnlyPrinterPass(PassRegistry&);
+void initializePostDomOnlyViewerPass(PassRegistry&);
+void initializePostDomPrinterPass(PassRegistry&);
+void initializePostDomViewerPass(PassRegistry&);
+void initializePostDominatorTreePass(PassRegistry&);
+void initializePostRASchedulerPass(PassRegistry&);
+void initializePostMachineSchedulerPass(PassRegistry&);
+void initializePrintFunctionPassWrapperPass(PassRegistry&);
+void initializePrintModulePassWrapperPass(PassRegistry&);
+void initializePrintBasicBlockPassPass(PassRegistry&);
+void initializeProcessImplicitDefsPass(PassRegistry&);
+void initializePromotePassPass(PassRegistry&);
+void initializePruneEHPass(PassRegistry&);
+void initializeReassociatePass(PassRegistry&);
+void initializeRegToMemPass(PassRegistry&);
+void initializeRegionInfoPassPass(PassRegistry&);
+void initializeRegionOnlyPrinterPass(PassRegistry&);
+void initializeRegionOnlyViewerPass(PassRegistry&);
+void initializeRegionPrinterPass(PassRegistry&);
+void initializeRegionViewerPass(PassRegistry&);
+void initializeRewriteStatepointsForGCPass(PassRegistry&);
+void initializeSafeStackPass(PassRegistry&);
+void initializeSCCPPass(PassRegistry&);
+void initializeSROALegacyPassPass(PassRegistry&);
+void initializeSROA_DTPass(PassRegistry&);
+void initializeSROA_SSAUpPass(PassRegistry&);
+void initializeSCEVAAWrapperPassPass(PassRegistry&);
+void initializeScalarEvolutionWrapperPassPass(PassRegistry&);
+void initializeShrinkWrapPass(PassRegistry &);
+void initializeSimpleInlinerPass(PassRegistry&);
+void initializeShadowStackGCLoweringPass(PassRegistry&);
+void initializeRegisterCoalescerPass(PassRegistry&);
+void initializeSingleLoopExtractorPass(PassRegistry&);
+void initializeSinkingPass(PassRegistry&);
+void initializeSeparateConstOffsetFromGEPPass(PassRegistry &);
+void initializeSlotIndexesPass(PassRegistry&);
+void initializeSpillPlacementPass(PassRegistry&);
+void initializeSpeculativeExecutionPass(PassRegistry&);
+void initializeStackProtectorPass(PassRegistry&);
+void initializeStackColoringPass(PassRegistry&);
+void initializeStackSlotColoringPass(PassRegistry&);
+void initializeStraightLineStrengthReducePass(PassRegistry &);
+void initializeStripDeadDebugInfoPass(PassRegistry&);
+void initializeStripDeadPrototypesLegacyPassPass(PassRegistry&);
+void initializeStripDebugDeclarePass(PassRegistry&);
+void initializeStripNonDebugSymbolsPass(PassRegistry&);
+void initializeStripSymbolsPass(PassRegistry&);
+void initializeTailCallElimPass(PassRegistry&);
+void initializeTailDuplicatePassPass(PassRegistry&);
+void initializeTargetPassConfigPass(PassRegistry&);
+void initializeTargetTransformInfoWrapperPassPass(PassRegistry &);
+void initializeTargetLibraryInfoWrapperPassPass(PassRegistry &);
+void initializeAssumptionCacheTrackerPass(PassRegistry &);
+void initializeTwoAddressInstructionPassPass(PassRegistry&);
+void initializeTypeBasedAAWrapperPassPass(PassRegistry&);
+void initializeScopedNoAliasAAWrapperPassPass(PassRegistry&);
+void initializeUnifyFunctionExitNodesPass(PassRegistry&);
+void initializeUnreachableBlockElimPass(PassRegistry&);
+void initializeUnreachableMachineBlockElimPass(PassRegistry&);
+void initializeVerifierLegacyPassPass(PassRegistry&);
+void initializeVirtRegMapPass(PassRegistry&);
+void initializeVirtRegRewriterPass(PassRegistry&);
+void initializeInstSimplifierPass(PassRegistry&);
+void initializeUnpackMachineBundlesPass(PassRegistry&);
+void initializeFinalizeMachineBundlesPass(PassRegistry&);
+void initializeLoopAccessAnalysisPass(PassRegistry&);
+void initializeLoopVectorizePass(PassRegistry&);
+void initializeSLPVectorizerPass(PassRegistry&);
+void initializeBBVectorizePass(PassRegistry&);
+void initializeMachineFunctionPrinterPassPass(PassRegistry&);
+void initializeMIRPrintingPassPass(PassRegistry&);
+void initializeStackMapLivenessPass(PassRegistry&);
+void initializeLiveDebugValuesPass(PassRegistry&);
+void initializeMachineCombinerPass(PassRegistry &);
+void initializeLoadCombinePass(PassRegistry&);
+void initializeRewriteSymbolsPass(PassRegistry&);
+void initializeWinEHPreparePass(PassRegistry&);
+void initializePlaceBackedgeSafepointsImplPass(PassRegistry&);
+void initializePlaceSafepointsPass(PassRegistry&);
+void initializeDwarfEHPreparePass(PassRegistry&);
+void initializeFloat2IntPass(PassRegistry&);
+void initializeLoopDistributePass(PassRegistry&);
+void initializeSjLjEHPreparePass(PassRegistry&);
+void initializeDemandedBitsPass(PassRegistry&);
+void initializeFuncletLayoutPass(PassRegistry &);
+void initializeLoopLoadEliminationPass(PassRegistry&);
+void initializeFunctionImportPassPass(PassRegistry &);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/LTO/LTOCodeGenerator.h b/contrib/llvm/include/llvm/LTO/LTOCodeGenerator.h
new file mode 100644
index 0000000..3820b21
--- /dev/null
+++ b/contrib/llvm/include/llvm/LTO/LTOCodeGenerator.h
@@ -0,0 +1,196 @@
+//===-LTOCodeGenerator.h - LLVM Link Time Optimizer -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LTOCodeGenerator class.
+//
+// LTO compilation consists of three phases: Pre-IPO, IPO and Post-IPO.
+//
+// The Pre-IPO phase compiles source code into bitcode files. The resulting
+// bitcode files, along with object files and libraries, are then fed to the
+// linker and taken through the IPO and Post-IPO phases. By using the obj-file
+// extension, a bitcode file disguises itself as an object file, and therefore
+// obviates the need to write a special set of make-rules just for LTO
+// compilation.
+//
+// The IPO phase performs inter-procedural analyses and optimizations, and
+// the Post-IPO phase consists of two sub-phases: intra-procedural scalar
+// optimizations (SOPT), and intra-procedural target-dependent code
+// generation (CG).
+//
+// As of this writing, we don't separate IPO and the Post-IPO SOPT. They
+// are intermingled and driven by a single pass manager (see
+// PassManagerBuilder::populateLTOPassManager()).
+//
+// The "LTOCodeGenerator" is the driver for the IPO and Post-IPO stages.
+// The name "CodeGenerator" here is a bit confusing; don't confuse it with
+// the machine-specific code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOCODEGENERATOR_H
+#define LLVM_LTO_LTOCODEGENERATOR_H
+
+#include "llvm-c/lto.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+ class LLVMContext;
+ class DiagnosticInfo;
+ class GlobalValue;
+ class Linker;
+ class Mangler;
+ class MemoryBuffer;
+ class TargetLibraryInfo;
+ class TargetMachine;
+ class raw_ostream;
+ class raw_pwrite_stream;
+
+//===----------------------------------------------------------------------===//
+/// C++ class which implements the opaque lto_code_gen_t type.
+///
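+/// A condensed driver flow (an illustrative sketch, not from the original
+/// header; Mod is assumed to be an already-created LTOModule*):
+/// \code
+///   LTOCodeGenerator CG(Context);
+///   CG.addModule(Mod);
+///   CG.setCodePICModel(Reloc::PIC_);
+///   const char *ObjPath = nullptr;
+///   CG.compile_to_file(&ObjPath, false, false, false, false);
+/// \endcode
+///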
+struct LTOCodeGenerator {
+ static const char *getVersionString();
+
+ LTOCodeGenerator(LLVMContext &Context);
+ ~LTOCodeGenerator();
+
+ /// Merge given module. Return true on success.
+ bool addModule(struct LTOModule *);
+
+ /// Set the destination module.
+ void setModule(std::unique_ptr<LTOModule> M);
+
+ void setTargetOptions(TargetOptions Options);
+ void setDebugInfo(lto_debug_model);
+ void setCodePICModel(Reloc::Model Model) { RelocModel = Model; }
+
+ /// Set the file type to be emitted (assembly or object code).
+ /// The default is TargetMachine::CGFT_ObjectFile.
+ void setFileType(TargetMachine::CodeGenFileType FT) { FileType = FT; }
+
+ void setCpu(const char *MCpu) { this->MCpu = MCpu; }
+ void setAttr(const char *MAttr) { this->MAttr = MAttr; }
+ void setOptLevel(unsigned OptLevel);
+
+ void setShouldInternalize(bool Value) { ShouldInternalize = Value; }
+ void setShouldEmbedUselists(bool Value) { ShouldEmbedUselists = Value; }
+
+ void addMustPreserveSymbol(StringRef Sym) { MustPreserveSymbols[Sym] = 1; }
+
+ /// Pass options to the driver and optimization passes.
+ ///
+ /// These options are not necessarily for debugging purposes (the function
+ /// name is misleading). This function should be called before
+ /// LTOCodeGenerator::compilexxx(), and
+ /// LTOCodeGenerator::writeMergedModules().
+ void setCodeGenDebugOptions(const char *Opts);
+
+ /// Parse the options set in setCodeGenDebugOptions.
+ ///
+ /// Like \a setCodeGenDebugOptions(), this must be called before
+ /// LTOCodeGenerator::compilexxx() and
+ /// LTOCodeGenerator::writeMergedModules().
+ void parseCodeGenDebugOptions();
+
+ /// Write the merged module to the file specified by the given path. Return
+ /// true on success.
+ bool writeMergedModules(const char *Path);
+
+ /// Compile the merged module into a *single* output file; the path to the
+ /// output file is returned to the caller via the \p Name argument. Return
+ /// true on success.
+ ///
+ /// \note It is up to the linker to remove the intermediate output file. Do
+ /// not try to remove the object file in LTOCodeGenerator's destructor, as
+ /// we don't know which (LTOCodeGenerator or the output file) will last
+ /// longer.
+ bool compile_to_file(const char **Name, bool DisableVerify,
+ bool DisableInline, bool DisableGVNLoadPRE,
+ bool DisableVectorization);
+
+ /// As with compile_to_file(), this function compiles the merged module into
+ /// a single output file. Instead of returning the output file path to the
+ /// caller (linker), it brings the output to a buffer, and returns the buffer
+ /// to the caller. This function should delete the intermediate file once
+ /// its content is brought to memory. Return NULL if the compilation was not
+ /// successful.
+ std::unique_ptr<MemoryBuffer> compile(bool DisableVerify, bool DisableInline,
+ bool DisableGVNLoadPRE,
+ bool DisableVectorization);
+
+ /// Optimizes the merged module. Returns true on success.
+ bool optimize(bool DisableVerify, bool DisableInline, bool DisableGVNLoadPRE,
+ bool DisableVectorization);
+
+ /// Compiles the merged optimized module into a single output file. It brings
+ /// the output to a buffer, and returns the buffer to the caller. Return NULL
+ /// if the compilation was not successful.
+ std::unique_ptr<MemoryBuffer> compileOptimized();
+
+ /// Compile the merged optimized module into out.size() output files each
+ /// representing a linkable partition of the module. If out contains more
+ /// than one element, code generation is done in parallel with out.size()
+ /// threads. Output files will be written to members of out. Returns true on
+ /// success.
+ bool compileOptimized(ArrayRef<raw_pwrite_stream *> Out);
+
+ void setDiagnosticHandler(lto_diagnostic_handler_t, void *);
+
+ LLVMContext &getContext() { return Context; }
+
+ void resetMergedModule() { MergedModule.reset(); }
+
+private:
+ void initializeLTOPasses();
+
+ bool compileOptimizedToFile(const char **Name);
+ void applyScopeRestrictions();
+ void applyRestriction(GlobalValue &GV, ArrayRef<StringRef> Libcalls,
+ std::vector<const char *> &MustPreserveList,
+ SmallPtrSetImpl<GlobalValue *> &AsmUsed,
+ Mangler &Mangler);
+ bool determineTarget();
+
+ static void DiagnosticHandler(const DiagnosticInfo &DI, void *Context);
+
+ void DiagnosticHandler2(const DiagnosticInfo &DI);
+
+ void emitError(const std::string &ErrMsg);
+
+ typedef StringMap<uint8_t> StringSet;
+
+ LLVMContext &Context;
+ std::unique_ptr<Module> MergedModule;
+ std::unique_ptr<Linker> TheLinker;
+ std::unique_ptr<TargetMachine> TargetMach;
+ bool EmitDwarfDebugInfo = false;
+ bool ScopeRestrictionsDone = false;
+ Reloc::Model RelocModel = Reloc::Default;
+ StringSet MustPreserveSymbols;
+ StringSet AsmUndefinedRefs;
+ std::vector<std::string> CodegenOptions;
+ std::string FeatureStr;
+ std::string MCpu;
+ std::string MAttr;
+ std::string NativeObjectPath;
+ TargetOptions Options;
+ CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
+ unsigned OptLevel = 2;
+ lto_diagnostic_handler_t DiagHandler = nullptr;
+ void *DiagContext = nullptr;
+ bool ShouldInternalize = true;
+ bool ShouldEmbedUselists = false;
+ TargetMachine::CodeGenFileType FileType = TargetMachine::CGFT_ObjectFile;
+};
+}
+#endif
diff --git a/contrib/llvm/include/llvm/LTO/LTOModule.h b/contrib/llvm/include/llvm/LTO/LTOModule.h
new file mode 100644
index 0000000..97b5865
--- /dev/null
+++ b/contrib/llvm/include/llvm/LTO/LTOModule.h
@@ -0,0 +1,214 @@
+//===-LTOModule.h - LLVM Link Time Optimizer ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LTOModule class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LTO_LTOMODULE_H
+#define LLVM_LTO_LTOMODULE_H
+
+#include "llvm-c/lto.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include <string>
+#include <vector>
+
+// Forward references to llvm classes.
+namespace llvm {
+ class Function;
+ class GlobalValue;
+ class MemoryBuffer;
+ class TargetOptions;
+ class Value;
+
+//===----------------------------------------------------------------------===//
+/// C++ class which implements the opaque lto_module_t type.
+///
+struct LTOModule {
+private:
+ struct NameAndAttributes {
+ const char *name;
+ uint32_t attributes;
+ bool isFunction;
+ const GlobalValue *symbol;
+ };
+
+ std::unique_ptr<LLVMContext> OwnedContext;
+
+ std::string LinkerOpts;
+
+ std::unique_ptr<object::IRObjectFile> IRFile;
+ std::unique_ptr<TargetMachine> _target;
+ std::vector<NameAndAttributes> _symbols;
+
+ // _defines and _undefines only needed to disambiguate tentative definitions
+ StringSet<> _defines;
+ StringMap<NameAndAttributes> _undefines;
+ std::vector<const char*> _asm_undefines;
+
+ LTOModule(std::unique_ptr<object::IRObjectFile> Obj, TargetMachine *TM);
+ LTOModule(std::unique_ptr<object::IRObjectFile> Obj, TargetMachine *TM,
+ std::unique_ptr<LLVMContext> Context);
+
+public:
+ ~LTOModule();
+
+ /// Returns 'true' if the file or memory contents are LLVM bitcode.
+ static bool isBitcodeFile(const void *mem, size_t length);
+ static bool isBitcodeFile(const char *path);
+
+ /// Returns 'true' if the memory buffer is LLVM bitcode for the specified
+ /// triple.
+ static bool isBitcodeForTarget(MemoryBuffer *memBuffer,
+ StringRef triplePrefix);
+
+ /// Returns a string representing the producer identification stored in the
+ /// bitcode, or "" if the bitcode does not contain any.
+ ///
+ static std::string getProducerString(MemoryBuffer *Buffer);
+
+ /// Create a MemoryBuffer from a memory range with an optional name.
+ static std::unique_ptr<MemoryBuffer>
+ makeBuffer(const void *mem, size_t length, StringRef name = "");
+
+ /// Create an LTOModule. N.B. These methods take ownership of the buffer. The
+ /// caller must have initialized the Targets, the TargetMCs, the AsmPrinters,
+ /// and the AsmParsers by calling:
+ ///
+ /// InitializeAllTargets();
+ /// InitializeAllTargetMCs();
+ /// InitializeAllAsmPrinters();
+ /// InitializeAllAsmParsers();
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createFromFile(LLVMContext &Context, const char *path, TargetOptions options);
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createFromOpenFile(LLVMContext &Context, int fd, const char *path,
+ size_t size, TargetOptions options);
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createFromOpenFileSlice(LLVMContext &Context, int fd, const char *path,
+ size_t map_size, off_t offset, TargetOptions options);
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createFromBuffer(LLVMContext &Context, const void *mem, size_t length,
+ TargetOptions options, StringRef path = "");
+
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createInLocalContext(const void *mem, size_t length, TargetOptions options,
+ StringRef path);
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ createInContext(const void *mem, size_t length, TargetOptions options,
+ StringRef path, LLVMContext *Context);
+
+ const Module &getModule() const {
+ return const_cast<LTOModule*>(this)->getModule();
+ }
+ Module &getModule() {
+ return IRFile->getModule();
+ }
+
+ std::unique_ptr<Module> takeModule() { return IRFile->takeModule(); }
+
+ /// Return the Module's target triple.
+ const std::string &getTargetTriple() {
+ return getModule().getTargetTriple();
+ }
+
+ /// Set the Module's target triple.
+ void setTargetTriple(StringRef Triple) {
+ getModule().setTargetTriple(Triple);
+ }
+
+ /// Get the number of symbols
+ uint32_t getSymbolCount() {
+ return _symbols.size();
+ }
+
+ /// Get the attributes for a symbol at the specified index.
+ lto_symbol_attributes getSymbolAttributes(uint32_t index) {
+ if (index < _symbols.size())
+ return lto_symbol_attributes(_symbols[index].attributes);
+ return lto_symbol_attributes(0);
+ }
+
+ /// Get the name of the symbol at the specified index.
+ const char *getSymbolName(uint32_t index) {
+ if (index < _symbols.size())
+ return _symbols[index].name;
+ return nullptr;
+ }
+
+ const GlobalValue *getSymbolGV(uint32_t index) {
+ if (index < _symbols.size())
+ return _symbols[index].symbol;
+ return nullptr;
+ }
+
+ const char *getLinkerOpts() {
+ return LinkerOpts.c_str();
+ }
+
+ const std::vector<const char*> &getAsmUndefinedRefs() {
+ return _asm_undefines;
+ }
+
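+ // Enumerating the symbol table exposed above (an illustrative sketch, not
+ // part of the original header):
+ //
+ //   for (uint32_t i = 0, e = Mod->getSymbolCount(); i != e; ++i)
+ //     errs() << Mod->getSymbolName(i) << "\n";
+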
+private:
+ /// Parse metadata from the module.
+ // FIXME: it only parses "Linker Options" metadata at the moment.
+ void parseMetadata();
+
+ /// Parse the symbols from the module and module-level ASM and add them to
+ /// either the defined or undefined lists.
+ void parseSymbols();
+
+ /// Add a symbol which isn't defined just yet to a list to be resolved later.
+ void addPotentialUndefinedSymbol(const object::BasicSymbolRef &Sym,
+ bool isFunc);
+
+ /// Add a defined symbol to the list.
+ void addDefinedSymbol(const char *Name, const GlobalValue *def,
+ bool isFunction);
+
+ /// Add a data symbol as defined to the list.
+ void addDefinedDataSymbol(const object::BasicSymbolRef &Sym);
+ void addDefinedDataSymbol(const char *Name, const GlobalValue *v);
+
+ /// Add a function symbol as defined to the list.
+ void addDefinedFunctionSymbol(const object::BasicSymbolRef &Sym);
+ void addDefinedFunctionSymbol(const char *Name, const Function *F);
+
+ /// Add a global symbol from module-level ASM to the defined list.
+ void addAsmGlobalSymbol(const char *, lto_symbol_attributes scope);
+
+ /// Add a global symbol from module-level ASM to the undefined list.
+ void addAsmGlobalSymbolUndef(const char *);
+
+ /// Parse i386/ppc ObjC class data structure.
+ void addObjCClass(const GlobalVariable *clgv);
+
+ /// Parse i386/ppc ObjC category data structure.
+ void addObjCCategory(const GlobalVariable *clgv);
+
+ /// Parse i386/ppc ObjC class list data structure.
+ void addObjCClassRef(const GlobalVariable *clgv);
+
+ /// Get string that the data pointer points to.
+ bool objcClassNameFromExpression(const Constant *c, std::string &name);
+
+ /// Create an LTOModule (private version).
+ static ErrorOr<std::unique_ptr<LTOModule>>
+ makeLTOModule(MemoryBufferRef Buffer, TargetOptions options,
+ LLVMContext *Context);
+};
+}
+#endif
diff --git a/contrib/llvm/include/llvm/LibDriver/LibDriver.h b/contrib/llvm/include/llvm/LibDriver/LibDriver.h
new file mode 100644
index 0000000..0949565
--- /dev/null
+++ b/contrib/llvm/include/llvm/LibDriver/LibDriver.h
@@ -0,0 +1,26 @@
+//===- llvm/LibDriver/LibDriver.h - lib.exe-compatible driver ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines an interface to a lib.exe-compatible driver that also understands
+// bitcode files. Used by llvm-lib and lld-link /lib.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBDRIVER_LIBDRIVER_H
+#define LLVM_LIBDRIVER_LIBDRIVER_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+int libDriverMain(llvm::ArrayRef<const char*> Args);
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/LineEditor/LineEditor.h b/contrib/llvm/include/llvm/LineEditor/LineEditor.h
new file mode 100644
index 0000000..bb106f8
--- /dev/null
+++ b/contrib/llvm/include/llvm/LineEditor/LineEditor.h
@@ -0,0 +1,153 @@
+//===-- llvm/LineEditor/LineEditor.h - line editor --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINEEDITOR_LINEEDITOR_H
+#define LLVM_LINEEDITOR_LINEEDITOR_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+class LineEditor {
+public:
+ /// Create a LineEditor object.
+ ///
+ /// \param ProgName The name of the current program. Used to form a default
+ /// prompt.
+ /// \param HistoryPath Path to the file in which to store history data, if
+ /// possible.
+ /// \param In The input stream used by the editor.
+ /// \param Out The output stream used by the editor.
+ /// \param Err The error stream used by the editor.
+ LineEditor(StringRef ProgName, StringRef HistoryPath = "", FILE *In = stdin,
+ FILE *Out = stdout, FILE *Err = stderr);
+ ~LineEditor();
+
+ /// Reads a line.
+ ///
+ /// \return The line, or llvm::Optional<std::string>() on EOF.
+ llvm::Optional<std::string> readLine() const;
+
+ void saveHistory();
+ void loadHistory();
+
+ static std::string getDefaultHistoryPath(StringRef ProgName);
+
+ /// The action to perform upon a completion request.
+ struct CompletionAction {
+ enum ActionKind {
+ /// Insert Text at the cursor position.
+ AK_Insert,
+ /// Show Completions, or beep if the list is empty.
+ AK_ShowCompletions
+ };
+
+ ActionKind Kind;
+
+ /// The text to insert.
+ std::string Text;
+
+ /// The list of completions to show.
+ std::vector<std::string> Completions;
+ };
+
+ /// A possible completion at a given cursor position.
+ struct Completion {
+ Completion() {}
+ Completion(const std::string &TypedText, const std::string &DisplayText)
+ : TypedText(TypedText), DisplayText(DisplayText) {}
+
+ /// The text to insert. If the user has already input some of the
+ /// completion, this should only include the rest of the text.
+ std::string TypedText;
+
+ /// A description of this completion. This may be the completion itself, or
+ /// maybe a summary of its type or arguments.
+ std::string DisplayText;
+ };
+
+ /// Set the completer for this LineEditor. A completer is a function object
+ /// which takes arguments of type StringRef (the string to complete) and
+ /// size_t (the zero-based cursor position in the StringRef) and returns a
+ /// CompletionAction.
+ template <typename T> void setCompleter(T Comp) {
+ Completer.reset(new CompleterModel<T>(Comp));
+ }
+
+ /// Set the completer for this LineEditor to the given list completer.
+ /// A list completer is a function object which takes arguments of type
+ /// StringRef (the string to complete) and size_t (the zero-based cursor
+ /// position in the StringRef) and returns a std::vector<Completion>.
+ template <typename T> void setListCompleter(T Comp) {
+ Completer.reset(new ListCompleterModel<T>(Comp));
+ }
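+
+ // For example (an illustrative sketch, not from the original header), a
+ // list completer can be supplied as a lambda:
+ //
+ //   LE.setListCompleter([](StringRef Buffer, size_t Pos) {
+ //     return std::vector<LineEditor::Completion>(
+ //         1, LineEditor::Completion("help", "help - list commands"));
+ //   });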
+
+ /// Use the current completer to produce a CompletionAction for the given
+ /// completion request. If the current completer is a list completer, this
+ /// will return an AK_Insert CompletionAction if each completion has a common
+ /// prefix, or an AK_ShowCompletions CompletionAction otherwise.
+ ///
+ /// \param Buffer The string to complete
+ /// \param Pos The zero-based cursor position in the StringRef
+ CompletionAction getCompletionAction(StringRef Buffer, size_t Pos) const;
+
+ const std::string &getPrompt() const { return Prompt; }
+ void setPrompt(const std::string &P) { Prompt = P; }
+
+ // Public so callbacks in LineEditor.cpp can use it.
+ struct InternalData;
+
+private:
+ std::string Prompt;
+ std::string HistoryPath;
+ std::unique_ptr<InternalData> Data;
+
+ struct CompleterConcept {
+ virtual ~CompleterConcept();
+ virtual CompletionAction complete(StringRef Buffer, size_t Pos) const = 0;
+ };
+
+ struct ListCompleterConcept : CompleterConcept {
+ ~ListCompleterConcept() override;
+ CompletionAction complete(StringRef Buffer, size_t Pos) const override;
+ static std::string getCommonPrefix(const std::vector<Completion> &Comps);
+ virtual std::vector<Completion> getCompletions(StringRef Buffer,
+ size_t Pos) const = 0;
+ };
+
+ template <typename T>
+ struct CompleterModel : CompleterConcept {
+ CompleterModel(T Value) : Value(Value) {}
+ CompletionAction complete(StringRef Buffer, size_t Pos) const override {
+ return Value(Buffer, Pos);
+ }
+ T Value;
+ };
+
+ template <typename T>
+ struct ListCompleterModel : ListCompleterConcept {
+ ListCompleterModel(T Value) : Value(Value) {}
+ std::vector<Completion> getCompletions(StringRef Buffer,
+ size_t Pos) const override {
+ return Value(Buffer, Pos);
+ }
+ T Value;
+ };
+
+ std::unique_ptr<const CompleterConcept> Completer;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/LinkAllIR.h b/contrib/llvm/include/llvm/LinkAllIR.h
new file mode 100644
index 0000000..2b0604a
--- /dev/null
+++ b/contrib/llvm/include/llvm/LinkAllIR.h
@@ -0,0 +1,53 @@
+//===----- LinkAllIR.h - Reference All VMCore Code --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all the object modules of the VMCore library so
+// that tools like llc, opt, and lli can ensure they are linked with all symbols
+// from libVMCore.a. It should only be used from a tool's main program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKALLIR_H
+#define LLVM_LINKALLIR_H
+
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TimeValue.h"
+#include <cstdlib>
+
+namespace {
+ struct ForceVMCoreLinking {
+ ForceVMCoreLinking() {
+ // We must reference VMCore in such a way that compilers will not
+ // delete it all as dead code, even with whole-program optimization,
+ // yet the reference is effectively a no-op. As the compiler isn't smart
+ // enough to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+ (void)new llvm::Module("", llvm::getGlobalContext());
+ (void)new llvm::UnreachableInst(llvm::getGlobalContext());
+ (void) llvm::createVerifierPass();
+ }
+ } ForceVMCoreLinking;
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/LinkAllPasses.h b/contrib/llvm/include/llvm/LinkAllPasses.h
new file mode 100644
index 0000000..29fcd93
--- /dev/null
+++ b/contrib/llvm/include/llvm/LinkAllPasses.h
@@ -0,0 +1,199 @@
+//===- llvm/LinkAllPasses.h ------------ Reference All Passes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all transformation and analysis passes for tools
+// like opt and bugpoint that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKALLPASSES_H
+#define LLVM_LINKALLPASSES_H
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/CFLAliasAnalysis.h"
+#include "llvm/Analysis/CallPrinter.h"
+#include "llvm/Analysis/DomPrinter.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/IntervalPartition.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/RegionPass.h"
+#include "llvm/Analysis/RegionPrinter.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/ScopedNoAliasAA.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
+#include "llvm/Transforms/Vectorize.h"
+#include "llvm/Support/Valgrind.h"
+#include <cstdlib>
+
+namespace {
+ struct ForcePassLinking {
+ ForcePassLinking() {
+ // We must reference the passes in such a way that compilers will not
+ // delete it all as dead code, even with whole-program optimization,
+ // yet the reference is effectively a no-op. As the compiler isn't smart
+ // enough to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ (void) llvm::createAAEvalPass();
+ (void) llvm::createAggressiveDCEPass();
+ (void) llvm::createBitTrackingDCEPass();
+ (void) llvm::createArgumentPromotionPass();
+ (void) llvm::createAlignmentFromAssumptionsPass();
+ (void) llvm::createBasicAAWrapperPass();
+ (void) llvm::createSCEVAAWrapperPass();
+ (void) llvm::createTypeBasedAAWrapperPass();
+ (void) llvm::createScopedNoAliasAAWrapperPass();
+ (void) llvm::createBoundsCheckingPass();
+ (void) llvm::createBreakCriticalEdgesPass();
+ (void) llvm::createCallGraphPrinterPass();
+ (void) llvm::createCallGraphViewerPass();
+ (void) llvm::createCFGSimplificationPass();
+ (void) llvm::createCFLAAWrapperPass();
+ (void) llvm::createStructurizeCFGPass();
+ (void) llvm::createConstantMergePass();
+ (void) llvm::createConstantPropagationPass();
+ (void) llvm::createCostModelAnalysisPass();
+ (void) llvm::createDeadArgEliminationPass();
+ (void) llvm::createDeadCodeEliminationPass();
+ (void) llvm::createDeadInstEliminationPass();
+ (void) llvm::createDeadStoreEliminationPass();
+ (void) llvm::createDependenceAnalysisPass();
+ (void) llvm::createDivergenceAnalysisPass();
+ (void) llvm::createDomOnlyPrinterPass();
+ (void) llvm::createDomPrinterPass();
+ (void) llvm::createDomOnlyViewerPass();
+ (void) llvm::createDomViewerPass();
+ (void) llvm::createGCOVProfilerPass();
+ (void) llvm::createPGOInstrumentationGenPass();
+ (void) llvm::createPGOInstrumentationUsePass();
+ (void) llvm::createInstrProfilingPass();
+ (void) llvm::createFunctionImportPass();
+ (void) llvm::createFunctionInliningPass();
+ (void) llvm::createAlwaysInlinerPass();
+ (void) llvm::createGlobalDCEPass();
+ (void) llvm::createGlobalOptimizerPass();
+ (void) llvm::createGlobalsAAWrapperPass();
+ (void) llvm::createIPConstantPropagationPass();
+ (void) llvm::createIPSCCPPass();
+ (void) llvm::createInductiveRangeCheckEliminationPass();
+ (void) llvm::createIndVarSimplifyPass();
+ (void) llvm::createInstructionCombiningPass();
+ (void) llvm::createInternalizePass();
+ (void) llvm::createLCSSAPass();
+ (void) llvm::createLICMPass();
+ (void) llvm::createLazyValueInfoPass();
+ (void) llvm::createLoopExtractorPass();
+ (void) llvm::createLoopInterchangePass();
+ (void) llvm::createLoopSimplifyPass();
+ (void) llvm::createLoopStrengthReducePass();
+ (void) llvm::createLoopRerollPass();
+ (void) llvm::createLoopUnrollPass();
+ (void) llvm::createLoopUnswitchPass();
+ (void) llvm::createLoopIdiomPass();
+ (void) llvm::createLoopRotatePass();
+ (void) llvm::createLowerExpectIntrinsicPass();
+ (void) llvm::createLowerInvokePass();
+ (void) llvm::createLowerSwitchPass();
+ (void) llvm::createNaryReassociatePass();
+ (void) llvm::createObjCARCAAWrapperPass();
+ (void) llvm::createObjCARCAPElimPass();
+ (void) llvm::createObjCARCExpandPass();
+ (void) llvm::createObjCARCContractPass();
+ (void) llvm::createObjCARCOptPass();
+ (void) llvm::createPAEvalPass();
+ (void) llvm::createPromoteMemoryToRegisterPass();
+ (void) llvm::createDemoteRegisterToMemoryPass();
+ (void) llvm::createPruneEHPass();
+ (void) llvm::createPostDomOnlyPrinterPass();
+ (void) llvm::createPostDomPrinterPass();
+ (void) llvm::createPostDomOnlyViewerPass();
+ (void) llvm::createPostDomViewerPass();
+ (void) llvm::createReassociatePass();
+ (void) llvm::createRegionInfoPass();
+ (void) llvm::createRegionOnlyPrinterPass();
+ (void) llvm::createRegionOnlyViewerPass();
+ (void) llvm::createRegionPrinterPass();
+ (void) llvm::createRegionViewerPass();
+ (void) llvm::createSCCPPass();
+ (void) llvm::createSafeStackPass();
+ (void) llvm::createScalarReplAggregatesPass();
+ (void) llvm::createSingleLoopExtractorPass();
+ (void) llvm::createStripSymbolsPass();
+ (void) llvm::createStripNonDebugSymbolsPass();
+ (void) llvm::createStripDeadDebugInfoPass();
+ (void) llvm::createStripDeadPrototypesPass();
+ (void) llvm::createTailCallEliminationPass();
+ (void) llvm::createJumpThreadingPass();
+ (void) llvm::createUnifyFunctionExitNodesPass();
+ (void) llvm::createInstCountPass();
+ (void) llvm::createConstantHoistingPass();
+ (void) llvm::createCodeGenPreparePass();
+ (void) llvm::createEarlyCSEPass();
+ (void) llvm::createMergedLoadStoreMotionPass();
+ (void) llvm::createGVNPass();
+ (void) llvm::createMemCpyOptPass();
+ (void) llvm::createLoopDeletionPass();
+ (void) llvm::createPostDomTree();
+ (void) llvm::createInstructionNamerPass();
+ (void) llvm::createMetaRenamerPass();
+ (void) llvm::createFunctionAttrsPass();
+ (void) llvm::createMergeFunctionsPass();
+ (void) llvm::createPrintModulePass(*(llvm::raw_ostream*)nullptr);
+ (void) llvm::createPrintFunctionPass(*(llvm::raw_ostream*)nullptr);
+ (void) llvm::createPrintBasicBlockPass(*(llvm::raw_ostream*)nullptr);
+ (void) llvm::createModuleDebugInfoPrinterPass();
+ (void) llvm::createPartialInliningPass();
+ (void) llvm::createLintPass();
+ (void) llvm::createSinkingPass();
+ (void) llvm::createLowerAtomicPass();
+ (void) llvm::createCorrelatedValuePropagationPass();
+ (void) llvm::createMemDepPrinter();
+ (void) llvm::createInstructionSimplifierPass();
+ (void) llvm::createLoopVectorizePass();
+ (void) llvm::createSLPVectorizerPass();
+ (void) llvm::createBBVectorizePass();
+ (void) llvm::createPartiallyInlineLibCallsPass();
+ (void) llvm::createScalarizerPass();
+ (void) llvm::createSeparateConstOffsetFromGEPPass();
+ (void) llvm::createSpeculativeExecutionPass();
+ (void) llvm::createRewriteSymbolsPass();
+ (void) llvm::createStraightLineStrengthReducePass();
+ (void) llvm::createMemDerefPrinter();
+ (void) llvm::createFloat2IntPass();
+ (void) llvm::createEliminateAvailableExternallyPass();
+
+ (void)new llvm::IntervalPartition();
+ (void)new llvm::ScalarEvolutionWrapperPass();
+ ((llvm::Function*)nullptr)->viewCFGOnly();
+ llvm::RGPassManager RGM;
+ ((llvm::RegionPass*)nullptr)->runOnRegion((llvm::Region*)nullptr, RGM);
+ llvm::AliasSetTracker X(*(llvm::AliasAnalysis*)nullptr);
+ X.add(nullptr, 0, llvm::AAMDNodes()); // for -print-alias-sets
+ (void) llvm::AreStatisticsEnabled();
+ (void) llvm::sys::RunningOnValgrind();
+ }
+ } ForcePassLinking; // Force link by creating a global definition.
+}
+
+#endif
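
For context, a consumer only needs to include this header; a hedged sketch of
a tool's translation unit (the main body is hypothetical):

    // Including LinkAllPasses.h pulls every create*Pass() symbol, and the
    // libraries defining them, into the final binary, because the
    // ForcePassLinking constructor above references each one.
    #include "llvm/LinkAllPasses.h"

    int main(int argc, char **argv) {
      // ... build a pass pipeline from command-line options; every pass
      // referenced above is guaranteed to be linked into this tool.
      return 0;
    }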
diff --git a/contrib/llvm/include/llvm/Linker/IRMover.h b/contrib/llvm/include/llvm/Linker/IRMover.h
new file mode 100644
index 0000000..a964cc4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Linker/IRMover.h
@@ -0,0 +1,76 @@
+//===- IRMover.h ------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKER_IRMOVER_H
+#define LLVM_LINKER_IRMOVER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include <functional>
+
+namespace llvm {
+class GlobalValue;
+class MDNode;
+class Module;
+class StructType;
+class Type;
+
+class IRMover {
+ struct StructTypeKeyInfo {
+ struct KeyTy {
+ ArrayRef<Type *> ETypes;
+ bool IsPacked;
+ KeyTy(ArrayRef<Type *> E, bool P);
+ KeyTy(const StructType *ST);
+ bool operator==(const KeyTy &that) const;
+ bool operator!=(const KeyTy &that) const;
+ };
+ static StructType *getEmptyKey();
+ static StructType *getTombstoneKey();
+ static unsigned getHashValue(const KeyTy &Key);
+ static unsigned getHashValue(const StructType *ST);
+ static bool isEqual(const KeyTy &LHS, const StructType *RHS);
+ static bool isEqual(const StructType *LHS, const StructType *RHS);
+ };
+
+public:
+ class IdentifiedStructTypeSet {
+  // The set of opaque types in the composite module.
+ DenseSet<StructType *> OpaqueStructTypes;
+
+  // The set of identified but non-opaque structures in the composite module.
+ DenseSet<StructType *, StructTypeKeyInfo> NonOpaqueStructTypes;
+
+ public:
+ void addNonOpaque(StructType *Ty);
+ void switchToNonOpaque(StructType *Ty);
+ void addOpaque(StructType *Ty);
+ StructType *findNonOpaque(ArrayRef<Type *> ETypes, bool IsPacked);
+ bool hasType(StructType *Ty);
+ };
+
+ IRMover(Module &M);
+
+ typedef std::function<void(GlobalValue &)> ValueAdder;
+  /// Move in the provided values. The source is destroyed.
+ /// Returns true on error.
+ bool move(Module &Src, ArrayRef<GlobalValue *> ValuesToLink,
+ std::function<void(GlobalValue &GV, ValueAdder Add)> AddLazyFor,
+ DenseMap<unsigned, MDNode *> *ValIDToTempMDMap = nullptr,
+ bool IsMetadataLinkingPostpass = false);
+ Module &getModule() { return Composite; }
+
+private:
+ Module &Composite;
+ IdentifiedStructTypeSet IdentifiedStructTypes;
+};
+
+} // End llvm namespace
+
+#endif
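
A minimal usage sketch for this interface, assuming a composite module Dst and
a source module Src loaded elsewhere; moveAll is a made-up helper and error
handling is elided:

    #include "llvm/IR/Module.h"
    #include "llvm/Linker/IRMover.h"
    #include <memory>
    #include <vector>

    // Move every function and global variable from Src into Dst. The no-op
    // lazy-add callback means nothing extra is pulled in on demand.
    static bool moveAll(llvm::Module &Dst, std::unique_ptr<llvm::Module> Src) {
      llvm::IRMover Mover(Dst);
      std::vector<llvm::GlobalValue *> ToLink;
      for (llvm::Function &F : Src->functions())
        ToLink.push_back(&F);
      for (llvm::GlobalVariable &G : Src->globals())
        ToLink.push_back(&G);
      // move() consumes the source module and returns true on error.
      return Mover.move(*Src, ToLink,
                        [](llvm::GlobalValue &, llvm::IRMover::ValueAdder) {});
    }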
diff --git a/contrib/llvm/include/llvm/Linker/Linker.h b/contrib/llvm/include/llvm/Linker/Linker.h
new file mode 100644
index 0000000..dde3f73
--- /dev/null
+++ b/contrib/llvm/include/llvm/Linker/Linker.h
@@ -0,0 +1,77 @@
+//===- Linker.h - Module Linker Interface -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LINKER_LINKER_H
+#define LLVM_LINKER_LINKER_H
+
+#include "llvm/IR/FunctionInfo.h"
+#include "llvm/Linker/IRMover.h"
+
+namespace llvm {
+class Module;
+class StructType;
+class Type;
+
+/// This class provides the core functionality of linking in LLVM. It keeps a
+/// pointer to the merged module so far. It doesn't take ownership of the
+/// module since it is assumed that the user of this class will want to do
+/// something with it after the linking.
+class Linker {
+ IRMover Mover;
+
+public:
+ enum Flags {
+ None = 0,
+ OverrideFromSrc = (1 << 0),
+ LinkOnlyNeeded = (1 << 1),
+ InternalizeLinkedSymbols = (1 << 2)
+ };
+
+ Linker(Module &M);
+
+ /// \brief Link \p Src into the composite.
+ ///
+  /// Passing the OverrideFromSrc flag will have symbols from Src
+  /// shadow those in the Dest.
+ /// For ThinLTO function importing/exporting the \p FunctionInfoIndex
+ /// is passed. If \p FunctionsToImport is provided, only the functions that
+ /// are part of the set will be imported from the source module.
+ /// The \p ValIDToTempMDMap is populated by the linker when function
+ /// importing is performed.
+ ///
+ /// Returns true on error.
+ bool linkInModule(std::unique_ptr<Module> Src, unsigned Flags = Flags::None,
+ const FunctionInfoIndex *Index = nullptr,
+ DenseSet<const GlobalValue *> *FunctionsToImport = nullptr,
+ DenseMap<unsigned, MDNode *> *ValIDToTempMDMap = nullptr);
+
+  /// This exists to implement the deprecated LLVMLinkModules C API. Don't use
+ /// for anything else.
+ bool linkInModuleForCAPI(Module &Src);
+
+ static bool linkModules(Module &Dest, std::unique_ptr<Module> Src,
+ unsigned Flags = Flags::None);
+
+ /// \brief Link metadata from \p Src into the composite. The source is
+ /// destroyed.
+ ///
+  /// The \p ValIDToTempMDMap should have been populated earlier during function
+ /// importing from \p Src.
+ bool linkInMetadata(Module &Src,
+ DenseMap<unsigned, MDNode *> *ValIDToTempMDMap);
+};
+
+/// Create a new module with exported local functions renamed and promoted
+/// for ThinLTO.
+std::unique_ptr<Module> renameModuleForThinLTO(std::unique_ptr<Module> M,
+ const FunctionInfoIndex *Index);
+
+} // End llvm namespace
+
+#endif
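
A hedged sketch of the common entry point; mergeInto is a hypothetical
wrapper:

    #include "llvm/IR/Module.h"
    #include "llvm/Linker/Linker.h"
    #include <memory>

    // Link Src into Composite, importing only definitions the composite
    // actually needs. Returns true on error, per the convention above.
    static bool mergeInto(llvm::Module &Composite,
                          std::unique_ptr<llvm::Module> Src) {
      return llvm::Linker::linkModules(Composite, std::move(Src),
                                       llvm::Linker::LinkOnlyNeeded);
    }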
diff --git a/contrib/llvm/include/llvm/MC/ConstantPools.h b/contrib/llvm/include/llvm/MC/ConstantPools.h
new file mode 100644
index 0000000..552e144
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/ConstantPools.h
@@ -0,0 +1,93 @@
+//===- ConstantPools.h - Keep track of assembler-generated -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ConstantPool and AssemblerConstantPools classes.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_MC_CONSTANTPOOLS_H
+#define LLVM_MC_CONSTANTPOOLS_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+class MCContext;
+class MCExpr;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+
+struct ConstantPoolEntry {
+ ConstantPoolEntry(MCSymbol *L, const MCExpr *Val, unsigned Sz, SMLoc Loc_)
+ : Label(L), Value(Val), Size(Sz), Loc(Loc_) {}
+ MCSymbol *Label;
+ const MCExpr *Value;
+ unsigned Size;
+ SMLoc Loc;
+};
+
+// A class to keep track of assembler-generated constant pools that are used to
+// implement the ldr-pseudo.
+class ConstantPool {
+ typedef SmallVector<ConstantPoolEntry, 4> EntryVecTy;
+ EntryVecTy Entries;
+
+public:
+ // Initialize a new empty constant pool
+ ConstantPool() {}
+
+ // Add a new entry to the constant pool in the next slot.
+ // \param Value is the new entry to put in the constant pool.
+ // \param Size is the size in bytes of the entry
+ //
+  // \returns an MCExpr that references the newly inserted value
+ const MCExpr *addEntry(const MCExpr *Value, MCContext &Context,
+ unsigned Size, SMLoc Loc);
+
+ // Emit the contents of the constant pool using the provided streamer.
+ void emitEntries(MCStreamer &Streamer);
+
+ // Return true if the constant pool is empty
+ bool empty();
+};
+
+class AssemblerConstantPools {
+ // Map type used to keep track of per-Section constant pools used by the
+ // ldr-pseudo opcode. The map associates a section to its constant pool. The
+  // constant pool is a vector of (label, value) pairs. When the ldr
+  // pseudo is parsed, we insert a new (label, value) pair into the constant
+  // pool for the current section and add an MCSymbolRefExpr to the new label
+  // as an operand of the ldr. After we have parsed all the user input we
+ // output the (label, value) pairs in each constant pool at the end of the
+ // section.
+ //
+ // We use the MapVector for the map type to ensure stable iteration of
+ // the sections at the end of the parse. We need to iterate over the
+  // sections in a stable order to ensure that we print the
+ // constant pools in a deterministic order when printing an assembly
+ // file.
+ typedef MapVector<MCSection *, ConstantPool> ConstantPoolMapTy;
+ ConstantPoolMapTy ConstantPools;
+
+public:
+ void emitAll(MCStreamer &Streamer);
+ void emitForCurrentSection(MCStreamer &Streamer);
+ const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
+ unsigned Size, SMLoc Loc);
+
+private:
+ ConstantPool *getConstantPool(MCSection *Section);
+ ConstantPool &getOrCreateConstantPool(MCSection *Section);
+};
+} // end namespace llvm
+
+#endif
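
A sketch of how a target streamer might drive this for an ldr-pseudo;
lowerLdrPseudo is a hypothetical helper, not an API from this header:

    #include "llvm/MC/ConstantPools.h"
    #include "llvm/MC/MCExpr.h"
    #include "llvm/MC/MCStreamer.h"

    // On "ldr r0, =expr": stash expr in the current section's pool and get
    // back an expression for the pool label, which becomes the load target.
    static const llvm::MCExpr *
    lowerLdrPseudo(llvm::AssemblerConstantPools &Pools,
                   llvm::MCStreamer &Streamer, const llvm::MCExpr *Value,
                   llvm::SMLoc Loc) {
      // At the end of parsing, Pools.emitAll(Streamer) would flush every
      // section's accumulated (label, value) pairs.
      return Pools.addEntry(Streamer, Value, /*Size=*/4, Loc);
    }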
diff --git a/contrib/llvm/include/llvm/MC/MCAsmBackend.h b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
new file mode 100644
index 0000000..51312ff
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmBackend.h
@@ -0,0 +1,148 @@
+//===-- llvm/MC/MCAsmBackend.h - MC Asm Backend -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMBACKEND_H
+#define LLVM_MC_MCASMBACKEND_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+class MCAsmLayout;
+class MCAssembler;
+class MCELFObjectTargetWriter;
+struct MCFixupKindInfo;
+class MCFragment;
+class MCInst;
+class MCRelaxableFragment;
+class MCObjectWriter;
+class MCSection;
+class MCValue;
+class raw_ostream;
+
+/// Generic interface to target specific assembler backends.
+class MCAsmBackend {
+ MCAsmBackend(const MCAsmBackend &) = delete;
+ void operator=(const MCAsmBackend &) = delete;
+
+protected: // Can only create subclasses.
+ MCAsmBackend();
+
+ unsigned HasDataInCodeSupport : 1;
+
+public:
+ virtual ~MCAsmBackend();
+
+ /// lifetime management
+ virtual void reset() {}
+
+ /// Create a new MCObjectWriter instance for use by the assembler backend to
+ /// emit the final object file.
+ virtual MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const = 0;
+
+ /// Create a new ELFObjectTargetWriter to enable non-standard
+ /// ELFObjectWriters.
+ virtual MCELFObjectTargetWriter *createELFObjectTargetWriter() const {
+ llvm_unreachable("createELFObjectTargetWriter is not supported by asm "
+ "backend");
+ }
+
+ /// Check whether this target implements data-in-code markers. If not, data
+ /// region directives will be ignored.
+ bool hasDataInCodeSupport() const { return HasDataInCodeSupport; }
+
+ /// \name Target Fixup Interfaces
+ /// @{
+
+ /// Get the number of target specific fixup kinds.
+ virtual unsigned getNumFixupKinds() const = 0;
+
+ /// Map a relocation name used in .reloc to a fixup kind.
+ /// Returns true and sets MappedKind if Name is successfully mapped.
+ /// Otherwise returns false and leaves MappedKind unchanged.
+ virtual bool getFixupKind(StringRef Name, MCFixupKind &MappedKind) const;
+
+ /// Get information on a fixup kind.
+ virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
+
+ /// Target hook to adjust the literal value of a fixup if necessary.
+ /// IsResolved signals whether the caller believes a relocation is needed; the
+ /// target can modify the value. The default does nothing.
+ virtual void processFixupValue(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFixup &Fixup, const MCFragment *DF,
+ const MCValue &Target, uint64_t &Value,
+ bool &IsResolved) {}
+
+  /// Apply the \p Value for the given \p Fixup into the provided data fragment, at
+ /// the offset specified by the fixup and following the fixup kind as
+ /// appropriate.
+ virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
+ uint64_t Value, bool IsPCRel) const = 0;
+
+ /// @}
+
+ /// \name Target Relaxation Interfaces
+ /// @{
+
+ /// Check whether the given instruction may need relaxation.
+ ///
+ /// \param Inst - The instruction to test.
+ virtual bool mayNeedRelaxation(const MCInst &Inst) const = 0;
+
+ /// Target specific predicate for whether a given fixup requires the
+ /// associated instruction to be relaxed.
+ virtual bool fixupNeedsRelaxationAdvanced(const MCFixup &Fixup, bool Resolved,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const;
+
+ /// Simple predicate for targets where !Resolved implies requiring relaxation
+ virtual bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const = 0;
+
+ /// Relax the instruction in the given fragment to the next wider instruction.
+ ///
+ /// \param Inst The instruction to relax, which may be the same as the
+ /// output.
+ /// \param [out] Res On return, the relaxed instruction.
+ virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0;
+
+ /// @}
+
+ /// Returns the minimum size of a nop in bytes on this target. The assembler
+ /// will use this to emit excess padding in situations where the padding
+ /// required for simple alignment would be less than the minimum nop size.
+ ///
+ virtual unsigned getMinimumNopSize() const { return 1; }
+
+ /// Write an (optimal) nop sequence of Count bytes to the given output. If the
+ /// target cannot generate such a sequence, it should return an error.
+ ///
+ /// \return - True on success.
+ virtual bool writeNopData(uint64_t Count, MCObjectWriter *OW) const = 0;
+
+ /// Handle any target-specific assembler flags. By default, do nothing.
+ virtual void handleAssemblerFlag(MCAssemblerFlag Flag) {}
+
+ /// \brief Generate the compact unwind encoding for the CFI instructions.
+ virtual uint32_t
+ generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction>) const {
+ return 0;
+ }
+};
+
+} // End llvm namespace
+
+#endif
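
A skeletal backend showing the pure-virtual surface a target must implement;
ToyAsmBackend and every body below are placeholders, not a real target:

    #include "llvm/MC/MCAsmBackend.h"
    #include "llvm/MC/MCObjectWriter.h"
    #include "llvm/Support/raw_ostream.h"

    namespace {
      class ToyAsmBackend : public llvm::MCAsmBackend {
      public:
        llvm::MCObjectWriter *
        createObjectWriter(llvm::raw_pwrite_stream &OS) const override {
          return nullptr; // A real target returns its ELF/MachO/COFF writer.
        }
        unsigned getNumFixupKinds() const override { return 0; }
        void applyFixup(const llvm::MCFixup &Fixup, char *Data,
                        unsigned DataSize, uint64_t Value,
                        bool IsPCRel) const override {
          // Patch Value into Data at Fixup.getOffset() per the fixup kind.
        }
        bool mayNeedRelaxation(const llvm::MCInst &) const override {
          return false; // Fixed-width encodings: nothing ever relaxes.
        }
        bool fixupNeedsRelaxation(const llvm::MCFixup &, uint64_t,
                                  const llvm::MCRelaxableFragment *,
                                  const llvm::MCAsmLayout &) const override {
          return false;
        }
        void relaxInstruction(const llvm::MCInst &Inst,
                              llvm::MCInst &Res) const override {
          Res = Inst; // No wider form to relax to.
        }
        bool writeNopData(uint64_t Count,
                          llvm::MCObjectWriter *OW) const override {
          for (uint64_t i = 0; i != Count; ++i)
            OW->write8(0x90); // Hypothetical one-byte nop encoding.
          return true;
        }
      };
    }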
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfo.h b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
new file mode 100644
index 0000000..384584e
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfo.h
@@ -0,0 +1,569 @@
+//===-- llvm/MC/MCAsmInfo.h - Asm info --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the basis for target specific
+// asm writers. This class primarily takes care of global printing constants,
+// which are used in very similar ways across all targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFO_H
+#define LLVM_MC_MCASMINFO_H
+
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+class MCExpr;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+class MCContext;
+
+namespace WinEH {
+enum class EncodingType {
+ Invalid, /// Invalid
+ Alpha, /// Windows Alpha
+ Alpha64, /// Windows AXP64
+ ARM, /// Windows NT (Windows on ARM)
+ CE, /// Windows CE ARM, PowerPC, SH3, SH4
+ Itanium, /// Windows x64, Windows Itanium (IA-64)
+ X86, /// Windows x86, uses no CFI, just EH tables
+ MIPS = Alpha,
+};
+}
+
+enum class ExceptionHandling {
+ None, /// No exception support
+ DwarfCFI, /// DWARF-like instruction based exceptions
+ SjLj, /// setjmp/longjmp based exceptions
+ ARM, /// ARM EHABI
+ WinEH, /// Windows Exception Handling
+};
+
+namespace LCOMM {
+enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };
+}
+
+/// This class is intended to be used as a base class for asm
+/// properties and features specific to the target.
+class MCAsmInfo {
+protected:
+ //===------------------------------------------------------------------===//
+ // Properties to be set by the target writer, used to configure asm printer.
+ //
+
+ /// Pointer size in bytes. Default is 4.
+ unsigned PointerSize;
+
+ /// Size of the stack slot reserved for callee-saved registers, in bytes.
+ /// Default is same as pointer size.
+ unsigned CalleeSaveStackSlotSize;
+
+ /// True if target is little endian. Default is true.
+ bool IsLittleEndian;
+
+  /// True if the target stack grows up. Default is false.
+ bool StackGrowsUp;
+
+ /// True if this target has the MachO .subsections_via_symbols directive.
+ /// Default is false.
+ bool HasSubsectionsViaSymbols;
+
+ /// True if this is a MachO target that supports the macho-specific .zerofill
+ /// directive for emitting BSS Symbols. Default is false.
+ bool HasMachoZeroFillDirective;
+
+ /// True if this is a MachO target that supports the macho-specific .tbss
+ /// directive for emitting thread local BSS Symbols. Default is false.
+ bool HasMachoTBSSDirective;
+
+ /// True if the compiler should emit a ".reference .constructors_used" or
+ /// ".reference .destructors_used" directive after the static ctor/dtor
+  /// list. This directive is only emitted in the Static relocation model. Default
+ /// is false.
+ bool HasStaticCtorDtorReferenceInStaticMode;
+
+ /// This is the maximum possible length of an instruction, which is needed to
+ /// compute the size of an inline asm. Defaults to 4.
+ unsigned MaxInstLength;
+
+ /// Every possible instruction length is a multiple of this value. Factored
+ /// out in .debug_frame and .debug_line. Defaults to 1.
+ unsigned MinInstAlignment;
+
+ /// The '$' token, when not referencing an identifier or constant, refers to
+ /// the current PC. Defaults to false.
+ bool DollarIsPC;
+
+ /// This string, if specified, is used to separate instructions from each
+ /// other when on the same line. Defaults to ';'
+ const char *SeparatorString;
+
+ /// This indicates the comment character used by the assembler. Defaults to
+ /// "#"
+ const char *CommentString;
+
+ /// This is appended to emitted labels. Defaults to ":"
+ const char *LabelSuffix;
+
+ // Print the EH begin symbol with an assignment. Defaults to false.
+ bool UseAssignmentForEHBegin;
+
+ // Do we need to create a local symbol for .size?
+ bool NeedsLocalForSize;
+
+ /// This prefix is used for globals like constant pool entries that are
+ /// completely private to the .s file and should not have names in the .o
+ /// file. Defaults to "L"
+ const char *PrivateGlobalPrefix;
+
+ /// This prefix is used for labels for basic blocks. Defaults to the same as
+ /// PrivateGlobalPrefix.
+ const char *PrivateLabelPrefix;
+
+ /// This prefix is used for symbols that should be passed through the
+ /// assembler but be removed by the linker. This is 'l' on Darwin, currently
+  /// used for some ObjC metadata. The default of "" means that for this system
+ /// a plain private symbol should be used. Defaults to "".
+ const char *LinkerPrivateGlobalPrefix;
+
+ /// If these are nonempty, they contain a directive to emit before and after
+ /// an inline assembly statement. Defaults to "#APP\n", "#NO_APP\n"
+ const char *InlineAsmStart;
+ const char *InlineAsmEnd;
+
+  /// These are assembly directives that tell the assembler to interpret the
+ /// following instructions differently. Defaults to ".code16", ".code32",
+ /// ".code64".
+ const char *Code16Directive;
+ const char *Code32Directive;
+ const char *Code64Directive;
+
+ /// Which dialect of an assembler variant to use. Defaults to 0
+ unsigned AssemblerDialect;
+
+ /// This is true if the assembler allows @ characters in symbol names.
+ /// Defaults to false.
+ bool AllowAtInName;
+
+ /// If this is true, symbol names with invalid characters will be printed in
+ /// quotes.
+ bool SupportsQuotedNames;
+
+ /// This is true if data region markers should be printed as
+ /// ".data_region/.end_data_region" directives. If false, use "$d/$a" labels
+ /// instead.
+ bool UseDataRegionDirectives;
+
+ //===--- Data Emission Directives -------------------------------------===//
+
+ /// This should be set to the directive used to get some number of zero bytes
+ /// emitted to the current section. Common cases are "\t.zero\t" and
+  /// "\t.space\t". If this is set to null, the Data*bitsDirectives will be
+ /// used to emit zero bytes. Defaults to "\t.zero\t"
+ const char *ZeroDirective;
+
+ /// This directive allows emission of an ascii string with the standard C
+ /// escape characters embedded into it. Defaults to "\t.ascii\t"
+ const char *AsciiDirective;
+
+ /// If not null, this allows for special handling of zero terminated strings
+ /// on this target. This is commonly supported as ".asciz". If a target
+ /// doesn't support this, it can be set to null. Defaults to "\t.asciz\t"
+ const char *AscizDirective;
+
+ /// These directives are used to output some unit of integer data to the
+ /// current section. If a data directive is set to null, smaller data
+ /// directives will be used to emit the large sizes. Defaults to "\t.byte\t",
+ /// "\t.short\t", "\t.long\t", "\t.quad\t"
+ const char *Data8bitsDirective;
+ const char *Data16bitsDirective;
+ const char *Data32bitsDirective;
+ const char *Data64bitsDirective;
+
+ /// If non-null, a directive that is used to emit a word which should be
+ /// relocated as a 64-bit GP-relative offset, e.g. .gpdword on Mips. Defaults
+ /// to NULL.
+ const char *GPRel64Directive;
+
+ /// If non-null, a directive that is used to emit a word which should be
+ /// relocated as a 32-bit GP-relative offset, e.g. .gpword on Mips or .gprel32
+ /// on Alpha. Defaults to NULL.
+ const char *GPRel32Directive;
+
+ /// This is true if this target uses "Sun Style" syntax for section switching
+ /// ("#alloc,#write" etc) instead of the normal ELF syntax (,"a,w") in
+ /// .section directives. Defaults to false.
+ bool SunStyleELFSectionSwitchSyntax;
+
+ /// This is true if this target uses ELF '.section' directive before the
+  /// '.bss' one. It's used for PPC/Linux, which doesn't support using the
+  /// '.bss' directive on its own. Defaults to false.
+ bool UsesELFSectionDirectiveForBSS;
+
+ bool NeedsDwarfSectionOffsetDirective;
+
+ //===--- Alignment Information ----------------------------------------===//
+
+ /// If this is true (the default) then the asmprinter emits ".align N"
+ /// directives, where N is the number of bytes to align to. Otherwise, it
+ /// emits ".align log2(N)", e.g. 3 to align to an 8 byte boundary. Defaults
+ /// to true.
+ bool AlignmentIsInBytes;
+
+ /// If non-zero, this is used to fill the executable space created as the
+  /// result of an alignment directive. Defaults to 0.
+ unsigned TextAlignFillValue;
+
+ //===--- Global Variable Emission Directives --------------------------===//
+
+ /// This is the directive used to declare a global entity. Defaults to
+ /// ".globl".
+ const char *GlobalDirective;
+
+ /// True if the expression
+ /// .long f - g
+ /// uses a relocation but it can be suppressed by writing
+ /// a = f - g
+ /// .long a
+ bool SetDirectiveSuppressesReloc;
+
+ /// False if the assembler requires that we use
+ /// \code
+ /// Lc = a - b
+ /// .long Lc
+ /// \endcode
+  ///
+  /// instead of
+  ///
+ /// \code
+ /// .long a - b
+ /// \endcode
+ ///
+ /// Defaults to true.
+ bool HasAggressiveSymbolFolding;
+
+  /// True if .comm's and .lcomm's optional alignment is to be specified in bytes
+ /// instead of log2(n). Defaults to true.
+ bool COMMDirectiveAlignmentIsInBytes;
+
+ /// Describes if the .lcomm directive for the target supports an alignment
+ /// argument and how it is interpreted. Defaults to NoAlignment.
+ LCOMM::LCOMMType LCOMMDirectiveAlignmentType;
+
+ // True if the target allows .align directives on functions. This is true for
+ // most targets, so defaults to true.
+ bool HasFunctionAlignment;
+
+  /// True if the target has .type and .size directives; this is true for most
+ /// ELF targets. Defaults to true.
+ bool HasDotTypeDotSizeDirective;
+
+  /// True if the target has a single parameter .file directive; this is true
+ /// for ELF targets. Defaults to true.
+ bool HasSingleParameterDotFile;
+
+  /// True if the target has a .ident directive; this is true for ELF targets.
+ /// Defaults to false.
+ bool HasIdentDirective;
+
+ /// True if this target supports the MachO .no_dead_strip directive. Defaults
+ /// to false.
+ bool HasNoDeadStrip;
+
+ /// Used to declare a global as being a weak symbol. Defaults to ".weak".
+ const char *WeakDirective;
+
+ /// This directive, if non-null, is used to declare a global as being a weak
+ /// undefined symbol. Defaults to NULL.
+ const char *WeakRefDirective;
+
+ /// True if we have a directive to declare a global as being a weak defined
+ /// symbol. Defaults to false.
+ bool HasWeakDefDirective;
+
+ /// True if we have a directive to declare a global as being a weak defined
+ /// symbol that can be hidden (unexported). Defaults to false.
+ bool HasWeakDefCanBeHiddenDirective;
+
+ /// True if we have a .linkonce directive. This is used on cygwin/mingw.
+ /// Defaults to false.
+ bool HasLinkOnceDirective;
+
+ /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
+ /// hidden visibility. Defaults to MCSA_Hidden.
+ MCSymbolAttr HiddenVisibilityAttr;
+
+ /// This attribute, if not MCSA_Invalid, is used to declare an undefined
+ /// symbol as having hidden visibility. Defaults to MCSA_Hidden.
+ MCSymbolAttr HiddenDeclarationVisibilityAttr;
+
+ /// This attribute, if not MCSA_Invalid, is used to declare a symbol as having
+ /// protected visibility. Defaults to MCSA_Protected
+ MCSymbolAttr ProtectedVisibilityAttr;
+
+ //===--- Dwarf Emission Directives -----------------------------------===//
+
+ /// True if target supports emission of debugging information. Defaults to
+ /// false.
+ bool SupportsDebugInformation;
+
+ /// Exception handling format for the target. Defaults to None.
+ ExceptionHandling ExceptionsType;
+
+ /// Windows exception handling data (.pdata) encoding. Defaults to Invalid.
+ WinEH::EncodingType WinEHEncodingType;
+
+ /// True if Dwarf2 output generally uses relocations for references to other
+ /// .debug_* sections.
+ bool DwarfUsesRelocationsAcrossSections;
+
+ /// True if DWARF FDE symbol reference relocations should be replaced by an
+ /// absolute difference.
+ bool DwarfFDESymbolsUseAbsDiff;
+
+ /// True if dwarf register numbers are printed instead of symbolic register
+ /// names in .cfi_* directives. Defaults to false.
+ bool DwarfRegNumForCFI;
+
+ /// True if target uses parens to indicate the symbol variant instead of @.
+ /// For example, foo(plt) instead of foo@plt. Defaults to false.
+ bool UseParensForSymbolVariant;
+
+ //===--- Prologue State ----------------------------------------------===//
+
+ std::vector<MCCFIInstruction> InitialFrameState;
+
+ //===--- Integrated Assembler Information ----------------------------===//
+
+ /// Should we use the integrated assembler?
+ /// The integrated assembler should be enabled by default (by the
+ /// constructors) when failing to parse a valid piece of assembly (inline
+ /// or otherwise) is considered a bug. It may then be overridden after
+ /// construction (see LLVMTargetMachine::initAsmInfo()).
+ bool UseIntegratedAssembler;
+
+ /// Compress DWARF debug sections. Defaults to false.
+ bool CompressDebugSections;
+
+ /// True if the integrated assembler should interpret 'a >> b' constant
+ /// expressions as logical rather than arithmetic.
+ bool UseLogicalShr;
+
+public:
+ explicit MCAsmInfo();
+ virtual ~MCAsmInfo();
+
+ /// Get the pointer size in bytes.
+ unsigned getPointerSize() const { return PointerSize; }
+
+ /// Get the callee-saved register stack slot
+ /// size in bytes.
+ unsigned getCalleeSaveStackSlotSize() const {
+ return CalleeSaveStackSlotSize;
+ }
+
+ /// True if the target is little endian.
+ bool isLittleEndian() const { return IsLittleEndian; }
+
+  /// True if the target stack grows up.
+ bool isStackGrowthDirectionUp() const { return StackGrowsUp; }
+
+ bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }
+
+ // Data directive accessors.
+
+ const char *getData8bitsDirective() const { return Data8bitsDirective; }
+ const char *getData16bitsDirective() const { return Data16bitsDirective; }
+ const char *getData32bitsDirective() const { return Data32bitsDirective; }
+ const char *getData64bitsDirective() const { return Data64bitsDirective; }
+ const char *getGPRel64Directive() const { return GPRel64Directive; }
+ const char *getGPRel32Directive() const { return GPRel32Directive; }
+
+ /// Targets can implement this method to specify a section to switch to if the
+ /// translation unit doesn't have any trampolines that require an executable
+ /// stack.
+ virtual MCSection *getNonexecutableStackSection(MCContext &Ctx) const {
+ return nullptr;
+ }
+
+ /// \brief True if the section is atomized using the symbols in it.
+ /// This is false if the section is not atomized at all (most ELF sections) or
+  /// if it is atomized based on its contents (MachO's __TEXT,__cstring, for
+ /// example).
+ virtual bool isSectionAtomizableBySymbols(const MCSection &Section) const;
+
+ virtual const MCExpr *getExprForPersonalitySymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const;
+
+ virtual const MCExpr *getExprForFDESymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const;
+
+ /// Return true if the identifier \p Name does not need quotes to be
+ /// syntactically correct.
+ virtual bool isValidUnquotedName(StringRef Name) const;
+
+ /// Return true if the .section directive should be omitted when
+ /// emitting \p SectionName. For example:
+ ///
+ /// shouldOmitSectionDirective(".text")
+ ///
+ /// returns false => .section .text,#alloc,#execinstr
+ /// returns true => .text
+ virtual bool shouldOmitSectionDirective(StringRef SectionName) const;
+
+ bool usesSunStyleELFSectionSwitchSyntax() const {
+ return SunStyleELFSectionSwitchSyntax;
+ }
+
+ bool usesELFSectionDirectiveForBSS() const {
+ return UsesELFSectionDirectiveForBSS;
+ }
+
+ bool needsDwarfSectionOffsetDirective() const {
+ return NeedsDwarfSectionOffsetDirective;
+ }
+
+ // Accessors.
+
+ bool hasMachoZeroFillDirective() const { return HasMachoZeroFillDirective; }
+ bool hasMachoTBSSDirective() const { return HasMachoTBSSDirective; }
+ bool hasStaticCtorDtorReferenceInStaticMode() const {
+ return HasStaticCtorDtorReferenceInStaticMode;
+ }
+ unsigned getMaxInstLength() const { return MaxInstLength; }
+ unsigned getMinInstAlignment() const { return MinInstAlignment; }
+ bool getDollarIsPC() const { return DollarIsPC; }
+ const char *getSeparatorString() const { return SeparatorString; }
+
+ /// This indicates the column (zero-based) at which asm comments should be
+ /// printed.
+ unsigned getCommentColumn() const { return 40; }
+
+ const char *getCommentString() const { return CommentString; }
+ const char *getLabelSuffix() const { return LabelSuffix; }
+
+ bool useAssignmentForEHBegin() const { return UseAssignmentForEHBegin; }
+ bool needsLocalForSize() const { return NeedsLocalForSize; }
+ const char *getPrivateGlobalPrefix() const { return PrivateGlobalPrefix; }
+ const char *getPrivateLabelPrefix() const { return PrivateLabelPrefix; }
+ bool hasLinkerPrivateGlobalPrefix() const {
+ return LinkerPrivateGlobalPrefix[0] != '\0';
+ }
+ const char *getLinkerPrivateGlobalPrefix() const {
+ if (hasLinkerPrivateGlobalPrefix())
+ return LinkerPrivateGlobalPrefix;
+ return getPrivateGlobalPrefix();
+ }
+ const char *getInlineAsmStart() const { return InlineAsmStart; }
+ const char *getInlineAsmEnd() const { return InlineAsmEnd; }
+ const char *getCode16Directive() const { return Code16Directive; }
+ const char *getCode32Directive() const { return Code32Directive; }
+ const char *getCode64Directive() const { return Code64Directive; }
+ unsigned getAssemblerDialect() const { return AssemblerDialect; }
+ bool doesAllowAtInName() const { return AllowAtInName; }
+ bool supportsNameQuoting() const { return SupportsQuotedNames; }
+ bool doesSupportDataRegionDirectives() const {
+ return UseDataRegionDirectives;
+ }
+ const char *getZeroDirective() const { return ZeroDirective; }
+ const char *getAsciiDirective() const { return AsciiDirective; }
+ const char *getAscizDirective() const { return AscizDirective; }
+ bool getAlignmentIsInBytes() const { return AlignmentIsInBytes; }
+ unsigned getTextAlignFillValue() const { return TextAlignFillValue; }
+ const char *getGlobalDirective() const { return GlobalDirective; }
+ bool doesSetDirectiveSuppressesReloc() const {
+ return SetDirectiveSuppressesReloc;
+ }
+ bool hasAggressiveSymbolFolding() const { return HasAggressiveSymbolFolding; }
+ bool getCOMMDirectiveAlignmentIsInBytes() const {
+ return COMMDirectiveAlignmentIsInBytes;
+ }
+ LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const {
+ return LCOMMDirectiveAlignmentType;
+ }
+ bool hasFunctionAlignment() const { return HasFunctionAlignment; }
+ bool hasDotTypeDotSizeDirective() const { return HasDotTypeDotSizeDirective; }
+ bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; }
+ bool hasIdentDirective() const { return HasIdentDirective; }
+ bool hasNoDeadStrip() const { return HasNoDeadStrip; }
+ const char *getWeakDirective() const { return WeakDirective; }
+ const char *getWeakRefDirective() const { return WeakRefDirective; }
+ bool hasWeakDefDirective() const { return HasWeakDefDirective; }
+ bool hasWeakDefCanBeHiddenDirective() const {
+ return HasWeakDefCanBeHiddenDirective;
+ }
+ bool hasLinkOnceDirective() const { return HasLinkOnceDirective; }
+
+ MCSymbolAttr getHiddenVisibilityAttr() const { return HiddenVisibilityAttr; }
+ MCSymbolAttr getHiddenDeclarationVisibilityAttr() const {
+ return HiddenDeclarationVisibilityAttr;
+ }
+ MCSymbolAttr getProtectedVisibilityAttr() const {
+ return ProtectedVisibilityAttr;
+ }
+ bool doesSupportDebugInformation() const { return SupportsDebugInformation; }
+ bool doesSupportExceptionHandling() const {
+ return ExceptionsType != ExceptionHandling::None;
+ }
+ ExceptionHandling getExceptionHandlingType() const { return ExceptionsType; }
+ WinEH::EncodingType getWinEHEncodingType() const { return WinEHEncodingType; }
+
+ /// Returns true if the exception handling method for the platform uses call
+ /// frame information to unwind.
+ bool usesCFIForEH() const {
+ return (ExceptionsType == ExceptionHandling::DwarfCFI ||
+ ExceptionsType == ExceptionHandling::ARM || usesWindowsCFI());
+ }
+
+ bool usesWindowsCFI() const {
+ return ExceptionsType == ExceptionHandling::WinEH &&
+ (WinEHEncodingType != WinEH::EncodingType::Invalid &&
+ WinEHEncodingType != WinEH::EncodingType::X86);
+ }
+
+ bool doesDwarfUseRelocationsAcrossSections() const {
+ return DwarfUsesRelocationsAcrossSections;
+ }
+ bool doDwarfFDESymbolsUseAbsDiff() const { return DwarfFDESymbolsUseAbsDiff; }
+ bool useDwarfRegNumForCFI() const { return DwarfRegNumForCFI; }
+ bool useParensForSymbolVariant() const { return UseParensForSymbolVariant; }
+
+ void addInitialFrameState(const MCCFIInstruction &Inst) {
+ InitialFrameState.push_back(Inst);
+ }
+
+ const std::vector<MCCFIInstruction> &getInitialFrameState() const {
+ return InitialFrameState;
+ }
+
+ /// Return true if assembly (inline or otherwise) should be parsed.
+ bool useIntegratedAssembler() const { return UseIntegratedAssembler; }
+
+ /// Set whether assembly (inline or otherwise) should be parsed.
+ virtual void setUseIntegratedAssembler(bool Value) {
+ UseIntegratedAssembler = Value;
+ }
+
+ bool compressDebugSections() const { return CompressDebugSections; }
+
+ void setCompressDebugSections(bool CompressDebugSections) {
+ this->CompressDebugSections = CompressDebugSections;
+ }
+
+ bool shouldUseLogicalShr() const { return UseLogicalShr; }
+};
+}
+
+#endif
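
A hedged sketch of how a target seeds these protected defaults in its
constructor, for a made-up target, mirroring the pattern of the real
per-target MCAsmInfo subclasses:

    #include "llvm/MC/MCAsmInfo.h"

    namespace {
      // Hypothetical target: 8-byte pointers, '!' comments, log2 .align.
      class ToyMCAsmInfo : public llvm::MCAsmInfo {
      public:
        ToyMCAsmInfo() {
          PointerSize = 8;
          CalleeSaveStackSlotSize = 8;
          CommentString = "!";
          AlignmentIsInBytes = false; // Emit ".align log2(N)".
          SupportsDebugInformation = true;
          ExceptionsType = llvm::ExceptionHandling::DwarfCFI;
        }
      };
    }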
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h b/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h
new file mode 100644
index 0000000..56444f3
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoCOFF.h
@@ -0,0 +1,36 @@
+//===-- MCAsmInfoCOFF.h - COFF asm properties -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFOCOFF_H
+#define LLVM_MC_MCASMINFOCOFF_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+ class MCAsmInfoCOFF : public MCAsmInfo {
+ virtual void anchor();
+ protected:
+ explicit MCAsmInfoCOFF();
+ };
+
+ class MCAsmInfoMicrosoft : public MCAsmInfoCOFF {
+ void anchor() override;
+ protected:
+ explicit MCAsmInfoMicrosoft();
+ };
+
+ class MCAsmInfoGNUCOFF : public MCAsmInfoCOFF {
+ void anchor() override;
+ protected:
+ explicit MCAsmInfoGNUCOFF();
+ };
+}
+
+
+#endif // LLVM_MC_MCASMINFOCOFF_H
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h b/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h
new file mode 100644
index 0000000..d587c3c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoDarwin.h
@@ -0,0 +1,29 @@
+//===---- MCAsmInfoDarwin.h - Darwin asm properties -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines target asm properties related to what form asm statements
+// should take in general on Darwin-based targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFODARWIN_H
+#define LLVM_MC_MCASMINFODARWIN_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+ class MCAsmInfoDarwin : public MCAsmInfo {
+ public:
+ explicit MCAsmInfoDarwin();
+ bool isSectionAtomizableBySymbols(const MCSection &Section) const override;
+ };
+}
+
+
+#endif // LLVM_MC_MCASMINFODARWIN_H
diff --git a/contrib/llvm/include/llvm/MC/MCAsmInfoELF.h b/contrib/llvm/include/llvm/MC/MCAsmInfoELF.h
new file mode 100644
index 0000000..7125f5c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmInfoELF.h
@@ -0,0 +1,25 @@
+//===-- llvm/MC/MCAsmInfoELF.h - ELF Asm info -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMINFOELF_H
+#define LLVM_MC_MCASMINFOELF_H
+
+#include "llvm/MC/MCAsmInfo.h"
+
+namespace llvm {
+class MCAsmInfoELF : public MCAsmInfo {
+ virtual void anchor();
+ MCSection *getNonexecutableStackSection(MCContext &Ctx) const final;
+
+protected:
+ MCAsmInfoELF();
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCAsmLayout.h b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
new file mode 100644
index 0000000..1b20d5b
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAsmLayout.h
@@ -0,0 +1,107 @@
+//===- MCAsmLayout.h - Assembly Layout Object -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASMLAYOUT_H
+#define LLVM_MC_MCASMLAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+class MCAssembler;
+class MCFragment;
+class MCSection;
+class MCSymbol;
+
+/// Encapsulates the layout of an assembly file at a particular point in time.
+///
+/// Assembly may require computing multiple layouts for a particular assembly
+/// file as part of the relaxation process. This class encapsulates the layout
+/// at a single point in time in such a way that it is always possible to
+/// efficiently compute the exact address of any symbol in the assembly file,
+/// even during the relaxation process.
+class MCAsmLayout {
+ MCAssembler &Assembler;
+
+ /// List of sections in layout order.
+ llvm::SmallVector<MCSection *, 16> SectionOrder;
+
+ /// The last fragment which was laid out, or 0 if nothing has been laid
+ /// out. Fragments are always laid out in order, so all fragments with a
+ /// lower ordinal will be valid.
+ mutable DenseMap<const MCSection *, MCFragment *> LastValidFragment;
+
+ /// \brief Make sure that the layout for the given fragment is valid, lazily
+ /// computing it if necessary.
+ void ensureValid(const MCFragment *F) const;
+
+ /// \brief Is the layout for this fragment valid?
+ bool isFragmentValid(const MCFragment *F) const;
+
+public:
+ MCAsmLayout(MCAssembler &Assembler);
+
+ /// Get the assembler object this is a layout for.
+ MCAssembler &getAssembler() const { return Assembler; }
+
+ /// \brief Invalidate the fragments starting with F because it has been
+ /// resized. The fragment's size should have already been updated, but
+ /// its bundle padding will be recomputed.
+ void invalidateFragmentsFrom(MCFragment *F);
+
+ /// \brief Perform layout for a single fragment, assuming that the previous
+ /// fragment has already been laid out correctly, and the parent section has
+ /// been initialized.
+ void layoutFragment(MCFragment *Fragment);
+
+ /// \name Section Access (in layout order)
+ /// @{
+
+ llvm::SmallVectorImpl<MCSection *> &getSectionOrder() { return SectionOrder; }
+ const llvm::SmallVectorImpl<MCSection *> &getSectionOrder() const {
+ return SectionOrder;
+ }
+
+ /// @}
+ /// \name Fragment Layout Data
+ /// @{
+
+ /// \brief Get the offset of the given fragment inside its containing section.
+ uint64_t getFragmentOffset(const MCFragment *F) const;
+
+ /// @}
+ /// \name Utility Functions
+ /// @{
+
+  /// \brief Get the address space size of the given section, as it affects
+ /// layout. This may differ from the size reported by \see getSectionSize() by
+ /// not including section tail padding.
+ uint64_t getSectionAddressSize(const MCSection *Sec) const;
+
+ /// \brief Get the data size of the given section, as emitted to the object
+ /// file. This may include additional padding, or be 0 for virtual sections.
+ uint64_t getSectionFileSize(const MCSection *Sec) const;
+
+ /// \brief Get the offset of the given symbol, as computed in the current
+ /// layout.
+ /// \return True on success.
+ bool getSymbolOffset(const MCSymbol &S, uint64_t &Val) const;
+
+ /// \brief Variant that reports a fatal error if the offset is not computable.
+ uint64_t getSymbolOffset(const MCSymbol &S) const;
+
+ /// \brief If this symbol is equivalent to A + Constant, return A.
+ const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;
+
+ /// @}
+};
+
+} // end namespace llvm
+
+#endif
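
A sketch of the typical query pattern, assuming an MCAssembler Asm whose
fragments can be laid out; symbolOffset is a made-up helper:

    #include "llvm/MC/MCAsmLayout.h"
    #include "llvm/MC/MCAssembler.h"

    // Resolve Sym to an offset under a fresh layout; returns false when the
    // symbol's value is not computable in the current (partial) layout.
    static bool symbolOffset(llvm::MCAssembler &Asm, const llvm::MCSymbol &Sym,
                             uint64_t &Offset) {
      llvm::MCAsmLayout Layout(Asm); // Fragments are laid out lazily.
      return Layout.getSymbolOffset(Sym, Offset);
    }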
diff --git a/contrib/llvm/include/llvm/MC/MCAssembler.h b/contrib/llvm/include/llvm/MC/MCAssembler.h
new file mode 100644
index 0000000..c0bd128
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCAssembler.h
@@ -0,0 +1,425 @@
+//===- MCAssembler.h - Object File Generation -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCASSEMBLER_H
+#define LLVM_MC_MCASSEMBLER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class raw_ostream;
+class MCAsmLayout;
+class MCAssembler;
+class MCContext;
+class MCCodeEmitter;
+class MCExpr;
+class MCFragment;
+class MCObjectWriter;
+class MCSection;
+class MCSubtargetInfo;
+class MCValue;
+class MCAsmBackend;
+
+// FIXME: This really doesn't belong here. See comments below.
+struct IndirectSymbolData {
+ MCSymbol *Symbol;
+ MCSection *Section;
+};
+
+// FIXME: Ditto this. Purely so the Streamer and the ObjectWriter can talk
+// to one another.
+struct DataRegionData {
+ // This enum should be kept in sync w/ the mach-o definition in
+ // llvm/Object/MachOFormat.h.
+ enum KindTy { Data = 1, JumpTable8, JumpTable16, JumpTable32 } Kind;
+ MCSymbol *Start;
+ MCSymbol *End;
+};
+
+class MCAssembler {
+ friend class MCAsmLayout;
+
+public:
+ typedef std::vector<MCSection *> SectionListType;
+ typedef std::vector<const MCSymbol *> SymbolDataListType;
+
+ typedef pointee_iterator<SectionListType::const_iterator> const_iterator;
+ typedef pointee_iterator<SectionListType::iterator> iterator;
+
+ typedef pointee_iterator<SymbolDataListType::const_iterator>
+ const_symbol_iterator;
+ typedef pointee_iterator<SymbolDataListType::iterator> symbol_iterator;
+
+ typedef iterator_range<symbol_iterator> symbol_range;
+ typedef iterator_range<const_symbol_iterator> const_symbol_range;
+
+ typedef std::vector<IndirectSymbolData>::const_iterator
+ const_indirect_symbol_iterator;
+ typedef std::vector<IndirectSymbolData>::iterator indirect_symbol_iterator;
+
+ typedef std::vector<DataRegionData>::const_iterator
+ const_data_region_iterator;
+ typedef std::vector<DataRegionData>::iterator data_region_iterator;
+
+ /// MachO specific deployment target version info.
+ // A Major version of 0 indicates that no version information was supplied
+ // and so the corresponding load command should not be emitted.
+ typedef struct {
+ MCVersionMinType Kind;
+ unsigned Major;
+ unsigned Minor;
+ unsigned Update;
+ } VersionMinInfoType;
+
+private:
+ MCAssembler(const MCAssembler &) = delete;
+ void operator=(const MCAssembler &) = delete;
+
+ MCContext &Context;
+
+ MCAsmBackend &Backend;
+
+ MCCodeEmitter &Emitter;
+
+ MCObjectWriter &Writer;
+
+ SectionListType Sections;
+
+ SymbolDataListType Symbols;
+
+ std::vector<IndirectSymbolData> IndirectSymbols;
+
+ std::vector<DataRegionData> DataRegions;
+
+ /// The list of linker options to propagate into the object file.
+ std::vector<std::vector<std::string>> LinkerOptions;
+
+ /// List of declared file names
+ std::vector<std::string> FileNames;
+
+ MCDwarfLineTableParams LTParams;
+
+ /// The set of function symbols for which a .thumb_func directive has
+ /// been seen.
+ //
+ // FIXME: We really would like this in target specific code rather than
+ // here. Maybe when the relocation stuff moves to target specific,
+ // this can go with it? The streamer would need some target specific
+ // refactoring too.
+ mutable SmallPtrSet<const MCSymbol *, 64> ThumbFuncs;
+
+ /// \brief The bundle alignment size currently set in the assembler.
+ ///
+ /// By default it's 0, which means bundling is disabled.
+ unsigned BundleAlignSize;
+
+ unsigned RelaxAll : 1;
+ unsigned SubsectionsViaSymbols : 1;
+ unsigned IncrementalLinkerCompatible : 1;
+
+ /// ELF specific e_header flags
+ // It would be good if there were an MCELFAssembler class to hold this.
+ // ELF header flags are used both by the integrated and standalone assemblers.
+  // Access to the flags is necessary in cases where assembler directives
+  // affect which flags are set.
+ unsigned ELFHeaderEFlags;
+
+ /// Used to communicate Linker Optimization Hint information between
+ /// the Streamer and the .o writer
+ MCLOHContainer LOHContainer;
+
+ VersionMinInfoType VersionMinInfo;
+
+private:
+ /// Evaluate a fixup to a relocatable expression and the value which should be
+ /// placed into the fixup.
+ ///
+ /// \param Layout The layout to use for evaluation.
+ /// \param Fixup The fixup to evaluate.
+ /// \param DF The fragment the fixup is inside.
+ /// \param Target [out] On return, the relocatable expression the fixup
+ /// evaluates to.
+ /// \param Value [out] On return, the value of the fixup as currently laid
+ /// out.
+ /// \return Whether the fixup value was fully resolved. This is true if the
+ /// \p Value result is fixed, otherwise the value may change due to
+ /// relocation.
+ bool evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
+ const MCFragment *DF, MCValue &Target,
+ uint64_t &Value) const;
+
+ /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
+ /// (increased in size, in order to hold its value correctly).
+ bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const;
+
+ /// Check whether the given fragment needs relaxation.
+ bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF,
+ const MCAsmLayout &Layout) const;
+
+ /// \brief Perform one layout iteration and return true if any offsets
+ /// were adjusted.
+ bool layoutOnce(MCAsmLayout &Layout);
+
+ /// \brief Perform one layout iteration of the given section and return true
+ /// if any offsets were adjusted.
+ bool layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec);
+
+ bool relaxInstruction(MCAsmLayout &Layout, MCRelaxableFragment &IF);
+
+ bool relaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
+
+ bool relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
+ bool relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
+ MCDwarfCallFrameFragment &DF);
+
+ /// finishLayout - Finalize a layout, including fragment lowering.
+ void finishLayout(MCAsmLayout &Layout);
+
+ std::pair<uint64_t, bool> handleFixup(const MCAsmLayout &Layout,
+ MCFragment &F, const MCFixup &Fixup);
+
+public:
+  /// Compute the effective size of the fragment \p F, assuming it is laid out
+  /// according to the given \p Layout.
+ uint64_t computeFragmentSize(const MCAsmLayout &Layout,
+ const MCFragment &F) const;
+
+ /// Find the symbol which defines the atom containing the given symbol, or
+ /// null if there is no such symbol.
+ const MCSymbol *getAtom(const MCSymbol &S) const;
+
+ /// Check whether a particular symbol is visible to the linker and is required
+ /// in the symbol table, or whether it can be discarded by the assembler. This
+  /// also affects whether the assembler treats the label as potentially
+ /// defining a separate atom.
+ bool isSymbolLinkerVisible(const MCSymbol &SD) const;
+
+ /// Emit the section contents using the given object writer.
+ void writeSectionData(const MCSection *Section,
+ const MCAsmLayout &Layout) const;
+
+ /// Check whether a given symbol has been flagged with .thumb_func.
+ bool isThumbFunc(const MCSymbol *Func) const;
+
+ /// Flag a function symbol as the target of a .thumb_func directive.
+ void setIsThumbFunc(const MCSymbol *Func) { ThumbFuncs.insert(Func); }
+
+ /// ELF e_header flags
+ unsigned getELFHeaderEFlags() const { return ELFHeaderEFlags; }
+ void setELFHeaderEFlags(unsigned Flags) { ELFHeaderEFlags = Flags; }
+
+ /// MachO deployment target version information.
+ const VersionMinInfoType &getVersionMinInfo() const { return VersionMinInfo; }
+ void setVersionMinInfo(MCVersionMinType Kind, unsigned Major, unsigned Minor,
+ unsigned Update) {
+ VersionMinInfo.Kind = Kind;
+ VersionMinInfo.Major = Major;
+ VersionMinInfo.Minor = Minor;
+ VersionMinInfo.Update = Update;
+ }
+
+public:
+ /// Construct a new assembler instance.
+ //
+ // FIXME: How are we going to parameterize this? Two obvious options are stay
+ // concrete and require clients to pass in a target like object. The other
+ // option is to make this abstract, and have targets provide concrete
+ // implementations as we do with AsmParser.
+ MCAssembler(MCContext &Context_, MCAsmBackend &Backend_,
+ MCCodeEmitter &Emitter_, MCObjectWriter &Writer_);
+ ~MCAssembler();
+
+  /// Reuse an assembler instance.
+  void reset();
+
+ MCContext &getContext() const { return Context; }
+
+ MCAsmBackend &getBackend() const { return Backend; }
+
+ MCCodeEmitter &getEmitter() const { return Emitter; }
+
+ MCObjectWriter &getWriter() const { return Writer; }
+
+ MCDwarfLineTableParams getDWARFLinetableParams() const { return LTParams; }
+ void setDWARFLinetableParams(MCDwarfLineTableParams P) { LTParams = P; }
+
+  /// Finish - Do final processing and write the object to the output stream.
+  /// \p Writer is used for a custom object writer (as MCJIT does); if it is
+  /// not specified, one is created automatically from the backend.
+  void Finish();
+
+  // Lay out all sections and prepare them for emission.
+ void layout(MCAsmLayout &Layout);
+
+ // FIXME: This does not belong here.
+ bool getSubsectionsViaSymbols() const { return SubsectionsViaSymbols; }
+ void setSubsectionsViaSymbols(bool Value) { SubsectionsViaSymbols = Value; }
+
+ bool isIncrementalLinkerCompatible() const {
+ return IncrementalLinkerCompatible;
+ }
+ void setIncrementalLinkerCompatible(bool Value) {
+ IncrementalLinkerCompatible = Value;
+ }
+
+ bool getRelaxAll() const { return RelaxAll; }
+ void setRelaxAll(bool Value) { RelaxAll = Value; }
+
+ bool isBundlingEnabled() const { return BundleAlignSize != 0; }
+
+ unsigned getBundleAlignSize() const { return BundleAlignSize; }
+
+ void setBundleAlignSize(unsigned Size) {
+ assert((Size == 0 || !(Size & (Size - 1))) &&
+ "Expect a power-of-two bundle align size");
+ BundleAlignSize = Size;
+ }
+
+ /// \name Section List Access
+ /// @{
+
+ iterator begin() { return Sections.begin(); }
+ const_iterator begin() const { return Sections.begin(); }
+
+ iterator end() { return Sections.end(); }
+ const_iterator end() const { return Sections.end(); }
+
+ size_t size() const { return Sections.size(); }
+
+ /// @}
+ /// \name Symbol List Access
+ /// @{
+ symbol_iterator symbol_begin() { return Symbols.begin(); }
+ const_symbol_iterator symbol_begin() const { return Symbols.begin(); }
+
+ symbol_iterator symbol_end() { return Symbols.end(); }
+ const_symbol_iterator symbol_end() const { return Symbols.end(); }
+
+ symbol_range symbols() { return make_range(symbol_begin(), symbol_end()); }
+ const_symbol_range symbols() const {
+ return make_range(symbol_begin(), symbol_end());
+ }
+
+ size_t symbol_size() const { return Symbols.size(); }
+
+ /// @}
+ /// \name Indirect Symbol List Access
+ /// @{
+
+ // FIXME: This is a total hack, this should not be here. Once things are
+ // factored so that the streamer has direct access to the .o writer, it can
+ // disappear.
+ std::vector<IndirectSymbolData> &getIndirectSymbols() {
+ return IndirectSymbols;
+ }
+
+ indirect_symbol_iterator indirect_symbol_begin() {
+ return IndirectSymbols.begin();
+ }
+ const_indirect_symbol_iterator indirect_symbol_begin() const {
+ return IndirectSymbols.begin();
+ }
+
+ indirect_symbol_iterator indirect_symbol_end() {
+ return IndirectSymbols.end();
+ }
+ const_indirect_symbol_iterator indirect_symbol_end() const {
+ return IndirectSymbols.end();
+ }
+
+ size_t indirect_symbol_size() const { return IndirectSymbols.size(); }
+
+ /// @}
+ /// \name Linker Option List Access
+ /// @{
+
+ std::vector<std::vector<std::string>> &getLinkerOptions() {
+ return LinkerOptions;
+ }
+
+ /// @}
+ /// \name Data Region List Access
+ /// @{
+
+ // FIXME: This is a total hack, this should not be here. Once things are
+ // factored so that the streamer has direct access to the .o writer, it can
+ // disappear.
+ std::vector<DataRegionData> &getDataRegions() { return DataRegions; }
+
+ data_region_iterator data_region_begin() { return DataRegions.begin(); }
+ const_data_region_iterator data_region_begin() const {
+ return DataRegions.begin();
+ }
+
+ data_region_iterator data_region_end() { return DataRegions.end(); }
+ const_data_region_iterator data_region_end() const {
+ return DataRegions.end();
+ }
+
+ size_t data_region_size() const { return DataRegions.size(); }
+
+ /// @}
+  /// \name Linker Optimization Hint (LOH) Access
+ /// @{
+
+ // FIXME: This is a total hack, this should not be here. Once things are
+ // factored so that the streamer has direct access to the .o writer, it can
+ // disappear.
+ MCLOHContainer &getLOHContainer() { return LOHContainer; }
+ const MCLOHContainer &getLOHContainer() const {
+ return const_cast<MCAssembler *>(this)->getLOHContainer();
+ }
+ /// @}
+ /// \name Backend Data Access
+ /// @{
+
+ bool registerSection(MCSection &Section);
+
+ void registerSymbol(const MCSymbol &Symbol, bool *Created = nullptr);
+
+ ArrayRef<std::string> getFileNames() { return FileNames; }
+
+ void addFileName(StringRef FileName) {
+ if (std::find(FileNames.begin(), FileNames.end(), FileName) ==
+ FileNames.end())
+ FileNames.push_back(FileName);
+ }
+
+ /// \brief Write the necessary bundle padding to the given object writer.
+ /// Expects a fragment \p F containing instructions and its size \p FSize.
+ void writeFragmentPadding(const MCFragment &F, uint64_t FSize,
+ MCObjectWriter *OW) const;
+
+ /// @}
+
+ void dump();
+};
+
+/// \brief Compute the amount of padding required before the fragment \p F to
+/// obey bundling restrictions, where \p FOffset is the fragment's offset in
+/// its section and \p FSize is the fragment's size.
+uint64_t computeBundlePadding(const MCAssembler &Assembler, const MCFragment *F,
+ uint64_t FOffset, uint64_t FSize);
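+
+// For illustration (values are examples only): with a 16-byte bundle align
+// size, an 8-byte fragment at offset 12 would cross the boundary at 16, so
+// computeBundlePadding returns 4 to place it at offset 16.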
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCCodeEmitter.h b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
new file mode 100644
index 0000000..b6c1915
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCCodeEmitter.h
@@ -0,0 +1,46 @@
+//===-- llvm/MC/MCCodeEmitter.h - Instruction Encoding ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCODEEMITTER_H
+#define LLVM_MC_MCCODEEMITTER_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCFixup;
+class MCInst;
+class MCSubtargetInfo;
+class raw_ostream;
+template<typename T> class SmallVectorImpl;
+
+/// MCCodeEmitter - Generic instruction encoding interface.
+class MCCodeEmitter {
+private:
+ MCCodeEmitter(const MCCodeEmitter &) = delete;
+ void operator=(const MCCodeEmitter &) = delete;
+
+protected: // Can only create subclasses.
+ MCCodeEmitter();
+
+public:
+ virtual ~MCCodeEmitter();
+
+ /// Lifetime management
+ virtual void reset() {}
+
+ /// EncodeInstruction - Encode the given \p Inst to bytes on the output
+ /// stream \p OS.
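+  ///
+  /// A minimal usage sketch (illustrative; assumes an emitter \c MCE, an
+  /// instruction \c Inst, and a subtarget \c STI already exist):
+  /// \code
+  ///   SmallString<256> Buffer;
+  ///   raw_svector_ostream OS(Buffer);
+  ///   SmallVector<MCFixup, 4> Fixups;
+  ///   MCE.encodeInstruction(Inst, OS, Fixups, STI);
+  ///   // Buffer now holds the encoded bytes, Fixups any pending fixups.
+  /// \endcode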
+ virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h b/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h
new file mode 100644
index 0000000..0a4744f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCCodeGenInfo.h
@@ -0,0 +1,51 @@
+//===-- llvm/MC/MCCodeGenInfo.h - Target CodeGen Info -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file tracks information about the target which can affect codegen,
+// asm parsing, and asm printing. For example, relocation model.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCODEGENINFO_H
+#define LLVM_MC_MCCODEGENINFO_H
+
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+class MCCodeGenInfo {
+ /// RelocationModel - Relocation model: static, pic, etc.
+ ///
+ Reloc::Model RelocationModel;
+
+ /// CMModel - Code model.
+ ///
+ CodeModel::Model CMModel;
+
+ /// OptLevel - Optimization level.
+ ///
+ CodeGenOpt::Level OptLevel;
+
+public:
+ void initMCCodeGenInfo(Reloc::Model RM = Reloc::Default,
+ CodeModel::Model CM = CodeModel::Default,
+ CodeGenOpt::Level OL = CodeGenOpt::Default);
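+
+  // A minimal usage sketch (illustrative):
+  //   MCCodeGenInfo CGI;
+  //   CGI.initMCCodeGenInfo(Reloc::PIC_, CodeModel::Default,
+  //                         CodeGenOpt::Default);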
+
+ Reloc::Model getRelocationModel() const { return RelocationModel; }
+
+ CodeModel::Model getCodeModel() const { return CMModel; }
+
+ CodeGenOpt::Level getOptLevel() const { return OptLevel; }
+
+ // Allow overriding OptLevel on a per-function basis.
+ void setOptLevel(CodeGenOpt::Level Level) { OptLevel = Level; }
+};
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCContext.h b/contrib/llvm/include/llvm/MC/MCContext.h
new file mode 100644
index 0000000..e5a9afd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCContext.h
@@ -0,0 +1,605 @@
+//===- MCContext.h - Machine Code Context -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCCONTEXT_H
+#define LLVM_MC_MCCONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <tuple>
+#include <vector> // FIXME: Shouldn't be needed.
+
+namespace llvm {
+ class MCAsmInfo;
+ class MCExpr;
+ class MCSection;
+ class MCSymbol;
+ class MCSymbolELF;
+ class MCLabel;
+ struct MCDwarfFile;
+ class MCDwarfLoc;
+ class MCObjectFileInfo;
+ class MCRegisterInfo;
+ class MCLineSection;
+ class SMLoc;
+ class MCSectionMachO;
+ class MCSectionELF;
+ class MCSectionCOFF;
+
+ /// Context object for machine code objects. This class owns all of the
+ /// sections that it creates.
+ ///
+ class MCContext {
+ MCContext(const MCContext &) = delete;
+ MCContext &operator=(const MCContext &) = delete;
+
+ public:
+ typedef StringMap<MCSymbol *, BumpPtrAllocator &> SymbolTable;
+
+ private:
+ /// The SourceMgr for this object, if any.
+ const SourceMgr *SrcMgr;
+
+ /// The MCAsmInfo for this target.
+ const MCAsmInfo *MAI;
+
+ /// The MCRegisterInfo for this target.
+ const MCRegisterInfo *MRI;
+
+ /// The MCObjectFileInfo for this target.
+ const MCObjectFileInfo *MOFI;
+
+ /// Allocator object used for creating machine code objects.
+ ///
+ /// We use a bump pointer allocator to avoid the need to track all allocated
+ /// objects.
+ BumpPtrAllocator Allocator;
+
+ SpecificBumpPtrAllocator<MCSectionCOFF> COFFAllocator;
+ SpecificBumpPtrAllocator<MCSectionELF> ELFAllocator;
+ SpecificBumpPtrAllocator<MCSectionMachO> MachOAllocator;
+
+ /// Bindings of names to symbols.
+ SymbolTable Symbols;
+
+ /// ELF sections can have a corresponding symbol. This maps one to the
+ /// other.
+ DenseMap<const MCSectionELF *, MCSymbolELF *> SectionSymbols;
+
+ /// A mapping from a local label number and an instance count to a symbol.
+ /// For example, in the assembly
+ /// 1:
+ /// 2:
+ /// 1:
+ /// We have three labels represented by the pairs (1, 0), (2, 0) and (1, 1)
+ DenseMap<std::pair<unsigned, unsigned>, MCSymbol *> LocalSymbols;
+
+    /// Keeps track of names that were used for both user-declared and
+    /// artificial symbols.
+ StringMap<bool, BumpPtrAllocator &> UsedNames;
+
+ /// The next ID to dole out to an unnamed assembler temporary symbol with
+ /// a given prefix.
+ StringMap<unsigned> NextID;
+
+ /// Instances of directional local labels.
+ DenseMap<unsigned, MCLabel *> Instances;
+ /// NextInstance() creates the next instance of the directional local label
+ /// for the LocalLabelVal and adds it to the map if needed.
+ unsigned NextInstance(unsigned LocalLabelVal);
+ /// GetInstance() gets the current instance of the directional local label
+ /// for the LocalLabelVal and adds it to the map if needed.
+ unsigned GetInstance(unsigned LocalLabelVal);
+
+    /// The file name of the log file from the environment variable
+    /// AS_SECURE_LOG_FILE, which must be set before the .secure_log_unique
+    /// directive is used, or it is an error.
+ char *SecureLogFile;
+ /// The stream that gets written to for the .secure_log_unique directive.
+ std::unique_ptr<raw_fd_ostream> SecureLog;
+ /// Boolean toggled when .secure_log_unique / .secure_log_reset is seen to
+ /// catch errors if .secure_log_unique appears twice without
+ /// .secure_log_reset appearing between them.
+ bool SecureLogUsed;
+
+ /// The compilation directory to use for DW_AT_comp_dir.
+ SmallString<128> CompilationDir;
+
+ /// The main file name if passed in explicitly.
+ std::string MainFileName;
+
+ /// The dwarf file and directory tables from the dwarf .file directive.
+ /// We now emit a line table for each compile unit. To reduce the prologue
+ /// size of each line table, the files and directories used by each compile
+ /// unit are separated.
+ std::map<unsigned, MCDwarfLineTable> MCDwarfLineTablesCUMap;
+
+ /// The current dwarf line information from the last dwarf .loc directive.
+ MCDwarfLoc CurrentDwarfLoc;
+ bool DwarfLocSeen;
+
+ /// Generate dwarf debugging info for assembly source files.
+ bool GenDwarfForAssembly;
+
+    /// The current dwarf file number when generating dwarf debugging info for
+    /// assembly source files.
+ unsigned GenDwarfFileNumber;
+
+ /// Sections for generating the .debug_ranges and .debug_aranges sections.
+ SetVector<MCSection *> SectionsForRanges;
+
+ /// The information gathered from labels that will have dwarf label
+ /// entries when generating dwarf assembly source files.
+ std::vector<MCGenDwarfLabelEntry> MCGenDwarfLabelEntries;
+
+ /// The string to embed in the debug information for the compile unit, if
+ /// non-empty.
+ StringRef DwarfDebugFlags;
+
+ /// The string to embed in as the dwarf AT_producer for the compile unit, if
+ /// non-empty.
+ StringRef DwarfDebugProducer;
+
+ /// The maximum version of dwarf that we should emit.
+ uint16_t DwarfVersion;
+
+    /// Honor temporary labels; this is useful for debugging semantic
+ /// differences between temporary and non-temporary labels (primarily on
+ /// Darwin).
+ bool AllowTemporaryLabels;
+ bool UseNamesOnTempLabels = true;
+
+ /// The Compile Unit ID that we are currently processing.
+ unsigned DwarfCompileUnitID;
+
+ struct ELFSectionKey {
+ std::string SectionName;
+ StringRef GroupName;
+ unsigned UniqueID;
+ ELFSectionKey(StringRef SectionName, StringRef GroupName,
+ unsigned UniqueID)
+ : SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {
+ }
+ bool operator<(const ELFSectionKey &Other) const {
+ if (SectionName != Other.SectionName)
+ return SectionName < Other.SectionName;
+ if (GroupName != Other.GroupName)
+ return GroupName < Other.GroupName;
+ return UniqueID < Other.UniqueID;
+ }
+ };
+
+ struct COFFSectionKey {
+ std::string SectionName;
+ StringRef GroupName;
+ int SelectionKey;
+ COFFSectionKey(StringRef SectionName, StringRef GroupName,
+ int SelectionKey)
+ : SectionName(SectionName), GroupName(GroupName),
+ SelectionKey(SelectionKey) {}
+ bool operator<(const COFFSectionKey &Other) const {
+ if (SectionName != Other.SectionName)
+ return SectionName < Other.SectionName;
+ if (GroupName != Other.GroupName)
+ return GroupName < Other.GroupName;
+ return SelectionKey < Other.SelectionKey;
+ }
+ };
+
+ StringMap<MCSectionMachO *> MachOUniquingMap;
+ std::map<ELFSectionKey, MCSectionELF *> ELFUniquingMap;
+ std::map<COFFSectionKey, MCSectionCOFF *> COFFUniquingMap;
+ StringMap<bool> ELFRelSecNames;
+
+ SpecificBumpPtrAllocator<MCSubtargetInfo> MCSubtargetAllocator;
+
+ /// Do automatic reset in destructor
+ bool AutoReset;
+
+ bool HadError;
+
+ MCSymbol *createSymbolImpl(const StringMapEntry<bool> *Name,
+ bool CanBeUnnamed);
+ MCSymbol *createSymbol(StringRef Name, bool AlwaysAddSuffix,
+ bool IsTemporary);
+
+ MCSymbol *getOrCreateDirectionalLocalSymbol(unsigned LocalLabelVal,
+ unsigned Instance);
+
+ public:
+ explicit MCContext(const MCAsmInfo *MAI, const MCRegisterInfo *MRI,
+ const MCObjectFileInfo *MOFI,
+ const SourceMgr *Mgr = nullptr, bool DoAutoReset = true);
+ ~MCContext();
+
+ const SourceMgr *getSourceManager() const { return SrcMgr; }
+
+ const MCAsmInfo *getAsmInfo() const { return MAI; }
+
+ const MCRegisterInfo *getRegisterInfo() const { return MRI; }
+
+ const MCObjectFileInfo *getObjectFileInfo() const { return MOFI; }
+
+ void setAllowTemporaryLabels(bool Value) { AllowTemporaryLabels = Value; }
+ void setUseNamesOnTempLabels(bool Value) { UseNamesOnTempLabels = Value; }
+
+ /// \name Module Lifetime Management
+ /// @{
+
+    /// reset - Return the object to its state right after construction, to
+    /// prepare for processing a new module.
+ void reset();
+
+ /// @}
+
+ /// \name Symbol Management
+ /// @{
+
+ /// Create and return a new linker temporary symbol with a unique but
+ /// unspecified name.
+ MCSymbol *createLinkerPrivateTempSymbol();
+
+ /// Create and return a new assembler temporary symbol with a unique but
+ /// unspecified name.
+ MCSymbol *createTempSymbol(bool CanBeUnnamed = true);
+
+ MCSymbol *createTempSymbol(const Twine &Name, bool AlwaysAddSuffix,
+ bool CanBeUnnamed = true);
+
+    /// Create the definition of a directional local symbol for a numbered
+    /// label (used for "1:" definitions).
+ MCSymbol *createDirectionalLocalSymbol(unsigned LocalLabelVal);
+
+    /// Create and return a directional local symbol for a numbered label
+    /// (used for "1b" or "1f" references).
+ MCSymbol *getDirectionalLocalSymbol(unsigned LocalLabelVal, bool Before);
+
+    /// Look up the symbol with the specified \p Name. If it exists, return
+    /// it. If not, create a forward reference and return it.
+ ///
+ /// \param Name - The symbol name, which must be unique across all symbols.
+ MCSymbol *getOrCreateSymbol(const Twine &Name);
+
+ MCSymbolELF *getOrCreateSectionSymbol(const MCSectionELF &Section);
+
+ /// Gets a symbol that will be defined to the final stack offset of a local
+ /// variable after codegen.
+ ///
+ /// \param Idx - The index of a local variable passed to @llvm.localescape.
+ MCSymbol *getOrCreateFrameAllocSymbol(StringRef FuncName, unsigned Idx);
+
+ MCSymbol *getOrCreateParentFrameOffsetSymbol(StringRef FuncName);
+
+ MCSymbol *getOrCreateLSDASymbol(StringRef FuncName);
+
+ /// Get the symbol for \p Name, or null.
+ MCSymbol *lookupSymbol(const Twine &Name) const;
+
+ /// getSymbols - Get a reference for the symbol table for clients that
+ /// want to, for example, iterate over all symbols. 'const' because we
+ /// still want any modifications to the table itself to use the MCContext
+ /// APIs.
+ const SymbolTable &getSymbols() const { return Symbols; }
+
+ /// @}
+
+ /// \name Section Management
+ /// @{
+
+ /// Return the MCSection for the specified mach-o section. This requires
+ /// the operands to be valid.
+ MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
+ unsigned TypeAndAttributes,
+ unsigned Reserved2, SectionKind K,
+ const char *BeginSymName = nullptr);
+
+ MCSectionMachO *getMachOSection(StringRef Segment, StringRef Section,
+ unsigned TypeAndAttributes, SectionKind K,
+ const char *BeginSymName = nullptr) {
+ return getMachOSection(Segment, Section, TypeAndAttributes, 0, K,
+ BeginSymName);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags) {
+ return getELFSection(Section, Type, Flags, nullptr);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, const char *BeginSymName) {
+ return getELFSection(Section, Type, Flags, 0, "", BeginSymName);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ StringRef Group) {
+ return getELFSection(Section, Type, Flags, EntrySize, Group, nullptr);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ StringRef Group, const char *BeginSymName) {
+ return getELFSection(Section, Type, Flags, EntrySize, Group, ~0,
+ BeginSymName);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ StringRef Group, unsigned UniqueID) {
+ return getELFSection(Section, Type, Flags, EntrySize, Group, UniqueID,
+ nullptr);
+ }
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ StringRef Group, unsigned UniqueID,
+ const char *BeginSymName);
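+
+    // For illustration, assuming an MCContext Ctx, a typical lookup of the
+    // text section (constants are from llvm/Support/ELF.h):
+    //   MCSectionELF *Text = Ctx.getELFSection(
+    //       ".text", ELF::SHT_PROGBITS, ELF::SHF_ALLOC | ELF::SHF_EXECINSTR);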
+
+ MCSectionELF *getELFSection(StringRef Section, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ const MCSymbolELF *Group, unsigned UniqueID,
+ const char *BeginSymName,
+ const MCSectionELF *Associated);
+
+ MCSectionELF *createELFRelSection(StringRef Name, unsigned Type,
+ unsigned Flags, unsigned EntrySize,
+ const MCSymbolELF *Group,
+ const MCSectionELF *Associated);
+
+ void renameELFSection(MCSectionELF *Section, StringRef Name);
+
+ MCSectionELF *createELFGroupSection(const MCSymbolELF *Group);
+
+ MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
+ SectionKind Kind, StringRef COMDATSymName,
+ int Selection,
+ const char *BeginSymName = nullptr);
+
+ MCSectionCOFF *getCOFFSection(StringRef Section, unsigned Characteristics,
+ SectionKind Kind,
+ const char *BeginSymName = nullptr);
+
+ MCSectionCOFF *getCOFFSection(StringRef Section);
+
+ /// Gets or creates a section equivalent to Sec that is associated with the
+ /// section containing KeySym. For example, to create a debug info section
+ /// associated with an inline function, pass the normal debug info section
+ /// as Sec and the function symbol as KeySym.
+ MCSectionCOFF *getAssociativeCOFFSection(MCSectionCOFF *Sec,
+ const MCSymbol *KeySym);
+
+ // Create and save a copy of STI and return a reference to the copy.
+ MCSubtargetInfo &getSubtargetCopy(const MCSubtargetInfo &STI);
+
+ /// @}
+
+ /// \name Dwarf Management
+ /// @{
+
+ /// \brief Get the compilation directory for DW_AT_comp_dir
+ /// This can be overridden by clients which want to control the reported
+ /// compilation directory and have it be something other than the current
+ /// working directory.
+ /// Returns an empty string if the current directory cannot be determined.
+ StringRef getCompilationDir() const { return CompilationDir; }
+
+ /// \brief Set the compilation directory for DW_AT_comp_dir
+ /// Override the default (CWD) compilation directory.
+ void setCompilationDir(StringRef S) { CompilationDir = S.str(); }
+
+ /// \brief Get the main file name for use in error messages and debug
+ /// info. This can be set to ensure we've got the correct file name
+ /// after preprocessing or for -save-temps.
+ const std::string &getMainFileName() const { return MainFileName; }
+
+ /// \brief Set the main file name and override the default.
+ void setMainFileName(StringRef S) { MainFileName = S; }
+
+ /// Creates an entry in the dwarf file and directory tables.
+ unsigned getDwarfFile(StringRef Directory, StringRef FileName,
+ unsigned FileNumber, unsigned CUID);
+
+ bool isValidDwarfFileNumber(unsigned FileNumber, unsigned CUID = 0);
+
+ const std::map<unsigned, MCDwarfLineTable> &getMCDwarfLineTables() const {
+ return MCDwarfLineTablesCUMap;
+ }
+
+ MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) {
+ return MCDwarfLineTablesCUMap[CUID];
+ }
+
+ const MCDwarfLineTable &getMCDwarfLineTable(unsigned CUID) const {
+ auto I = MCDwarfLineTablesCUMap.find(CUID);
+ assert(I != MCDwarfLineTablesCUMap.end());
+ return I->second;
+ }
+
+ const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles(unsigned CUID = 0) {
+ return getMCDwarfLineTable(CUID).getMCDwarfFiles();
+ }
+ const SmallVectorImpl<std::string> &getMCDwarfDirs(unsigned CUID = 0) {
+ return getMCDwarfLineTable(CUID).getMCDwarfDirs();
+ }
+
+ bool hasMCLineSections() const {
+ for (const auto &Table : MCDwarfLineTablesCUMap)
+ if (!Table.second.getMCDwarfFiles().empty() || Table.second.getLabel())
+ return true;
+ return false;
+ }
+ unsigned getDwarfCompileUnitID() { return DwarfCompileUnitID; }
+ void setDwarfCompileUnitID(unsigned CUIndex) {
+ DwarfCompileUnitID = CUIndex;
+ }
+ void setMCLineTableCompilationDir(unsigned CUID, StringRef CompilationDir) {
+ getMCDwarfLineTable(CUID).setCompilationDir(CompilationDir);
+ }
+
+    /// Saves the information from the currently parsed dwarf .loc directive
+    /// and sets DwarfLocSeen. When the next instruction is assembled, an
+    /// entry in the line number table with this information and the address
+    /// of the instruction will be created.
+ void setCurrentDwarfLoc(unsigned FileNum, unsigned Line, unsigned Column,
+ unsigned Flags, unsigned Isa,
+ unsigned Discriminator) {
+ CurrentDwarfLoc.setFileNum(FileNum);
+ CurrentDwarfLoc.setLine(Line);
+ CurrentDwarfLoc.setColumn(Column);
+ CurrentDwarfLoc.setFlags(Flags);
+ CurrentDwarfLoc.setIsa(Isa);
+ CurrentDwarfLoc.setDiscriminator(Discriminator);
+ DwarfLocSeen = true;
+ }
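+
+    // For illustration: the directive ".loc 1 7 2" typically results in
+    // setCurrentDwarfLoc(1, 7, 2, DWARF2_FLAG_IS_STMT, 0, 0).
+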
+ void clearDwarfLocSeen() { DwarfLocSeen = false; }
+
+ bool getDwarfLocSeen() { return DwarfLocSeen; }
+ const MCDwarfLoc &getCurrentDwarfLoc() { return CurrentDwarfLoc; }
+
+ bool getGenDwarfForAssembly() { return GenDwarfForAssembly; }
+ void setGenDwarfForAssembly(bool Value) { GenDwarfForAssembly = Value; }
+ unsigned getGenDwarfFileNumber() { return GenDwarfFileNumber; }
+ void setGenDwarfFileNumber(unsigned FileNumber) {
+ GenDwarfFileNumber = FileNumber;
+ }
+ const SetVector<MCSection *> &getGenDwarfSectionSyms() {
+ return SectionsForRanges;
+ }
+ bool addGenDwarfSection(MCSection *Sec) {
+ return SectionsForRanges.insert(Sec);
+ }
+
+ void finalizeDwarfSections(MCStreamer &MCOS);
+ const std::vector<MCGenDwarfLabelEntry> &getMCGenDwarfLabelEntries() const {
+ return MCGenDwarfLabelEntries;
+ }
+ void addMCGenDwarfLabelEntry(const MCGenDwarfLabelEntry &E) {
+ MCGenDwarfLabelEntries.push_back(E);
+ }
+
+ void setDwarfDebugFlags(StringRef S) { DwarfDebugFlags = S; }
+ StringRef getDwarfDebugFlags() { return DwarfDebugFlags; }
+
+ void setDwarfDebugProducer(StringRef S) { DwarfDebugProducer = S; }
+ StringRef getDwarfDebugProducer() { return DwarfDebugProducer; }
+
+ void setDwarfVersion(uint16_t v) { DwarfVersion = v; }
+ uint16_t getDwarfVersion() const { return DwarfVersion; }
+
+ /// @}
+
+ char *getSecureLogFile() { return SecureLogFile; }
+ raw_fd_ostream *getSecureLog() { return SecureLog.get(); }
+ bool getSecureLogUsed() { return SecureLogUsed; }
+ void setSecureLog(std::unique_ptr<raw_fd_ostream> Value) {
+ SecureLog = std::move(Value);
+ }
+ void setSecureLogUsed(bool Value) { SecureLogUsed = Value; }
+
+ void *allocate(unsigned Size, unsigned Align = 8) {
+ return Allocator.Allocate(Size, Align);
+ }
+ void deallocate(void *Ptr) {}
+
+ bool hadError() { return HadError; }
+ void reportError(SMLoc L, const Twine &Msg);
+    // An unrecoverable error has occurred. Display the best diagnostic we can
+ // and bail via exit(1). For now, most MC backend errors are unrecoverable.
+ // FIXME: We should really do something about that.
+ LLVM_ATTRIBUTE_NORETURN void reportFatalError(SMLoc L,
+ const Twine &Msg);
+ };
+
+} // end namespace llvm
+
+// operator new and delete aren't allowed inside namespaces.
+// The throw specifications are mandated by the standard.
+/// \brief Placement new for using the MCContext's allocator.
+///
+/// This placement form of operator new uses the MCContext's allocator for
+/// obtaining memory. It is a non-throwing new, which means that it returns
+/// null on error (if that is what the allocator does; the current one does,
+/// so if this ever changes, this operator will have to be changed, too).
+/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
+/// \code
+/// // Default alignment (8)
+/// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments);
+/// // Specific alignment
+/// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments);
+/// \endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// \c Context.Deallocate(Ptr).
+///
+/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// \param C The MCContext that provides the allocator.
+/// \param Alignment The alignment of the allocated memory (if the underlying
+/// allocator supports it).
+/// \return The allocated memory. Could be NULL.
+inline void *operator new(size_t Bytes, llvm::MCContext &C,
+ size_t Alignment = 8) LLVM_NOEXCEPT {
+ return C.allocate(Bytes, Alignment);
+}
+/// \brief Placement delete companion to the new above.
+///
+/// This operator is just a companion to the new above. There is no way of
+/// invoking it directly; see the new operator for more details. This operator
+/// is called implicitly by the compiler if a placement new expression using
+/// the MCContext throws in the object constructor.
+inline void operator delete(void *Ptr, llvm::MCContext &C,
+ size_t) LLVM_NOEXCEPT {
+ C.deallocate(Ptr);
+}
+
+/// \brief Placement new[] for using the MCContext's allocator.
+///
+/// This placement form of operator new[] uses the MCContext's allocator for
+/// obtaining memory. It is a non-throwing new[], which means that it returns
+/// null on error.
+/// Usage looks like this (assuming there's an MCContext 'Context' in scope):
+/// \code
+/// // Default alignment (8)
+/// char *data = new (Context) char[10];
+/// // Specific alignment
+/// char *data = new (Context, 4) char[10];
+/// \endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// \c Context.Deallocate(Ptr).
+///
+/// \param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// \param C The MCContext that provides the allocator.
+/// \param Alignment The alignment of the allocated memory (if the underlying
+/// allocator supports it).
+/// \return The allocated memory. Could be NULL.
+inline void *operator new[](size_t Bytes, llvm::MCContext &C,
+ size_t Alignment = 8) LLVM_NOEXCEPT {
+ return C.allocate(Bytes, Alignment);
+}
+
+/// \brief Placement delete[] companion to the new[] above.
+///
+/// This operator is just a companion to the new[] above. There is no way of
+/// invoking it directly; see the new[] operator for more details. This operator
+/// is called implicitly by the compiler if a placement new[] expression using
+/// the MCContext throws in the object constructor.
+inline void operator delete[](void *Ptr, llvm::MCContext &C) LLVM_NOEXCEPT {
+ C.deallocate(Ptr);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCDirectives.h b/contrib/llvm/include/llvm/MC/MCDirectives.h
new file mode 100644
index 0000000..326b2a1
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCDirectives.h
@@ -0,0 +1,72 @@
+//===- MCDirectives.h - Enums for directives on various targets -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various enums that represent target-specific directives.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDIRECTIVES_H
+#define LLVM_MC_MCDIRECTIVES_H
+
+namespace llvm {
+
+enum MCSymbolAttr {
+ MCSA_Invalid = 0, ///< Not a valid directive.
+
+ // Various directives in alphabetical order.
+ MCSA_ELF_TypeFunction, ///< .type _foo, STT_FUNC # aka @function
+ MCSA_ELF_TypeIndFunction, ///< .type _foo, STT_GNU_IFUNC
+ MCSA_ELF_TypeObject, ///< .type _foo, STT_OBJECT # aka @object
+ MCSA_ELF_TypeTLS, ///< .type _foo, STT_TLS # aka @tls_object
+ MCSA_ELF_TypeCommon, ///< .type _foo, STT_COMMON # aka @common
+ MCSA_ELF_TypeNoType, ///< .type _foo, STT_NOTYPE # aka @notype
+  MCSA_ELF_TypeGnuUniqueObject, ///< .type _foo, @gnu_unique_object
+ MCSA_Global, ///< .globl
+ MCSA_Hidden, ///< .hidden (ELF)
+ MCSA_IndirectSymbol, ///< .indirect_symbol (MachO)
+ MCSA_Internal, ///< .internal (ELF)
+ MCSA_LazyReference, ///< .lazy_reference (MachO)
+ MCSA_Local, ///< .local (ELF)
+ MCSA_NoDeadStrip, ///< .no_dead_strip (MachO)
+ MCSA_SymbolResolver, ///< .symbol_resolver (MachO)
+ MCSA_PrivateExtern, ///< .private_extern (MachO)
+ MCSA_Protected, ///< .protected (ELF)
+ MCSA_Reference, ///< .reference (MachO)
+ MCSA_Weak, ///< .weak
+ MCSA_WeakDefinition, ///< .weak_definition (MachO)
+ MCSA_WeakReference, ///< .weak_reference (MachO)
+ MCSA_WeakDefAutoPrivate ///< .weak_def_can_be_hidden (MachO)
+};
+
+enum MCAssemblerFlag {
+ MCAF_SyntaxUnified, ///< .syntax (ARM/ELF)
+ MCAF_SubsectionsViaSymbols, ///< .subsections_via_symbols (MachO)
+ MCAF_Code16, ///< .code16 (X86) / .code 16 (ARM)
+ MCAF_Code32, ///< .code32 (X86) / .code 32 (ARM)
+ MCAF_Code64 ///< .code64 (X86)
+};
+
+enum MCDataRegionType {
+ MCDR_DataRegion, ///< .data_region
+ MCDR_DataRegionJT8, ///< .data_region jt8
+ MCDR_DataRegionJT16, ///< .data_region jt16
+ MCDR_DataRegionJT32, ///< .data_region jt32
+ MCDR_DataRegionEnd ///< .end_data_region
+};
+
+enum MCVersionMinType {
+ MCVM_IOSVersionMin, ///< .ios_version_min
+ MCVM_OSXVersionMin, ///< .macosx_version_min
+ MCVM_TvOSVersionMin, ///< .tvos_version_min
+ MCVM_WatchOSVersionMin, ///< .watchos_version_min
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCDisassembler.h b/contrib/llvm/include/llvm/MC/MCDisassembler.h
new file mode 100644
index 0000000..57c40d6
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCDisassembler.h
@@ -0,0 +1,113 @@
+//===-- llvm/MC/MCDisassembler.h - Disassembler interface -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCDISASSEMBLER_H
+#define LLVM_MC_MCDISASSEMBLER_H
+
+#include "llvm-c/Disassembler.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/MC/MCSymbolizer.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MCInst;
+class MCSubtargetInfo;
+class raw_ostream;
+class MCContext;
+
+/// Superclass for all disassemblers. Consumes a memory region and provides an
+/// array of assembly instructions.
+class MCDisassembler {
+public:
+  /// Ternary decode status. Most backends will just use Fail and
+  /// Success; however, some have a concept of an instruction with
+  /// understandable semantics but which is architecturally
+  /// incorrect. An example of this is ARM UNPREDICTABLE instructions,
+  /// which are disassemblable but cause undefined behaviour.
+ ///
+ /// Because it makes sense to disassemble these instructions, there
+ /// is a "soft fail" failure mode that indicates the MCInst& is
+ /// valid but architecturally incorrect.
+ ///
+  /// The enum numbers are deliberately chosen such that reduction
+  /// from Success -> SoftFail -> Fail can be done with a simple
+  /// bitwise-AND:
+ ///
+ /// LEFT & TOP = | Success Unpredictable Fail
+ /// --------------+-----------------------------------
+ /// Success | Success Unpredictable Fail
+ /// Unpredictable | Unpredictable Unpredictable Fail
+ /// Fail | Fail Fail Fail
+ ///
+ /// An easy way of encoding this is as 0b11, 0b01, 0b00 for
+ /// Success, SoftFail, Fail respectively.
+ enum DecodeStatus {
+ Fail = 0,
+ SoftFail = 1,
+ Success = 3
+ };
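+
+  // For illustration, statuses combine with bitwise-AND (decodeOperand is a
+  // hypothetical helper returning a DecodeStatus):
+  //   DecodeStatus S = Success;
+  //   S = (DecodeStatus)(S & decodeOperand(Inst)); // Can only degrade:
+  //                                                // 3 & 1 == 1, 3 & 0 == 0.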
+
+ MCDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : Ctx(Ctx), STI(STI), Symbolizer(), CommentStream(nullptr) {}
+
+ virtual ~MCDisassembler();
+
+ /// Returns the disassembly of a single instruction.
+ ///
+ /// \param Instr - An MCInst to populate with the contents of the
+ /// instruction.
+ /// \param Size - A value to populate with the size of the instruction, or
+ /// the number of bytes consumed while attempting to decode
+ /// an invalid instruction.
+ /// \param Address - The address, in the memory space of region, of the first
+ /// byte of the instruction.
+ /// \param VStream - The stream to print warnings and diagnostic messages on.
+ /// \param CStream - The stream to print comments and annotations on.
+ /// \return - MCDisassembler::Success if the instruction is valid,
+ /// MCDisassembler::SoftFail if the instruction was
+ /// disassemblable but invalid,
+ /// MCDisassembler::Fail if the instruction was invalid.
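+  ///
+  /// A minimal calling sketch (illustrative; \c DisAsm, \c Bytes, and
+  /// \c Addr are assumed to exist):
+  /// \code
+  ///   MCInst Inst;
+  ///   uint64_t Size;
+  ///   if (DisAsm.getInstruction(Inst, Size, Bytes, Addr, nulls(), nulls()) ==
+  ///       MCDisassembler::Success)
+  ///     Bytes = Bytes.slice(Size); // Advance past the decoded instruction.
+  /// \endcode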
+ virtual DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &VStream,
+ raw_ostream &CStream) const = 0;
+
+private:
+ MCContext &Ctx;
+
+protected:
+ // Subtarget information, for instruction decoding predicates if required.
+ const MCSubtargetInfo &STI;
+ std::unique_ptr<MCSymbolizer> Symbolizer;
+
+public:
+ // Helpers around MCSymbolizer
+ bool tryAddingSymbolicOperand(MCInst &Inst,
+ int64_t Value,
+ uint64_t Address, bool IsBranch,
+ uint64_t Offset, uint64_t InstSize) const;
+
+ void tryAddingPcLoadReferenceComment(int64_t Value, uint64_t Address) const;
+
+ /// Set \p Symzer as the current symbolizer.
+ /// This takes ownership of \p Symzer, and deletes the previously set one.
+ void setSymbolizer(std::unique_ptr<MCSymbolizer> Symzer);
+
+ MCContext& getContext() const { return Ctx; }
+
+ const MCSubtargetInfo& getSubtargetInfo() const { return STI; }
+
+ // Marked mutable because we cache it inside the disassembler, rather than
+ // having to pass it around as an argument through all the autogenerated code.
+ mutable raw_ostream *CommentStream;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCDwarf.h b/contrib/llvm/include/llvm/MC/MCDwarf.h
new file mode 100644
index 0000000..8a50863
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCDwarf.h
@@ -0,0 +1,523 @@
+//===- MCDwarf.h - Machine Code Dwarf support -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCDwarfFile to support the dwarf
+// .file directive and the .loc directive.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCDWARF_H
+#define LLVM_MC_MCDWARF_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+class MCAsmBackend;
+class MCContext;
+class MCObjectStreamer;
+class MCStreamer;
+class MCSymbol;
+class SourceMgr;
+class SMLoc;
+
+/// \brief Instances of this class represent the name of the dwarf
+/// .file directive and its associated dwarf file number in the MC file.
+/// MCDwarfFile instances are created and uniqued by the MCContext class,
+/// where the file number for each is its index into the vector of DwarfFiles
+/// (note that index 0 is not used and is not a valid dwarf file number).
+struct MCDwarfFile {
+  // \brief The base name of the file without its directory path.
+ std::string Name;
+
+ // \brief The index into the list of directory names for this file name.
+ unsigned DirIndex;
+};
+
+/// \brief Instances of this class represent the information from a
+/// dwarf .loc directive.
+class MCDwarfLoc {
+ uint32_t FileNum;
+ uint32_t Line;
+ uint16_t Column;
+ // Flags (see #define's below)
+ uint8_t Flags;
+ uint8_t Isa;
+ uint32_t Discriminator;
+
+// Flag that indicates the initial value of the is_stmt_start flag.
+#define DWARF2_LINE_DEFAULT_IS_STMT 1
+
+#define DWARF2_FLAG_IS_STMT (1 << 0)
+#define DWARF2_FLAG_BASIC_BLOCK (1 << 1)
+#define DWARF2_FLAG_PROLOGUE_END (1 << 2)
+#define DWARF2_FLAG_EPILOGUE_BEGIN (1 << 3)
+
+private: // MCContext manages these
+ friend class MCContext;
+ friend class MCLineEntry;
+ MCDwarfLoc(unsigned fileNum, unsigned line, unsigned column, unsigned flags,
+ unsigned isa, unsigned discriminator)
+ : FileNum(fileNum), Line(line), Column(column), Flags(flags), Isa(isa),
+ Discriminator(discriminator) {}
+
+ // Allow the default copy constructor and assignment operator to be used
+ // for an MCDwarfLoc object.
+
+public:
+ /// \brief Get the FileNum of this MCDwarfLoc.
+ unsigned getFileNum() const { return FileNum; }
+
+ /// \brief Get the Line of this MCDwarfLoc.
+ unsigned getLine() const { return Line; }
+
+ /// \brief Get the Column of this MCDwarfLoc.
+ unsigned getColumn() const { return Column; }
+
+ /// \brief Get the Flags of this MCDwarfLoc.
+ unsigned getFlags() const { return Flags; }
+
+ /// \brief Get the Isa of this MCDwarfLoc.
+ unsigned getIsa() const { return Isa; }
+
+ /// \brief Get the Discriminator of this MCDwarfLoc.
+ unsigned getDiscriminator() const { return Discriminator; }
+
+ /// \brief Set the FileNum of this MCDwarfLoc.
+ void setFileNum(unsigned fileNum) { FileNum = fileNum; }
+
+ /// \brief Set the Line of this MCDwarfLoc.
+ void setLine(unsigned line) { Line = line; }
+
+ /// \brief Set the Column of this MCDwarfLoc.
+ void setColumn(unsigned column) {
+ assert(column <= UINT16_MAX);
+ Column = column;
+ }
+
+ /// \brief Set the Flags of this MCDwarfLoc.
+ void setFlags(unsigned flags) {
+ assert(flags <= UINT8_MAX);
+ Flags = flags;
+ }
+
+ /// \brief Set the Isa of this MCDwarfLoc.
+ void setIsa(unsigned isa) {
+ assert(isa <= UINT8_MAX);
+ Isa = isa;
+ }
+
+ /// \brief Set the Discriminator of this MCDwarfLoc.
+ void setDiscriminator(unsigned discriminator) {
+ Discriminator = discriminator;
+ }
+};
+
+/// \brief Instances of this class represent the line information for
+/// the dwarf line table entries, which are created after a machine
+/// instruction is assembled and use an address from a temporary label
+/// created at the current address in the current section and the info from
+/// the last .loc directive seen, as stored in the context.
+class MCLineEntry : public MCDwarfLoc {
+ MCSymbol *Label;
+
+private:
+ // Allow the default copy constructor and assignment operator to be used
+ // for an MCLineEntry object.
+
+public:
+ // Constructor to create an MCLineEntry given a symbol and the dwarf loc.
+ MCLineEntry(MCSymbol *label, const MCDwarfLoc loc)
+ : MCDwarfLoc(loc), Label(label) {}
+
+ MCSymbol *getLabel() const { return Label; }
+
+  // This is called when an instruction is assembled into the specified
+  // section. If there is information from the last .loc directive that does
+  // not yet have a line entry made for it, one is made here.
+ static void Make(MCObjectStreamer *MCOS, MCSection *Section);
+};
+
+/// \brief Instances of this class represent the line information for a compile
+/// unit where machine instructions have been assembled after seeing .loc
+/// directives. This is the information used to build the dwarf line
+/// table for a section.
+class MCLineSection {
+public:
+ // \brief Add an entry to this MCLineSection's line entries.
+ void addLineEntry(const MCLineEntry &LineEntry, MCSection *Sec) {
+ MCLineDivisions[Sec].push_back(LineEntry);
+ }
+
+ typedef std::vector<MCLineEntry> MCLineEntryCollection;
+ typedef MCLineEntryCollection::iterator iterator;
+ typedef MCLineEntryCollection::const_iterator const_iterator;
+ typedef MapVector<MCSection *, MCLineEntryCollection> MCLineDivisionMap;
+
+private:
+ // A collection of MCLineEntry for each section.
+ MCLineDivisionMap MCLineDivisions;
+
+public:
+ // Returns the collection of MCLineEntry for a given Compile Unit ID.
+ const MCLineDivisionMap &getMCLineEntries() const {
+ return MCLineDivisions;
+ }
+};
+
+struct MCDwarfLineTableParams {
+ /// First special line opcode - leave room for the standard opcodes.
+ /// Note: If you want to change this, you'll have to update the
+ /// "StandardOpcodeLengths" table that is emitted in
+ /// \c Emit().
+ uint8_t DWARF2LineOpcodeBase = 13;
+ /// Minimum line offset in a special line info. opcode. The value
+ /// -5 was chosen to give a reasonable range of values.
+ int8_t DWARF2LineBase = -5;
+ /// Range of line offsets in a special line info. opcode.
+ uint8_t DWARF2LineRange = 14;
+};
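+
+// For illustration, with the defaults above a special opcode encoding a line
+// delta of +2 together with an address advance of 3 units is
+//   (2 - (-5)) + (14 * 3) + 13 = 62,
+// per the standard DWARF formula
+//   opcode = (LineDelta - LineBase) + (LineRange * AddrAdvance) + OpcodeBase.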
+
+struct MCDwarfLineTableHeader {
+ MCSymbol *Label;
+ SmallVector<std::string, 3> MCDwarfDirs;
+ SmallVector<MCDwarfFile, 3> MCDwarfFiles;
+ StringMap<unsigned> SourceIdMap;
+ StringRef CompilationDir;
+
+ MCDwarfLineTableHeader() : Label(nullptr) {}
+ unsigned getFile(StringRef &Directory, StringRef &FileName,
+ unsigned FileNumber = 0);
+ std::pair<MCSymbol *, MCSymbol *> Emit(MCStreamer *MCOS,
+ MCDwarfLineTableParams Params) const;
+ std::pair<MCSymbol *, MCSymbol *>
+ Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
+ ArrayRef<char> SpecialOpcodeLengths) const;
+};
+
+class MCDwarfDwoLineTable {
+ MCDwarfLineTableHeader Header;
+public:
+ void setCompilationDir(StringRef CompilationDir) {
+ Header.CompilationDir = CompilationDir;
+ }
+ unsigned getFile(StringRef Directory, StringRef FileName) {
+ return Header.getFile(Directory, FileName);
+ }
+ void Emit(MCStreamer &MCOS, MCDwarfLineTableParams Params) const;
+};
+
+class MCDwarfLineTable {
+ MCDwarfLineTableHeader Header;
+ MCLineSection MCLineSections;
+
+public:
+ // This emits the Dwarf file and the line tables for all Compile Units.
+ static void Emit(MCObjectStreamer *MCOS, MCDwarfLineTableParams Params);
+
+ // This emits the Dwarf file and the line tables for a given Compile Unit.
+ void EmitCU(MCObjectStreamer *MCOS, MCDwarfLineTableParams Params) const;
+
+ unsigned getFile(StringRef &Directory, StringRef &FileName,
+ unsigned FileNumber = 0);
+
+ MCSymbol *getLabel() const {
+ return Header.Label;
+ }
+
+ void setLabel(MCSymbol *Label) {
+ Header.Label = Label;
+ }
+
+ void setCompilationDir(StringRef CompilationDir) {
+ Header.CompilationDir = CompilationDir;
+ }
+
+ const SmallVectorImpl<std::string> &getMCDwarfDirs() const {
+ return Header.MCDwarfDirs;
+ }
+
+ SmallVectorImpl<std::string> &getMCDwarfDirs() {
+ return Header.MCDwarfDirs;
+ }
+
+ const SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() const {
+ return Header.MCDwarfFiles;
+ }
+
+ SmallVectorImpl<MCDwarfFile> &getMCDwarfFiles() {
+ return Header.MCDwarfFiles;
+ }
+
+ const MCLineSection &getMCLineSections() const {
+ return MCLineSections;
+ }
+ MCLineSection &getMCLineSections() {
+ return MCLineSections;
+ }
+};
+
+class MCDwarfLineAddr {
+public:
+ /// Utility function to encode a Dwarf pair of LineDelta and AddrDeltas.
+ static void Encode(MCContext &Context, MCDwarfLineTableParams Params,
+ int64_t LineDelta, uint64_t AddrDelta, raw_ostream &OS);
+
+ /// Utility function to emit the encoding to a streamer.
+ static void Emit(MCStreamer *MCOS, MCDwarfLineTableParams Params,
+ int64_t LineDelta, uint64_t AddrDelta);
+};
+
+class MCGenDwarfInfo {
+public:
+ //
+ // When generating dwarf for assembly source files this emits the Dwarf
+ // sections.
+ //
+ static void Emit(MCStreamer *MCOS);
+};
+
+// When generating dwarf for assembly source files this is the info that needs
+// to be gathered for each symbol that will have a dwarf label.
+class MCGenDwarfLabelEntry {
+private:
+ // Name of the symbol without a leading underbar, if any.
+ StringRef Name;
+ // The dwarf file number this symbol is in.
+ unsigned FileNumber;
+ // The line number this symbol is at.
+ unsigned LineNumber;
+ // The low_pc for the dwarf label is taken from this symbol.
+ MCSymbol *Label;
+
+public:
+ MCGenDwarfLabelEntry(StringRef name, unsigned fileNumber, unsigned lineNumber,
+ MCSymbol *label)
+ : Name(name), FileNumber(fileNumber), LineNumber(lineNumber),
+ Label(label) {}
+
+ StringRef getName() const { return Name; }
+ unsigned getFileNumber() const { return FileNumber; }
+ unsigned getLineNumber() const { return LineNumber; }
+ MCSymbol *getLabel() const { return Label; }
+
+  // This is called when a label is created while we are generating dwarf for
+  // assembly source files.
+ static void Make(MCSymbol *Symbol, MCStreamer *MCOS, SourceMgr &SrcMgr,
+ SMLoc &Loc);
+};
+
+class MCCFIInstruction {
+public:
+ enum OpType {
+ OpSameValue,
+ OpRememberState,
+ OpRestoreState,
+ OpOffset,
+ OpDefCfaRegister,
+ OpDefCfaOffset,
+ OpDefCfa,
+ OpRelOffset,
+ OpAdjustCfaOffset,
+ OpEscape,
+ OpRestore,
+ OpUndefined,
+ OpRegister,
+ OpWindowSave,
+ OpGnuArgsSize
+ };
+
+private:
+ OpType Operation;
+ MCSymbol *Label;
+ unsigned Register;
+ union {
+ int Offset;
+ unsigned Register2;
+ };
+ std::vector<char> Values;
+
+ MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R, int O, StringRef V)
+ : Operation(Op), Label(L), Register(R), Offset(O),
+ Values(V.begin(), V.end()) {
+ assert(Op != OpRegister);
+ }
+
+ MCCFIInstruction(OpType Op, MCSymbol *L, unsigned R1, unsigned R2)
+ : Operation(Op), Label(L), Register(R1), Register2(R2) {
+ assert(Op == OpRegister);
+ }
+
+public:
+ /// \brief .cfi_def_cfa defines a rule for computing CFA as: take address from
+ /// Register and add Offset to it.
+ static MCCFIInstruction createDefCfa(MCSymbol *L, unsigned Register,
+ int Offset) {
+ return MCCFIInstruction(OpDefCfa, L, Register, -Offset, "");
+ }
+
+ /// \brief .cfi_def_cfa_register modifies a rule for computing CFA. From now
+ /// on Register will be used instead of the old one. Offset remains the same.
+ static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register) {
+ return MCCFIInstruction(OpDefCfaRegister, L, Register, 0, "");
+ }
+
+  /// \brief .cfi_def_cfa_offset modifies a rule for computing CFA. Register
+  /// remains the same, but offset is new. Note that it is the absolute offset
+  /// that will be added to a defined register to compute the CFA address.
+ static MCCFIInstruction createDefCfaOffset(MCSymbol *L, int Offset) {
+ return MCCFIInstruction(OpDefCfaOffset, L, 0, -Offset, "");
+ }
+
+ /// \brief .cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but
+ /// Offset is a relative value that is added/subtracted from the previous
+ /// offset.
+ static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int Adjustment) {
+ return MCCFIInstruction(OpAdjustCfaOffset, L, 0, Adjustment, "");
+ }
+
+ /// \brief .cfi_offset Previous value of Register is saved at offset Offset
+ /// from CFA.
+ static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register,
+ int Offset) {
+ return MCCFIInstruction(OpOffset, L, Register, Offset, "");
+ }
+
+ /// \brief .cfi_rel_offset Previous value of Register is saved at offset
+ /// Offset from the current CFA register. This is transformed to .cfi_offset
+ /// using the known displacement of the CFA register from the CFA.
+ static MCCFIInstruction createRelOffset(MCSymbol *L, unsigned Register,
+ int Offset) {
+ return MCCFIInstruction(OpRelOffset, L, Register, Offset, "");
+ }
+
+ /// \brief .cfi_register Previous value of Register1 is saved in
+ /// register Register2.
+ static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1,
+ unsigned Register2) {
+ return MCCFIInstruction(OpRegister, L, Register1, Register2);
+ }
+
+ /// \brief .cfi_window_save SPARC register window is saved.
+ static MCCFIInstruction createWindowSave(MCSymbol *L) {
+ return MCCFIInstruction(OpWindowSave, L, 0, 0, "");
+ }
+
+ /// \brief .cfi_restore says that the rule for Register is now the same as it
+ /// was at the beginning of the function, after all initial instructions added
+ /// by .cfi_startproc were executed.
+ static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register) {
+ return MCCFIInstruction(OpRestore, L, Register, 0, "");
+ }
+
+ /// \brief .cfi_undefined From now on the previous value of Register can't be
+ /// restored anymore.
+ static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register) {
+ return MCCFIInstruction(OpUndefined, L, Register, 0, "");
+ }
+
+ /// \brief .cfi_same_value Current value of Register is the same as in the
+ /// previous frame. I.e., no restoration is needed.
+ static MCCFIInstruction createSameValue(MCSymbol *L, unsigned Register) {
+ return MCCFIInstruction(OpSameValue, L, Register, 0, "");
+ }
+
+ /// \brief .cfi_remember_state Save all current rules for all registers.
+ static MCCFIInstruction createRememberState(MCSymbol *L) {
+ return MCCFIInstruction(OpRememberState, L, 0, 0, "");
+ }
+
+ /// \brief .cfi_restore_state Restore the previously saved state.
+ static MCCFIInstruction createRestoreState(MCSymbol *L) {
+ return MCCFIInstruction(OpRestoreState, L, 0, 0, "");
+ }
+
+ /// \brief .cfi_escape Allows the user to add arbitrary bytes to the unwind
+ /// info.
+ static MCCFIInstruction createEscape(MCSymbol *L, StringRef Vals) {
+ return MCCFIInstruction(OpEscape, L, 0, 0, Vals);
+ }
+
+ /// \brief A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE
+ static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int Size) {
+ return MCCFIInstruction(OpGnuArgsSize, L, 0, Size, "");
+ }
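+
+  // As an illustrative sketch (x86-64 DWARF register number 6 is %rbp; the
+  // labels L1 and L2 are hypothetical), a frame-pointer prologue maps to:
+  //   // .cfi_offset %rbp, -16 : %rbp was saved at [CFA - 16].
+  //   MCCFIInstruction::createOffset(L1, 6, -16);
+  //   // .cfi_def_cfa_register %rbp : CFA is now computed from %rbp.
+  //   MCCFIInstruction::createDefCfaRegister(L2, 6);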
+
+ OpType getOperation() const { return Operation; }
+ MCSymbol *getLabel() const { return Label; }
+
+ unsigned getRegister() const {
+ assert(Operation == OpDefCfa || Operation == OpOffset ||
+ Operation == OpRestore || Operation == OpUndefined ||
+ Operation == OpSameValue || Operation == OpDefCfaRegister ||
+ Operation == OpRelOffset || Operation == OpRegister);
+ return Register;
+ }
+
+ unsigned getRegister2() const {
+ assert(Operation == OpRegister);
+ return Register2;
+ }
+
+ int getOffset() const {
+ assert(Operation == OpDefCfa || Operation == OpOffset ||
+ Operation == OpRelOffset || Operation == OpDefCfaOffset ||
+ Operation == OpAdjustCfaOffset || Operation == OpGnuArgsSize);
+ return Offset;
+ }
+
+ StringRef getValues() const {
+ assert(Operation == OpEscape);
+ return StringRef(&Values[0], Values.size());
+ }
+};
+
+struct MCDwarfFrameInfo {
+ MCDwarfFrameInfo()
+ : Begin(nullptr), End(nullptr), Personality(nullptr), Lsda(nullptr),
+ Instructions(), CurrentCfaRegister(0), PersonalityEncoding(),
+ LsdaEncoding(0), CompactUnwindEncoding(0), IsSignalFrame(false),
+ IsSimple(false) {}
+ MCSymbol *Begin;
+ MCSymbol *End;
+ const MCSymbol *Personality;
+ const MCSymbol *Lsda;
+ std::vector<MCCFIInstruction> Instructions;
+ unsigned CurrentCfaRegister;
+ unsigned PersonalityEncoding;
+ unsigned LsdaEncoding;
+ uint32_t CompactUnwindEncoding;
+ bool IsSignalFrame;
+ bool IsSimple;
+};
+
+class MCDwarfFrameEmitter {
+public:
+ //
+ // This emits the frame info section.
+ //
+ static void Emit(MCObjectStreamer &streamer, MCAsmBackend *MAB, bool isEH);
+ static void EmitAdvanceLoc(MCObjectStreamer &Streamer, uint64_t AddrDelta);
+ static void EncodeAdvanceLoc(MCContext &Context, uint64_t AddrDelta,
+ raw_ostream &OS);
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
new file mode 100644
index 0000000..193dac0
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCELFObjectWriter.h
@@ -0,0 +1,135 @@
+//===-- llvm/MC/MCELFObjectWriter.h - ELF Object Writer ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFOBJECTWRITER_H
+#define LLVM_MC_MCELFOBJECTWRITER_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ELF.h"
+#include <vector>
+
+namespace llvm {
+class MCAssembler;
+class MCFixup;
+class MCFragment;
+class MCObjectWriter;
+class MCSymbol;
+class MCSymbolELF;
+class MCValue;
+class raw_pwrite_stream;
+
+struct ELFRelocationEntry {
+ uint64_t Offset; // Where is the relocation.
+ const MCSymbolELF *Symbol; // The symbol to relocate with.
+ unsigned Type; // The type of the relocation.
+ uint64_t Addend; // The addend to use.
+
+ ELFRelocationEntry(uint64_t Offset, const MCSymbolELF *Symbol, unsigned Type,
+ uint64_t Addend)
+ : Offset(Offset), Symbol(Symbol), Type(Type), Addend(Addend) {}
+};
+
+class MCELFObjectTargetWriter {
+ const uint8_t OSABI;
+ const uint16_t EMachine;
+ const unsigned HasRelocationAddend : 1;
+ const unsigned Is64Bit : 1;
+ const unsigned IsN64 : 1;
+
+protected:
+
+ MCELFObjectTargetWriter(bool Is64Bit_, uint8_t OSABI_,
+ uint16_t EMachine_, bool HasRelocationAddend,
+ bool IsN64=false);
+
+public:
+ static uint8_t getOSABI(Triple::OSType OSType) {
+ switch (OSType) {
+ case Triple::CloudABI:
+ return ELF::ELFOSABI_CLOUDABI;
+ case Triple::PS4:
+ case Triple::FreeBSD:
+ return ELF::ELFOSABI_FREEBSD;
+ default:
+ return ELF::ELFOSABI_NONE;
+ }
+ }
+
+ virtual ~MCELFObjectTargetWriter() {}
+
+ virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsPCRel) const = 0;
+
+ virtual bool needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const;
+
+ virtual void sortRelocs(const MCAssembler &Asm,
+ std::vector<ELFRelocationEntry> &Relocs);
+
+ /// \name Accessors
+ /// @{
+ uint8_t getOSABI() const { return OSABI; }
+ uint16_t getEMachine() const { return EMachine; }
+ bool hasRelocationAddend() const { return HasRelocationAddend; }
+ bool is64Bit() const { return Is64Bit; }
+ bool isN64() const { return IsN64; }
+ /// @}
+
+  // Instead of changing everyone's API, we pack the N64 type fields
+  // into the existing 32-bit unsigned relocation-type value.
+#define R_TYPE_SHIFT 0
+#define R_TYPE_MASK 0xffffff00
+#define R_TYPE2_SHIFT 8
+#define R_TYPE2_MASK 0xffff00ff
+#define R_TYPE3_SHIFT 16
+#define R_TYPE3_MASK 0xff00ffff
+#define R_SSYM_SHIFT 24
+#define R_SSYM_MASK 0x00ffffff
+
+ // N64 relocation type accessors
+ uint8_t getRType(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE_SHIFT) & 0xff);
+ }
+ uint8_t getRType2(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE2_SHIFT) & 0xff);
+ }
+ uint8_t getRType3(uint32_t Type) const {
+ return (unsigned)((Type >> R_TYPE3_SHIFT) & 0xff);
+ }
+ uint8_t getRSsym(uint32_t Type) const {
+ return (unsigned)((Type >> R_SSYM_SHIFT) & 0xff);
+ }
+
+ // N64 relocation type setting
+ unsigned setRType(unsigned Value, unsigned Type) const {
+ return ((Type & R_TYPE_MASK) | ((Value & 0xff) << R_TYPE_SHIFT));
+ }
+ unsigned setRType2(unsigned Value, unsigned Type) const {
+ return (Type & R_TYPE2_MASK) | ((Value & 0xff) << R_TYPE2_SHIFT);
+ }
+ unsigned setRType3(unsigned Value, unsigned Type) const {
+ return (Type & R_TYPE3_MASK) | ((Value & 0xff) << R_TYPE3_SHIFT);
+ }
+ unsigned setRSsym(unsigned Value, unsigned Type) const {
+ return (Type & R_SSYM_MASK) | ((Value & 0xff) << R_SSYM_SHIFT);
+ }
+};
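
Since the N64 helpers above are plain bit twiddling, a short round trip shows
the packing; the R_MIPS_* numbers in the comments are illustrative, not
normative.

#include "llvm/MC/MCELFObjectWriter.h"
#include <cassert>

using namespace llvm;

// Pack a composite N64 relocation (type, type2, type3) into one 32-bit value
// and read each field back through the accessors.
static unsigned packN64(const MCELFObjectTargetWriter &W) {
  unsigned Type = 0;
  Type = W.setRType(7, Type);   // e.g. R_MIPS_GPREL16
  Type = W.setRType2(36, Type); // e.g. R_MIPS_SUB
  Type = W.setRType3(5, Type);  // e.g. R_MIPS_HI16
  assert(W.getRType(Type) == 7 && W.getRType2(Type) == 36 &&
         W.getRType3(Type) == 5);
  return Type;
}
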
+
+/// \brief Construct a new ELF writer instance.
+///
+/// \param MOTW - The target specific ELF writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+MCObjectWriter *createELFObjectWriter(MCELFObjectTargetWriter *MOTW,
+ raw_pwrite_stream &OS,
+ bool IsLittleEndian);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCELFStreamer.h b/contrib/llvm/include/llvm/MC/MCELFStreamer.h
new file mode 100644
index 0000000..6eb2c2c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCELFStreamer.h
@@ -0,0 +1,109 @@
+//===- MCELFStreamer.h - MCStreamer ELF Object File Interface ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCELFSTREAMER_H
+#define LLVM_MC_MCELFSTREAMER_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/DataTypes.h"
+#include <vector>
+
+namespace llvm {
+class MCAsmBackend;
+class MCAssembler;
+class MCCodeEmitter;
+class MCExpr;
+class MCInst;
+class raw_ostream;
+
+class MCELFStreamer : public MCObjectStreamer {
+public:
+ MCELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter)
+ : MCObjectStreamer(Context, TAB, OS, Emitter), SeenIdent(false) {}
+
+ ~MCELFStreamer() override;
+
+  /// State management.
+ void reset() override {
+ SeenIdent = false;
+ BundleGroups.clear();
+ MCObjectStreamer::reset();
+ }
+
+ /// \name MCStreamer Interface
+ /// @{
+
+ void InitSections(bool NoExecStack) override;
+ void ChangeSection(MCSection *Section, const MCExpr *Subsection) override;
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitThumbFunc(MCSymbol *Func) override;
+ void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+ void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void BeginCOFFSymbolDef(const MCSymbol *Symbol) override;
+ void EmitCOFFSymbolStorageClass(int StorageClass) override;
+ void EmitCOFFSymbolType(int Type) override;
+ void EndCOFFSymbolDef() override;
+
+ void emitELFSize(MCSymbolELF *Symbol, const MCExpr *Value) override;
+
+ void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+
+ void EmitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
+ uint64_t Size = 0, unsigned ByteAlignment = 0) override;
+ void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment = 0) override;
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ SMLoc Loc = SMLoc()) override;
+
+ void EmitFileDirective(StringRef Filename) override;
+
+ void EmitIdent(StringRef IdentString) override;
+
+ void EmitValueToAlignment(unsigned, int64_t, unsigned, unsigned) override;
+
+ void FinishImpl() override;
+
+ void EmitBundleAlignMode(unsigned AlignPow2) override;
+ void EmitBundleLock(bool AlignToEnd) override;
+ void EmitBundleUnlock() override;
+
+private:
+ bool isBundleLocked() const;
+ void EmitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &) override;
+ void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &) override;
+
+ void fixSymbolsInTLSFixups(const MCExpr *expr);
+
+ /// \brief Merge the content of the fragment \p EF into the fragment \p DF.
+  void mergeFragment(MCDataFragment *DF, MCDataFragment *EF);
+
+ bool SeenIdent;
+
+ /// BundleGroups - The stack of fragments holding the bundle-locked
+ /// instructions.
+ llvm::SmallVector<MCDataFragment *, 4> BundleGroups;
+};
+
+MCELFStreamer *createARMELFStreamer(MCContext &Context, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter, bool RelaxAll,
+ bool IsThumb);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCExpr.h b/contrib/llvm/include/llvm/MC/MCExpr.h
new file mode 100644
index 0000000..1d6bdef
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCExpr.h
@@ -0,0 +1,572 @@
+//===- MCExpr.h - Assembly Level Expressions --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCEXPR_H
+#define LLVM_MC_MCEXPR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class MCAsmInfo;
+class MCAsmLayout;
+class MCAssembler;
+class MCContext;
+class MCFixup;
+class MCFragment;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+class MCValue;
+class raw_ostream;
+class StringRef;
+typedef DenseMap<const MCSection *, uint64_t> SectionAddrMap;
+
+/// \brief Base class for the full range of assembler expressions which are
+/// needed for parsing.
+class MCExpr {
+public:
+ enum ExprKind {
+ Binary, ///< Binary expressions.
+ Constant, ///< Constant expressions.
+ SymbolRef, ///< References to labels and assigned expressions.
+ Unary, ///< Unary expressions.
+ Target ///< Target specific expression.
+ };
+
+private:
+ ExprKind Kind;
+
+ MCExpr(const MCExpr&) = delete;
+ void operator=(const MCExpr&) = delete;
+
+ bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs) const;
+
+ bool evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const SectionAddrMap *Addrs, bool InSet) const;
+
+protected:
+ explicit MCExpr(ExprKind Kind) : Kind(Kind) {}
+
+ bool evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup,
+ const SectionAddrMap *Addrs, bool InSet) const;
+
+public:
+ /// \name Accessors
+ /// @{
+
+ ExprKind getKind() const { return Kind; }
+
+ /// @}
+ /// \name Utility Methods
+ /// @{
+
+ void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
+ void dump() const;
+
+ /// @}
+ /// \name Expression Evaluation
+ /// @{
+
+ /// \brief Try to evaluate the expression to an absolute value.
+ ///
+ /// \param Res - The absolute value, if evaluation succeeds.
+ /// \param Layout - The assembler layout object to use for evaluating symbol
+ /// values. If not given, then only non-symbolic expressions will be
+ /// evaluated.
+ /// \return - True on success.
+ bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+ const SectionAddrMap &Addrs) const;
+ bool evaluateAsAbsolute(int64_t &Res) const;
+ bool evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
+ bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
+
+ bool evaluateKnownAbsolute(int64_t &Res, const MCAsmLayout &Layout) const;
+
+ /// \brief Try to evaluate the expression to a relocatable value, i.e. an
+ /// expression of the fixed form (a - b + constant).
+ ///
+ /// \param Res - The relocatable value, if evaluation succeeds.
+ /// \param Layout - The assembler layout object to use for evaluating values.
+ /// \param Fixup - The Fixup object if available.
+ /// \return - True on success.
+ bool evaluateAsRelocatable(MCValue &Res, const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const;
+
+ /// \brief Try to evaluate the expression to the form (a - b + constant) where
+ /// neither a nor b are variables.
+ ///
+ /// This is a more aggressive variant of evaluateAsRelocatable. The intended
+ /// use is for when relocations are not available, like the .size directive.
+ bool evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const;
+
+ /// \brief Find the "associated section" for this expression, which is
+ /// currently defined as the absolute section for constants, or
+ /// otherwise the section associated with the first defined symbol in the
+ /// expression.
+ MCFragment *findAssociatedFragment() const;
+
+ /// @}
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) {
+ E.print(OS, nullptr);
+ return OS;
+}
+
+/// \brief Represent a constant integer expression.
+class MCConstantExpr : public MCExpr {
+ int64_t Value;
+
+ explicit MCConstantExpr(int64_t Value)
+ : MCExpr(MCExpr::Constant), Value(Value) {}
+
+public:
+ /// \name Construction
+ /// @{
+
+ static const MCConstantExpr *create(int64_t Value, MCContext &Ctx);
+
+ /// @}
+ /// \name Accessors
+ /// @{
+
+ int64_t getValue() const { return Value; }
+
+ /// @}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Constant;
+ }
+};
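
A minimal sketch of the no-layout evaluation path declared on MCExpr above; it
assumes only that the caller supplies an MCContext.

#include "llvm/MC/MCExpr.h"
#include <cassert>

using namespace llvm;

// Constant expressions fold without any assembler or layout information.
static void demoConstFold(MCContext &Ctx) {
  const MCExpr *Four = MCConstantExpr::create(4, Ctx);
  int64_t Res;
  bool OK = Four->evaluateAsAbsolute(Res); // the no-argument overload
  assert(OK && Res == 4);
  (void)OK;
}
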
+
+/// \brief Represent a reference to a symbol from inside an expression.
+///
+/// A symbol reference in an expression may be a use of a label, a use of an
+/// assembler variable (defined constant), or constitute an implicit definition
+/// of the symbol as external.
+class MCSymbolRefExpr : public MCExpr {
+public:
+ enum VariantKind : uint16_t {
+ VK_None,
+ VK_Invalid,
+
+ VK_GOT,
+ VK_GOTOFF,
+ VK_GOTPCREL,
+ VK_GOTTPOFF,
+ VK_INDNTPOFF,
+ VK_NTPOFF,
+ VK_GOTNTPOFF,
+ VK_PLT,
+ VK_TLSGD,
+ VK_TLSLD,
+ VK_TLSLDM,
+ VK_TPOFF,
+ VK_DTPOFF,
+ VK_TLVP, // Mach-O thread local variable relocations
+ VK_TLVPPAGE,
+ VK_TLVPPAGEOFF,
+ VK_PAGE,
+ VK_PAGEOFF,
+ VK_GOTPAGE,
+ VK_GOTPAGEOFF,
+ VK_SECREL,
+ VK_SIZE, // symbol@SIZE
+ VK_WEAKREF, // The link between the symbols in .weakref foo, bar
+
+ VK_ARM_NONE,
+ VK_ARM_GOT_PREL,
+ VK_ARM_TARGET1,
+ VK_ARM_TARGET2,
+ VK_ARM_PREL31,
+ VK_ARM_SBREL, // symbol(sbrel)
+ VK_ARM_TLSLDO, // symbol(tlsldo)
+ VK_ARM_TLSCALL, // symbol(tlscall)
+ VK_ARM_TLSDESC, // symbol(tlsdesc)
+ VK_ARM_TLSDESCSEQ,
+
+ VK_PPC_LO, // symbol@l
+ VK_PPC_HI, // symbol@h
+ VK_PPC_HA, // symbol@ha
+ VK_PPC_HIGHER, // symbol@higher
+ VK_PPC_HIGHERA, // symbol@highera
+ VK_PPC_HIGHEST, // symbol@highest
+ VK_PPC_HIGHESTA, // symbol@highesta
+ VK_PPC_GOT_LO, // symbol@got@l
+ VK_PPC_GOT_HI, // symbol@got@h
+ VK_PPC_GOT_HA, // symbol@got@ha
+ VK_PPC_TOCBASE, // symbol@tocbase
+ VK_PPC_TOC, // symbol@toc
+ VK_PPC_TOC_LO, // symbol@toc@l
+ VK_PPC_TOC_HI, // symbol@toc@h
+ VK_PPC_TOC_HA, // symbol@toc@ha
+ VK_PPC_DTPMOD, // symbol@dtpmod
+ VK_PPC_TPREL, // symbol@tprel
+ VK_PPC_TPREL_LO, // symbol@tprel@l
+ VK_PPC_TPREL_HI, // symbol@tprel@h
+ VK_PPC_TPREL_HA, // symbol@tprel@ha
+ VK_PPC_TPREL_HIGHER, // symbol@tprel@higher
+ VK_PPC_TPREL_HIGHERA, // symbol@tprel@highera
+ VK_PPC_TPREL_HIGHEST, // symbol@tprel@highest
+ VK_PPC_TPREL_HIGHESTA, // symbol@tprel@highesta
+ VK_PPC_DTPREL, // symbol@dtprel
+ VK_PPC_DTPREL_LO, // symbol@dtprel@l
+ VK_PPC_DTPREL_HI, // symbol@dtprel@h
+ VK_PPC_DTPREL_HA, // symbol@dtprel@ha
+ VK_PPC_DTPREL_HIGHER, // symbol@dtprel@higher
+ VK_PPC_DTPREL_HIGHERA, // symbol@dtprel@highera
+ VK_PPC_DTPREL_HIGHEST, // symbol@dtprel@highest
+ VK_PPC_DTPREL_HIGHESTA,// symbol@dtprel@highesta
+ VK_PPC_GOT_TPREL, // symbol@got@tprel
+ VK_PPC_GOT_TPREL_LO, // symbol@got@tprel@l
+ VK_PPC_GOT_TPREL_HI, // symbol@got@tprel@h
+ VK_PPC_GOT_TPREL_HA, // symbol@got@tprel@ha
+ VK_PPC_GOT_DTPREL, // symbol@got@dtprel
+ VK_PPC_GOT_DTPREL_LO, // symbol@got@dtprel@l
+ VK_PPC_GOT_DTPREL_HI, // symbol@got@dtprel@h
+ VK_PPC_GOT_DTPREL_HA, // symbol@got@dtprel@ha
+ VK_PPC_TLS, // symbol@tls
+ VK_PPC_GOT_TLSGD, // symbol@got@tlsgd
+ VK_PPC_GOT_TLSGD_LO, // symbol@got@tlsgd@l
+ VK_PPC_GOT_TLSGD_HI, // symbol@got@tlsgd@h
+ VK_PPC_GOT_TLSGD_HA, // symbol@got@tlsgd@ha
+ VK_PPC_TLSGD, // symbol@tlsgd
+ VK_PPC_GOT_TLSLD, // symbol@got@tlsld
+ VK_PPC_GOT_TLSLD_LO, // symbol@got@tlsld@l
+ VK_PPC_GOT_TLSLD_HI, // symbol@got@tlsld@h
+ VK_PPC_GOT_TLSLD_HA, // symbol@got@tlsld@ha
+ VK_PPC_TLSLD, // symbol@tlsld
+ VK_PPC_LOCAL, // symbol@local
+
+ VK_Mips_GPREL,
+ VK_Mips_GOT_CALL,
+ VK_Mips_GOT16,
+ VK_Mips_GOT,
+ VK_Mips_ABS_HI,
+ VK_Mips_ABS_LO,
+ VK_Mips_TLSGD,
+ VK_Mips_TLSLDM,
+ VK_Mips_DTPREL_HI,
+ VK_Mips_DTPREL_LO,
+ VK_Mips_GOTTPREL,
+ VK_Mips_TPREL_HI,
+ VK_Mips_TPREL_LO,
+ VK_Mips_GPOFF_HI,
+ VK_Mips_GPOFF_LO,
+ VK_Mips_GOT_DISP,
+ VK_Mips_GOT_PAGE,
+ VK_Mips_GOT_OFST,
+ VK_Mips_HIGHER,
+ VK_Mips_HIGHEST,
+ VK_Mips_GOT_HI16,
+ VK_Mips_GOT_LO16,
+ VK_Mips_CALL_HI16,
+ VK_Mips_CALL_LO16,
+ VK_Mips_PCREL_HI16,
+ VK_Mips_PCREL_LO16,
+
+ VK_COFF_IMGREL32, // symbol@imgrel (image-relative)
+
+ VK_Hexagon_PCREL,
+ VK_Hexagon_LO16,
+ VK_Hexagon_HI16,
+ VK_Hexagon_GPREL,
+ VK_Hexagon_GD_GOT,
+ VK_Hexagon_LD_GOT,
+ VK_Hexagon_GD_PLT,
+ VK_Hexagon_LD_PLT,
+ VK_Hexagon_IE,
+ VK_Hexagon_IE_GOT,
+ VK_TPREL,
+ VK_DTPREL
+ };
+
+private:
+ /// The symbol reference modifier.
+ const VariantKind Kind;
+
+ /// Specifies how the variant kind should be printed.
+ const unsigned UseParensForSymbolVariant : 1;
+
+ // FIXME: Remove this bit.
+ const unsigned HasSubsectionsViaSymbols : 1;
+
+ /// The symbol being referenced.
+ const MCSymbol *Symbol;
+
+ explicit MCSymbolRefExpr(const MCSymbol *Symbol, VariantKind Kind,
+ const MCAsmInfo *MAI);
+
+public:
+ /// \name Construction
+ /// @{
+
+ static const MCSymbolRefExpr *create(const MCSymbol *Symbol, MCContext &Ctx) {
+ return MCSymbolRefExpr::create(Symbol, VK_None, Ctx);
+ }
+
+ static const MCSymbolRefExpr *create(const MCSymbol *Symbol, VariantKind Kind,
+ MCContext &Ctx);
+ static const MCSymbolRefExpr *create(StringRef Name, VariantKind Kind,
+ MCContext &Ctx);
+
+ /// @}
+ /// \name Accessors
+ /// @{
+
+ const MCSymbol &getSymbol() const { return *Symbol; }
+
+ VariantKind getKind() const { return Kind; }
+
+ void printVariantKind(raw_ostream &OS) const;
+
+ bool hasSubsectionsViaSymbols() const { return HasSubsectionsViaSymbols; }
+
+ /// @}
+ /// \name Static Utility Functions
+ /// @{
+
+ static StringRef getVariantKindName(VariantKind Kind);
+
+ static VariantKind getVariantKindForName(StringRef Name);
+
+ /// @}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::SymbolRef;
+ }
+};
+
+/// \brief Unary assembler expressions.
+class MCUnaryExpr : public MCExpr {
+public:
+ enum Opcode {
+ LNot, ///< Logical negation.
+ Minus, ///< Unary minus.
+ Not, ///< Bitwise negation.
+ Plus ///< Unary plus.
+ };
+
+private:
+ Opcode Op;
+ const MCExpr *Expr;
+
+ MCUnaryExpr(Opcode Op, const MCExpr *Expr)
+ : MCExpr(MCExpr::Unary), Op(Op), Expr(Expr) {}
+
+public:
+ /// \name Construction
+ /// @{
+
+ static const MCUnaryExpr *create(Opcode Op, const MCExpr *Expr,
+ MCContext &Ctx);
+ static const MCUnaryExpr *createLNot(const MCExpr *Expr, MCContext &Ctx) {
+ return create(LNot, Expr, Ctx);
+ }
+ static const MCUnaryExpr *createMinus(const MCExpr *Expr, MCContext &Ctx) {
+ return create(Minus, Expr, Ctx);
+ }
+ static const MCUnaryExpr *createNot(const MCExpr *Expr, MCContext &Ctx) {
+ return create(Not, Expr, Ctx);
+ }
+ static const MCUnaryExpr *createPlus(const MCExpr *Expr, MCContext &Ctx) {
+ return create(Plus, Expr, Ctx);
+ }
+
+ /// @}
+ /// \name Accessors
+ /// @{
+
+ /// \brief Get the kind of this unary expression.
+ Opcode getOpcode() const { return Op; }
+
+ /// \brief Get the child of this unary expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// @}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Unary;
+ }
+};
+
+/// \brief Binary assembler expressions.
+class MCBinaryExpr : public MCExpr {
+public:
+ enum Opcode {
+ Add, ///< Addition.
+ And, ///< Bitwise and.
+ Div, ///< Signed division.
+ EQ, ///< Equality comparison.
+ GT, ///< Signed greater than comparison (result is either 0 or some
+ ///< target-specific non-zero value)
+ GTE, ///< Signed greater than or equal comparison (result is either 0 or
+ ///< some target-specific non-zero value).
+ LAnd, ///< Logical and.
+ LOr, ///< Logical or.
+ LT, ///< Signed less than comparison (result is either 0 or
+ ///< some target-specific non-zero value).
+ LTE, ///< Signed less than or equal comparison (result is either 0 or
+ ///< some target-specific non-zero value).
+ Mod, ///< Signed remainder.
+ Mul, ///< Multiplication.
+ NE, ///< Inequality comparison.
+ Or, ///< Bitwise or.
+ Shl, ///< Shift left.
+ AShr, ///< Arithmetic shift right.
+ LShr, ///< Logical shift right.
+ Sub, ///< Subtraction.
+ Xor ///< Bitwise exclusive or.
+ };
+
+private:
+ Opcode Op;
+ const MCExpr *LHS, *RHS;
+
+ MCBinaryExpr(Opcode Op, const MCExpr *LHS, const MCExpr *RHS)
+ : MCExpr(MCExpr::Binary), Op(Op), LHS(LHS), RHS(RHS) {}
+
+public:
+ /// \name Construction
+ /// @{
+
+ static const MCBinaryExpr *create(Opcode Op, const MCExpr *LHS,
+ const MCExpr *RHS, MCContext &Ctx);
+ static const MCBinaryExpr *createAdd(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Add, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createAnd(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(And, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createDiv(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Div, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createEQ(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(EQ, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createGT(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(GT, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createGTE(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(GTE, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createLAnd(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(LAnd, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createLOr(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(LOr, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createLT(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(LT, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createLTE(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(LTE, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createMod(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Mod, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createMul(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Mul, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createNE(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(NE, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createOr(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Or, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createShl(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Shl, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createAShr(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(AShr, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createLShr(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(LShr, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createSub(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Sub, LHS, RHS, Ctx);
+ }
+ static const MCBinaryExpr *createXor(const MCExpr *LHS, const MCExpr *RHS,
+ MCContext &Ctx) {
+ return create(Xor, LHS, RHS, Ctx);
+ }
+
+ /// @}
+ /// \name Accessors
+ /// @{
+
+ /// \brief Get the kind of this binary expression.
+ Opcode getOpcode() const { return Op; }
+
+ /// \brief Get the left-hand side expression of the binary operator.
+ const MCExpr *getLHS() const { return LHS; }
+
+ /// \brief Get the right-hand side expression of the binary operator.
+ const MCExpr *getRHS() const { return RHS; }
+
+ /// @}
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Binary;
+ }
+};
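
Putting the factories together, the sketch below builds the (a - b + constant)
shape that evaluateAsRelocatable documents; the symbols and context are
assumed to be supplied by the caller.

#include "llvm/MC/MCExpr.h"

using namespace llvm;

// Build "A - B + 4" as an expression tree using the create* factories.
static const MCExpr *demoDiffPlus4(const MCSymbol *A, const MCSymbol *B,
                                   MCContext &Ctx) {
  const MCExpr *Diff = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(A, Ctx), MCSymbolRefExpr::create(B, Ctx), Ctx);
  return MCBinaryExpr::createAdd(Diff, MCConstantExpr::create(4, Ctx), Ctx);
}
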
+
+/// \brief This is an extension point for target-specific MCExpr subclasses to
+/// implement.
+///
+/// NOTE: All subclasses are required to have trivial destructors because
+/// MCExprs are bump pointer allocated and not destructed.
+class MCTargetExpr : public MCExpr {
+ virtual void anchor();
+protected:
+ MCTargetExpr() : MCExpr(Target) {}
+ virtual ~MCTargetExpr() {}
+public:
+ virtual void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const = 0;
+ virtual bool evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const = 0;
+ virtual void visitUsedExpr(MCStreamer& Streamer) const = 0;
+ virtual MCFragment *findAssociatedFragment() const = 0;
+
+ virtual void fixELFSymbolsInTLSFixups(MCAssembler &) const = 0;
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCExternalSymbolizer.h b/contrib/llvm/include/llvm/MC/MCExternalSymbolizer.h
new file mode 100644
index 0000000..2c7d237
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCExternalSymbolizer.h
@@ -0,0 +1,58 @@
+//===-- llvm/MC/MCExternalSymbolizer.h - ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCExternalSymbolizer class, which
+// enables library users to provide callbacks (through the C API) to do the
+// symbolization externally.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCEXTERNALSYMBOLIZER_H
+#define LLVM_MC_MCEXTERNALSYMBOLIZER_H
+
+#include "llvm-c/Disassembler.h"
+#include "llvm/MC/MCSymbolizer.h"
+#include <memory>
+
+namespace llvm {
+
+/// \brief Symbolize using user-provided, C API, callbacks.
+///
+/// See llvm-c/Disassembler.h.
+class MCExternalSymbolizer : public MCSymbolizer {
+protected:
+ /// \name Hooks for symbolic disassembly via the public 'C' interface.
+ /// @{
+ /// The function to get the symbolic information for operands.
+ LLVMOpInfoCallback GetOpInfo;
+  /// The function to look up a symbol name.
+ LLVMSymbolLookupCallback SymbolLookUp;
+  /// The pointer to the block of symbolic information for the above callbacks.
+ void *DisInfo;
+ /// @}
+
+public:
+ MCExternalSymbolizer(MCContext &Ctx,
+ std::unique_ptr<MCRelocationInfo> RelInfo,
+ LLVMOpInfoCallback getOpInfo,
+ LLVMSymbolLookupCallback symbolLookUp, void *disInfo)
+ : MCSymbolizer(Ctx, std::move(RelInfo)), GetOpInfo(getOpInfo),
+ SymbolLookUp(symbolLookUp), DisInfo(disInfo) {}
+
+ bool tryAddingSymbolicOperand(MCInst &MI, raw_ostream &CommentStream,
+ int64_t Value, uint64_t Address, bool IsBranch,
+ uint64_t Offset, uint64_t InstSize) override;
+ void tryAddingPcLoadReferenceComment(raw_ostream &CommentStream,
+ int64_t Value,
+ uint64_t Address) override;
+};
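
As a rough illustration of the callback shapes this class stores (the typedefs
live in llvm-c/Disassembler.h), a do-nothing client might look like the sketch
below; a real tool would consult its own symbol tables instead of returning
"nothing known".

#include "llvm-c/Disassembler.h"
#include <cstdint>

// Minimal stub callbacks matching the C-API typedefs.
static int demoOpInfo(void *DisInfo, uint64_t PC, uint64_t Offset,
                      uint64_t Size, int TagType, void *TagBuf) {
  return 0; // 0: no symbolic information for this operand
}

static const char *demoSymbolLookup(void *DisInfo, uint64_t ReferenceValue,
                                    uint64_t *ReferenceType,
                                    uint64_t ReferencePC,
                                    const char **ReferenceName) {
  *ReferenceName = nullptr;
  *ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
  return nullptr; // no symbol name known for ReferenceValue
}
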
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h b/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h
new file mode 100644
index 0000000..ad34d94
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFixedLenDisassembler.h
@@ -0,0 +1,34 @@
+//===-- llvm/MC/MCFixedLenDisassembler.h - Decoder driver -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Fixed length disassembler decoder state machine driver.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCFIXEDLENDISASSEMBLER_H
+#define LLVM_MC_MCFIXEDLENDISASSEMBLER_H
+
+namespace llvm {
+
+namespace MCD {
+// Disassembler state machine opcodes.
+enum DecoderOps {
+ OPC_ExtractField = 1, // OPC_ExtractField(uint8_t Start, uint8_t Len)
+ OPC_FilterValue, // OPC_FilterValue(uleb128 Val, uint16_t NumToSkip)
+ OPC_CheckField, // OPC_CheckField(uint8_t Start, uint8_t Len,
+ // uleb128 Val, uint16_t NumToSkip)
+ OPC_CheckPredicate, // OPC_CheckPredicate(uleb128 PIdx, uint16_t NumToSkip)
+ OPC_Decode, // OPC_Decode(uleb128 Opcode, uleb128 DIdx)
+ OPC_TryDecode, // OPC_TryDecode(uleb128 Opcode, uleb128 DIdx,
+ // uint16_t NumToSkip)
+ OPC_SoftFail, // OPC_SoftFail(uleb128 PMask, uleb128 NMask)
+ OPC_Fail // OPC_Fail()
+};
+
+} // namespace MCD
+} // namespace llvm
+
+#endif
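
These opcodes drive the byte-coded tables produced by TableGen's
FixedLenDecoderEmitter. The fragment below is a hand-written illustration that
follows the operand layouts given in the enum comments (raw bytes for
Start/Len, ULEB128 values, a two-byte NumToSkip); real tables are generated,
never written by hand, so treat the exact byte layout as an assumption.

#include "llvm/MC/MCFixedLenDisassembler.h"
#include <cstdint>

using namespace llvm;

// Hypothetical table: if bits [26,32) of the instruction equal 0, decode as
// (made-up) opcode 42 with decoder index 1; otherwise skip 3 bytes to
// OPC_Fail.
static const uint8_t DemoDecoderTable[] = {
    MCD::OPC_CheckField, /*Start=*/26, /*Len=*/6, /*Val(uleb128)=*/0,
    /*NumToSkip(lo,hi)=*/3, 0,
    MCD::OPC_Decode, /*Opcode(uleb128)=*/42, /*DIdx(uleb128)=*/1,
    MCD::OPC_Fail,
};
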
diff --git a/contrib/llvm/include/llvm/MC/MCFixup.h b/contrib/llvm/include/llvm/MC/MCFixup.h
new file mode 100644
index 0000000..8ab477c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFixup.h
@@ -0,0 +1,113 @@
+//===-- llvm/MC/MCFixup.h - Instruction Relocation and Patching -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFIXUP_H
+#define LLVM_MC_MCFIXUP_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
+#include <cassert>
+
+namespace llvm {
+class MCExpr;
+
+/// \brief Extensible enumeration to represent the type of a fixup.
+enum MCFixupKind {
+ FK_Data_1 = 0, ///< A one-byte fixup.
+ FK_Data_2, ///< A two-byte fixup.
+ FK_Data_4, ///< A four-byte fixup.
+  FK_Data_8,     ///< An eight-byte fixup.
+ FK_PCRel_1, ///< A one-byte pc relative fixup.
+ FK_PCRel_2, ///< A two-byte pc relative fixup.
+ FK_PCRel_4, ///< A four-byte pc relative fixup.
+  FK_PCRel_8,    ///< An eight-byte pc relative fixup.
+ FK_GPRel_1, ///< A one-byte gp relative fixup.
+ FK_GPRel_2, ///< A two-byte gp relative fixup.
+ FK_GPRel_4, ///< A four-byte gp relative fixup.
+  FK_GPRel_8,    ///< An eight-byte gp relative fixup.
+ FK_SecRel_1, ///< A one-byte section relative fixup.
+ FK_SecRel_2, ///< A two-byte section relative fixup.
+ FK_SecRel_4, ///< A four-byte section relative fixup.
+  FK_SecRel_8,   ///< An eight-byte section relative fixup.
+
+ FirstTargetFixupKind = 128,
+
+ // Limit range of target fixups, in case we want to pack more efficiently
+ // later.
+ MaxTargetFixupKind = (1 << 8)
+};
+
+/// \brief Encode information on a single operation to perform on a byte
+/// sequence (e.g., an encoded instruction) which requires assemble- or run-
+/// time patching.
+///
+/// Fixups are used any time the target instruction encoder needs to represent
+/// some value in an instruction which is not yet concrete. The encoder will
+/// encode the instruction assuming the value is 0, and emit a fixup which
+/// communicates to the assembler backend how it should rewrite the encoded
+/// value.
+///
+/// During the process of relaxation, the assembler will apply fixups as
+/// symbolic values become concrete. When relaxation is complete, any remaining
+/// fixups become relocations in the object file (or errors, if the fixup cannot
+/// be encoded on the target).
+class MCFixup {
+ /// The value to put into the fixup location. The exact interpretation of the
+ /// expression is target dependent, usually it will be one of the operands to
+ /// an instruction or an assembler directive.
+ const MCExpr *Value;
+
+ /// The byte index of start of the relocation inside the encoded instruction.
+ uint32_t Offset;
+
+ /// The target dependent kind of fixup item this is. The kind is used to
+ /// determine how the operand value should be encoded into the instruction.
+ unsigned Kind;
+
+ /// The source location which gave rise to the fixup, if any.
+ SMLoc Loc;
+public:
+ static MCFixup create(uint32_t Offset, const MCExpr *Value,
+ MCFixupKind Kind, SMLoc Loc = SMLoc()) {
+ assert(unsigned(Kind) < MaxTargetFixupKind && "Kind out of range!");
+ MCFixup FI;
+ FI.Value = Value;
+ FI.Offset = Offset;
+ FI.Kind = unsigned(Kind);
+ FI.Loc = Loc;
+ return FI;
+ }
+
+ MCFixupKind getKind() const { return MCFixupKind(Kind); }
+
+ uint32_t getOffset() const { return Offset; }
+ void setOffset(uint32_t Value) { Offset = Value; }
+
+ const MCExpr *getValue() const { return Value; }
+
+ /// \brief Return the generic fixup kind for a value with the given size. It
+ /// is an error to pass an unsupported size.
+ static MCFixupKind getKindForSize(unsigned Size, bool isPCRel) {
+ switch (Size) {
+ default: llvm_unreachable("Invalid generic fixup size!");
+ case 1: return isPCRel ? FK_PCRel_1 : FK_Data_1;
+ case 2: return isPCRel ? FK_PCRel_2 : FK_Data_2;
+ case 4: return isPCRel ? FK_PCRel_4 : FK_Data_4;
+ case 8: return isPCRel ? FK_PCRel_8 : FK_Data_8;
+ }
+ }
+
+ SMLoc getLoc() const { return Loc; }
+};
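
A sketch of the usual encoder pattern the class comment describes: emit a zero
placeholder, then record a fixup saying how to patch it. The helper name and
the zero offset are illustrative.

#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCFixup.h"

using namespace llvm;

// Record a 4-byte, PC-relative fixup at the start of the encoded instruction.
static void addPCRelWordFixup(const MCExpr *Expr,
                              SmallVectorImpl<MCFixup> &Fixups) {
  Fixups.push_back(MCFixup::create(
      /*Offset=*/0, Expr, MCFixup::getKindForSize(4, /*isPCRel=*/true)));
}
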
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
new file mode 100644
index 0000000..58183bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFixupKindInfo.h
@@ -0,0 +1,43 @@
+//===-- llvm/MC/MCFixupKindInfo.h - Fixup Descriptors -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFIXUPKINDINFO_H
+#define LLVM_MC_MCFIXUPKINDINFO_H
+
+namespace llvm {
+
+/// \brief Target independent information on a fixup kind.
+struct MCFixupKindInfo {
+ enum FixupKindFlags {
+    /// Is this fixup kind PC-relative? This is used by the assembler backend to
+ /// evaluate fixup values in a target independent manner when possible.
+ FKF_IsPCRel = (1 << 0),
+
+ /// Should this fixup kind force a 4-byte aligned effective PC value?
+ FKF_IsAlignedDownTo32Bits = (1 << 1)
+ };
+
+ /// A target specific name for the fixup kind. The names will be unique for
+ /// distinct kinds on any given target.
+ const char *Name;
+
+ /// The bit offset to write the relocation into.
+ unsigned TargetOffset;
+
+ /// The number of bits written by this fixup. The bits are assumed to be
+ /// contiguous.
+ unsigned TargetSize;
+
+ /// Flags describing additional information on this fixup kind.
+ unsigned Flags;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCFragment.h b/contrib/llvm/include/llvm/MC/MCFragment.h
new file mode 100644
index 0000000..7d6db52
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCFragment.h
@@ -0,0 +1,506 @@
+//===- MCFragment.h - Fragment type hierarchy -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCFRAGMENT_H
+#define LLVM_MC_MCFRAGMENT_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+
+namespace llvm {
+class MCSection;
+class MCSymbol;
+class MCSubtargetInfo;
+
+class MCFragment : public ilist_node_with_parent<MCFragment, MCSection> {
+ friend class MCAsmLayout;
+
+ MCFragment(const MCFragment &) = delete;
+ void operator=(const MCFragment &) = delete;
+
+public:
+ enum FragmentType : uint8_t {
+ FT_Align,
+ FT_Data,
+ FT_CompactEncodedInst,
+ FT_Fill,
+ FT_Relaxable,
+ FT_Org,
+ FT_Dwarf,
+ FT_DwarfFrame,
+ FT_LEB,
+ FT_SafeSEH,
+ FT_Dummy
+ };
+
+private:
+ FragmentType Kind;
+
+protected:
+ bool HasInstructions;
+
+private:
+ /// \brief Should this fragment be aligned to the end of a bundle?
+ bool AlignToBundleEnd;
+
+ uint8_t BundlePadding;
+
+ /// LayoutOrder - The layout order of this fragment.
+ unsigned LayoutOrder;
+
+ /// The data for the section this fragment is in.
+ MCSection *Parent;
+
+  /// Atom - The atom this fragment is in, as represented by its defining
+ /// symbol.
+ const MCSymbol *Atom;
+
+ /// \name Assembler Backend Data
+ /// @{
+ //
+ // FIXME: This could all be kept private to the assembler implementation.
+
+ /// Offset - The offset of this fragment in its section. This is ~0 until
+ /// initialized.
+ uint64_t Offset;
+
+ /// @}
+
+protected:
+ MCFragment(FragmentType Kind, bool HasInstructions,
+ uint8_t BundlePadding, MCSection *Parent = nullptr);
+
+ ~MCFragment();
+private:
+
+  // This is a friend so that the sentinel can be created.
+ friend struct ilist_sentinel_traits<MCFragment>;
+ MCFragment();
+
+public:
+ /// Destroys the current fragment.
+ ///
+ /// This must be used instead of delete as MCFragment is non-virtual.
+ /// This method will dispatch to the appropriate subclass.
+ void destroy();
+
+ FragmentType getKind() const { return Kind; }
+
+ MCSection *getParent() const { return Parent; }
+ void setParent(MCSection *Value) { Parent = Value; }
+
+ const MCSymbol *getAtom() const { return Atom; }
+ void setAtom(const MCSymbol *Value) { Atom = Value; }
+
+ unsigned getLayoutOrder() const { return LayoutOrder; }
+ void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
+ /// \brief Does this fragment have instructions emitted into it? By default
+ /// this is false, but specific fragment types may set it to true.
+ bool hasInstructions() const { return HasInstructions; }
+
+ /// \brief Should this fragment be placed at the end of an aligned bundle?
+ bool alignToBundleEnd() const { return AlignToBundleEnd; }
+ void setAlignToBundleEnd(bool V) { AlignToBundleEnd = V; }
+
+ /// \brief Get the padding size that must be inserted before this fragment.
+ /// Used for bundling. By default, no padding is inserted.
+ /// Note that padding size is restricted to 8 bits. This is an optimization
+ /// to reduce the amount of space used for each fragment. In practice, larger
+ /// padding should never be required.
+ uint8_t getBundlePadding() const { return BundlePadding; }
+
+  /// \brief Set the padding size that must be inserted before this fragment
+  /// when bundling; see getBundlePadding() for the 8-bit size restriction.
+ void setBundlePadding(uint8_t N) { BundlePadding = N; }
+
+  /// \brief Return true if this fragment has FT_Dummy type.
+ bool isDummy() const { return Kind == FT_Dummy; }
+
+ void dump();
+};
+
+class MCDummyFragment : public MCFragment {
+public:
+ explicit MCDummyFragment(MCSection *Sec)
+      : MCFragment(FT_Dummy, false, 0, Sec) {}
+ static bool classof(const MCFragment *F) { return F->getKind() == FT_Dummy; }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data.
+///
+class MCEncodedFragment : public MCFragment {
+protected:
+ MCEncodedFragment(MCFragment::FragmentType FType, bool HasInstructions,
+ MCSection *Sec)
+ : MCFragment(FType, HasInstructions, 0, Sec) {}
+
+public:
+ static bool classof(const MCFragment *F) {
+ MCFragment::FragmentType Kind = F->getKind();
+ switch (Kind) {
+ default:
+ return false;
+ case MCFragment::FT_Relaxable:
+ case MCFragment::FT_CompactEncodedInst:
+ case MCFragment::FT_Data:
+ return true;
+ }
+ }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data.
+///
+template<unsigned ContentsSize>
+class MCEncodedFragmentWithContents : public MCEncodedFragment {
+ SmallVector<char, ContentsSize> Contents;
+
+protected:
+ MCEncodedFragmentWithContents(MCFragment::FragmentType FType,
+ bool HasInstructions,
+ MCSection *Sec)
+ : MCEncodedFragment(FType, HasInstructions, Sec) {}
+
+public:
+ SmallVectorImpl<char> &getContents() { return Contents; }
+ const SmallVectorImpl<char> &getContents() const { return Contents; }
+};
+
+/// Interface implemented by fragments that contain encoded instructions and/or
+/// data and also have fixups registered.
+///
+template<unsigned ContentsSize, unsigned FixupsSize>
+class MCEncodedFragmentWithFixups :
+ public MCEncodedFragmentWithContents<ContentsSize> {
+
+ /// Fixups - The list of fixups in this fragment.
+ SmallVector<MCFixup, FixupsSize> Fixups;
+
+protected:
+ MCEncodedFragmentWithFixups(MCFragment::FragmentType FType,
+ bool HasInstructions,
+ MCSection *Sec)
+ : MCEncodedFragmentWithContents<ContentsSize>(FType, HasInstructions,
+ Sec) {}
+
+public:
+ typedef SmallVectorImpl<MCFixup>::const_iterator const_fixup_iterator;
+ typedef SmallVectorImpl<MCFixup>::iterator fixup_iterator;
+
+ SmallVectorImpl<MCFixup> &getFixups() { return Fixups; }
+ const SmallVectorImpl<MCFixup> &getFixups() const { return Fixups; }
+
+ fixup_iterator fixup_begin() { return Fixups.begin(); }
+ const_fixup_iterator fixup_begin() const { return Fixups.begin(); }
+
+ fixup_iterator fixup_end() { return Fixups.end(); }
+ const_fixup_iterator fixup_end() const { return Fixups.end(); }
+
+ static bool classof(const MCFragment *F) {
+ MCFragment::FragmentType Kind = F->getKind();
+ return Kind == MCFragment::FT_Relaxable || Kind == MCFragment::FT_Data;
+ }
+};
+
+/// Fragment for data and encoded instructions.
+///
+class MCDataFragment : public MCEncodedFragmentWithFixups<32, 4> {
+public:
+ MCDataFragment(MCSection *Sec = nullptr)
+ : MCEncodedFragmentWithFixups<32, 4>(FT_Data, false, Sec) {}
+
+ void setHasInstructions(bool V) { HasInstructions = V; }
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Data;
+ }
+};
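
Roughly how an object streamer fills one of these fragments: bytes go into
getContents() and each fixup is recorded against its byte offset. The helper
below is a sketch, not streamer code.

#include "llvm/MC/MCFragment.h"

using namespace llvm;

// Append a 4-byte placeholder and register a fixup covering those bytes.
static void emitWord(MCDataFragment &DF, const MCExpr *Value) {
  uint32_t Offset = DF.getContents().size();
  DF.getContents().append(4, '\0');
  DF.getFixups().push_back(MCFixup::create(Offset, Value, FK_Data_4));
}
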
+
+/// This is a compact (memory-size-wise) fragment for holding an encoded
+/// instruction (non-relaxable) that has no fixups registered. When applicable,
+/// it can be used instead of MCDataFragment and lead to lower memory
+/// consumption.
+///
+class MCCompactEncodedInstFragment : public MCEncodedFragmentWithContents<4> {
+public:
+ MCCompactEncodedInstFragment(MCSection *Sec = nullptr)
+ : MCEncodedFragmentWithContents(FT_CompactEncodedInst, true, Sec) {
+ }
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_CompactEncodedInst;
+ }
+};
+
+/// A relaxable fragment holds on to its MCInst, since it may need to be
+/// relaxed during the assembler layout and relaxation stage.
+///
+class MCRelaxableFragment : public MCEncodedFragmentWithFixups<8, 1> {
+
+ /// Inst - The instruction this is a fragment for.
+ MCInst Inst;
+
+ /// STI - The MCSubtargetInfo in effect when the instruction was encoded.
+ const MCSubtargetInfo &STI;
+
+public:
+ MCRelaxableFragment(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCSection *Sec = nullptr)
+ : MCEncodedFragmentWithFixups(FT_Relaxable, true, Sec),
+ Inst(Inst), STI(STI) {}
+
+ const MCInst &getInst() const { return Inst; }
+ void setInst(const MCInst &Value) { Inst = Value; }
+
+ const MCSubtargetInfo &getSubtargetInfo() { return STI; }
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Relaxable;
+ }
+};
+
+class MCAlignFragment : public MCFragment {
+
+ /// Alignment - The alignment to ensure, in bytes.
+ unsigned Alignment;
+
+ /// EmitNops - Flag to indicate that (optimal) NOPs should be emitted instead
+ /// of using the provided value. The exact interpretation of this flag is
+ /// target dependent.
+ bool EmitNops : 1;
+
+ /// Value - Value to use for filling padding bytes.
+ int64_t Value;
+
+  /// ValueSize - The size (in bytes) of the integer \p Value.
+ unsigned ValueSize;
+
+ /// MaxBytesToEmit - The maximum number of bytes to emit; if the alignment
+ /// cannot be satisfied in this width then this fragment is ignored.
+ unsigned MaxBytesToEmit;
+
+public:
+ MCAlignFragment(unsigned Alignment, int64_t Value, unsigned ValueSize,
+ unsigned MaxBytesToEmit, MCSection *Sec = nullptr)
+ : MCFragment(FT_Align, false, 0, Sec), Alignment(Alignment),
+ EmitNops(false), Value(Value),
+ ValueSize(ValueSize), MaxBytesToEmit(MaxBytesToEmit) {}
+
+ /// \name Accessors
+ /// @{
+
+ unsigned getAlignment() const { return Alignment; }
+
+ int64_t getValue() const { return Value; }
+
+ unsigned getValueSize() const { return ValueSize; }
+
+ unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
+
+ bool hasEmitNops() const { return EmitNops; }
+ void setEmitNops(bool Value) { EmitNops = Value; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Align;
+ }
+};
+
+class MCFillFragment : public MCFragment {
+
+ /// Value - Value to use for filling bytes.
+ int64_t Value;
+
+ /// ValueSize - The size (in bytes) of \p Value to use when filling, or 0 if
+ /// this is a virtual fill fragment.
+ unsigned ValueSize;
+
+ /// Size - The number of bytes to insert.
+ uint64_t Size;
+
+public:
+ MCFillFragment(int64_t Value, unsigned ValueSize, uint64_t Size,
+ MCSection *Sec = nullptr)
+ : MCFragment(FT_Fill, false, 0, Sec), Value(Value), ValueSize(ValueSize),
+ Size(Size) {
+ assert((!ValueSize || (Size % ValueSize) == 0) &&
+ "Fill size must be a multiple of the value size!");
+ }
+
+ /// \name Accessors
+ /// @{
+
+ int64_t getValue() const { return Value; }
+
+ unsigned getValueSize() const { return ValueSize; }
+
+ uint64_t getSize() const { return Size; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Fill;
+ }
+};
+
+class MCOrgFragment : public MCFragment {
+
+ /// Offset - The offset this fragment should start at.
+ const MCExpr *Offset;
+
+ /// Value - Value to use for filling bytes.
+ int8_t Value;
+
+public:
+ MCOrgFragment(const MCExpr &Offset, int8_t Value, MCSection *Sec = nullptr)
+ : MCFragment(FT_Org, false, 0, Sec), Offset(&Offset), Value(Value) {}
+
+ /// \name Accessors
+ /// @{
+
+ const MCExpr &getOffset() const { return *Offset; }
+
+ uint8_t getValue() const { return Value; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Org;
+ }
+};
+
+class MCLEBFragment : public MCFragment {
+
+ /// Value - The value this fragment should contain.
+ const MCExpr *Value;
+
+ /// IsSigned - True if this is a sleb128, false if uleb128.
+ bool IsSigned;
+
+ SmallString<8> Contents;
+
+public:
+ MCLEBFragment(const MCExpr &Value_, bool IsSigned_, MCSection *Sec = nullptr)
+ : MCFragment(FT_LEB, false, 0, Sec), Value(&Value_), IsSigned(IsSigned_) {
+ Contents.push_back(0);
+ }
+
+ /// \name Accessors
+ /// @{
+
+ const MCExpr &getValue() const { return *Value; }
+
+ bool isSigned() const { return IsSigned; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_LEB;
+ }
+};
+
+class MCDwarfLineAddrFragment : public MCFragment {
+
+  /// LineDelta - The difference between the line numbers of two .loc dwarf
+  /// directives.
+ int64_t LineDelta;
+
+ /// AddrDelta - The expression for the difference of the two symbols that
+ /// make up the address delta between two .loc dwarf directives.
+ const MCExpr *AddrDelta;
+
+ SmallString<8> Contents;
+
+public:
+ MCDwarfLineAddrFragment(int64_t LineDelta, const MCExpr &AddrDelta,
+ MCSection *Sec = nullptr)
+ : MCFragment(FT_Dwarf, false, 0, Sec), LineDelta(LineDelta),
+ AddrDelta(&AddrDelta) {
+ Contents.push_back(0);
+ }
+
+ /// \name Accessors
+ /// @{
+
+ int64_t getLineDelta() const { return LineDelta; }
+
+ const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_Dwarf;
+ }
+};
+
+class MCDwarfCallFrameFragment : public MCFragment {
+
+ /// AddrDelta - The expression for the difference of the two symbols that
+ /// make up the address delta between two .cfi_* dwarf directives.
+ const MCExpr *AddrDelta;
+
+ SmallString<8> Contents;
+
+public:
+ MCDwarfCallFrameFragment(const MCExpr &AddrDelta, MCSection *Sec = nullptr)
+ : MCFragment(FT_DwarfFrame, false, 0, Sec), AddrDelta(&AddrDelta) {
+ Contents.push_back(0);
+ }
+
+ /// \name Accessors
+ /// @{
+
+ const MCExpr &getAddrDelta() const { return *AddrDelta; }
+
+ SmallString<8> &getContents() { return Contents; }
+ const SmallString<8> &getContents() const { return Contents; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_DwarfFrame;
+ }
+};
+
+class MCSafeSEHFragment : public MCFragment {
+ const MCSymbol *Sym;
+
+public:
+ MCSafeSEHFragment(const MCSymbol *Sym, MCSection *Sec = nullptr)
+ : MCFragment(FT_SafeSEH, false, 0, Sec), Sym(Sym) {}
+
+ /// \name Accessors
+ /// @{
+
+ const MCSymbol *getSymbol() { return Sym; }
+ const MCSymbol *getSymbol() const { return Sym; }
+
+ /// @}
+
+ static bool classof(const MCFragment *F) {
+ return F->getKind() == MCFragment::FT_SafeSEH;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCInst.h b/contrib/llvm/include/llvm/MC/MCInst.h
new file mode 100644
index 0000000..4688b5f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInst.h
@@ -0,0 +1,205 @@
+//===-- llvm/MC/MCInst.h - MCInst class -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declarations of the MCInst and MCOperand classes,
+// which together form the basic representation of low-level machine code
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINST_H
+#define LLVM_MC_MCINST_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+class raw_ostream;
+class MCAsmInfo;
+class MCInstPrinter;
+class MCExpr;
+class MCInst;
+
+/// \brief Instances of this class represent operands of the MCInst class.
+/// This is a simple discriminated union.
+class MCOperand {
+ enum MachineOperandType : unsigned char {
+ kInvalid, ///< Uninitialized.
+ kRegister, ///< Register operand.
+ kImmediate, ///< Immediate operand.
+ kFPImmediate, ///< Floating-point immediate operand.
+ kExpr, ///< Relocatable immediate operand.
+ kInst ///< Sub-instruction operand.
+ };
+ MachineOperandType Kind;
+
+ union {
+ unsigned RegVal;
+ int64_t ImmVal;
+ double FPImmVal;
+ const MCExpr *ExprVal;
+ const MCInst *InstVal;
+ };
+
+public:
+ MCOperand() : Kind(kInvalid), FPImmVal(0.0) {}
+
+ bool isValid() const { return Kind != kInvalid; }
+ bool isReg() const { return Kind == kRegister; }
+ bool isImm() const { return Kind == kImmediate; }
+ bool isFPImm() const { return Kind == kFPImmediate; }
+ bool isExpr() const { return Kind == kExpr; }
+ bool isInst() const { return Kind == kInst; }
+
+ /// \brief Returns the register number.
+ unsigned getReg() const {
+ assert(isReg() && "This is not a register operand!");
+ return RegVal;
+ }
+
+ /// \brief Set the register number.
+ void setReg(unsigned Reg) {
+ assert(isReg() && "This is not a register operand!");
+ RegVal = Reg;
+ }
+
+ int64_t getImm() const {
+ assert(isImm() && "This is not an immediate");
+ return ImmVal;
+ }
+ void setImm(int64_t Val) {
+ assert(isImm() && "This is not an immediate");
+ ImmVal = Val;
+ }
+
+ double getFPImm() const {
+ assert(isFPImm() && "This is not an FP immediate");
+ return FPImmVal;
+ }
+
+ void setFPImm(double Val) {
+ assert(isFPImm() && "This is not an FP immediate");
+ FPImmVal = Val;
+ }
+
+ const MCExpr *getExpr() const {
+ assert(isExpr() && "This is not an expression");
+ return ExprVal;
+ }
+ void setExpr(const MCExpr *Val) {
+ assert(isExpr() && "This is not an expression");
+ ExprVal = Val;
+ }
+
+ const MCInst *getInst() const {
+ assert(isInst() && "This is not a sub-instruction");
+ return InstVal;
+ }
+ void setInst(const MCInst *Val) {
+ assert(isInst() && "This is not a sub-instruction");
+ InstVal = Val;
+ }
+
+ static MCOperand createReg(unsigned Reg) {
+ MCOperand Op;
+ Op.Kind = kRegister;
+ Op.RegVal = Reg;
+ return Op;
+ }
+ static MCOperand createImm(int64_t Val) {
+ MCOperand Op;
+ Op.Kind = kImmediate;
+ Op.ImmVal = Val;
+ return Op;
+ }
+ static MCOperand createFPImm(double Val) {
+ MCOperand Op;
+ Op.Kind = kFPImmediate;
+ Op.FPImmVal = Val;
+ return Op;
+ }
+ static MCOperand createExpr(const MCExpr *Val) {
+ MCOperand Op;
+ Op.Kind = kExpr;
+ Op.ExprVal = Val;
+ return Op;
+ }
+ static MCOperand createInst(const MCInst *Val) {
+ MCOperand Op;
+ Op.Kind = kInst;
+ Op.InstVal = Val;
+ return Op;
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
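
The union above is tag-checked: each create* sets Kind, and the getters assert
it. A tiny sketch, where register number 5 is arbitrary:

#include "llvm/MC/MCInst.h"
#include <cassert>

using namespace llvm;

// Exercise the discriminated union through its checked accessors.
static void demoOperands() {
  MCOperand R = MCOperand::createReg(5); // 5: hypothetical register number
  assert(R.isReg() && R.getReg() == 5);
  R.setReg(6); // allowed: still a register operand
  MCOperand I = MCOperand::createImm(-1);
  assert(I.isImm() && I.getImm() == -1);
  (void)R;
  (void)I;
}
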
+
+template <> struct isPodLike<MCOperand> { static const bool value = true; };
+
+/// \brief Instances of this class represent a single low-level machine
+/// instruction.
+class MCInst {
+ unsigned Opcode;
+ SMLoc Loc;
+ SmallVector<MCOperand, 8> Operands;
+
+public:
+ MCInst() : Opcode(0) {}
+
+ void setOpcode(unsigned Op) { Opcode = Op; }
+ unsigned getOpcode() const { return Opcode; }
+
+ void setLoc(SMLoc loc) { Loc = loc; }
+ SMLoc getLoc() const { return Loc; }
+
+ const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
+ MCOperand &getOperand(unsigned i) { return Operands[i]; }
+ unsigned getNumOperands() const { return Operands.size(); }
+
+ void addOperand(const MCOperand &Op) { Operands.push_back(Op); }
+
+ typedef SmallVectorImpl<MCOperand>::iterator iterator;
+ typedef SmallVectorImpl<MCOperand>::const_iterator const_iterator;
+ void clear() { Operands.clear(); }
+ void erase(iterator I) { Operands.erase(I); }
+ size_t size() const { return Operands.size(); }
+ iterator begin() { return Operands.begin(); }
+ const_iterator begin() const { return Operands.begin(); }
+ iterator end() { return Operands.end(); }
+ const_iterator end() const { return Operands.end(); }
+ iterator insert(iterator I, const MCOperand &Op) {
+ return Operands.insert(I, Op);
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Dump the MCInst as prettily as possible using the additional MC
+  /// structures, if given. Operands are separated by the \p Separator
+ /// string.
+ void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer = nullptr,
+ StringRef Separator = " ") const;
+};
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCOperand &MO) {
+ MO.print(OS);
+ return OS;
+}
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCInst &MI) {
+ MI.print(OS);
+ return OS;
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCInstBuilder.h b/contrib/llvm/include/llvm/MC/MCInstBuilder.h
new file mode 100644
index 0000000..30609bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstBuilder.h
@@ -0,0 +1,74 @@
+//===-- llvm/MC/MCInstBuilder.h - Simplify creation of MCInsts --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the MCInstBuilder class for convenient creation of
+// MCInsts.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTBUILDER_H
+#define LLVM_MC_MCINSTBUILDER_H
+
+#include "llvm/MC/MCInst.h"
+
+namespace llvm {
+
+class MCInstBuilder {
+ MCInst Inst;
+
+public:
+ /// \brief Create a new MCInstBuilder for an MCInst with a specific opcode.
+ MCInstBuilder(unsigned Opcode) {
+ Inst.setOpcode(Opcode);
+ }
+
+ /// \brief Add a new register operand.
+ MCInstBuilder &addReg(unsigned Reg) {
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return *this;
+ }
+
+ /// \brief Add a new integer immediate operand.
+ MCInstBuilder &addImm(int64_t Val) {
+ Inst.addOperand(MCOperand::createImm(Val));
+ return *this;
+ }
+
+ /// \brief Add a new floating point immediate operand.
+ MCInstBuilder &addFPImm(double Val) {
+ Inst.addOperand(MCOperand::createFPImm(Val));
+ return *this;
+ }
+
+ /// \brief Add a new MCExpr operand.
+ MCInstBuilder &addExpr(const MCExpr *Val) {
+ Inst.addOperand(MCOperand::createExpr(Val));
+ return *this;
+ }
+
+ /// \brief Add a new MCInst operand.
+ MCInstBuilder &addInst(const MCInst *Val) {
+ Inst.addOperand(MCOperand::createInst(Val));
+ return *this;
+ }
+
+ /// \brief Add an operand.
+ MCInstBuilder &addOperand(const MCOperand &Op) {
+ Inst.addOperand(Op);
+ return *this;
+ }
+
+ operator MCInst&() {
+ return Inst;
+ }
+};
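
Typical chained use; the opcode and register arguments would normally be
generated target enums (for example X86::MOV32ri and X86::EAX), so plain
unsigned values stand in here.

#include "llvm/MC/MCInstBuilder.h"

using namespace llvm;

// Build a "mov DstReg, Imm" style instruction in one expression. The implicit
// conversion to MCInst& lets the result feed APIs that take an MCInst.
static MCInst buildMoveImm(unsigned MovOpc, unsigned DstReg, int64_t Imm) {
  return MCInstBuilder(MovOpc).addReg(DstReg).addImm(Imm);
}
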
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCInstPrinter.h b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
new file mode 100644
index 0000000..0eafd02
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstPrinter.h
@@ -0,0 +1,108 @@
+//===- MCInstPrinter.h - MCInst to target assembly syntax -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTPRINTER_H
+#define LLVM_MC_MCINSTPRINTER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Format.h"
+
+namespace llvm {
+class MCInst;
+class raw_ostream;
+class MCAsmInfo;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class StringRef;
+
+/// Convert `Bytes' to a hex string and output to `OS'.
+void dumpBytes(ArrayRef<uint8_t> Bytes, raw_ostream &OS);
+
+namespace HexStyle {
+enum Style {
+ C, ///< 0xff
+ Asm ///< 0ffh
+};
+}
+
+/// \brief This is an instance of a target assembly language printer that
+/// converts an MCInst to valid target assembly syntax.
+class MCInstPrinter {
+protected:
+ /// \brief A stream that comments can be emitted to if desired. Each comment
+ /// must end with a newline. This will be null if verbose assembly emission
+  /// is disabled.
+ raw_ostream *CommentStream;
+ const MCAsmInfo &MAI;
+ const MCInstrInfo &MII;
+ const MCRegisterInfo &MRI;
+
+ /// True if we are printing marked up assembly.
+ bool UseMarkup;
+
+ /// True if we are printing immediates as hex.
+ bool PrintImmHex;
+
+ /// Which style to use for printing hexadecimal values.
+ HexStyle::Style PrintHexStyle;
+
+ /// Utility function for printing annotations.
+ void printAnnotation(raw_ostream &OS, StringRef Annot);
+
+public:
+ MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii,
+ const MCRegisterInfo &mri)
+ : CommentStream(nullptr), MAI(mai), MII(mii), MRI(mri), UseMarkup(0),
+ PrintImmHex(0), PrintHexStyle(HexStyle::C) {}
+
+ virtual ~MCInstPrinter();
+
+ /// \brief Specify a stream to emit comments to.
+ void setCommentStream(raw_ostream &OS) { CommentStream = &OS; }
+
+ /// \brief Print the specified MCInst to the specified raw_ostream.
+ virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot,
+ const MCSubtargetInfo &STI) = 0;
+
+ /// \brief Return the name of the specified opcode enum (e.g. "MOV32ri") or
+ /// empty if we can't resolve it.
+ StringRef getOpcodeName(unsigned Opcode) const;
+
+ /// \brief Print the assembler register name.
+ virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
+
+ bool getUseMarkup() const { return UseMarkup; }
+ void setUseMarkup(bool Value) { UseMarkup = Value; }
+
+ /// Utility functions to make adding mark ups simpler.
+ StringRef markup(StringRef s) const;
+ StringRef markup(StringRef a, StringRef b) const;
+
+ bool getPrintImmHex() const { return PrintImmHex; }
+ void setPrintImmHex(bool Value) { PrintImmHex = Value; }
+
+ HexStyle::Style getPrintHexStyle() const { return PrintHexStyle; }
+ void setPrintHexStyle(HexStyle::Style Value) { PrintHexStyle = Value; }
+
+ /// Utility function to print immediates in decimal or hex.
+ format_object<int64_t> formatImm(int64_t Value) const {
+ return PrintImmHex ? formatHex(Value) : formatDec(Value);
+ }
+
+ /// Utility functions to print decimal/hexadecimal values.
+ format_object<int64_t> formatDec(int64_t Value) const;
+ format_object<int64_t> formatHex(int64_t Value) const;
+ format_object<uint64_t> formatHex(uint64_t Value) const;
+};
+
+} // namespace llvm
+
+#endif
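A sketch of configuring a printer before emitting disassembly; the printer instance is assumed to come from the target registry (e.g. Target::createMCInstPrinter):

#include "llvm/MC/MCInstPrinter.h"
#include "llvm/Support/raw_ostream.h"

// Route verbose-assembly comments to stderr and print immediates as C-style hex.
static void configurePrinter(llvm::MCInstPrinter &Printer) {
  Printer.setCommentStream(llvm::errs());
  Printer.setUseMarkup(true);                  // wrap operands in markup tags
  Printer.setPrintImmHex(true);                // immediates as hex...
  Printer.setPrintHexStyle(llvm::HexStyle::C); // ...in 0xff form
}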
diff --git a/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
new file mode 100644
index 0000000..8f5159e
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstrAnalysis.h
@@ -0,0 +1,71 @@
+//===-- llvm/MC/MCInstrAnalysis.h - InstrDesc target hooks ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MCInstrAnalysis class which the MCTargetDescs can
+// derive from to give additional information to MC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRANALYSIS_H
+#define LLVM_MC_MCINSTRANALYSIS_H
+
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
+
+namespace llvm {
+
+class MCInstrAnalysis {
+protected:
+ friend class Target;
+ const MCInstrInfo *Info;
+
+public:
+ MCInstrAnalysis(const MCInstrInfo *Info) : Info(Info) {}
+
+ virtual ~MCInstrAnalysis() {}
+
+ virtual bool isBranch(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isBranch();
+ }
+
+ virtual bool isConditionalBranch(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isConditionalBranch();
+ }
+
+ virtual bool isUnconditionalBranch(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isUnconditionalBranch();
+ }
+
+ virtual bool isIndirectBranch(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isIndirectBranch();
+ }
+
+ virtual bool isCall(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isCall();
+ }
+
+ virtual bool isReturn(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isReturn();
+ }
+
+ virtual bool isTerminator(const MCInst &Inst) const {
+ return Info->get(Inst.getOpcode()).isTerminator();
+ }
+
+ /// \brief Given a branch instruction try to get the address the branch
+ /// targets. Return true on success, and the address in Target.
+ virtual bool
+ evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
+ uint64_t &Target) const;
+};
+
+} // End llvm namespace
+
+#endif
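A sketch of how a disassembler client might use these hooks; the analysis object is assumed to come from the target registry:

#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Print the resolved target of a branch or call decoded at Addr with the
// given Size, falling back to a note when the target cannot be computed.
static void describeControlFlow(const llvm::MCInstrAnalysis &MIA,
                                const llvm::MCInst &Inst, uint64_t Addr,
                                uint64_t Size, llvm::raw_ostream &OS) {
  if (!MIA.isBranch(Inst) && !MIA.isCall(Inst))
    return;
  uint64_t Target;
  if (MIA.evaluateBranch(Inst, Addr, Size, Target)) {
    OS << "transfers to 0x";
    OS.write_hex(Target);
    OS << "\n";
  } else {
    OS << "transfer with unresolved target\n";
  }
}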
diff --git a/contrib/llvm/include/llvm/MC/MCInstrDesc.h b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
new file mode 100644
index 0000000..88aab73
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstrDesc.h
@@ -0,0 +1,556 @@
+//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MCOperandInfo and MCInstrDesc classes, which
+// are used to describe target instructions and their operands.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRDESC_H
+#define LLVM_MC_MCINSTRDESC_H
+
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+ class MCInst;
+ class MCSubtargetInfo;
+ class FeatureBitset;
+
+//===----------------------------------------------------------------------===//
+// Machine Operand Flags and Description
+//===----------------------------------------------------------------------===//
+
+namespace MCOI {
+// Operand constraints
+enum OperandConstraint {
+ TIED_TO = 0, // Must be allocated the same register as.
+ EARLY_CLOBBER // Operand is an early clobber register operand
+};
+
+/// \brief These are flags set on operands, but should be considered
+/// private; all access should go through the MCOperandInfo accessors.
+/// See the accessors for a description of what these are.
+enum OperandFlags { LookupPtrRegClass = 0, Predicate, OptionalDef };
+
+/// \brief Operands are tagged with one of the values of this enum.
+enum OperandType {
+ OPERAND_UNKNOWN = 0,
+ OPERAND_IMMEDIATE = 1,
+ OPERAND_REGISTER = 2,
+ OPERAND_MEMORY = 3,
+ OPERAND_PCREL = 4,
+ OPERAND_FIRST_TARGET = 5
+};
+}
+
+/// \brief This holds information about one operand of a machine instruction,
+/// indicating the register class for register operands, etc.
+class MCOperandInfo {
+public:
+ /// \brief This specifies the register class enumeration of the operand
+ /// if the operand is a register. If isLookupPtrRegClass is set, then this is
+ /// an index that is passed to TargetRegisterInfo::getPointerRegClass(x) to
+ /// get a dynamic register class.
+ int16_t RegClass;
+
+ /// \brief These are flags from the MCOI::OperandFlags enum.
+ uint8_t Flags;
+
+ /// \brief Information about the type of the operand.
+ uint8_t OperandType;
+ /// \brief The lower 16 bits are used to specify which constraints are set.
+ /// The higher 16 bits are used to specify the value of constraints (4 bits
+ /// each).
+ uint32_t Constraints;
+
+ /// \brief Set if this operand is a pointer value and it requires a callback
+ /// to look up its register class.
+ bool isLookupPtrRegClass() const {
+ return Flags & (1 << MCOI::LookupPtrRegClass);
+ }
+
+  /// \brief Set if this operand is one of the operands that make up the
+  /// predicate operand that controls an isPredicable() instruction.
+ bool isPredicate() const { return Flags & (1 << MCOI::Predicate); }
+
+  /// \brief Set if this operand is an optional def.
+ bool isOptionalDef() const { return Flags & (1 << MCOI::OptionalDef); }
+};
+
+//===----------------------------------------------------------------------===//
+// Machine Instruction Flags and Description
+//===----------------------------------------------------------------------===//
+
+namespace MCID {
+/// \brief These should be considered private to the implementation of the
+/// MCInstrDesc class. Clients should use the predicate methods on MCInstrDesc,
+/// not use these directly. These all correspond to bitfields in the
+/// MCInstrDesc::Flags field.
+enum Flag {
+ Variadic = 0,
+ HasOptionalDef,
+ Pseudo,
+ Return,
+ Call,
+ Barrier,
+ Terminator,
+ Branch,
+ IndirectBranch,
+ Compare,
+ MoveImm,
+ Bitcast,
+ Select,
+ DelaySlot,
+ FoldableAsLoad,
+ MayLoad,
+ MayStore,
+ Predicable,
+ NotDuplicable,
+ UnmodeledSideEffects,
+ Commutable,
+ ConvertibleTo3Addr,
+ UsesCustomInserter,
+ HasPostISelHook,
+ Rematerializable,
+ CheapAsAMove,
+ ExtraSrcRegAllocReq,
+ ExtraDefRegAllocReq,
+ RegSequence,
+ ExtractSubreg,
+ InsertSubreg,
+ Convergent
+};
+}
+
+/// \brief Describe properties that are true of each instruction in the target
+/// description file. This captures information about side effects, register
+/// use and many other things. There is one instance of this struct for each
+/// target instruction class, and the MachineInstr class points to this struct
+/// directly to describe itself.
+class MCInstrDesc {
+public:
+ unsigned short Opcode; // The opcode number
+ unsigned short NumOperands; // Num of args (may be more if variable_ops)
+ unsigned char NumDefs; // Num of args that are definitions
+ unsigned char Size; // Number of bytes in encoding.
+ unsigned short SchedClass; // enum identifying instr sched class
+ uint64_t Flags; // Flags identifying machine instr class
+ uint64_t TSFlags; // Target Specific Flag values
+ const MCPhysReg *ImplicitUses; // Registers implicitly read by this instr
+ const MCPhysReg *ImplicitDefs; // Registers implicitly defined by this instr
+ const MCOperandInfo *OpInfo; // 'NumOperands' entries about operands
+ // Subtarget feature that this is deprecated on, if any
+ // -1 implies this is not deprecated by any single feature. It may still be
+ // deprecated due to a "complex" reason, below.
+ int64_t DeprecatedFeature;
+
+  // A complex method to determine whether a certain instruction is deprecated
+  // or not, and to return the reason for deprecation.
+ bool (*ComplexDeprecationInfo)(MCInst &, const MCSubtargetInfo &,
+ std::string &);
+
+ /// \brief Returns the value of the specific constraint if
+ /// it is set. Returns -1 if it is not set.
+ int getOperandConstraint(unsigned OpNum,
+ MCOI::OperandConstraint Constraint) const {
+ if (OpNum < NumOperands &&
+ (OpInfo[OpNum].Constraints & (1 << Constraint))) {
+ unsigned Pos = 16 + Constraint * 4;
+ return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf;
+ }
+ return -1;
+ }
+
+ /// \brief Returns true if a certain instruction is deprecated and if so
+ /// returns the reason in \p Info.
+ bool getDeprecatedInfo(MCInst &MI, const MCSubtargetInfo &STI,
+ std::string &Info) const;
+
+ /// \brief Return the opcode number for this descriptor.
+ unsigned getOpcode() const { return Opcode; }
+
+ /// \brief Return the number of declared MachineOperands for this
+ /// MachineInstruction. Note that variadic (isVariadic() returns true)
+ /// instructions may have additional operands at the end of the list, and note
+ /// that the machine instruction may include implicit register def/uses as
+ /// well.
+ unsigned getNumOperands() const { return NumOperands; }
+
+ /// \brief Return the number of MachineOperands that are register
+ /// definitions. Register definitions always occur at the start of the
+ /// machine operand list. This is the number of "outs" in the .td file,
+ /// and does not include implicit defs.
+ unsigned getNumDefs() const { return NumDefs; }
+
+ /// \brief Return flags of this instruction.
+ unsigned getFlags() const { return Flags; }
+
+ /// \brief Return true if this instruction can have a variable number of
+ /// operands. In this case, the variable operands will be after the normal
+ /// operands but before the implicit definitions and uses (if any are
+ /// present).
+ bool isVariadic() const { return Flags & (1 << MCID::Variadic); }
+
+ /// \brief Set if this instruction has an optional definition, e.g.
+  /// ARM instructions which can set the condition code if the 's' bit is set.
+ bool hasOptionalDef() const { return Flags & (1 << MCID::HasOptionalDef); }
+
+ /// \brief Return true if this is a pseudo instruction that doesn't
+ /// correspond to a real machine instruction.
+ bool isPseudo() const { return Flags & (1 << MCID::Pseudo); }
+
+ /// \brief Return true if the instruction is a return.
+ bool isReturn() const { return Flags & (1 << MCID::Return); }
+
+ /// \brief Return true if the instruction is a call.
+ bool isCall() const { return Flags & (1 << MCID::Call); }
+
+ /// \brief Returns true if the specified instruction stops control flow
+ /// from executing the instruction immediately following it. Examples include
+ /// unconditional branches and return instructions.
+ bool isBarrier() const { return Flags & (1 << MCID::Barrier); }
+
+  /// \brief Returns true if this instruction is part of the terminator for
+ /// a basic block. Typically this is things like return and branch
+ /// instructions.
+ ///
+ /// Various passes use this to insert code into the bottom of a basic block,
+ /// but before control flow occurs.
+ bool isTerminator() const { return Flags & (1 << MCID::Terminator); }
+
+ /// \brief Returns true if this is a conditional, unconditional, or
+ /// indirect branch. Predicates below can be used to discriminate between
+ /// these cases, and the TargetInstrInfo::AnalyzeBranch method can be used to
+ /// get more information.
+ bool isBranch() const { return Flags & (1 << MCID::Branch); }
+
+ /// \brief Return true if this is an indirect branch, such as a
+ /// branch through a register.
+ bool isIndirectBranch() const { return Flags & (1 << MCID::IndirectBranch); }
+
+ /// \brief Return true if this is a branch which may fall
+ /// through to the next instruction or may transfer control flow to some other
+ /// block. The TargetInstrInfo::AnalyzeBranch method can be used to get more
+ /// information about this branch.
+ bool isConditionalBranch() const {
+    return isBranch() && !isBarrier() && !isIndirectBranch();
+ }
+
+ /// \brief Return true if this is a branch which always
+ /// transfers control flow to some other block. The
+ /// TargetInstrInfo::AnalyzeBranch method can be used to get more information
+ /// about this branch.
+ bool isUnconditionalBranch() const {
+    return isBranch() && isBarrier() && !isIndirectBranch();
+ }
+
+ /// \brief Return true if this is a branch or an instruction which directly
+ /// writes to the program counter. Considered 'may' affect rather than
+ /// 'does' affect as things like predication are not taken into account.
+ bool mayAffectControlFlow(const MCInst &MI, const MCRegisterInfo &RI) const;
+
+ /// \brief Return true if this instruction has a predicate operand
+ /// that controls execution. It may be set to 'always', or may be set to other
+ /// values. There are various methods in TargetInstrInfo that can be used to
+ /// control and modify the predicate in this instruction.
+ bool isPredicable() const { return Flags & (1 << MCID::Predicable); }
+
+ /// \brief Return true if this instruction is a comparison.
+ bool isCompare() const { return Flags & (1 << MCID::Compare); }
+
+ /// \brief Return true if this instruction is a move immediate
+ /// (including conditional moves) instruction.
+ bool isMoveImmediate() const { return Flags & (1 << MCID::MoveImm); }
+
+ /// \brief Return true if this instruction is a bitcast instruction.
+ bool isBitcast() const { return Flags & (1 << MCID::Bitcast); }
+
+ /// \brief Return true if this is a select instruction.
+ bool isSelect() const { return Flags & (1 << MCID::Select); }
+
+ /// \brief Return true if this instruction cannot be safely
+  /// duplicated. For example, if the instruction has unique labels attached
+ /// to it, duplicating it would cause multiple definition errors.
+ bool isNotDuplicable() const { return Flags & (1 << MCID::NotDuplicable); }
+
+ /// \brief Returns true if the specified instruction has a delay slot which
+ /// must be filled by the code generator.
+ bool hasDelaySlot() const { return Flags & (1 << MCID::DelaySlot); }
+
+ /// \brief Return true for instructions that can be folded as memory operands
+ /// in other instructions. The most common use for this is instructions that
+ /// are simple loads from memory that don't modify the loaded value in any
+ /// way, but it can also be used for instructions that can be expressed as
+ /// constant-pool loads, such as V_SETALLONES on x86, to allow them to be
+ /// folded when it is beneficial. This should only be set on instructions
+ /// that return a value in their only virtual register definition.
+ bool canFoldAsLoad() const { return Flags & (1 << MCID::FoldableAsLoad); }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic REG_SEQUENCE instructions.
+ /// E.g., on ARM,
+ /// dX VMOVDRR rY, rZ
+ /// is equivalent to
+ /// dX = REG_SEQUENCE rY, ssub_0, rZ, ssub_1.
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getRegSequenceLikeInputs has to be
+  /// overridden accordingly.
+ bool isRegSequenceLike() const { return Flags & (1 << MCID::RegSequence); }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic EXTRACT_SUBREG instructions.
+ /// E.g., on ARM,
+ /// rX, rY VMOVRRD dZ
+ /// is equivalent to two EXTRACT_SUBREG:
+ /// rX = EXTRACT_SUBREG dZ, ssub_0
+ /// rY = EXTRACT_SUBREG dZ, ssub_1
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getExtractSubregLikeInputs has to be
+  /// overridden accordingly.
+ bool isExtractSubregLike() const {
+ return Flags & (1 << MCID::ExtractSubreg);
+ }
+
+ /// \brief Return true if this instruction behaves
+ /// the same way as the generic INSERT_SUBREG instructions.
+ /// E.g., on ARM,
+ /// dX = VSETLNi32 dY, rZ, Imm
+ /// is equivalent to a INSERT_SUBREG:
+ /// dX = INSERT_SUBREG dY, rZ, translateImmToSubIdx(Imm)
+ ///
+ /// Note that for the optimizers to be able to take advantage of
+ /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be
+  /// overridden accordingly.
+ bool isInsertSubregLike() const { return Flags & (1 << MCID::InsertSubreg); }
+
+
+ /// \brief Return true if this instruction is convergent.
+ ///
+ /// Convergent instructions may not be made control-dependent on any
+ /// additional values.
+ bool isConvergent() const { return Flags & (1 << MCID::Convergent); }
+
+ //===--------------------------------------------------------------------===//
+ // Side Effect Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return true if this instruction could possibly read memory.
+ /// Instructions with this flag set are not necessarily simple load
+ /// instructions, they may load a value and modify it, for example.
+ bool mayLoad() const { return Flags & (1 << MCID::MayLoad); }
+
+ /// \brief Return true if this instruction could possibly modify memory.
+ /// Instructions with this flag set are not necessarily simple store
+ /// instructions, they may store a modified value based on their operands, or
+ /// may not actually modify anything, for example.
+ bool mayStore() const { return Flags & (1 << MCID::MayStore); }
+
+ /// \brief Return true if this instruction has side
+ /// effects that are not modeled by other flags. This does not return true
+ /// for instructions whose effects are captured by:
+ ///
+ /// 1. Their operand list and implicit definition/use list. Register use/def
+ /// info is explicit for instructions.
+ /// 2. Memory accesses. Use mayLoad/mayStore.
+ /// 3. Calling, branching, returning: use isCall/isReturn/isBranch.
+ ///
+ /// Examples of side effects would be modifying 'invisible' machine state like
+ /// a control register, flushing a cache, modifying a register invisible to
+ /// LLVM, etc.
+ bool hasUnmodeledSideEffects() const {
+ return Flags & (1 << MCID::UnmodeledSideEffects);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Flags that indicate whether an instruction can be modified by a method.
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Return true if this may be a 2- or 3-address instruction (of the
+ /// form "X = op Y, Z, ..."), which produces the same result if Y and Z are
+ /// exchanged. If this flag is set, then the
+ /// TargetInstrInfo::commuteInstruction method may be used to hack on the
+ /// instruction.
+ ///
+ /// Note that this flag may be set on instructions that are only commutable
+ /// sometimes. In these cases, the call to commuteInstruction will fail.
+ /// Also note that some instructions require non-trivial modification to
+ /// commute them.
+ bool isCommutable() const { return Flags & (1 << MCID::Commutable); }
+
+ /// \brief Return true if this is a 2-address instruction which can be changed
+ /// into a 3-address instruction if needed. Doing this transformation can be
+ /// profitable in the register allocator, because it means that the
+ /// instruction can use a 2-address form if possible, but degrade into a less
+ /// efficient form if the source and dest register cannot be assigned to the
+ /// same register. For example, this allows the x86 backend to turn a "shl
+ /// reg, 3" instruction into an LEA instruction, which is the same speed as
+ /// the shift but has bigger code size.
+ ///
+ /// If this returns true, then the target must implement the
+ /// TargetInstrInfo::convertToThreeAddress method for this instruction, which
+ /// is allowed to fail if the transformation isn't valid for this specific
+ /// instruction (e.g. shl reg, 4 on x86).
+ ///
+ bool isConvertibleTo3Addr() const {
+ return Flags & (1 << MCID::ConvertibleTo3Addr);
+ }
+
+ /// \brief Return true if this instruction requires custom insertion support
+ /// when the DAG scheduler is inserting it into a machine basic block. If
+ /// this is true for the instruction, it basically means that it is a pseudo
+ /// instruction used at SelectionDAG time that is expanded out into magic code
+ /// by the target when MachineInstrs are formed.
+ ///
+ /// If this is true, the TargetLoweringInfo::InsertAtEndOfBasicBlock method
+ /// is used to insert this into the MachineBasicBlock.
+ bool usesCustomInsertionHook() const {
+ return Flags & (1 << MCID::UsesCustomInserter);
+ }
+
+ /// \brief Return true if this instruction requires *adjustment* after
+ /// instruction selection by calling a target hook. For example, this can be
+ /// used to fill in ARM 's' optional operand depending on whether the
+ /// conditional flag register is used.
+ bool hasPostISelHook() const { return Flags & (1 << MCID::HasPostISelHook); }
+
+ /// \brief Returns true if this instruction is a candidate for remat. This
+ /// flag is only used in TargetInstrInfo method isTriviallyRematerializable.
+ ///
+ /// If this flag is set, the isReallyTriviallyReMaterializable()
+ /// or isReallyTriviallyReMaterializableGeneric methods are called to verify
+ /// the instruction is really rematable.
+ bool isRematerializable() const {
+ return Flags & (1 << MCID::Rematerializable);
+ }
+
+  /// \brief Returns true if this instruction has the same cost as (or less than)
+  /// a move instruction. This is useful during certain types of optimizations
+ /// (e.g., remat during two-address conversion or machine licm) where we would
+ /// like to remat or hoist the instruction, but not if it costs more than
+ /// moving the instruction into the appropriate register. Note, we are not
+ /// marking copies from and to the same register class with this flag.
+ ///
+ /// This method could be called by interface TargetInstrInfo::isAsCheapAsAMove
+ /// for different subtargets.
+ bool isAsCheapAsAMove() const { return Flags & (1 << MCID::CheapAsAMove); }
+
+  /// \brief Returns true if this instruction's source operands have special
+ /// register allocation requirements that are not captured by the operand
+ /// register classes. e.g. ARM::STRD's two source registers must be an even /
+ /// odd pair, ARM::STM registers have to be in ascending order. Post-register
+ /// allocation passes should not attempt to change allocations for sources of
+ /// instructions with this flag.
+ bool hasExtraSrcRegAllocReq() const {
+ return Flags & (1 << MCID::ExtraSrcRegAllocReq);
+ }
+
+  /// \brief Returns true if this instruction's def operands have special register
+ /// allocation requirements that are not captured by the operand register
+ /// classes. e.g. ARM::LDRD's two def registers must be an even / odd pair,
+ /// ARM::LDM registers have to be in ascending order. Post-register
+ /// allocation passes should not attempt to change allocations for definitions
+ /// of instructions with this flag.
+ bool hasExtraDefRegAllocReq() const {
+ return Flags & (1 << MCID::ExtraDefRegAllocReq);
+ }
+
+ /// \brief Return a list of registers that are potentially read by any
+ /// instance of this machine instruction. For example, on X86, the "adc"
+ /// instruction adds two register operands and adds the carry bit in from the
+ /// flags register. In this case, the instruction is marked as implicitly
+ /// reading the flags. Likewise, the variable shift instruction on X86 is
+ /// marked as implicitly reading the 'CL' register, which it always does.
+ ///
+ /// This method returns null if the instruction has no implicit uses.
+ const MCPhysReg *getImplicitUses() const { return ImplicitUses; }
+
+ /// \brief Return the number of implicit uses this instruction has.
+ unsigned getNumImplicitUses() const {
+ if (!ImplicitUses)
+ return 0;
+ unsigned i = 0;
+ for (; ImplicitUses[i]; ++i) /*empty*/
+ ;
+ return i;
+ }
+
+ /// \brief Return a list of registers that are potentially written by any
+ /// instance of this machine instruction. For example, on X86, many
+ /// instructions implicitly set the flags register. In this case, they are
+ /// marked as setting the FLAGS. Likewise, many instructions always deposit
+ /// their result in a physical register. For example, the X86 divide
+ /// instruction always deposits the quotient and remainder in the EAX/EDX
+ /// registers. For that instruction, this will return a list containing the
+ /// EAX/EDX/EFLAGS registers.
+ ///
+ /// This method returns null if the instruction has no implicit defs.
+ const MCPhysReg *getImplicitDefs() const { return ImplicitDefs; }
+
+  /// \brief Return the number of implicit defs this instruction has.
+ unsigned getNumImplicitDefs() const {
+ if (!ImplicitDefs)
+ return 0;
+ unsigned i = 0;
+ for (; ImplicitDefs[i]; ++i) /*empty*/
+ ;
+ return i;
+ }
+
+ /// \brief Return true if this instruction implicitly
+ /// uses the specified physical register.
+ bool hasImplicitUseOfPhysReg(unsigned Reg) const {
+ if (const MCPhysReg *ImpUses = ImplicitUses)
+ for (; *ImpUses; ++ImpUses)
+ if (*ImpUses == Reg)
+ return true;
+ return false;
+ }
+
+ /// \brief Return true if this instruction implicitly
+ /// defines the specified physical register.
+ bool hasImplicitDefOfPhysReg(unsigned Reg,
+ const MCRegisterInfo *MRI = nullptr) const;
+
+ /// \brief Return the scheduling class for this instruction. The
+ /// scheduling class is an index into the InstrItineraryData table. This
+ /// returns zero if there is no known scheduling information for the
+ /// instruction.
+ unsigned getSchedClass() const { return SchedClass; }
+
+ /// \brief Return the number of bytes in the encoding of this instruction,
+ /// or zero if the encoding size cannot be known from the opcode.
+ unsigned getSize() const { return Size; }
+
+ /// \brief Find the index of the first operand in the
+ /// operand list that is used to represent the predicate. It returns -1 if
+ /// none is found.
+ int findFirstPredOperandIdx() const {
+ if (isPredicable()) {
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (OpInfo[i].isPredicate())
+ return i;
+ }
+ return -1;
+ }
+
+private:
+
+ /// \brief Return true if this instruction defines the specified physical
+ /// register, either explicitly or implicitly.
+ bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg,
+ const MCRegisterInfo &RI) const;
+};
+
+} // end namespace llvm
+
+#endif
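Two small sketches of querying a descriptor, built only from the accessors above; the "movability" test is illustrative, not LLVM's actual hoisting legality check:

#include "llvm/MC/MCInstrDesc.h"

// Operand index that OpNum is tied to, or -1 if no TIED_TO constraint is
// set (decoded from the 4-bit constraint field described above).
static int getTiedOperand(const llvm::MCInstrDesc &Desc, unsigned OpNum) {
  return Desc.getOperandConstraint(OpNum, llvm::MCOI::TIED_TO);
}

// A coarse movability test combining several of the predicates above.
static bool looksTriviallyMovable(const llvm::MCInstrDesc &Desc) {
  return !Desc.mayLoad() && !Desc.mayStore() && !Desc.isTerminator() &&
         !Desc.hasUnmodeledSideEffects();
}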
diff --git a/contrib/llvm/include/llvm/MC/MCInstrInfo.h b/contrib/llvm/include/llvm/MC/MCInstrInfo.h
new file mode 100644
index 0000000..70c8658
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstrInfo.h
@@ -0,0 +1,59 @@
+//===-- llvm/MC/MCInstrInfo.h - Target Instruction Info ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target machine instruction set.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRINFO_H
+#define LLVM_MC_MCINSTRINFO_H
+
+#include "llvm/MC/MCInstrDesc.h"
+#include <cassert>
+
+namespace llvm {
+
+//---------------------------------------------------------------------------
+/// \brief Interface to description of machine instruction set.
+class MCInstrInfo {
+ const MCInstrDesc *Desc; // Raw array to allow static init'n
+ const unsigned *InstrNameIndices; // Array for name indices in InstrNameData
+ const char *InstrNameData; // Instruction name string pool
+ unsigned NumOpcodes; // Number of entries in the desc array
+
+public:
+ /// \brief Initialize MCInstrInfo, called by TableGen auto-generated routines.
+ /// *DO NOT USE*.
+ void InitMCInstrInfo(const MCInstrDesc *D, const unsigned *NI, const char *ND,
+ unsigned NO) {
+ Desc = D;
+ InstrNameIndices = NI;
+ InstrNameData = ND;
+ NumOpcodes = NO;
+ }
+
+ unsigned getNumOpcodes() const { return NumOpcodes; }
+
+ /// \brief Return the machine instruction descriptor that corresponds to the
+ /// specified instruction opcode.
+ const MCInstrDesc &get(unsigned Opcode) const {
+ assert(Opcode < NumOpcodes && "Invalid opcode!");
+ return Desc[Opcode];
+ }
+
+ /// \brief Returns the name for the instructions with the given opcode.
+ const char *getName(unsigned Opcode) const {
+ assert(Opcode < NumOpcodes && "Invalid opcode!");
+ return &InstrNameData[InstrNameIndices[Opcode]];
+ }
+};
+
+} // End llvm namespace
+
+#endif
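A sketch of iterating the opcode table; in practice the MCInstrInfo instance is produced by a TableGen'erated InitMCInstrInfo call via the target registry:

#include "llvm/MC/MCInstrInfo.h"
#include "llvm/Support/raw_ostream.h"

// Dump every known opcode number together with its name.
static void dumpOpcodeNames(const llvm::MCInstrInfo &MII,
                            llvm::raw_ostream &OS) {
  for (unsigned Op = 0, E = MII.getNumOpcodes(); Op != E; ++Op)
    OS << Op << "\t" << MII.getName(Op) << "\n";
}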
diff --git a/contrib/llvm/include/llvm/MC/MCInstrItineraries.h b/contrib/llvm/include/llvm/MC/MCInstrItineraries.h
new file mode 100644
index 0000000..b2871a9
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCInstrItineraries.h
@@ -0,0 +1,239 @@
+//===-- llvm/MC/MCInstrItineraries.h - Scheduling ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the structures used for instruction
+// itineraries, stages, and operand reads/writes. This is used by
+// schedulers to determine instruction stages and latencies.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCINSTRITINERARIES_H
+#define LLVM_MC_MCINSTRITINERARIES_H
+
+#include "llvm/MC/MCSchedule.h"
+#include <algorithm>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// These values represent a non-pipelined step in
+/// the execution of an instruction. Cycles represents the number of
+/// discrete time slots needed to complete the stage. Units represent
+/// the choice of functional units that can be used to complete the
+/// stage. E.g., IntUnit1, IntUnit2. NextCycles indicates how many
+/// cycles should elapse from the start of this stage to the start of
+/// the next stage in the itinerary. A value of -1 indicates that the
+/// next stage should start immediately after the current one.
+/// For example:
+///
+/// { 1, x, -1 }
+/// indicates that the stage occupies FU x for 1 cycle and that
+/// the next stage starts immediately after this one.
+///
+/// { 2, x|y, 1 }
+/// indicates that the stage occupies either FU x or FU y for 2
+/// consecutive cycles and that the next stage starts one cycle
+/// after this stage starts. That is, the stage requirements
+/// overlap in time.
+///
+/// { 1, x, 0 }
+/// indicates that the stage occupies FU x for 1 cycle and that
+/// the next stage starts in this same cycle. This can be used to
+/// indicate that the instruction requires multiple stages at the
+/// same time.
+///
+/// FU reservation can be of two different kinds:
+///  - FUs which the instruction actually requires.
+///  - FUs which the instruction just reserves. A reserved unit is not available
+///    for execution of other instructions. However, several instructions can
+///    reserve the same unit several times.
+/// These two kinds of unit reservation are used to model instruction domain
+/// change stalls, FUs using the same resource (e.g. the same register file), etc.
+
+struct InstrStage {
+ enum ReservationKinds {
+ Required = 0,
+ Reserved = 1
+ };
+
+ unsigned Cycles_; ///< Length of stage in machine cycles
+ unsigned Units_; ///< Choice of functional units
+ int NextCycles_; ///< Number of machine cycles to next stage
+ ReservationKinds Kind_; ///< Kind of the FU reservation
+
+ /// \brief Returns the number of cycles the stage is occupied.
+ unsigned getCycles() const {
+ return Cycles_;
+ }
+
+ /// \brief Returns the choice of FUs.
+ unsigned getUnits() const {
+ return Units_;
+ }
+
+ ReservationKinds getReservationKind() const {
+ return Kind_;
+ }
+
+ /// \brief Returns the number of cycles from the start of this stage to the
+ /// start of the next stage in the itinerary
+ unsigned getNextCycles() const {
+ return (NextCycles_ >= 0) ? (unsigned)NextCycles_ : Cycles_;
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+/// An itinerary represents the scheduling information for an instruction.
+/// This includes a set of stages occupied by the instruction and the pipeline
+/// cycle in which operands are read and written.
+///
+struct InstrItinerary {
+ int NumMicroOps; ///< # of micro-ops, -1 means it's variable
+ unsigned FirstStage; ///< Index of first stage in itinerary
+ unsigned LastStage; ///< Index of last + 1 stage in itinerary
+ unsigned FirstOperandCycle; ///< Index of first operand rd/wr
+ unsigned LastOperandCycle; ///< Index of last + 1 operand rd/wr
+};
+
+
+//===----------------------------------------------------------------------===//
+/// Itinerary data supplied by a subtarget to be used by a target.
+///
+class InstrItineraryData {
+public:
+ MCSchedModel SchedModel; ///< Basic machine properties.
+ const InstrStage *Stages; ///< Array of stages selected
+ const unsigned *OperandCycles; ///< Array of operand cycles selected
+  const unsigned *Forwardings;   ///< Array of pipeline forwarding paths
+ const InstrItinerary *Itineraries; ///< Array of itineraries selected
+
+ /// Ctors.
+ InstrItineraryData() : SchedModel(MCSchedModel::GetDefaultSchedModel()),
+ Stages(nullptr), OperandCycles(nullptr),
+ Forwardings(nullptr), Itineraries(nullptr) {}
+
+ InstrItineraryData(const MCSchedModel &SM, const InstrStage *S,
+ const unsigned *OS, const unsigned *F)
+ : SchedModel(SM), Stages(S), OperandCycles(OS), Forwardings(F),
+ Itineraries(SchedModel.InstrItineraries) {}
+
+ /// \brief Returns true if there are no itineraries.
+ bool isEmpty() const { return Itineraries == nullptr; }
+
+ /// \brief Returns true if the index is for the end marker itinerary.
+ bool isEndMarker(unsigned ItinClassIndx) const {
+ return ((Itineraries[ItinClassIndx].FirstStage == ~0U) &&
+ (Itineraries[ItinClassIndx].LastStage == ~0U));
+ }
+
+ /// \brief Return the first stage of the itinerary.
+ const InstrStage *beginStage(unsigned ItinClassIndx) const {
+ unsigned StageIdx = Itineraries[ItinClassIndx].FirstStage;
+ return Stages + StageIdx;
+ }
+
+ /// \brief Return the last+1 stage of the itinerary.
+ const InstrStage *endStage(unsigned ItinClassIndx) const {
+ unsigned StageIdx = Itineraries[ItinClassIndx].LastStage;
+ return Stages + StageIdx;
+ }
+
+ /// \brief Return the total stage latency of the given class. The latency is
+ /// the maximum completion time for any stage in the itinerary. If no stages
+ /// exist, it defaults to one cycle.
+ unsigned getStageLatency(unsigned ItinClassIndx) const {
+ // If the target doesn't provide itinerary information, use a simple
+ // non-zero default value for all instructions.
+ if (isEmpty())
+ return 1;
+
+ // Calculate the maximum completion time for any stage.
+ unsigned Latency = 0, StartCycle = 0;
+ for (const InstrStage *IS = beginStage(ItinClassIndx),
+ *E = endStage(ItinClassIndx); IS != E; ++IS) {
+ Latency = std::max(Latency, StartCycle + IS->getCycles());
+ StartCycle += IS->getNextCycles();
+ }
+ return Latency;
+ }
+
+ /// \brief Return the cycle for the given class and operand. Return -1 if no
+ /// cycle is specified for the operand.
+ int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const {
+ if (isEmpty())
+ return -1;
+
+ unsigned FirstIdx = Itineraries[ItinClassIndx].FirstOperandCycle;
+ unsigned LastIdx = Itineraries[ItinClassIndx].LastOperandCycle;
+ if ((FirstIdx + OperandIdx) >= LastIdx)
+ return -1;
+
+ return (int)OperandCycles[FirstIdx + OperandIdx];
+ }
+
+ /// \brief Return true if there is a pipeline forwarding between instructions
+ /// of itinerary classes DefClass and UseClasses so that value produced by an
+ /// instruction of itinerary class DefClass, operand index DefIdx can be
+ /// bypassed when it's read by an instruction of itinerary class UseClass,
+ /// operand index UseIdx.
+ bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ unsigned FirstDefIdx = Itineraries[DefClass].FirstOperandCycle;
+ unsigned LastDefIdx = Itineraries[DefClass].LastOperandCycle;
+ if ((FirstDefIdx + DefIdx) >= LastDefIdx)
+ return false;
+ if (Forwardings[FirstDefIdx + DefIdx] == 0)
+ return false;
+
+ unsigned FirstUseIdx = Itineraries[UseClass].FirstOperandCycle;
+ unsigned LastUseIdx = Itineraries[UseClass].LastOperandCycle;
+ if ((FirstUseIdx + UseIdx) >= LastUseIdx)
+ return false;
+
+ return Forwardings[FirstDefIdx + DefIdx] ==
+ Forwardings[FirstUseIdx + UseIdx];
+ }
+
+ /// \brief Compute and return the use operand latency of a given itinerary
+ /// class and operand index if the value is produced by an instruction of the
+ /// specified itinerary class and def operand index.
+ int getOperandLatency(unsigned DefClass, unsigned DefIdx,
+ unsigned UseClass, unsigned UseIdx) const {
+ if (isEmpty())
+ return -1;
+
+ int DefCycle = getOperandCycle(DefClass, DefIdx);
+ if (DefCycle == -1)
+ return -1;
+
+ int UseCycle = getOperandCycle(UseClass, UseIdx);
+ if (UseCycle == -1)
+ return -1;
+
+ UseCycle = DefCycle - UseCycle + 1;
+ if (UseCycle > 0 &&
+ hasPipelineForwarding(DefClass, DefIdx, UseClass, UseIdx))
+ // FIXME: This assumes one cycle benefit for every pipeline forwarding.
+ --UseCycle;
+ return UseCycle;
+ }
+
+ /// \brief Return the number of micro-ops that the given class decodes to.
+ /// Return -1 for classes that require dynamic lookup via TargetInstrInfo.
+ int getNumMicroOps(unsigned ItinClassIndx) const {
+ if (isEmpty())
+ return 1;
+ return Itineraries[ItinClassIndx].NumMicroOps;
+ }
+};
+
+} // End llvm namespace
+
+#endif
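A sketch of a def-to-use latency query; the itinerary class and operand indices are hypothetical values a target scheduler would take from its TableGen'erated tables:

#include "llvm/MC/MCInstrItineraries.h"

// Latency from a def in class 3 (operand 0) to a use in class 7 (operand 1);
// returns -1 when either operand has no recorded cycle, and is reduced by
// one cycle when a forwarding path connects the two operands.
static int exampleOperandLatency(const llvm::InstrItineraryData &IID) {
  const unsigned DefClass = 3, UseClass = 7; // hypothetical classes
  return IID.getOperandLatency(DefClass, /*DefIdx=*/0, UseClass, /*UseIdx=*/1);
}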
diff --git a/contrib/llvm/include/llvm/MC/MCLabel.h b/contrib/llvm/include/llvm/MC/MCLabel.h
new file mode 100644
index 0000000..a12473f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCLabel.h
@@ -0,0 +1,57 @@
+//===- MCLabel.h - Machine Code Directional Local Labels --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCLabel class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCLABEL_H
+#define LLVM_MC_MCLABEL_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCContext;
+class raw_ostream;
+
+/// \brief Instances of this class represent a label name in the MC file,
+/// and MCLabels are created and uniqued by the MCContext class. MCLabel
+/// should only be constructed for valid instances in the object file.
+class MCLabel {
+  /// \brief The instance number of this Directional Local Label.
+ unsigned Instance;
+
+private: // MCContext creates and uniques these.
+ friend class MCContext;
+ MCLabel(unsigned instance) : Instance(instance) {}
+
+ MCLabel(const MCLabel &) = delete;
+ void operator=(const MCLabel &) = delete;
+
+public:
+ /// \brief Get the current instance of this Directional Local Label.
+ unsigned getInstance() const { return Instance; }
+
+ /// \brief Increment the current instance of this Directional Local Label.
+ unsigned incInstance() { return ++Instance; }
+
+ /// \brief Print the value to the stream \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Print the value to stderr.
+ void dump() const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCLabel &Label) {
+ Label.print(OS);
+ return OS;
+}
+} // end namespace llvm
+
+#endif
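A sketch of the instance-count semantics: each redefinition of a directional local label such as "1:" bumps the instance, which is how backward ("1b") and forward ("1f") references resolve to the right occurrence. Construction is owned by MCContext, so this only exercises the accessors:

#include "llvm/MC/MCLabel.h"

// Record a redefinition of the label and return the new instance number,
// which is one past the previous getInstance() value.
static unsigned redefineLabel(llvm::MCLabel &L) {
  return L.incInstance();
}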
diff --git a/contrib/llvm/include/llvm/MC/MCLinkerOptimizationHint.h b/contrib/llvm/include/llvm/MC/MCLinkerOptimizationHint.h
new file mode 100644
index 0000000..a519c4b
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCLinkerOptimizationHint.h
@@ -0,0 +1,205 @@
+//===- MCLinkerOptimizationHint.h - LOH interface ---------------*- C++ -*-===//
+//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some helper classes to handle Linker Optimization Hint
+// (LOH).
+//
+// FIXME: LOH interface supports only MachO format at the moment.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
+#define LLVM_MC_MCLINKEROPTIMIZATIONHINT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/MC/MCMachObjectWriter.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+// Forward declarations.
+class MCAsmLayout;
+class MCSymbol;
+
+/// Linker Optimization Hint Type.
+enum MCLOHType {
+ MCLOH_AdrpAdrp = 0x1u, ///< Adrp xY, _v1@PAGE -> Adrp xY, _v2@PAGE.
+ MCLOH_AdrpLdr = 0x2u, ///< Adrp _v@PAGE -> Ldr _v@PAGEOFF.
+ MCLOH_AdrpAddLdr = 0x3u, ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Ldr.
+ MCLOH_AdrpLdrGotLdr = 0x4u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Ldr.
+ MCLOH_AdrpAddStr = 0x5u, ///< Adrp _v@PAGE -> Add _v@PAGEOFF -> Str.
+ MCLOH_AdrpLdrGotStr = 0x6u, ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF -> Str.
+ MCLOH_AdrpAdd = 0x7u, ///< Adrp _v@PAGE -> Add _v@PAGEOFF.
+ MCLOH_AdrpLdrGot = 0x8u ///< Adrp _v@GOTPAGE -> Ldr _v@GOTPAGEOFF.
+};
+
+static inline StringRef MCLOHDirectiveName() {
+ return StringRef(".loh");
+}
+
+static inline bool isValidMCLOHType(unsigned Kind) {
+ return Kind >= MCLOH_AdrpAdrp && Kind <= MCLOH_AdrpLdrGot;
+}
+
+static inline int MCLOHNameToId(StringRef Name) {
+#define MCLOHCaseNameToId(Name) .Case(#Name, MCLOH_ ## Name)
+ return StringSwitch<int>(Name)
+ MCLOHCaseNameToId(AdrpAdrp)
+ MCLOHCaseNameToId(AdrpLdr)
+ MCLOHCaseNameToId(AdrpAddLdr)
+ MCLOHCaseNameToId(AdrpLdrGotLdr)
+ MCLOHCaseNameToId(AdrpAddStr)
+ MCLOHCaseNameToId(AdrpLdrGotStr)
+ MCLOHCaseNameToId(AdrpAdd)
+ MCLOHCaseNameToId(AdrpLdrGot)
+ .Default(-1);
+}
+
+static inline StringRef MCLOHIdToName(MCLOHType Kind) {
+#define MCLOHCaseIdToName(Name) case MCLOH_ ## Name: return StringRef(#Name);
+ switch (Kind) {
+ MCLOHCaseIdToName(AdrpAdrp);
+ MCLOHCaseIdToName(AdrpLdr);
+ MCLOHCaseIdToName(AdrpAddLdr);
+ MCLOHCaseIdToName(AdrpLdrGotLdr);
+ MCLOHCaseIdToName(AdrpAddStr);
+ MCLOHCaseIdToName(AdrpLdrGotStr);
+ MCLOHCaseIdToName(AdrpAdd);
+ MCLOHCaseIdToName(AdrpLdrGot);
+ }
+ return StringRef();
+}
+
+static inline int MCLOHIdToNbArgs(MCLOHType Kind) {
+ switch (Kind) {
+ // LOH with two arguments
+ case MCLOH_AdrpAdrp:
+ case MCLOH_AdrpLdr:
+ case MCLOH_AdrpAdd:
+ case MCLOH_AdrpLdrGot:
+ return 2;
+ // LOH with three arguments
+ case MCLOH_AdrpAddLdr:
+ case MCLOH_AdrpLdrGotLdr:
+ case MCLOH_AdrpAddStr:
+ case MCLOH_AdrpLdrGotStr:
+ return 3;
+ }
+ return -1;
+}
+
+/// Store Linker Optimization Hint information (LOH).
+class MCLOHDirective {
+ MCLOHType Kind;
+
+ /// Arguments of this directive. Order matters.
+ SmallVector<MCSymbol *, 3> Args;
+
+ /// Emit this directive in \p OutStream using the information available
+ /// in the given \p ObjWriter and \p Layout to get the address of the
+ /// arguments within the object file.
+ void emit_impl(raw_ostream &OutStream, const MachObjectWriter &ObjWriter,
+ const MCAsmLayout &Layout) const;
+
+public:
+ typedef SmallVectorImpl<MCSymbol *> LOHArgs;
+
+ MCLOHDirective(MCLOHType Kind, const LOHArgs &Args)
+ : Kind(Kind), Args(Args.begin(), Args.end()) {
+ assert(isValidMCLOHType(Kind) && "Invalid LOH directive type!");
+ }
+
+ MCLOHType getKind() const { return Kind; }
+
+ const LOHArgs &getArgs() const { return Args; }
+
+ /// Emit this directive as:
+ /// <kind, numArgs, addr1, ..., addrN>
+ void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const {
+ raw_ostream &OutStream = ObjWriter.getStream();
+ emit_impl(OutStream, ObjWriter, Layout);
+ }
+
+ /// Get the size in bytes of this directive if emitted in \p ObjWriter with
+ /// the given \p Layout.
+ uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
+ const MCAsmLayout &Layout) const {
+ class raw_counting_ostream : public raw_ostream {
+ uint64_t Count;
+
+ void write_impl(const char *, size_t size) override { Count += size; }
+
+ uint64_t current_pos() const override { return Count; }
+
+ public:
+ raw_counting_ostream() : Count(0) {}
+ ~raw_counting_ostream() override { flush(); }
+ };
+
+ raw_counting_ostream OutStream;
+ emit_impl(OutStream, ObjWriter, Layout);
+ return OutStream.tell();
+ }
+};
+
+class MCLOHContainer {
+ /// Keep track of the emit size of all the LOHs.
+ mutable uint64_t EmitSize;
+
+ /// Keep track of all LOH directives.
+ SmallVector<MCLOHDirective, 32> Directives;
+
+public:
+ typedef SmallVectorImpl<MCLOHDirective> LOHDirectives;
+
+ MCLOHContainer() : EmitSize(0) {}
+
+ /// Const accessor to the directives.
+ const LOHDirectives &getDirectives() const {
+ return Directives;
+ }
+
+ /// Add the directive of the given kind \p Kind with the given arguments
+ /// \p Args to the container.
+ void addDirective(MCLOHType Kind, const MCLOHDirective::LOHArgs &Args) {
+ Directives.push_back(MCLOHDirective(Kind, Args));
+ }
+
+ /// Get the size of the directives if emitted.
+ uint64_t getEmitSize(const MachObjectWriter &ObjWriter,
+ const MCAsmLayout &Layout) const {
+ if (!EmitSize) {
+ for (const MCLOHDirective &D : Directives)
+ EmitSize += D.getEmitSize(ObjWriter, Layout);
+ }
+ return EmitSize;
+ }
+
+  /// Emit all Linker Optimization Hints in one big table.
+ /// Each line of the table is emitted by LOHDirective::emit.
+ void emit(MachObjectWriter &ObjWriter, const MCAsmLayout &Layout) const {
+ for (const MCLOHDirective &D : Directives)
+ D.emit(ObjWriter, Layout);
+ }
+
+ void reset() {
+ Directives.clear();
+ EmitSize = 0;
+ }
+};
+
+// Convenience typedefs for the specializations using MCSymbol.
+typedef MCLOHDirective::LOHArgs MCLOHArgs;
+typedef MCLOHContainer::LOHDirectives MCLOHDirectives;
+
+} // end namespace llvm
+
+#endif
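A sketch of round-tripping an LOH mnemonic through the inline helpers above, much as a .loh directive parser would:

#include "llvm/MC/MCLinkerOptimizationHint.h"

// Verify name -> id -> name for AdrpLdr; MCLOHNameToId returns -1 for
// unknown names, and AdrpLdr takes two symbol arguments.
static bool checkAdrpLdr() {
  int Id = llvm::MCLOHNameToId("AdrpLdr");
  if (Id == -1 || !llvm::isValidMCLOHType(Id))
    return false;
  llvm::MCLOHType Kind = static_cast<llvm::MCLOHType>(Id);
  return llvm::MCLOHIdToName(Kind) == "AdrpLdr" &&
         llvm::MCLOHIdToNbArgs(Kind) == 2;
}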
diff --git a/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
new file mode 100644
index 0000000..cd3db95
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCMachObjectWriter.h
@@ -0,0 +1,277 @@
+//===-- llvm/MC/MCMachObjectWriter.h - Mach Object Writer -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCMACHOBJECTWRITER_H
+#define LLVM_MC_MCMACHOBJECTWRITER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MachO.h"
+#include <vector>
+
+namespace llvm {
+
+class MachObjectWriter;
+
+class MCMachObjectTargetWriter {
+ const unsigned Is64Bit : 1;
+ const uint32_t CPUType;
+ const uint32_t CPUSubtype;
+ unsigned LocalDifference_RIT;
+
+protected:
+ MCMachObjectTargetWriter(bool Is64Bit_, uint32_t CPUType_,
+ uint32_t CPUSubtype_);
+
+ void setLocalDifferenceRelocationType(unsigned Type) {
+ LocalDifference_RIT = Type;
+ }
+
+public:
+ virtual ~MCMachObjectTargetWriter();
+
+ /// \name Lifetime Management
+ /// @{
+
+ virtual void reset() {}
+
+ /// @}
+
+ /// \name Accessors
+ /// @{
+
+ bool is64Bit() const { return Is64Bit; }
+ uint32_t getCPUType() const { return CPUType; }
+ uint32_t getCPUSubtype() const { return CPUSubtype; }
+ unsigned getLocalDifferenceRelocationType() const {
+ return LocalDifference_RIT;
+ }
+
+ /// @}
+
+ /// \name API
+ /// @{
+
+ virtual void recordRelocation(MachObjectWriter *Writer, MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ uint64_t &FixedValue) = 0;
+
+ /// @}
+};
+
+class MachObjectWriter : public MCObjectWriter {
+ /// Helper struct for containing some precomputed information on symbols.
+ struct MachSymbolData {
+ const MCSymbol *Symbol;
+ uint64_t StringIndex;
+ uint8_t SectionIndex;
+
+ // Support lexicographic sorting.
+ bool operator<(const MachSymbolData &RHS) const;
+ };
+
+ /// The target specific Mach-O writer instance.
+ std::unique_ptr<MCMachObjectTargetWriter> TargetObjectWriter;
+
+ /// \name Relocation Data
+ /// @{
+
+ struct RelAndSymbol {
+ const MCSymbol *Sym;
+ MachO::any_relocation_info MRE;
+ RelAndSymbol(const MCSymbol *Sym, const MachO::any_relocation_info &MRE)
+ : Sym(Sym), MRE(MRE) {}
+ };
+
+ llvm::DenseMap<const MCSection *, std::vector<RelAndSymbol>> Relocations;
+ llvm::DenseMap<const MCSection *, unsigned> IndirectSymBase;
+
+ SectionAddrMap SectionAddress;
+
+ /// @}
+ /// \name Symbol Table Data
+ /// @{
+
+ StringTableBuilder StringTable{StringTableBuilder::MachO};
+ std::vector<MachSymbolData> LocalSymbolData;
+ std::vector<MachSymbolData> ExternalSymbolData;
+ std::vector<MachSymbolData> UndefinedSymbolData;
+
+ /// @}
+
+ MachSymbolData *findSymbolData(const MCSymbol &Sym);
+
+public:
+ MachObjectWriter(MCMachObjectTargetWriter *MOTW, raw_pwrite_stream &OS,
+ bool IsLittleEndian)
+ : MCObjectWriter(OS, IsLittleEndian), TargetObjectWriter(MOTW) {}
+
+ const MCSymbol &findAliasedSymbol(const MCSymbol &Sym) const;
+
+ /// \name Lifetime management Methods
+ /// @{
+
+ void reset() override;
+
+ /// @}
+
+ /// \name Utility Methods
+ /// @{
+
+ bool isFixupKindPCRel(const MCAssembler &Asm, unsigned Kind);
+
+ SectionAddrMap &getSectionAddressMap() { return SectionAddress; }
+
+ uint64_t getSectionAddress(const MCSection *Sec) const {
+ return SectionAddress.lookup(Sec);
+ }
+ uint64_t getSymbolAddress(const MCSymbol &S, const MCAsmLayout &Layout) const;
+
+ uint64_t getFragmentAddress(const MCFragment *Fragment,
+ const MCAsmLayout &Layout) const;
+
+ uint64_t getPaddingSize(const MCSection *SD, const MCAsmLayout &Layout) const;
+
+ bool doesSymbolRequireExternRelocation(const MCSymbol &S);
+
+ /// @}
+
+ /// \name Target Writer Proxy Accessors
+ /// @{
+
+ bool is64Bit() const { return TargetObjectWriter->is64Bit(); }
+ bool isX86_64() const {
+ uint32_t CPUType = TargetObjectWriter->getCPUType();
+ return CPUType == MachO::CPU_TYPE_X86_64;
+ }
+
+ /// @}
+
+ void writeHeader(MachO::HeaderFileType Type, unsigned NumLoadCommands,
+ unsigned LoadCommandsSize, bool SubsectionsViaSymbols);
+
+ /// Write a segment load command.
+ ///
+ /// \param NumSections The number of sections in this segment.
+ /// \param SectionDataSize The total size of the sections.
+ void writeSegmentLoadCommand(StringRef Name, unsigned NumSections,
+ uint64_t VMAddr, uint64_t VMSize,
+ uint64_t SectionDataStartOffset,
+ uint64_t SectionDataSize, uint32_t MaxProt,
+ uint32_t InitProt);
+
+ void writeSection(const MCAsmLayout &Layout, const MCSection &Sec,
+ uint64_t VMAddr, uint64_t FileOffset, unsigned Flags,
+ uint64_t RelocationsStart, unsigned NumRelocations);
+
+ void writeSymtabLoadCommand(uint32_t SymbolOffset, uint32_t NumSymbols,
+ uint32_t StringTableOffset,
+ uint32_t StringTableSize);
+
+ void writeDysymtabLoadCommand(
+ uint32_t FirstLocalSymbol, uint32_t NumLocalSymbols,
+ uint32_t FirstExternalSymbol, uint32_t NumExternalSymbols,
+ uint32_t FirstUndefinedSymbol, uint32_t NumUndefinedSymbols,
+ uint32_t IndirectSymbolOffset, uint32_t NumIndirectSymbols);
+
+ void writeNlist(MachSymbolData &MSD, const MCAsmLayout &Layout);
+
+ void writeLinkeditLoadCommand(uint32_t Type, uint32_t DataOffset,
+ uint32_t DataSize);
+
+ void writeLinkerOptionsLoadCommand(const std::vector<std::string> &Options);
+
+ // FIXME: We really need to improve the relocation validation. Basically, we
+ // want to implement a separate computation which evaluates the relocation
+ // entry as the linker would, and verifies that the resultant fixup value is
+ // exactly what the encoder wanted. This will catch several classes of
+ // problems:
+ //
+ // - Relocation entry bugs, the two algorithms are unlikely to have the same
+ // exact bug.
+ //
+ // - Relaxation issues, where we forget to relax something.
+ //
+ // - Input errors, where something cannot be correctly encoded. 'as' allows
+ // these through in many cases.
+
+ // Add a relocation to be output in the object file. At the time this is
+  // called, the symbol indexes are not known, so if the relocation refers
+ // to a symbol it should be passed as \p RelSymbol so that it can be updated
+ // afterwards. If the relocation doesn't refer to a symbol, nullptr should be
+ // used.
+ void addRelocation(const MCSymbol *RelSymbol, const MCSection *Sec,
+ MachO::any_relocation_info &MRE) {
+ RelAndSymbol P(RelSymbol, MRE);
+ Relocations[Sec].push_back(P);
+ }
+
+ void recordScatteredRelocation(const MCAssembler &Asm,
+ const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ unsigned Log2Size, uint64_t &FixedValue);
+
+ void recordTLVPRelocation(const MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, uint64_t &FixedValue);
+
+ void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment, const MCFixup &Fixup,
+ MCValue Target, bool &IsPCRel,
+ uint64_t &FixedValue) override;
+
+ void bindIndirectSymbols(MCAssembler &Asm);
+
+ /// Compute the symbol table data.
+ void computeSymbolTable(MCAssembler &Asm,
+ std::vector<MachSymbolData> &LocalSymbolData,
+ std::vector<MachSymbolData> &ExternalSymbolData,
+ std::vector<MachSymbolData> &UndefinedSymbolData);
+
+ void computeSectionAddresses(const MCAssembler &Asm,
+ const MCAsmLayout &Layout);
+
+ void executePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) override;
+
+ bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbol &A,
+ const MCSymbol &B,
+ bool InSet) const override;
+
+ bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbol &SymA,
+ const MCFragment &FB, bool InSet,
+ bool IsPCRel) const override;
+
+ void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) override;
+};
+
+/// Construct a new Mach-O writer instance.
+///
+/// This routine takes ownership of the target writer subclass.
+///
+/// \param MOTW - The target specific Mach-O writer subclass.
+/// \param OS - The stream to write to.
+/// \returns The constructed object writer.
+MCObjectWriter *createMachObjectWriter(MCMachObjectTargetWriter *MOTW,
+ raw_pwrite_stream &OS,
+ bool IsLittleEndian);
+
+} // End llvm namespace
+
+#endif
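A sketch of wiring a target-specific writer into createMachObjectWriter; MyTargetMachObjectWriter is a hypothetical subclass standing in for a real target's MCTargetDesc implementation:

#include "llvm/MC/MCMachObjectWriter.h"

namespace {
// Hypothetical 64-bit x86 target writer; a real one lives in the target's
// MCTargetDesc library and fills in recordRelocation properly.
class MyTargetMachObjectWriter : public llvm::MCMachObjectTargetWriter {
public:
  MyTargetMachObjectWriter()
      : MCMachObjectTargetWriter(/*Is64Bit_=*/true,
                                 llvm::MachO::CPU_TYPE_X86_64,
                                 llvm::MachO::CPU_SUBTYPE_X86_64_ALL) {}
  void recordRelocation(llvm::MachObjectWriter *Writer, llvm::MCAssembler &Asm,
                        const llvm::MCAsmLayout &Layout,
                        const llvm::MCFragment *Fragment,
                        const llvm::MCFixup &Fixup, llvm::MCValue Target,
                        uint64_t &FixedValue) override {
    // A real implementation maps fixup kinds to Mach-O relocation entries
    // and calls Writer->addRelocation(...).
  }
};
} // end anonymous namespace

// createMachObjectWriter takes ownership of the target writer.
llvm::MCObjectWriter *makeWriter(llvm::raw_pwrite_stream &OS) {
  return llvm::createMachObjectWriter(new MyTargetMachObjectWriter(), OS,
                                      /*IsLittleEndian=*/true);
}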
diff --git a/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
new file mode 100644
index 0000000..cf2c3f1
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCObjectFileInfo.h
@@ -0,0 +1,358 @@
+//===-- llvm/MC/MCObjectFileInfo.h - Object File Info -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTFILEINFO_H
+#define LLVM_MC_MCOBJECTFILEINFO_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+class MCContext;
+class MCSection;
+
+class MCObjectFileInfo {
+protected:
+ /// True if .comm supports alignment. This is a hack for as long as we
+ /// support 10.4 Tiger, whose assembler doesn't support alignment on comm.
+ bool CommDirectiveSupportsAlignment;
+
+ /// True if the target object file supports a weak_definition of constant 0
+ /// for an omitted EH frame.
+ bool SupportsWeakOmittedEHFrame;
+
+ /// True if the target object file supports emitting a compact unwind section
+ /// without an associated EH frame section.
+ bool SupportsCompactUnwindWithoutEHFrame;
+
+ /// OmitDwarfIfHaveCompactUnwind - True if the target object file
+ /// supports having some functions with compact unwind and others with
+ /// DWARF unwind.
+ bool OmitDwarfIfHaveCompactUnwind;
+
+ /// PersonalityEncoding, LSDAEncoding, TTypeEncoding - Some encoding values
+ /// for EH.
+ unsigned PersonalityEncoding;
+ unsigned LSDAEncoding;
+ unsigned FDECFIEncoding;
+ unsigned TTypeEncoding;
+
+ /// Compact unwind encoding indicating that we should emit only an EH frame.
+ unsigned CompactUnwindDwarfEHFrameOnly;
+
+ /// Section directive for standard text.
+ MCSection *TextSection;
+
+ /// Section directive for standard data.
+ MCSection *DataSection;
+
+ /// Section that is default initialized to zero.
+ MCSection *BSSSection;
+
+ /// Section that is readonly and can contain arbitrary initialized data.
+ /// Targets are not required to have a readonly section. If they don't,
+ /// various bits of code will fall back to using the data section for
+ /// constants.
+ MCSection *ReadOnlySection;
+
+ /// This section contains the static constructor pointer list.
+ MCSection *StaticCtorSection;
+
+ /// This section contains the static destructor pointer list.
+ MCSection *StaticDtorSection;
+
+ /// If exception handling is supported by the target, this is the section the
+ /// Language Specific Data Area information is emitted to.
+ MCSection *LSDASection;
+
+ /// If exception handling is supported by the target and the target can
+ /// support a compact representation of the CIE and FDE, this is the section
+ /// to emit them into.
+ MCSection *CompactUnwindSection;
+
+ // Dwarf sections for debug info. If a target supports debug info, these must
+ // be set.
+ MCSection *DwarfAbbrevSection;
+ MCSection *DwarfInfoSection;
+ MCSection *DwarfLineSection;
+ MCSection *DwarfFrameSection;
+ MCSection *DwarfPubTypesSection;
+ const MCSection *DwarfDebugInlineSection;
+ MCSection *DwarfStrSection;
+ MCSection *DwarfLocSection;
+ MCSection *DwarfARangesSection;
+ MCSection *DwarfRangesSection;
+ // The pubnames section is no longer generated by default. The generation
+ // can be enabled by a compiler flag.
+ MCSection *DwarfPubNamesSection;
+
+ /// DWARF5 Experimental Debug Info Sections
+ /// DwarfAccelNamesSection, DwarfAccelObjCSection,
+ /// DwarfAccelNamespaceSection, DwarfAccelTypesSection -
+ /// If we use the DWARF accelerated hash tables then we want to emit these
+ /// sections.
+ MCSection *DwarfAccelNamesSection;
+ MCSection *DwarfAccelObjCSection;
+ MCSection *DwarfAccelNamespaceSection;
+ MCSection *DwarfAccelTypesSection;
+
+ // These are used for the Fission separate debug information files.
+ MCSection *DwarfInfoDWOSection;
+ MCSection *DwarfTypesDWOSection;
+ MCSection *DwarfAbbrevDWOSection;
+ MCSection *DwarfStrDWOSection;
+ MCSection *DwarfLineDWOSection;
+ MCSection *DwarfLocDWOSection;
+ MCSection *DwarfStrOffDWOSection;
+ MCSection *DwarfAddrSection;
+
+ // These are for Fission DWP files.
+ MCSection *DwarfCUIndexSection;
+ MCSection *DwarfTUIndexSection;
+
+ /// Section for newer gnu pubnames.
+ MCSection *DwarfGnuPubNamesSection;
+ /// Section for newer gnu pubtypes.
+ MCSection *DwarfGnuPubTypesSection;
+
+ MCSection *COFFDebugSymbolsSection;
+
+ /// Extra TLS Variable Data section.
+ ///
+ /// If the target needs to put additional information for a TLS variable,
+ /// it'll go here.
+ MCSection *TLSExtraDataSection;
+
+ /// Section directive for Thread Local data. ELF, MachO and COFF.
+ MCSection *TLSDataSection; // Defaults to ".tdata".
+
+ /// Section directive for Thread Local uninitialized data.
+ ///
+ /// Null if this target doesn't support a BSS section. ELF and MachO only.
+ MCSection *TLSBSSSection; // Defaults to ".tbss".
+
+ /// StackMap section.
+ MCSection *StackMapSection;
+
+ /// FaultMap section.
+ MCSection *FaultMapSection;
+
+ /// EH frame section.
+ ///
+ /// It is initialized on demand so it can be overwritten (with uniquing).
+ MCSection *EHFrameSection;
+
+ // ELF specific sections.
+ MCSection *DataRelROSection;
+ MCSection *MergeableConst4Section;
+ MCSection *MergeableConst8Section;
+ MCSection *MergeableConst16Section;
+
+ // MachO specific sections.
+
+ /// Section for thread local structure information.
+ ///
+ /// Contains the source code name of the variable, visibility and a pointer to
+ /// the initial value (.tdata or .tbss).
+ MCSection *TLSTLVSection; // Defaults to ".tlv".
+
+ /// Section for thread local data initialization functions.
+ const MCSection *TLSThreadInitSection; // Defaults to ".thread_init_func".
+
+ MCSection *CStringSection;
+ MCSection *UStringSection;
+ MCSection *TextCoalSection;
+ MCSection *ConstTextCoalSection;
+ MCSection *ConstDataSection;
+ MCSection *DataCoalSection;
+ MCSection *DataCommonSection;
+ MCSection *DataBSSSection;
+ MCSection *FourByteConstantSection;
+ MCSection *EightByteConstantSection;
+ MCSection *SixteenByteConstantSection;
+ MCSection *LazySymbolPointerSection;
+ MCSection *NonLazySymbolPointerSection;
+
+ /// COFF specific sections.
+ MCSection *DrectveSection;
+ MCSection *PDataSection;
+ MCSection *XDataSection;
+ MCSection *SXDataSection;
+
+public:
+ void InitMCObjectFileInfo(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model CM, MCContext &ctx);
+ LLVM_ATTRIBUTE_DEPRECATED(
+ void InitMCObjectFileInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM, MCContext &ctx),
+ "StringRef GNU Triple argument replaced by a llvm::Triple object");
+
+ bool getSupportsWeakOmittedEHFrame() const {
+ return SupportsWeakOmittedEHFrame;
+ }
+ bool getSupportsCompactUnwindWithoutEHFrame() const {
+ return SupportsCompactUnwindWithoutEHFrame;
+ }
+ bool getOmitDwarfIfHaveCompactUnwind() const {
+ return OmitDwarfIfHaveCompactUnwind;
+ }
+
+ bool getCommDirectiveSupportsAlignment() const {
+ return CommDirectiveSupportsAlignment;
+ }
+
+ unsigned getPersonalityEncoding() const { return PersonalityEncoding; }
+ unsigned getLSDAEncoding() const { return LSDAEncoding; }
+ unsigned getFDEEncoding() const { return FDECFIEncoding; }
+ unsigned getTTypeEncoding() const { return TTypeEncoding; }
+
+ unsigned getCompactUnwindDwarfEHFrameOnly() const {
+ return CompactUnwindDwarfEHFrameOnly;
+ }
+
+ MCSection *getTextSection() const { return TextSection; }
+ MCSection *getDataSection() const { return DataSection; }
+ MCSection *getBSSSection() const { return BSSSection; }
+ MCSection *getReadOnlySection() const { return ReadOnlySection; }
+ MCSection *getLSDASection() const { return LSDASection; }
+ MCSection *getCompactUnwindSection() const { return CompactUnwindSection; }
+ MCSection *getDwarfAbbrevSection() const { return DwarfAbbrevSection; }
+ MCSection *getDwarfInfoSection() const { return DwarfInfoSection; }
+ MCSection *getDwarfLineSection() const { return DwarfLineSection; }
+ MCSection *getDwarfFrameSection() const { return DwarfFrameSection; }
+ MCSection *getDwarfPubNamesSection() const { return DwarfPubNamesSection; }
+ MCSection *getDwarfPubTypesSection() const { return DwarfPubTypesSection; }
+ MCSection *getDwarfGnuPubNamesSection() const {
+ return DwarfGnuPubNamesSection;
+ }
+ MCSection *getDwarfGnuPubTypesSection() const {
+ return DwarfGnuPubTypesSection;
+ }
+ const MCSection *getDwarfDebugInlineSection() const {
+ return DwarfDebugInlineSection;
+ }
+ MCSection *getDwarfStrSection() const { return DwarfStrSection; }
+ MCSection *getDwarfLocSection() const { return DwarfLocSection; }
+ MCSection *getDwarfARangesSection() const { return DwarfARangesSection; }
+ MCSection *getDwarfRangesSection() const { return DwarfRangesSection; }
+
+ // DWARF5 Experimental Debug Info Sections
+ MCSection *getDwarfAccelNamesSection() const {
+ return DwarfAccelNamesSection;
+ }
+ MCSection *getDwarfAccelObjCSection() const { return DwarfAccelObjCSection; }
+ MCSection *getDwarfAccelNamespaceSection() const {
+ return DwarfAccelNamespaceSection;
+ }
+ MCSection *getDwarfAccelTypesSection() const {
+ return DwarfAccelTypesSection;
+ }
+ MCSection *getDwarfInfoDWOSection() const { return DwarfInfoDWOSection; }
+ MCSection *getDwarfTypesSection(uint64_t Hash) const;
+ MCSection *getDwarfTypesDWOSection() const { return DwarfTypesDWOSection; }
+ MCSection *getDwarfAbbrevDWOSection() const { return DwarfAbbrevDWOSection; }
+ MCSection *getDwarfStrDWOSection() const { return DwarfStrDWOSection; }
+ MCSection *getDwarfLineDWOSection() const { return DwarfLineDWOSection; }
+ MCSection *getDwarfLocDWOSection() const { return DwarfLocDWOSection; }
+ MCSection *getDwarfStrOffDWOSection() const { return DwarfStrOffDWOSection; }
+ MCSection *getDwarfAddrSection() const { return DwarfAddrSection; }
+ MCSection *getDwarfCUIndexSection() const { return DwarfCUIndexSection; }
+ MCSection *getDwarfTUIndexSection() const { return DwarfTUIndexSection; }
+
+ MCSection *getCOFFDebugSymbolsSection() const {
+ return COFFDebugSymbolsSection;
+ }
+
+ MCSection *getTLSExtraDataSection() const { return TLSExtraDataSection; }
+ const MCSection *getTLSDataSection() const { return TLSDataSection; }
+ MCSection *getTLSBSSSection() const { return TLSBSSSection; }
+
+ MCSection *getStackMapSection() const { return StackMapSection; }
+ MCSection *getFaultMapSection() const { return FaultMapSection; }
+
+ // ELF specific sections.
+ MCSection *getDataRelROSection() const { return DataRelROSection; }
+ const MCSection *getMergeableConst4Section() const {
+ return MergeableConst4Section;
+ }
+ const MCSection *getMergeableConst8Section() const {
+ return MergeableConst8Section;
+ }
+ const MCSection *getMergeableConst16Section() const {
+ return MergeableConst16Section;
+ }
+
+ // MachO specific sections.
+ const MCSection *getTLSTLVSection() const { return TLSTLVSection; }
+ const MCSection *getTLSThreadInitSection() const {
+ return TLSThreadInitSection;
+ }
+ const MCSection *getCStringSection() const { return CStringSection; }
+ const MCSection *getUStringSection() const { return UStringSection; }
+ MCSection *getTextCoalSection() const { return TextCoalSection; }
+ const MCSection *getConstTextCoalSection() const {
+ return ConstTextCoalSection;
+ }
+ const MCSection *getConstDataSection() const { return ConstDataSection; }
+ const MCSection *getDataCoalSection() const { return DataCoalSection; }
+ const MCSection *getDataCommonSection() const { return DataCommonSection; }
+ MCSection *getDataBSSSection() const { return DataBSSSection; }
+ const MCSection *getFourByteConstantSection() const {
+ return FourByteConstantSection;
+ }
+ const MCSection *getEightByteConstantSection() const {
+ return EightByteConstantSection;
+ }
+ const MCSection *getSixteenByteConstantSection() const {
+ return SixteenByteConstantSection;
+ }
+ MCSection *getLazySymbolPointerSection() const {
+ return LazySymbolPointerSection;
+ }
+ MCSection *getNonLazySymbolPointerSection() const {
+ return NonLazySymbolPointerSection;
+ }
+
+ // COFF specific sections.
+ MCSection *getDrectveSection() const { return DrectveSection; }
+ MCSection *getPDataSection() const { return PDataSection; }
+ MCSection *getXDataSection() const { return XDataSection; }
+ MCSection *getSXDataSection() const { return SXDataSection; }
+
+ MCSection *getEHFrameSection() {
+ return EHFrameSection;
+ }
+
+ enum Environment { IsMachO, IsELF, IsCOFF };
+ Environment getObjectFileType() const { return Env; }
+
+ Reloc::Model getRelocM() const { return RelocM; }
+
+private:
+ Environment Env;
+ Reloc::Model RelocM;
+ CodeModel::Model CMModel;
+ MCContext *Ctx;
+ Triple TT;
+
+ void initMachOMCObjectFileInfo(Triple T);
+ void initELFMCObjectFileInfo(Triple T);
+ void initCOFFMCObjectFileInfo(Triple T);
+
+public:
+ const Triple &getTargetTriple() const { return TT; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCObjectStreamer.h b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
new file mode 100644
index 0000000..9fe2fda
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -0,0 +1,147 @@
+//===- MCObjectStreamer.h - MCStreamer Object File Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTSTREAMER_H
+#define LLVM_MC_MCOBJECTSTREAMER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class MCAssembler;
+class MCCodeEmitter;
+class MCSubtargetInfo;
+class MCExpr;
+class MCFragment;
+class MCDataFragment;
+class MCAsmBackend;
+class raw_ostream;
+class raw_pwrite_stream;
+
+/// \brief Streaming object file generation interface.
+///
+/// This class provides an implementation of the MCStreamer interface which is
+/// suitable for use with the assembler backend. Specific object file formats
+/// are expected to subclass this interface to implement directives specific
+/// to that file format or custom semantics expected by the object writer
+/// implementation.
+class MCObjectStreamer : public MCStreamer {
+ MCAssembler *Assembler;
+ MCSection::iterator CurInsertionPoint;
+ bool EmitEHFrame;
+ bool EmitDebugFrame;
+ SmallVector<MCSymbol *, 2> PendingLabels;
+
+ virtual void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo&) = 0;
+ void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override;
+ void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override;
+
+protected:
+ MCObjectStreamer(MCContext &Context, MCAsmBackend &TAB, raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter);
+ ~MCObjectStreamer() override;
+
+public:
+ /// state management
+ void reset() override;
+
+ /// Object streamers require the integrated assembler.
+ bool isIntegratedAssemblerRequired() const override { return true; }
+
+ void EmitFrames(MCAsmBackend *MAB);
+ void EmitCFISections(bool EH, bool Debug) override;
+
+protected:
+ MCFragment *getCurrentFragment() const;
+
+ void insert(MCFragment *F) {
+ flushPendingLabels(F);
+ MCSection *CurSection = getCurrentSectionOnly();
+ CurSection->getFragmentList().insert(CurInsertionPoint, F);
+ F->setParent(CurSection);
+ }
+
+ /// Get a data fragment to write into, creating a new one if the current
+ /// fragment is not a data fragment.
+ MCDataFragment *getOrCreateDataFragment();
+
+ bool changeSectionImpl(MCSection *Section, const MCExpr *Subsection);
+
+ /// If any labels have been emitted but not assigned fragments, ensure that
+ /// they get assigned, either to F if possible or to a new data fragment.
+ /// Optionally, it is also possible to provide an offset \p FOffset, which
+ /// will be used as a symbol offset within the fragment.
+ void flushPendingLabels(MCFragment *F, uint64_t FOffset = 0);
+
+public:
+ void visitUsedSymbol(const MCSymbol &Sym) override;
+
+ MCAssembler &getAssembler() { return *Assembler; }
+
+ /// \name MCStreamer Interface
+ /// @{
+
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value) override;
+ void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ SMLoc Loc = SMLoc()) override;
+ void EmitULEB128Value(const MCExpr *Value) override;
+ void EmitSLEB128Value(const MCExpr *Value) override;
+ void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) override;
+ void ChangeSection(MCSection *Section, const MCExpr *Subsection) override;
+ void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo& STI) override;
+
+ /// \brief Emit an instruction to a special fragment, because this instruction
+ /// can change its size during relaxation.
+ virtual void EmitInstToFragment(const MCInst &Inst, const MCSubtargetInfo &);
+
+ void EmitBundleAlignMode(unsigned AlignPow2) override;
+ void EmitBundleLock(bool AlignToEnd) override;
+ void EmitBundleUnlock() override;
+ void EmitBytes(StringRef Data) override;
+ void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0) override;
+ void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0) override;
+ void emitValueToOffset(const MCExpr *Offset, unsigned char Value) override;
+ void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator,
+ StringRef FileName) override;
+ void EmitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel,
+ const MCSymbol *Label,
+ unsigned PointerSize);
+ void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel,
+ const MCSymbol *Label);
+ void EmitGPRel32Value(const MCExpr *Value) override;
+ void EmitGPRel64Value(const MCExpr *Value) override;
+ bool EmitRelocDirective(const MCExpr &Offset, StringRef Name,
+ const MCExpr *Expr, SMLoc Loc) override;
+ void EmitFill(uint64_t NumBytes, uint8_t FillValue) override;
+ void FinishImpl() override;
+
+ /// Emit the absolute difference between two symbols if possible.
+ ///
+ /// Emit the absolute difference between \c Hi and \c Lo, as long as we can
+ /// compute it. Currently, that requires that both symbols are in the same
+ /// data fragment. Otherwise, do nothing.
+ ///
+ /// \pre Offset of \c Hi is greater than the offset of \c Lo.
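+ ///
+ /// For example (the streamer and label names are illustrative), a client
+ /// can emit a 4-byte length field as:
+ /// \code
+ ///   OS.emitAbsoluteSymbolDiff(HiLabel, LoLabel, /*Size=*/4);
+ /// \endcode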
+ void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size) override;
+
+ bool mayHaveInstructions(MCSection &Sec) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCObjectWriter.h b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
new file mode 100644
index 0000000..63c833a
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCObjectWriter.h
@@ -0,0 +1,203 @@
+//===-- llvm/MC/MCObjectWriter.h - Object File Writer Interface -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCOBJECTWRITER_H
+#define LLVM_MC_MCOBJECTWRITER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+namespace llvm {
+class MCAsmLayout;
+class MCAssembler;
+class MCFixup;
+class MCFragment;
+class MCSymbolRefExpr;
+class MCValue;
+
+/// Defines the object file and target independent interfaces used by the
+/// assembler backend to write native file format object files.
+///
+/// The object writer contains a few callbacks used by the assembler to allow
+/// the object writer to modify the assembler data structures at appropriate
+/// points. Once assembly is complete, the object writer is given the
+/// MCAssembler instance, which contains all the symbol and section data which
+/// should be emitted as part of writeObject().
+///
+/// The object writer also contains a number of helper methods for writing
+/// binary data to the output stream.
+class MCObjectWriter {
+ MCObjectWriter(const MCObjectWriter &) = delete;
+ void operator=(const MCObjectWriter &) = delete;
+
+ raw_pwrite_stream *OS;
+
+protected:
+ unsigned IsLittleEndian : 1;
+
+protected: // Can only create subclasses.
+ MCObjectWriter(raw_pwrite_stream &OS, bool IsLittleEndian)
+ : OS(&OS), IsLittleEndian(IsLittleEndian) {}
+
+ unsigned getInitialOffset() {
+ return OS->tell();
+ }
+
+public:
+ virtual ~MCObjectWriter();
+
+ /// lifetime management
+ virtual void reset() {}
+
+ bool isLittleEndian() const { return IsLittleEndian; }
+
+ raw_pwrite_stream &getStream() { return *OS; }
+ void setStream(raw_pwrite_stream &NewOS) { OS = &NewOS; }
+
+ /// \name High-Level API
+ /// @{
+
+ /// Perform any late binding of symbols (for example, to assign symbol
+ /// indices for use when generating relocations).
+ ///
+ /// This routine is called by the assembler after layout and relaxation is
+ /// complete.
+ virtual void executePostLayoutBinding(MCAssembler &Asm,
+ const MCAsmLayout &Layout) = 0;
+
+ /// Record a relocation entry.
+ ///
+ /// This routine is called by the assembler after layout and relaxation, and
+ /// post layout binding. The implementation is responsible for storing
+ /// information about the relocation so that it can be emitted during
+ /// writeObject().
+ virtual void recordRelocation(MCAssembler &Asm, const MCAsmLayout &Layout,
+ const MCFragment *Fragment,
+ const MCFixup &Fixup, MCValue Target,
+ bool &IsPCRel, uint64_t &FixedValue) = 0;
+
+ /// Check whether the difference (A - B) between two symbol references is
+ /// fully resolved.
+ ///
+ /// Clients are not required to answer precisely and may conservatively return
+ /// false, even when a difference is fully resolved.
+ bool isSymbolRefDifferenceFullyResolved(const MCAssembler &Asm,
+ const MCSymbolRefExpr *A,
+ const MCSymbolRefExpr *B,
+ bool InSet) const;
+
+ virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbol &A,
+ const MCSymbol &B,
+ bool InSet) const;
+
+ virtual bool isSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm,
+ const MCSymbol &SymA,
+ const MCFragment &FB,
+ bool InSet,
+ bool IsPCRel) const;
+
+ /// True if this symbol (which is a variable) is weak. This is not
+ /// just STB_WEAK, but more generally whether or not we can evaluate
+ /// past it.
+ virtual bool isWeak(const MCSymbol &Sym) const;
+
+ /// Write the object file.
+ ///
+ /// This routine is called by the assembler after layout and relaxation is
+ /// complete, fixups have been evaluated and applied, and relocations
+ /// generated.
+ virtual void writeObject(MCAssembler &Asm, const MCAsmLayout &Layout) = 0;
+
+ /// @}
+ /// \name Binary Output
+ /// @{
+
+ void write8(uint8_t Value) { *OS << char(Value); }
+
+ void writeLE16(uint16_t Value) {
+ support::endian::Writer<support::little>(*OS).write(Value);
+ }
+
+ void writeLE32(uint32_t Value) {
+ support::endian::Writer<support::little>(*OS).write(Value);
+ }
+
+ void writeLE64(uint64_t Value) {
+ support::endian::Writer<support::little>(*OS).write(Value);
+ }
+
+ void writeBE16(uint16_t Value) {
+ support::endian::Writer<support::big>(*OS).write(Value);
+ }
+
+ void writeBE32(uint32_t Value) {
+ support::endian::Writer<support::big>(*OS).write(Value);
+ }
+
+ void writeBE64(uint64_t Value) {
+ support::endian::Writer<support::big>(*OS).write(Value);
+ }
+
+ void write16(uint16_t Value) {
+ if (IsLittleEndian)
+ writeLE16(Value);
+ else
+ writeBE16(Value);
+ }
+
+ void write32(uint32_t Value) {
+ if (IsLittleEndian)
+ writeLE32(Value);
+ else
+ writeBE32(Value);
+ }
+
+ void write64(uint64_t Value) {
+ if (IsLittleEndian)
+ writeLE64(Value);
+ else
+ writeBE64(Value);
+ }
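+
+ // For example, write32(0x11223344) emits the bytes 44 33 22 11 when
+ // IsLittleEndian is set, and 11 22 33 44 otherwise.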
+
+ void WriteZeros(unsigned N) {
+ const char Zeros[16] = {0};
+
+ for (unsigned i = 0, e = N / 16; i != e; ++i)
+ *OS << StringRef(Zeros, 16);
+
+ *OS << StringRef(Zeros, N % 16);
+ }
+
+ void writeBytes(const SmallVectorImpl<char> &ByteVec,
+ unsigned ZeroFillSize = 0) {
+ writeBytes(StringRef(ByteVec.data(), ByteVec.size()), ZeroFillSize);
+ }
+
+ void writeBytes(StringRef Str, unsigned ZeroFillSize = 0) {
+ // TODO: this version may need to go away once all fragment contents are
+ // converted to SmallVector<char, N>
+ assert(
+ (ZeroFillSize == 0 || Str.size() <= ZeroFillSize) &&
+ "data size greater than fill size, unexpected large write will occur");
+ *OS << Str;
+ if (ZeroFillSize)
+ WriteZeros(ZeroFillSize - Str.size());
+ }
+
+ /// @}
+};
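+
+/// A minimal subclass sketch (the class name is illustrative; real writers,
+/// such as the Mach-O writer above, implement these hooks in full):
+/// \code
+///   class MyObjectWriter : public MCObjectWriter {
+///   public:
+///     MyObjectWriter(raw_pwrite_stream &OS)
+///         : MCObjectWriter(OS, /*IsLittleEndian=*/true) {}
+///     void executePostLayoutBinding(MCAssembler &, const MCAsmLayout &) override {}
+///     void recordRelocation(MCAssembler &, const MCAsmLayout &,
+///                           const MCFragment *, const MCFixup &, MCValue,
+///                           bool &, uint64_t &) override {}
+///     void writeObject(MCAssembler &, const MCAsmLayout &) override {
+///       write32(0xfeedface); // e.g. emit a magic header word
+///     }
+///   };
+/// \endcode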
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmCond.h b/contrib/llvm/include/llvm/MC/MCParser/AsmCond.h
new file mode 100644
index 0000000..a918b56
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmCond.h
@@ -0,0 +1,40 @@
+//===- AsmCond.h - Assembly file conditional assembly ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_ASMCOND_H
+#define LLVM_MC_MCPARSER_ASMCOND_H
+
+namespace llvm {
+
+/// AsmCond - Class to support conditional assembly
+///
+/// The conditional assembly feature (.if, .else, .elseif and .endif) is
+ /// implemented with AsmCond, which tells us what we are in the middle of
+ /// processing. Ignore can be either true or false; when true, we are ignoring
+ /// the block of code in the middle of a conditional.
+
+class AsmCond {
+public:
+ enum ConditionalAssemblyType {
+ NoCond, // no conditional is being processed
+ IfCond, // inside if conditional
+ ElseIfCond, // inside elseif conditional
+ ElseCond // inside else conditional
+ };
+
+ ConditionalAssemblyType TheCond;
+ bool CondMet;
+ bool Ignore;
+
+ AsmCond() : TheCond(NoCond), CondMet(false), Ignore(false) {}
+};
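+
+// For example, while the parser is skipping the false branch of
+//
+//   .if 0
+//     <skipped>
+//   .endif
+//
+// it holds an AsmCond with TheCond == IfCond, CondMet == false and
+// Ignore == true until the matching .endif is reached.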
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
new file mode 100644
index 0000000..1bb6d21
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/AsmLexer.h
@@ -0,0 +1,74 @@
+//===- AsmLexer.h - Lexer for Assembly Files --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class declares the lexer for assembly files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_ASMLEXER_H
+#define LLVM_MC_MCPARSER_ASMLEXER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+class MemoryBuffer;
+class MCAsmInfo;
+
+/// AsmLexer - Lexer class for assembly files.
+class AsmLexer : public MCAsmLexer {
+ const MCAsmInfo &MAI;
+
+ const char *CurPtr;
+ StringRef CurBuf;
+ bool isAtStartOfLine;
+
+ void operator=(const AsmLexer&) = delete;
+ AsmLexer(const AsmLexer&) = delete;
+
+protected:
+ /// LexToken - Read the next token and return its code.
+ AsmToken LexToken() override;
+
+public:
+ AsmLexer(const MCAsmInfo &MAI);
+ ~AsmLexer() override;
+
+ void setBuffer(StringRef Buf, const char *ptr = nullptr);
+
+ StringRef LexUntilEndOfStatement() override;
+ StringRef LexUntilEndOfLine();
+
+ size_t peekTokens(MutableArrayRef<AsmToken> Buf,
+ bool ShouldSkipSpace = true) override;
+
+ bool isAtStartOfComment(const char *Ptr);
+ bool isAtStatementSeparator(const char *Ptr);
+
+ const MCAsmInfo &getMAI() const { return MAI; }
+
+private:
+ int getNextChar();
+ AsmToken ReturnError(const char *Loc, const std::string &Msg);
+
+ AsmToken LexIdentifier();
+ AsmToken LexSlash();
+ AsmToken LexLineComment();
+ AsmToken LexDigit();
+ AsmToken LexSingleQuote();
+ AsmToken LexQuote();
+ AsmToken LexFloatLiteral();
+ AsmToken LexHexFloatLiteral(bool NoIntDigits);
+};
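+
+/// A minimal driver sketch (MAI and Buffer are assumed to be owned by the
+/// caller):
+/// \code
+///   AsmLexer Lexer(MAI);
+///   Lexer.setBuffer(Buffer->getBuffer());
+///   while (Lexer.Lex().isNot(AsmToken::Eof)) {
+///     // inspect Lexer.getTok() here
+///   }
+/// \endcode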
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
new file mode 100644
index 0000000..55279f4
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -0,0 +1,217 @@
+//===-- llvm/MC/MCAsmLexer.h - Abstract Asm Lexer Interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMLEXER_H
+#define LLVM_MC_MCPARSER_MCASMLEXER_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+
+/// Target independent representation for an assembler token.
+class AsmToken {
+public:
+ enum TokenKind {
+ // Markers
+ Eof, Error,
+
+ // String values.
+ Identifier,
+ String,
+
+ // Integer values.
+ Integer,
+ BigNum, // larger than 64 bits
+
+ // Real values.
+ Real,
+
+ // No-value.
+ EndOfStatement,
+ Colon,
+ Space,
+ Plus, Minus, Tilde,
+ Slash, // '/'
+ BackSlash, // '\'
+ LParen, RParen, LBrac, RBrac, LCurly, RCurly,
+ Star, Dot, Comma, Dollar, Equal, EqualEqual,
+
+ Pipe, PipePipe, Caret,
+ Amp, AmpAmp, Exclaim, ExclaimEqual, Percent, Hash,
+ Less, LessEqual, LessLess, LessGreater,
+ Greater, GreaterEqual, GreaterGreater, At
+ };
+
+private:
+ TokenKind Kind;
+
+ /// A reference to the entire token contents; this is always a pointer into
+ /// a memory buffer owned by the source manager.
+ StringRef Str;
+
+ APInt IntVal;
+
+public:
+ AsmToken() {}
+ AsmToken(TokenKind Kind, StringRef Str, APInt IntVal)
+ : Kind(Kind), Str(Str), IntVal(IntVal) {}
+ AsmToken(TokenKind Kind, StringRef Str, int64_t IntVal = 0)
+ : Kind(Kind), Str(Str), IntVal(64, IntVal, true) {}
+
+ TokenKind getKind() const { return Kind; }
+ bool is(TokenKind K) const { return Kind == K; }
+ bool isNot(TokenKind K) const { return Kind != K; }
+
+ SMLoc getLoc() const;
+ SMLoc getEndLoc() const;
+ SMRange getLocRange() const;
+
+ /// Get the contents of a string token (without quotes).
+ StringRef getStringContents() const {
+ assert(Kind == String && "This token isn't a string!");
+ return Str.slice(1, Str.size() - 1);
+ }
+
+ /// Get the identifier string for the current token, which should be an
+ /// identifier or a string. This gets the portion of the string which should
+ /// be used as the identifier, e.g., it does not include the quotes on
+ /// strings.
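+ /// For example, for the quoted token "foo" this returns foo without the
+ /// quotes, whereas getString() returns the full token text including them.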
+ StringRef getIdentifier() const {
+ if (Kind == Identifier)
+ return getString();
+ return getStringContents();
+ }
+
+ /// Get the string for the current token, this includes all characters (for
+ /// example, the quotes on strings) in the token.
+ ///
+ /// The returned StringRef points into the source manager's memory buffer, and
+ /// is safe to store across calls to Lex().
+ StringRef getString() const { return Str; }
+
+ // FIXME: Don't compute this in advance, it makes every token larger, and is
+ // also not generally what we want (it is nicer for recovery etc. to lex 123br
+ // as a single token, then diagnose as an invalid number).
+ int64_t getIntVal() const {
+ assert(Kind == Integer && "This token isn't an integer!");
+ return IntVal.getZExtValue();
+ }
+
+ APInt getAPIntVal() const {
+ assert((Kind == Integer || Kind == BigNum) &&
+ "This token isn't an integer!");
+ return IntVal;
+ }
+};
+
+/// Generic assembler lexer interface, for use by target specific assembly
+/// lexers.
+class MCAsmLexer {
+ /// The current token, stored in the base class for faster access.
+ SmallVector<AsmToken, 1> CurTok;
+
+ /// The location and description of the current error
+ SMLoc ErrLoc;
+ std::string Err;
+
+ MCAsmLexer(const MCAsmLexer &) = delete;
+ void operator=(const MCAsmLexer &) = delete;
+protected: // Can only create subclasses.
+ const char *TokStart;
+ bool SkipSpace;
+ bool AllowAtInIdentifier;
+
+ MCAsmLexer();
+
+ virtual AsmToken LexToken() = 0;
+
+ void SetError(SMLoc errLoc, const std::string &err) {
+ ErrLoc = errLoc;
+ Err = err;
+ }
+
+public:
+ virtual ~MCAsmLexer();
+
+ /// Consume the next token from the input stream and return it.
+ ///
+ /// The lexer will continuously return the end-of-file token once the end of
+ /// the main input file has been reached.
+ const AsmToken &Lex() {
+ assert(!CurTok.empty());
+ CurTok.erase(CurTok.begin());
+ if (CurTok.empty())
+ CurTok.emplace_back(LexToken());
+ return CurTok.front();
+ }
+
+ void UnLex(AsmToken const &Token) {
+ CurTok.insert(CurTok.begin(), Token);
+ }
+
+ virtual StringRef LexUntilEndOfStatement() = 0;
+
+ /// Get the current source location.
+ SMLoc getLoc() const;
+
+ /// Get the current (last) lexed token.
+ const AsmToken &getTok() const {
+ return CurTok[0];
+ }
+
+ /// Look ahead at the next token to be lexed.
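+ /// For example:
+ /// \code
+ ///   if (Lexer.peekTok().is(AsmToken::Comma))
+ ///     Lexer.Lex(); // consume the comma only after peeking
+ /// \endcode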
+ const AsmToken peekTok(bool ShouldSkipSpace = true) {
+ AsmToken Tok;
+
+ MutableArrayRef<AsmToken> Buf(Tok);
+ size_t ReadCount = peekTokens(Buf, ShouldSkipSpace);
+
+ assert(ReadCount == 1);
+ (void)ReadCount;
+
+ return Tok;
+ }
+
+ /// Look ahead an arbitrary number of tokens.
+ virtual size_t peekTokens(MutableArrayRef<AsmToken> Buf,
+ bool ShouldSkipSpace = true) = 0;
+
+ /// Get the current error location
+ SMLoc getErrLoc() {
+ return ErrLoc;
+ }
+
+ /// Get the current error string
+ const std::string &getErr() {
+ return Err;
+ }
+
+ /// Get the kind of current token.
+ AsmToken::TokenKind getKind() const { return getTok().getKind(); }
+
+ /// Check if the current token has kind \p K.
+ bool is(AsmToken::TokenKind K) const { return getTok().is(K); }
+
+ /// Check if the current token has kind \p K.
+ bool isNot(AsmToken::TokenKind K) const { return getTok().isNot(K); }
+
+ /// Set whether spaces should be ignored by the lexer
+ void setSkipSpace(bool val) { SkipSpace = val; }
+
+ bool getAllowAtInIdentifier() { return AllowAtInIdentifier; }
+ void setAllowAtInIdentifier(bool v) { AllowAtInIdentifier = v; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
new file mode 100644
index 0000000..ac8706d
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParser.h
@@ -0,0 +1,219 @@
+//===-- llvm/MC/MCAsmParser.h - Abstract Asm Parser Interface ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSER_H
+#define LLVM_MC_MCPARSER_MCASMPARSER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCParser/AsmLexer.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class MCAsmInfo;
+class MCAsmLexer;
+class MCAsmParserExtension;
+class MCContext;
+class MCExpr;
+class MCInstPrinter;
+class MCInstrInfo;
+class MCStreamer;
+class MCTargetAsmParser;
+class SMLoc;
+class SMRange;
+class SourceMgr;
+class Twine;
+
+class InlineAsmIdentifierInfo {
+public:
+ void *OpDecl;
+ bool IsVarDecl;
+ unsigned Length, Size, Type;
+
+ void clear() {
+ OpDecl = nullptr;
+ IsVarDecl = false;
+ Length = 1;
+ Size = 0;
+ Type = 0;
+ }
+};
+
+/// \brief Generic Sema callback for assembly parser.
+class MCAsmParserSemaCallback {
+public:
+ virtual ~MCAsmParserSemaCallback();
+ virtual void *LookupInlineAsmIdentifier(StringRef &LineBuf,
+ InlineAsmIdentifierInfo &Info,
+ bool IsUnevaluatedContext) = 0;
+ virtual StringRef LookupInlineAsmLabel(StringRef Identifier, SourceMgr &SM,
+ SMLoc Location, bool Create) = 0;
+
+ virtual bool LookupInlineAsmField(StringRef Base, StringRef Member,
+ unsigned &Offset) = 0;
+};
+
+/// \brief Generic assembler parser interface, for use by target specific
+/// assembly parsers.
+class MCAsmParser {
+public:
+ typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc);
+ typedef std::pair<MCAsmParserExtension*, DirectiveHandler>
+ ExtensionDirectiveHandler;
+
+private:
+ MCAsmParser(const MCAsmParser &) = delete;
+ void operator=(const MCAsmParser &) = delete;
+
+ MCTargetAsmParser *TargetParser;
+
+ unsigned ShowParsedOperands : 1;
+
+protected: // Can only create subclasses.
+ MCAsmParser();
+
+public:
+ virtual ~MCAsmParser();
+
+ virtual void addDirectiveHandler(StringRef Directive,
+ ExtensionDirectiveHandler Handler) = 0;
+
+ virtual void addAliasForDirective(StringRef Directive, StringRef Alias) = 0;
+
+ virtual SourceMgr &getSourceManager() = 0;
+
+ virtual MCAsmLexer &getLexer() = 0;
+ const MCAsmLexer &getLexer() const {
+ return const_cast<MCAsmParser*>(this)->getLexer();
+ }
+
+ virtual MCContext &getContext() = 0;
+
+ /// \brief Return the output streamer for the assembler.
+ virtual MCStreamer &getStreamer() = 0;
+
+ MCTargetAsmParser &getTargetParser() const { return *TargetParser; }
+ void setTargetParser(MCTargetAsmParser &P);
+
+ virtual unsigned getAssemblerDialect() { return 0;}
+ virtual void setAssemblerDialect(unsigned i) { }
+
+ bool getShowParsedOperands() const { return ShowParsedOperands; }
+ void setShowParsedOperands(bool Value) { ShowParsedOperands = Value; }
+
+ /// \brief Run the parser on the input source buffer.
+ virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0;
+
+ virtual void setParsingInlineAsm(bool V) = 0;
+ virtual bool isParsingInlineAsm() = 0;
+
+ /// \brief Parse MS-style inline assembly.
+ virtual bool parseMSInlineAsm(
+ void *AsmLoc, std::string &AsmString, unsigned &NumOutputs,
+ unsigned &NumInputs, SmallVectorImpl<std::pair<void *, bool>> &OpDecls,
+ SmallVectorImpl<std::string> &Constraints,
+ SmallVectorImpl<std::string> &Clobbers, const MCInstrInfo *MII,
+ const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) = 0;
+
+ /// \brief Emit a note at the location \p L, with the message \p Msg.
+ virtual void Note(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) = 0;
+
+ /// \brief Emit a warning at the location \p L, with the message \p Msg.
+ ///
+ /// \return The return value is true, if warnings are fatal.
+ virtual bool Warning(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) = 0;
+
+ /// \brief Emit an error at the location \p L, with the message \p Msg.
+ ///
+ /// \return The return value is always true, as an idiomatic convenience to
+ /// clients.
+ virtual bool Error(SMLoc L, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None) = 0;
+
+ /// \brief Get the next AsmToken in the stream, possibly handling file
+ /// inclusion first.
+ virtual const AsmToken &Lex() = 0;
+
+ /// \brief Get the current AsmToken from the stream.
+ const AsmToken &getTok() const;
+
+ /// \brief Report an error at the current lexer location.
+ bool TokError(const Twine &Msg, ArrayRef<SMRange> Ranges = None);
+
+ /// \brief Parse an identifier or string (as a quoted identifier) and set \p
+ /// Res to the identifier contents.
+ virtual bool parseIdentifier(StringRef &Res) = 0;
+
+ /// \brief Parse up to the end of statement and return the contents from the
+ /// current token until the end of the statement; the current token on exit
+ /// will be either the EndOfStatement or EOF.
+ virtual StringRef parseStringToEndOfStatement() = 0;
+
+ /// \brief Parse the current token as a string which may include escaped
+ /// characters and return the string contents.
+ virtual bool parseEscapedString(std::string &Data) = 0;
+
+ /// \brief Skip to the end of the current statement, for error recovery.
+ virtual void eatToEndOfStatement() = 0;
+
+ /// \brief Parse an arbitrary expression.
+ ///
+ /// \param Res - The value of the expression. The result is undefined
+ /// on error.
+ /// \return - False on success.
+ virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+ bool parseExpression(const MCExpr *&Res);
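+
+ /// Typical use of parseExpression (note the convention: a true return
+ /// signals an error):
+ /// \code
+ ///   const MCExpr *Value;
+ ///   if (Parser.parseExpression(Value))
+ ///     return true; // error was already reported
+ /// \endcode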
+
+ /// \brief Parse a primary expression.
+ ///
+ /// \param Res - The value of the expression. The result is undefined
+ /// on error.
+ /// \return - False on success.
+ virtual bool parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+
+ /// \brief Parse an arbitrary expression, assuming that an initial '(' has
+ /// already been consumed.
+ ///
+ /// \param Res - The value of the expression. The result is undefined
+ /// on error.
+ /// \return - False on success.
+ virtual bool parseParenExpression(const MCExpr *&Res, SMLoc &EndLoc) = 0;
+
+ /// \brief Parse an expression which must evaluate to an absolute value.
+ ///
+ /// \param Res - The value of the absolute expression. The result is undefined
+ /// on error.
+ /// \return - False on success.
+ virtual bool parseAbsoluteExpression(int64_t &Res) = 0;
+
+ /// \brief Ensure that we have a valid section set in the streamer. Otherwise,
+ /// report an error and switch to .text.
+ virtual void checkForValidSection() = 0;
+
+ /// \brief Parse an arbitrary expression of a specified parenthesis depth,
+ /// assuming that the initial '(' characters have already been consumed.
+ ///
+ /// \param ParenDepth - Specifies how many trailing expressions outside the
+ /// current parentheses we have to parse.
+ /// \param Res - The value of the expression. The result is undefined
+ /// on error.
+ /// \return - False on success.
+ virtual bool parseParenExprOfDepth(unsigned ParenDepth, const MCExpr *&Res,
+ SMLoc &EndLoc) = 0;
+};
+
+/// \brief Create an MCAsmParser instance.
+MCAsmParser *createMCAsmParser(SourceMgr &, MCContext &, MCStreamer &,
+ const MCAsmInfo &);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
new file mode 100644
index 0000000..30b25dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserExtension.h
@@ -0,0 +1,92 @@
+//===-- llvm/MC/MCAsmParserExtension.h - Asm Parser Hooks -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
+#define LLVM_MC_MCPARSER_MCASMPARSEREXTENSION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+class Twine;
+
+/// \brief Generic interface for extending the MCAsmParser,
+/// which is implemented by target and object file assembly parser
+/// implementations.
+class MCAsmParserExtension {
+ MCAsmParserExtension(const MCAsmParserExtension &) = delete;
+ void operator=(const MCAsmParserExtension &) = delete;
+
+ MCAsmParser *Parser;
+
+protected:
+ MCAsmParserExtension();
+
+ // Helper template for implementing static dispatch functions.
+ template<typename T, bool (T::*Handler)(StringRef, SMLoc)>
+ static bool HandleDirective(MCAsmParserExtension *Target,
+ StringRef Directive,
+ SMLoc DirectiveLoc) {
+ T *Obj = static_cast<T*>(Target);
+ return (Obj->*Handler)(Directive, DirectiveLoc);
+ }
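+
+ // A registration sketch (MyParser and ParseDirectiveFoo are illustrative):
+ //
+ //   getParser().addDirectiveHandler(
+ //       ".foo",
+ //       std::make_pair(static_cast<MCAsmParserExtension *>(this),
+ //                      HandleDirective<MyParser, &MyParser::ParseDirectiveFoo>));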
+
+ bool BracketExpressionsSupported;
+
+public:
+ virtual ~MCAsmParserExtension();
+
+ /// \brief Initialize the extension for parsing using the given \p Parser.
+ /// The extension should use the AsmParser interfaces to register its
+ /// parsing routines.
+ virtual void Initialize(MCAsmParser &Parser);
+
+ /// \name MCAsmParser Proxy Interfaces
+ /// @{
+
+ MCContext &getContext() { return getParser().getContext(); }
+
+ MCAsmLexer &getLexer() { return getParser().getLexer(); }
+ const MCAsmLexer &getLexer() const {
+ return const_cast<MCAsmParserExtension *>(this)->getLexer();
+ }
+
+ MCAsmParser &getParser() { return *Parser; }
+ const MCAsmParser &getParser() const {
+ return const_cast<MCAsmParserExtension*>(this)->getParser();
+ }
+
+ SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
+ MCStreamer &getStreamer() { return getParser().getStreamer(); }
+ bool Warning(SMLoc L, const Twine &Msg) {
+ return getParser().Warning(L, Msg);
+ }
+ bool Error(SMLoc L, const Twine &Msg) {
+ return getParser().Error(L, Msg);
+ }
+ void Note(SMLoc L, const Twine &Msg) {
+ getParser().Note(L, Msg);
+ }
+ bool TokError(const Twine &Msg) {
+ return getParser().TokError(Msg);
+ }
+
+ const AsmToken &Lex() { return getParser().Lex(); }
+
+ const AsmToken &getTok() { return getParser().getTok(); }
+
+ bool HasBracketExpressions() const { return BracketExpressionsSupported; }
+
+ /// @}
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserUtils.h b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserUtils.h
new file mode 100644
index 0000000..9834fe9
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCAsmParserUtils.h
@@ -0,0 +1,33 @@
+//===------ llvm/MC/MCAsmParserUtils.h - Asm Parser Utilities ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
+#define LLVM_MC_MCPARSER_MCASMPARSERUTILS_H
+
+namespace llvm {
+
+class MCAsmParser;
+class MCExpr;
+class MCSymbol;
+class StringRef;
+
+namespace MCParserUtils {
+
+/// Parse a value expression and return whether it can be assigned to a symbol
+/// with the given name.
+///
+/// On success, returns false and sets the Symbol and Value output parameters.
+bool parseAssignmentExpression(StringRef Name, bool allow_redef,
+ MCAsmParser &Parser, MCSymbol *&Symbol,
+ const MCExpr *&Value);
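+
+/// For example, after the parser has consumed "foo =", a caller might write
+/// (false means success, matching the convention above):
+/// \code
+///   MCSymbol *Sym;
+///   const MCExpr *Val;
+///   if (MCParserUtils::parseAssignmentExpression("foo", /*allow_redef=*/false,
+///                                                Parser, Sym, Val))
+///     return true; // error
+/// \endcode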
+
+} // namespace MCParserUtils
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
new file mode 100644
index 0000000..a90d280
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCParser/MCParsedAsmOperand.h
@@ -0,0 +1,98 @@
+//===-- llvm/MC/MCParsedAsmOperand.h - Asm Parser Operand -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
+#define LLVM_MC_MCPARSER_MCPARSEDASMOPERAND_H
+
+#include <string>
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/SMLoc.h"
+
+namespace llvm {
+class raw_ostream;
+
+/// MCParsedAsmOperand - This abstract class represents a source-level assembly
+/// instruction operand. It should be subclassed by target-specific code. This
+/// base class is used by target-independent clients and is the interface
+/// between parsing an asm instruction and recognizing it.
+class MCParsedAsmOperand {
+ /// MCOperandNum - The corresponding MCInst operand number. Only valid when
+ /// parsing MS-style inline assembly.
+ unsigned MCOperandNum;
+
+ /// Constraint - The constraint on this operand. Only valid when parsing
+ /// MS-style inline assembly.
+ std::string Constraint;
+
+protected:
+ // This only seems to need to be movable (by ARMOperand) but ARMOperand has
+ // lots of members and MSVC doesn't support defaulted move ops, so to avoid
+ // that verbosity, just rely on defaulted copy ops. It's only the Constraint
+ // string member that would benefit from movement anyway.
+ MCParsedAsmOperand(const MCParsedAsmOperand &RHS) = default;
+ MCParsedAsmOperand &operator=(const MCParsedAsmOperand &) = default;
+ MCParsedAsmOperand() = default;
+
+public:
+ virtual ~MCParsedAsmOperand() {}
+
+ void setConstraint(StringRef C) { Constraint = C.str(); }
+ StringRef getConstraint() { return Constraint; }
+
+ void setMCOperandNum (unsigned OpNum) { MCOperandNum = OpNum; }
+ unsigned getMCOperandNum() { return MCOperandNum; }
+
+ virtual StringRef getSymName() { return StringRef(); }
+ virtual void *getOpDecl() { return nullptr; }
+
+ /// isToken - Is this a token operand?
+ virtual bool isToken() const = 0;
+ /// isImm - Is this an immediate operand?
+ virtual bool isImm() const = 0;
+ /// isReg - Is this a register operand?
+ virtual bool isReg() const = 0;
+ virtual unsigned getReg() const = 0;
+
+ /// isMem - Is this a memory operand?
+ virtual bool isMem() const = 0;
+
+ /// getStartLoc - Get the location of the first token of this operand.
+ virtual SMLoc getStartLoc() const = 0;
+ /// getEndLoc - Get the location of the last token of this operand.
+ virtual SMLoc getEndLoc() const = 0;
+
+ /// needAddressOf - Do we need to emit code to get the address of the
+ /// variable/label? Only valid when parsing MS-style inline assembly.
+ virtual bool needAddressOf() const { return false; }
+
+ /// isOffsetOf - Do we need to emit code to get the offset of the variable,
+ /// rather than the value of the variable? Only valid when parsing MS-style
+ /// inline assembly.
+ virtual bool isOffsetOf() const { return false; }
+
+ /// getOffsetOfLoc - Get the location of the offset operator.
+ virtual SMLoc getOffsetOfLoc() const { return SMLoc(); }
+
+ /// print - Print a debug representation of the operand to the given stream.
+ virtual void print(raw_ostream &OS) const = 0;
+ /// dump - Print to the debug stream.
+ virtual void dump() const;
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MCParsedAsmOperand &MO) {
+ MO.print(OS);
+ return OS;
+}
+
+} // end namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCRegisterInfo.h b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
new file mode 100644
index 0000000..a4d5e08
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCRegisterInfo.h
@@ -0,0 +1,689 @@
+//=== MC/MCRegisterInfo.h - Target Register Description ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes an abstract interface used to get information about a
+// target machine's register file. This information is used for a variety of
+// purposes, especially register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCREGISTERINFO_H
+#define LLVM_MC_MCREGISTERINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace llvm {
+
+/// An unsigned integer type large enough to represent all physical registers,
+/// but not necessarily virtual registers.
+typedef uint16_t MCPhysReg;
+
+/// MCRegisterClass - Base class of TargetRegisterClass.
+class MCRegisterClass {
+public:
+ typedef const MCPhysReg* iterator;
+ typedef const MCPhysReg* const_iterator;
+
+ const iterator RegsBegin;
+ const uint8_t *const RegSet;
+ const uint32_t NameIdx;
+ const uint16_t RegsSize;
+ const uint16_t RegSetSize;
+ const uint16_t ID;
+ const uint16_t RegSize, Alignment; // Size & Alignment of register in bytes
+ const int8_t CopyCost;
+ const bool Allocatable;
+
+ /// getID() - Return the register class ID number.
+ ///
+ unsigned getID() const { return ID; }
+
+ /// begin/end - Return all of the registers in this class.
+ ///
+ iterator begin() const { return RegsBegin; }
+ iterator end() const { return RegsBegin + RegsSize; }
+
+ /// getNumRegs - Return the number of registers in this class.
+ ///
+ unsigned getNumRegs() const { return RegsSize; }
+
+ /// getRegister - Return the specified register in the class.
+ ///
+ unsigned getRegister(unsigned i) const {
+ assert(i < getNumRegs() && "Register number out of range!");
+ return RegsBegin[i];
+ }
+
+ /// contains - Return true if the specified register is included in this
+ /// register class. This does not include virtual registers.
+ bool contains(unsigned Reg) const {
+ unsigned InByte = Reg % 8;
+ unsigned Byte = Reg / 8;
+ if (Byte >= RegSetSize)
+ return false;
+ return (RegSet[Byte] & (1 << InByte)) != 0;
+ }
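+
+ // For example, contains(10) tests bit 2 of RegSet[1], since 10 == 8*1 + 2.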
+
+ /// contains - Return true if both registers are in this class.
+ bool contains(unsigned Reg1, unsigned Reg2) const {
+ return contains(Reg1) && contains(Reg2);
+ }
+
+ /// getSize - Return the size of the register in bytes, which is also the size
+ /// of a stack slot allocated to hold a spilled copy of this register.
+ unsigned getSize() const { return RegSize; }
+
+ /// getAlignment - Return the minimum required alignment for a register of
+ /// this class.
+ unsigned getAlignment() const { return Alignment; }
+
+ /// getCopyCost - Return the cost of copying a value between two registers in
+ /// this class. A negative number means the register class is very expensive
+ /// to copy e.g. status flag register classes.
+ int getCopyCost() const { return CopyCost; }
+
+ /// isAllocatable - Return true if this register class may be used to create
+ /// virtual registers.
+ bool isAllocatable() const { return Allocatable; }
+};
+
+/// MCRegisterDesc - This record contains information about a particular
+/// register. The SubRegs field is a zero terminated array of registers that
+/// are sub-registers of the specific register, e.g. AL, AH are sub-registers
+/// of AX. The SuperRegs field is a zero terminated array of registers that are
+/// super-registers of the specific register, e.g. RAX, EAX, are
+/// super-registers of AX.
+///
+struct MCRegisterDesc {
+ uint32_t Name; // Printable name for the reg (for debugging)
+ uint32_t SubRegs; // Sub-register set, described above
+ uint32_t SuperRegs; // Super-register set, described above
+
+ // Offset into MCRI::SubRegIndices of a list of sub-register indices for each
+ // sub-register in SubRegs.
+ uint32_t SubRegIndices;
+
+ // RegUnits - Points to the list of register units. The low 4 bits holds the
+ // Scale, the high bits hold an offset into DiffLists. See MCRegUnitIterator.
+ uint32_t RegUnits;
+
+ /// Index into list with lane mask sequences. The sequence contains a lanemask
+ /// for every register unit.
+ uint16_t RegUnitLaneMasks;
+};
+
+/// MCRegisterInfo base class - We assume that the target defines a static
+/// array of MCRegisterDesc objects that represent all of the machine
+/// registers that the target has. As such, we simply have to track a pointer
+/// to this array so that we can turn register number into a register
+/// descriptor.
+///
+/// Note this class is designed to be a base class of TargetRegisterInfo, which
+/// is the interface used by codegen. However, specific targets *should never*
+/// specialize this class. MCRegisterInfo should only contain getters to access
+/// TableGen generated physical register data. It must not be extended with
+/// virtual methods.
+///
+class MCRegisterInfo {
+public:
+ typedef const MCRegisterClass *regclass_iterator;
+
+ /// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
+ /// performed with a binary search.
+ struct DwarfLLVMRegPair {
+ unsigned FromReg;
+ unsigned ToReg;
+
+ bool operator<(DwarfLLVMRegPair RHS) const { return FromReg < RHS.FromReg; }
+ };
+
+ /// SubRegCoveredBits - Emitted by tablegen: bit range covered by a subreg
+ /// index; a value of -1 in either field marks the range as invalid.
+ struct SubRegCoveredBits {
+ uint16_t Offset;
+ uint16_t Size;
+ };
+private:
+ const MCRegisterDesc *Desc; // Pointer to the descriptor array
+ unsigned NumRegs; // Number of entries in the array
+ unsigned RAReg; // Return address register
+ unsigned PCReg; // Program counter register
+ const MCRegisterClass *Classes; // Pointer to the regclass array
+ unsigned NumClasses; // Number of entries in the array
+ unsigned NumRegUnits; // Number of regunits.
+ const MCPhysReg (*RegUnitRoots)[2]; // Pointer to regunit root table.
+ const MCPhysReg *DiffLists; // Pointer to the difflists array
+ const unsigned *RegUnitMaskSequences; // Pointer to lane mask sequences
+ // for register units.
+ const char *RegStrings; // Pointer to the string table.
+ const char *RegClassStrings; // Pointer to the class strings.
+ const uint16_t *SubRegIndices; // Pointer to the subreg lookup
+ // array.
+ const SubRegCoveredBits *SubRegIdxRanges; // Pointer to the subreg covered
+ // bit ranges array.
+ unsigned NumSubRegIndices; // Number of subreg indices.
+ const uint16_t *RegEncodingTable; // Pointer to array of register
+ // encodings.
+
+ unsigned L2DwarfRegsSize;
+ unsigned EHL2DwarfRegsSize;
+ unsigned Dwarf2LRegsSize;
+ unsigned EHDwarf2LRegsSize;
+ const DwarfLLVMRegPair *L2DwarfRegs; // LLVM to Dwarf regs mapping
+ const DwarfLLVMRegPair *EHL2DwarfRegs; // LLVM to Dwarf regs mapping EH
+ const DwarfLLVMRegPair *Dwarf2LRegs; // Dwarf to LLVM regs mapping
+ const DwarfLLVMRegPair *EHDwarf2LRegs; // Dwarf to LLVM regs mapping EH
+ DenseMap<unsigned, int> L2SEHRegs; // LLVM to SEH regs mapping
+
+public:
+ /// DiffListIterator - Base iterator class that can traverse the
+ /// differentially encoded register and regunit lists in DiffLists.
+ /// Don't use this class directly, use one of the specialized sub-classes
+ /// defined below.
+ class DiffListIterator {
+ uint16_t Val;
+ const MCPhysReg *List;
+
+ protected:
+ /// Create an invalid iterator. Call init() to point to something useful.
+ DiffListIterator() : Val(0), List(nullptr) {}
+
+ /// init - Point the iterator to InitVal, decoding subsequent values from
+ /// DiffList. The iterator will initially point to InitVal; sub-classes are
+ /// responsible for skipping the seed value if it is not part of the list.
+ void init(MCPhysReg InitVal, const MCPhysReg *DiffList) {
+ Val = InitVal;
+ List = DiffList;
+ }
+
+ /// advance - Move to the next list position and return the applied
+ /// differential. This function does not detect the end of the list; that
+ /// is the caller's responsibility (by checking for a 0 return value).
+ unsigned advance() {
+ assert(isValid() && "Cannot move off the end of the list.");
+ MCPhysReg D = *List++;
+ Val += D;
+ return D;
+ }
+
+ public:
+
+ /// isValid - returns true if this iterator is not yet at the end.
+ bool isValid() const { return List; }
+
+ /// Dereference the iterator to get the value at the current position.
+ unsigned operator*() const { return Val; }
+
+ /// Pre-increment to move to the next position.
+ void operator++() {
+ // The end of the list is encoded as a 0 differential.
+ if (!advance())
+ List = nullptr;
+ }
+ };
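+
+ // Illustration of the differential encoding (numbers hypothetical): starting
+ // from a seed value of 42, the DiffList {+1, +1, -3, 0} decodes to the
+ // sequence 42, 43, 44, 41; the trailing 0 differential terminates the list.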
+
+ // These iterators are allowed to sub-class DiffListIterator and access
+ // internal list pointers.
+ friend class MCSubRegIterator;
+ friend class MCSubRegIndexIterator;
+ friend class MCSuperRegIterator;
+ friend class MCRegUnitIterator;
+ friend class MCRegUnitMaskIterator;
+ friend class MCRegUnitRootIterator;
+
+ /// \brief Initialize MCRegisterInfo, called by TableGen
+ /// auto-generated routines. *DO NOT USE*.
+ void InitMCRegisterInfo(const MCRegisterDesc *D, unsigned NR, unsigned RA,
+ unsigned PC,
+ const MCRegisterClass *C, unsigned NC,
+ const MCPhysReg (*RURoots)[2],
+ unsigned NRU,
+ const MCPhysReg *DL,
+ const unsigned *RUMS,
+ const char *Strings,
+ const char *ClassStrings,
+ const uint16_t *SubIndices,
+ unsigned NumIndices,
+ const SubRegCoveredBits *SubIdxRanges,
+ const uint16_t *RET) {
+ Desc = D;
+ NumRegs = NR;
+ RAReg = RA;
+ PCReg = PC;
+ Classes = C;
+ DiffLists = DL;
+ RegUnitMaskSequences = RUMS;
+ RegStrings = Strings;
+ RegClassStrings = ClassStrings;
+ NumClasses = NC;
+ RegUnitRoots = RURoots;
+ NumRegUnits = NRU;
+ SubRegIndices = SubIndices;
+ NumSubRegIndices = NumIndices;
+ SubRegIdxRanges = SubIdxRanges;
+ RegEncodingTable = RET;
+ }
+
+ /// \brief Used to initialize LLVM register to Dwarf
+ /// register number mapping. Called by TableGen auto-generated routines.
+ /// *DO NOT USE*.
+ void mapLLVMRegsToDwarfRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+ bool isEH) {
+ if (isEH) {
+ EHL2DwarfRegs = Map;
+ EHL2DwarfRegsSize = Size;
+ } else {
+ L2DwarfRegs = Map;
+ L2DwarfRegsSize = Size;
+ }
+ }
+
+ /// \brief Used to initialize Dwarf register to LLVM
+ /// register number mapping. Called by TableGen auto-generated routines.
+ /// *DO NOT USE*.
+ void mapDwarfRegsToLLVMRegs(const DwarfLLVMRegPair *Map, unsigned Size,
+ bool isEH) {
+ if (isEH) {
+ EHDwarf2LRegs = Map;
+ EHDwarf2LRegsSize = Size;
+ } else {
+ Dwarf2LRegs = Map;
+ Dwarf2LRegsSize = Size;
+ }
+ }
+
+ /// mapLLVMRegToSEHReg - Used to initialize LLVM register to SEH register
+ /// number mapping. By default the SEH register number is just the same
+ /// as the LLVM register number.
+ /// FIXME: TableGen these numbers. Currently this requires target specific
+ /// initialization code.
+ void mapLLVMRegToSEHReg(unsigned LLVMReg, int SEHReg) {
+ L2SEHRegs[LLVMReg] = SEHReg;
+ }
+
+ /// \brief This method should return the register where the return
+ /// address can be found.
+ unsigned getRARegister() const {
+ return RAReg;
+ }
+
+ /// Return the register which is the program counter.
+ unsigned getProgramCounter() const {
+ return PCReg;
+ }
+
+ const MCRegisterDesc &operator[](unsigned RegNo) const {
+ assert(RegNo < NumRegs &&
+ "Attempting to access record for invalid register number!");
+ return Desc[RegNo];
+ }
+
+ /// \brief Provide a get method, equivalent to [], but more useful with a
+ /// pointer to this object.
+ const MCRegisterDesc &get(unsigned RegNo) const {
+ return operator[](RegNo);
+ }
+
+ /// \brief Returns the physical register number of the sub-register at index
+ /// Idx for physical register Reg. Returns zero if the sub-register does not
+ /// exist.
+ unsigned getSubReg(unsigned Reg, unsigned Idx) const;
+
+ /// \brief Return a super-register of the specified register Reg such that
+ /// its sub-register at index SubIdx is Reg.
+ unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+ const MCRegisterClass *RC) const;
+
+ /// \brief For a given register pair, return the sub-register index
+ /// if the second register is a sub-register of the first. Return zero
+ /// otherwise.
+ unsigned getSubRegIndex(unsigned RegNo, unsigned SubRegNo) const;
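+
+ // As an illustration (X86 register and sub-register index names assumed):
+ // getSubReg(X86::RAX, X86::sub_32bit) returns X86::EAX, and
+ // getSubRegIndex(X86::RAX, X86::EAX) returns X86::sub_32bit.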
+
+ /// \brief Get the size of the bit range covered by a sub-register index.
+ /// If the index isn't contiguous, return the sum of the sizes of its parts.
+ /// If the index is used to access subregisters of different sizes, return -1.
+ unsigned getSubRegIdxSize(unsigned Idx) const;
+
+ /// \brief Get the offset of the bit range covered by a sub-register index.
+ /// If an offset doesn't make sense (the index isn't contiguous, or is used to
+ /// access sub-registers at different offsets), return -1.
+
+ /// \brief Return the human-readable symbolic target-specific name for the
+ /// specified physical register.
+ const char *getName(unsigned RegNo) const {
+ return RegStrings + get(RegNo).Name;
+ }
+
+ /// \brief Return the number of registers this target has (useful for
+ /// sizing arrays holding per register information)
+ unsigned getNumRegs() const {
+ return NumRegs;
+ }
+
+ /// \brief Return the number of sub-register indices
+ /// understood by the target. Index 0 is reserved for the no-op sub-register,
+ /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers.
+ unsigned getNumSubRegIndices() const {
+ return NumSubRegIndices;
+ }
+
+ /// \brief Return the number of (native) register units in the
+ /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They
+ /// can be accessed through MCRegUnitIterator defined below.
+ unsigned getNumRegUnits() const {
+ return NumRegUnits;
+ }
+
+ /// \brief Map a target register to an equivalent dwarf register
+ /// number. Returns -1 if there is no equivalent value. The second
+ /// parameter allows targets to use different numberings for EH info and
+ /// debugging info.
+ int getDwarfRegNum(unsigned RegNum, bool isEH) const;
+
+ /// \brief Map a dwarf register back to a target register.
+ int getLLVMRegNum(unsigned RegNum, bool isEH) const;
+
+ /// \brief Map a target register to an equivalent SEH register
+ /// number. Returns the LLVM register number if there is no equivalent value.
+ int getSEHRegNum(unsigned RegNum) const;
+
+ regclass_iterator regclass_begin() const { return Classes; }
+ regclass_iterator regclass_end() const { return Classes+NumClasses; }
+
+ unsigned getNumRegClasses() const {
+ return (unsigned)(regclass_end()-regclass_begin());
+ }
+
+ /// \brief Returns the register class associated with the enumeration
+ /// value. See class MCOperandInfo.
+ const MCRegisterClass& getRegClass(unsigned i) const {
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return Classes[i];
+ }
+
+ const char *getRegClassName(const MCRegisterClass *Class) const {
+ return RegClassStrings + Class->NameIdx;
+ }
+
+ /// \brief Returns the encoding for RegNo
+ uint16_t getEncodingValue(unsigned RegNo) const {
+ assert(RegNo < NumRegs &&
+ "Attempting to get encoding for invalid register number!");
+ return RegEncodingTable[RegNo];
+ }
+
+ /// \brief Returns true if RegB is a sub-register of RegA.
+ bool isSubRegister(unsigned RegA, unsigned RegB) const {
+ return isSuperRegister(RegB, RegA);
+ }
+
+ /// \brief Returns true if RegB is a super-register of RegA.
+ bool isSuperRegister(unsigned RegA, unsigned RegB) const;
+
+ /// \brief Returns true if RegB is a sub-register of RegA or if RegB == RegA.
+ bool isSubRegisterEq(unsigned RegA, unsigned RegB) const {
+ return isSuperRegisterEq(RegB, RegA);
+ }
+
+ /// \brief Returns true if RegB is a super-register of RegA or if
+ /// RegB == RegA.
+ bool isSuperRegisterEq(unsigned RegA, unsigned RegB) const {
+ return RegA == RegB || isSuperRegister(RegA, RegB);
+ }
+
+};
+
+//===----------------------------------------------------------------------===//
+// Register List Iterators
+//===----------------------------------------------------------------------===//
+
+// MCRegisterInfo provides lists of super-registers, sub-registers, and
+// aliasing registers. Use these iterator classes to traverse the lists.
+
+/// MCSubRegIterator enumerates all sub-registers of Reg.
+/// If IncludeSelf is set, Reg itself is included in the list.
+class MCSubRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ MCSubRegIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+ bool IncludeSelf = false) {
+ init(Reg, MCRI->DiffLists + MCRI->get(Reg).SubRegs);
+ // Initially, the iterator points to Reg itself.
+ if (!IncludeSelf)
+ ++*this;
+ }
+};
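+
+// Typical traversal (sketch; visit() is a placeholder):
+//
+//   for (MCSubRegIterator SubRegs(Reg, MCRI); SubRegs.isValid(); ++SubRegs)
+//     visit(*SubRegs);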
+
+/// Iterator that enumerates the sub-registers of a Reg and the associated
+/// sub-register indices.
+class MCSubRegIndexIterator {
+ MCSubRegIterator SRIter;
+ const uint16_t *SRIndex;
+public:
+ /// Constructs an iterator that traverses subregisters and their
+ /// associated subregister indices.
+ MCSubRegIndexIterator(unsigned Reg, const MCRegisterInfo *MCRI)
+ : SRIter(Reg, MCRI) {
+ SRIndex = MCRI->SubRegIndices + MCRI->get(Reg).SubRegIndices;
+ }
+
+ /// Returns current sub-register.
+ unsigned getSubReg() const {
+ return *SRIter;
+ }
+ /// Returns sub-register index of the current sub-register.
+ unsigned getSubRegIndex() const {
+ return *SRIndex;
+ }
+
+ /// Returns true if this iterator is not yet at the end.
+ bool isValid() const { return SRIter.isValid(); }
+
+ /// Moves to the next position.
+ void operator++() {
+ ++SRIter;
+ ++SRIndex;
+ }
+};
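+
+// Sketch of a paired traversal (record() is a placeholder):
+//
+//   for (MCSubRegIndexIterator SRI(Reg, MCRI); SRI.isValid(); ++SRI)
+//     record(SRI.getSubReg(), SRI.getSubRegIndex());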
+
+/// MCSuperRegIterator enumerates all super-registers of Reg.
+/// If IncludeSelf is set, Reg itself is included in the list.
+class MCSuperRegIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ MCSuperRegIterator() {}
+ MCSuperRegIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+ bool IncludeSelf = false) {
+ init(Reg, MCRI->DiffLists + MCRI->get(Reg).SuperRegs);
+ // Initially, the iterator points to Reg itself.
+ if (!IncludeSelf)
+ ++*this;
+ }
+};
+
+// Definition for isSuperRegister. Put it down here since it needs the
+// iterator defined above in addition to the MCRegisterInfo class itself.
+inline bool MCRegisterInfo::isSuperRegister(unsigned RegA, unsigned RegB) const {
+ for (MCSuperRegIterator I(RegA, this); I.isValid(); ++I)
+ if (*I == RegB)
+ return true;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Register Units
+//===----------------------------------------------------------------------===//
+
+// Register units are used to compute register aliasing. Every register has at
+// least one register unit, but it can have more. Two registers overlap if and
+// only if they have a common register unit.
+//
+// A target with a complicated sub-register structure will typically have many
+// fewer register units than actual registers. MCRI::getNumRegUnits() returns
+// the number of register units in the target.
+
+// MCRegUnitIterator enumerates a list of register units for Reg. The list is
+// in ascending numerical order.
+class MCRegUnitIterator : public MCRegisterInfo::DiffListIterator {
+public:
+ /// MCRegUnitIterator - Create an iterator that traverses the register units
+ /// in Reg.
+ MCRegUnitIterator() {}
+ MCRegUnitIterator(unsigned Reg, const MCRegisterInfo *MCRI) {
+ assert(Reg && "Null register has no regunits");
+ // Decode the RegUnits MCRegisterDesc field.
+ unsigned RU = MCRI->get(Reg).RegUnits;
+ unsigned Scale = RU & 15;
+ unsigned Offset = RU >> 4;
+
+ // Initialize the iterator to Reg * Scale, and the List pointer to
+ // DiffLists + Offset.
+ init(Reg * Scale, MCRI->DiffLists + Offset);
+
+ // That may not be a valid unit; we need to advance by one to get the real
+ // unit number. The first differential can be 0, which would normally
+ // terminate the list, but since we know every register has at least one
+ // unit, we can allow a 0 differential here.
+ advance();
+ }
+};
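+
+// Worked example of the decoding above (value invented): if RegUnits is 0x23,
+// then Scale = 0x23 & 15 = 3 and Offset = 0x23 >> 4 = 2, so iteration starts
+// at Reg * 3 and subsequent units are decoded from DiffLists + 2.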
+
+/// MCRegUnitMaskIterator enumerates a list of register units and their
+/// associated lane masks for Reg. The register units are in ascending
+/// numerical order.
+class MCRegUnitMaskIterator {
+ MCRegUnitIterator RUIter;
+ const unsigned *MaskListIter;
+public:
+ MCRegUnitMaskIterator() {}
+ /// Constructs an iterator that traverses the register units and their
+ /// associated LaneMasks in Reg.
+ MCRegUnitMaskIterator(unsigned Reg, const MCRegisterInfo *MCRI)
+ : RUIter(Reg, MCRI) {
+ uint16_t Idx = MCRI->get(Reg).RegUnitLaneMasks;
+ MaskListIter = &MCRI->RegUnitMaskSequences[Idx];
+ }
+
+ /// Returns a (RegUnit, LaneMask) pair.
+ std::pair<unsigned,unsigned> operator*() const {
+ return std::make_pair(*RUIter, *MaskListIter);
+ }
+
+ /// Returns true if this iterator is not yet at the end.
+ bool isValid() const { return RUIter.isValid(); }
+
+ /// Moves to the next position.
+ void operator++() {
+ ++MaskListIter;
+ ++RUIter;
+ }
+};
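+
+// Sketch of use:
+//
+//   for (MCRegUnitMaskIterator RUMI(Reg, MCRI); RUMI.isValid(); ++RUMI) {
+//     std::pair<unsigned, unsigned> P = *RUMI;
+//     // P.first is the register unit, P.second its lane mask.
+//   }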
+
+// Each register unit has one or two root registers. The complete set of
+// registers containing a register unit is the union of the roots and their
+// super-registers. All registers aliasing Unit can be visited like this:
+//
+// for (MCRegUnitRootIterator RI(Unit, MCRI); RI.isValid(); ++RI) {
+// for (MCSuperRegIterator SI(*RI, MCRI, true); SI.isValid(); ++SI)
+// visit(*SI);
+// }
+
+/// MCRegUnitRootIterator enumerates the root registers of a register unit.
+class MCRegUnitRootIterator {
+ uint16_t Reg0;
+ uint16_t Reg1;
+public:
+ MCRegUnitRootIterator() : Reg0(0), Reg1(0) {}
+ MCRegUnitRootIterator(unsigned RegUnit, const MCRegisterInfo *MCRI) {
+ assert(RegUnit < MCRI->getNumRegUnits() && "Invalid register unit");
+ Reg0 = MCRI->RegUnitRoots[RegUnit][0];
+ Reg1 = MCRI->RegUnitRoots[RegUnit][1];
+ }
+
+ /// \brief Dereference to get the current root register.
+ unsigned operator*() const {
+ return Reg0;
+ }
+
+ /// \brief Check if the iterator is at the end of the list.
+ bool isValid() const {
+ return Reg0;
+ }
+
+ /// \brief Preincrement to move to the next root register.
+ void operator++() {
+ assert(isValid() && "Cannot move off the end of the list.");
+ Reg0 = Reg1;
+ Reg1 = 0;
+ }
+};
+
+/// MCRegAliasIterator enumerates all registers aliasing Reg. If IncludeSelf is
+/// set, Reg itself is included in the list. This iterator does not guarantee
+/// any ordering or that entries are unique.
+class MCRegAliasIterator {
+private:
+ unsigned Reg;
+ const MCRegisterInfo *MCRI;
+ bool IncludeSelf;
+
+ MCRegUnitIterator RI;
+ MCRegUnitRootIterator RRI;
+ MCSuperRegIterator SI;
+public:
+ MCRegAliasIterator(unsigned Reg, const MCRegisterInfo *MCRI,
+ bool IncludeSelf)
+ : Reg(Reg), MCRI(MCRI), IncludeSelf(IncludeSelf) {
+
+ // Initialize the iterators.
+ for (RI = MCRegUnitIterator(Reg, MCRI); RI.isValid(); ++RI) {
+ for (RRI = MCRegUnitRootIterator(*RI, MCRI); RRI.isValid(); ++RRI) {
+ for (SI = MCSuperRegIterator(*RRI, MCRI, true); SI.isValid(); ++SI) {
+ // Stop at the first alias, unless it is Reg itself and IncludeSelf
+ // is not set.
+ if (IncludeSelf || Reg != *SI)
+ return;
+ }
+ }
+ }
+ }
+
+ bool isValid() const { return RI.isValid(); }
+
+ unsigned operator*() const {
+ assert(SI.isValid() && "Cannot dereference an invalid iterator.");
+ return *SI;
+ }
+
+ void advance() {
+ // Assuming SI is valid.
+ ++SI;
+ if (SI.isValid()) return;
+
+ ++RRI;
+ if (RRI.isValid()) {
+ SI = MCSuperRegIterator(*RRI, MCRI, true);
+ return;
+ }
+
+ ++RI;
+ if (RI.isValid()) {
+ RRI = MCRegUnitRootIterator(*RI, MCRI);
+ SI = MCSuperRegIterator(*RRI, MCRI, true);
+ }
+ }
+
+ void operator++() {
+ assert(isValid() && "Cannot move off the end of the list.");
+ do advance();
+ while (!IncludeSelf && isValid() && *SI == Reg);
+ }
+};
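+
+// A common use is an overlap test; a minimal sketch (helper name invented):
+//
+//   static bool regsOverlap(unsigned A, unsigned B,
+//                           const MCRegisterInfo *MCRI) {
+//     for (MCRegAliasIterator AI(A, MCRI, /*IncludeSelf=*/true); AI.isValid();
+//          ++AI)
+//       if (*AI == B)
+//         return true;
+//     return false;
+//   }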
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCRelocationInfo.h b/contrib/llvm/include/llvm/MC/MCRelocationInfo.h
new file mode 100644
index 0000000..40e0217
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCRelocationInfo.h
@@ -0,0 +1,55 @@
+//==-- llvm/MC/MCRelocationInfo.h --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCRelocationInfo class, which provides methods to
+// create MCExprs from relocations, either found in an object::ObjectFile
+// (object::RelocationRef), or provided through the C API.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCRELOCATIONINFO_H
+#define LLVM_MC_MCRELOCATIONINFO_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+
+namespace object {
+class RelocationRef;
+}
+class MCExpr;
+class MCContext;
+
+/// \brief Create MCExprs from relocations found in an object file.
+class MCRelocationInfo {
+ MCRelocationInfo(const MCRelocationInfo &) = delete;
+ void operator=(const MCRelocationInfo &) = delete;
+
+protected:
+ MCContext &Ctx;
+
+public:
+ MCRelocationInfo(MCContext &Ctx);
+ virtual ~MCRelocationInfo();
+
+ /// \brief Create an MCExpr for the relocation \p Rel.
+ /// \returns If possible, an MCExpr corresponding to Rel, else 0.
+ virtual const MCExpr *createExprForRelocation(object::RelocationRef Rel);
+
+ /// \brief Create an MCExpr for the target-specific \p VariantKind.
+ /// The VariantKinds are defined in llvm-c/Disassembler.h.
+ /// Used by MCExternalSymbolizer.
+ /// \returns If possible, an MCExpr corresponding to VariantKind, else 0.
+ virtual const MCExpr *createExprForCAPIVariantKind(const MCExpr *SubExpr,
+ unsigned VariantKind);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSchedule.h b/contrib/llvm/include/llvm/MC/MCSchedule.h
new file mode 100644
index 0000000..d7f9b69
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSchedule.h
@@ -0,0 +1,237 @@
+//===-- llvm/MC/MCSchedule.h - Scheduling -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to describe a subtarget's machine model
+// for scheduling and other instruction cost heuristics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSCHEDULE_H
+#define LLVM_MC_MCSCHEDULE_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+
+struct InstrItinerary;
+
+/// Define a kind of processor resource that will be modeled by the scheduler.
+struct MCProcResourceDesc {
+#ifndef NDEBUG
+ const char *Name;
+#endif
+ unsigned NumUnits; // Number of resources of this kind.
+ unsigned SuperIdx; // Index of the resource kind that contains this kind.
+
+ // Number of resources that may be buffered.
+ //
+ // Buffered resources (BufferSize != 0) may be consumed at some indeterminate
+ // cycle after dispatch. This should be used for out-of-order CPUs when
+ // instructions that use this resource can be buffered in a reservation
+ // station.
+ //
+ // Unbuffered resources (BufferSize == 0) always consume their resource some
+ // fixed number of cycles after dispatch. If a resource is unbuffered, then
+ // the scheduler will avoid scheduling instructions with conflicting resources
+ // in the same cycle. This is for in-order CPUs, or the in-order portion of
+ // an out-of-order CPU.
+ int BufferSize;
+
+ bool operator==(const MCProcResourceDesc &Other) const {
+ return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx
+ && BufferSize == Other.BufferSize;
+ }
+};
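+
+// Illustration (values invented): a buffered four-unit ALU pool could be
+// described with NumUnits = 4 and a nonzero BufferSize, while an in-order,
+// unbuffered divider would use BufferSize = 0 so the scheduler keeps
+// instructions with conflicting resource uses out of the same cycle.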
+
+/// Identify one of the processor resource kinds consumed by a particular
+/// scheduling class for the specified number of cycles.
+struct MCWriteProcResEntry {
+ unsigned ProcResourceIdx;
+ unsigned Cycles;
+
+ bool operator==(const MCWriteProcResEntry &Other) const {
+ return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles;
+ }
+};
+
+/// Specify the latency in CPU cycles for a particular scheduling class and def
+/// index. -1 indicates an invalid latency. Heuristics would typically consider
+/// an instruction with invalid latency to have infinite latency. Also identify
+/// the WriteResources of this def. When the operand expands to a sequence of
+/// writes, this ID is the last write in the sequence.
+struct MCWriteLatencyEntry {
+ int Cycles;
+ unsigned WriteResourceID;
+
+ bool operator==(const MCWriteLatencyEntry &Other) const {
+ return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID;
+ }
+};
+
+/// Specify the number of cycles allowed after instruction issue before a
+/// particular use operand reads its registers. This effectively reduces the
+/// write's latency. Here we allow negative cycles for corner cases where
+/// latency increases. This rule only applies when the entry's WriteResource
+/// matches the write's WriteResource.
+///
+/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by
+/// WriteResourceID.
+struct MCReadAdvanceEntry {
+ unsigned UseIdx;
+ unsigned WriteResourceID;
+ int Cycles;
+
+ bool operator==(const MCReadAdvanceEntry &Other) const {
+ return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID
+ && Cycles == Other.Cycles;
+ }
+};
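+
+// Worked example (numbers invented): if a def is written with latency 4 and a
+// matching MCReadAdvanceEntry carries Cycles == 2 for the reading operand, the
+// reader may issue two cycles early, for an effective latency of 4 - 2 = 2.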
+
+/// Summarize the scheduling resources required for an instruction of a
+/// particular scheduling class.
+///
+/// Defined as an aggregate struct for creating tables with initializer lists.
+struct MCSchedClassDesc {
+ static const unsigned short InvalidNumMicroOps = UINT16_MAX;
+ static const unsigned short VariantNumMicroOps = UINT16_MAX - 1;
+
+#ifndef NDEBUG
+ const char* Name;
+#endif
+ unsigned short NumMicroOps;
+ bool BeginGroup;
+ bool EndGroup;
+ unsigned WriteProcResIdx; // First index into WriteProcResTable.
+ unsigned NumWriteProcResEntries;
+ unsigned WriteLatencyIdx; // First index into WriteLatencyTable.
+ unsigned NumWriteLatencyEntries;
+ unsigned ReadAdvanceIdx; // First index into ReadAdvanceTable.
+ unsigned NumReadAdvanceEntries;
+
+ bool isValid() const {
+ return NumMicroOps != InvalidNumMicroOps;
+ }
+ bool isVariant() const {
+ return NumMicroOps == VariantNumMicroOps;
+ }
+};
+
+/// Machine model for scheduling, bundling, and heuristics.
+///
+/// The machine model directly provides basic information about the
+/// microarchitecture to the scheduler in the form of properties. It also
+/// optionally refers to scheduler resource tables and itinerary
+/// tables. Scheduler resource tables model the latency and cost for each
+/// instruction type. Itinerary tables are an independent mechanism that
+/// provides a detailed reservation table describing each cycle of instruction
+/// execution. Subtargets may define any or all of the above categories of data
+/// depending on the type of CPU and selected scheduler.
+struct MCSchedModel {
+ // IssueWidth is the maximum number of instructions that may be scheduled in
+ // the same per-cycle group.
+ unsigned IssueWidth;
+ static const unsigned DefaultIssueWidth = 1;
+
+ // MicroOpBufferSize is the number of micro-ops that the processor may buffer
+ // for out-of-order execution.
+ //
+ // "0" means operations that are not ready in this cycle are not considered
+ // for scheduling (they go in the pending queue). Latency is paramount. This
+ // may be more efficient if many instructions are pending in a schedule.
+ //
+ // "1" means all instructions are considered for scheduling regardless of
+ // whether they are ready in this cycle. Latency still causes issue stalls,
+ // but we balance those stalls against other heuristics.
+ //
+ // "> 1" means the processor is out-of-order. This is a machine independent
+ // estimate of highly machine specific characteristics such as the register
+ // renaming pool and reorder buffer.
+ unsigned MicroOpBufferSize;
+ static const unsigned DefaultMicroOpBufferSize = 0;
+
+ // LoopMicroOpBufferSize is the number of micro-ops that the processor may
+ // buffer for optimized loop execution. More generally, this represents the
+ // optimal number of micro-ops in a loop body. A loop may be partially
+ // unrolled to bring the count of micro-ops in the loop body closer to this
+ // number.
+ unsigned LoopMicroOpBufferSize;
+ static const unsigned DefaultLoopMicroOpBufferSize = 0;
+
+ // LoadLatency is the expected latency of load instructions.
+ //
+ // If MinLatency >= 0, this may be overridden for individual load opcodes by
+ // InstrItinerary OperandCycles.
+ unsigned LoadLatency;
+ static const unsigned DefaultLoadLatency = 4;
+
+ // HighLatency is the expected latency of "very high latency" operations.
+ // See TargetInstrInfo::isHighLatencyDef().
+ // By default, this is set to an arbitrarily high number of cycles
+ // likely to have some impact on scheduling heuristics.
+ // If MinLatency >= 0, this may be overridden by InstrItinData OperandCycles.
+ unsigned HighLatency;
+ static const unsigned DefaultHighLatency = 10;
+
+ // MispredictPenalty is the typical number of extra cycles the processor
+ // takes to recover from a branch misprediction.
+ unsigned MispredictPenalty;
+ static const unsigned DefaultMispredictPenalty = 10;
+
+ bool PostRAScheduler; // default value is false
+
+ bool CompleteModel;
+
+ unsigned ProcID;
+ const MCProcResourceDesc *ProcResourceTable;
+ const MCSchedClassDesc *SchedClassTable;
+ unsigned NumProcResourceKinds;
+ unsigned NumSchedClasses;
+ // Instruction itinerary tables used by InstrItineraryData.
+ friend class InstrItineraryData;
+ const InstrItinerary *InstrItineraries;
+
+ unsigned getProcessorID() const { return ProcID; }
+
+ /// Returns true if this machine model includes instruction-level scheduling.
+ bool hasInstrSchedModel() const { return SchedClassTable; }
+
+ /// Return true if this machine model includes data for all instructions with
+ /// a scheduling class (itinerary class or SchedRW list).
+ bool isComplete() const { return CompleteModel; }
+
+ /// Return true if the machine supports out-of-order execution.
+ bool isOutOfOrder() const { return MicroOpBufferSize > 1; }
+
+ unsigned getNumProcResourceKinds() const {
+ return NumProcResourceKinds;
+ }
+
+ const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx");
+ return &ProcResourceTable[ProcResourceIdx];
+ }
+
+ const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const {
+ assert(hasInstrSchedModel() && "No scheduling machine model");
+
+ assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx");
+ return &SchedClassTable[SchedClassIdx];
+ }
+
+ /// Returns the default initialized model.
+ static const MCSchedModel &GetDefaultSchedModel() { return Default; }
+ static const MCSchedModel Default;
+};
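+
+// Minimal sketch of querying a model (SM and SCIdx are placeholders):
+//
+//   const MCSchedModel &SM = STI.getSchedModel();   // hypothetical source
+//   if (SM.hasInstrSchedModel()) {
+//     const MCSchedClassDesc *SC = SM.getSchedClassDesc(SCIdx);
+//     if (SC->isValid() && !SC->isVariant()) {
+//       // This class's consumed resources live at indices [WriteProcResIdx,
+//       // WriteProcResIdx + NumWriteProcResEntries) of the subtarget's
+//       // MCWriteProcResEntry table.
+//     }
+//   }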
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSection.h b/contrib/llvm/include/llvm/MC/MCSection.h
new file mode 100644
index 0000000..09a9892
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSection.h
@@ -0,0 +1,203 @@
+//===- MCSection.h - Machine Code Sections ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSection class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTION_H
+#define LLVM_MC_MCSECTION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/MC/MCFragment.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCAsmInfo;
+class MCAssembler;
+class MCContext;
+class MCExpr;
+class MCFragment;
+class MCSection;
+class MCSymbol;
+class raw_ostream;
+
+template<>
+struct ilist_node_traits<MCFragment> {
+ MCFragment *createNode(const MCFragment &V);
+ static void deleteNode(MCFragment *V);
+
+ void addNodeToList(MCFragment *) {}
+ void removeNodeFromList(MCFragment *) {}
+ void transferNodesFromList(ilist_node_traits & /*SrcTraits*/,
+ ilist_iterator<MCFragment> /*first*/,
+ ilist_iterator<MCFragment> /*last*/) {}
+};
+
+/// Instances of this class represent a uniqued identifier for a section in the
+/// current translation unit. The MCContext class uniques and creates these.
+class MCSection {
+public:
+ enum SectionVariant { SV_COFF = 0, SV_ELF, SV_MachO };
+
+ /// \brief Express the state of bundle locked groups while emitting code.
+ enum BundleLockStateType {
+ NotBundleLocked,
+ BundleLocked,
+ BundleLockedAlignToEnd
+ };
+
+ typedef iplist<MCFragment> FragmentListType;
+
+ typedef FragmentListType::const_iterator const_iterator;
+ typedef FragmentListType::iterator iterator;
+
+ typedef FragmentListType::const_reverse_iterator const_reverse_iterator;
+ typedef FragmentListType::reverse_iterator reverse_iterator;
+
+private:
+ MCSection(const MCSection &) = delete;
+ void operator=(const MCSection &) = delete;
+
+ MCSymbol *Begin;
+ MCSymbol *End = nullptr;
+ /// The alignment requirement of this section.
+ unsigned Alignment = 1;
+ /// The section index in the assembler's section list.
+ unsigned Ordinal = 0;
+ /// The index of this section in the layout order.
+ unsigned LayoutOrder;
+
+ /// \brief Keeping track of bundle-locked state.
+ BundleLockStateType BundleLockState = NotBundleLocked;
+
+ /// \brief Current nesting depth of bundle_lock directives.
+ unsigned BundleLockNestingDepth = 0;
+
+ /// \brief We've seen a bundle_lock directive but not its first instruction
+ /// yet.
+ unsigned BundleGroupBeforeFirstInst : 1;
+
+ /// Whether this section has had instructions emitted into it.
+ unsigned HasInstructions : 1;
+
+ unsigned IsRegistered : 1;
+
+ MCDummyFragment DummyFragment;
+
+ FragmentListType Fragments;
+
+ /// Mapping from subsection number to insertion point for subsection numbers
+ /// below that number.
+ SmallVector<std::pair<unsigned, MCFragment *>, 1> SubsectionFragmentMap;
+
+protected:
+ MCSection(SectionVariant V, SectionKind K, MCSymbol *Begin);
+ SectionVariant Variant;
+ SectionKind Kind;
+ ~MCSection();
+
+public:
+ SectionKind getKind() const { return Kind; }
+
+ SectionVariant getVariant() const { return Variant; }
+
+ MCSymbol *getBeginSymbol() { return Begin; }
+ const MCSymbol *getBeginSymbol() const {
+ return const_cast<MCSection *>(this)->getBeginSymbol();
+ }
+ void setBeginSymbol(MCSymbol *Sym) {
+ assert(!Begin);
+ Begin = Sym;
+ }
+ MCSymbol *getEndSymbol(MCContext &Ctx);
+ bool hasEnded() const;
+
+ unsigned getAlignment() const { return Alignment; }
+ void setAlignment(unsigned Value) { Alignment = Value; }
+
+ unsigned getOrdinal() const { return Ordinal; }
+ void setOrdinal(unsigned Value) { Ordinal = Value; }
+
+ unsigned getLayoutOrder() const { return LayoutOrder; }
+ void setLayoutOrder(unsigned Value) { LayoutOrder = Value; }
+
+ BundleLockStateType getBundleLockState() const { return BundleLockState; }
+ void setBundleLockState(BundleLockStateType NewState);
+ bool isBundleLocked() const { return BundleLockState != NotBundleLocked; }
+
+ bool isBundleGroupBeforeFirstInst() const {
+ return BundleGroupBeforeFirstInst;
+ }
+ void setBundleGroupBeforeFirstInst(bool IsFirst) {
+ BundleGroupBeforeFirstInst = IsFirst;
+ }
+
+ bool hasInstructions() const { return HasInstructions; }
+ void setHasInstructions(bool Value) { HasInstructions = Value; }
+
+ bool isRegistered() const { return IsRegistered; }
+ void setIsRegistered(bool Value) { IsRegistered = Value; }
+
+ MCSection::FragmentListType &getFragmentList() { return Fragments; }
+ const MCSection::FragmentListType &getFragmentList() const {
+ return const_cast<MCSection *>(this)->getFragmentList();
+ }
+
+ /// Support for MCFragment::getNextNode().
+ static FragmentListType MCSection::*getSublistAccess(MCFragment *) {
+ return &MCSection::Fragments;
+ }
+
+ const MCDummyFragment &getDummyFragment() const { return DummyFragment; }
+ MCDummyFragment &getDummyFragment() { return DummyFragment; }
+
+ MCSection::iterator begin();
+ MCSection::const_iterator begin() const {
+ return const_cast<MCSection *>(this)->begin();
+ }
+
+ MCSection::iterator end();
+ MCSection::const_iterator end() const {
+ return const_cast<MCSection *>(this)->end();
+ }
+
+ MCSection::reverse_iterator rbegin();
+ MCSection::const_reverse_iterator rbegin() const {
+ return const_cast<MCSection *>(this)->rbegin();
+ }
+
+ MCSection::reverse_iterator rend();
+ MCSection::const_reverse_iterator rend() const {
+ return const_cast<MCSection *>(this)->rend();
+ }
+
+ MCSection::iterator getSubsectionInsertionPoint(unsigned Subsection);
+
+ void dump();
+
+ virtual void PrintSwitchToSection(const MCAsmInfo &MAI, raw_ostream &OS,
+ const MCExpr *Subsection) const = 0;
+
+ /// Return true if a .align directive should use "optimized nops" to fill
+ /// instead of 0s.
+ virtual bool UseCodeAlign() const = 0;
+
+ /// Check whether this section is "virtual", that is, has no actual object
+ /// file contents.
+ virtual bool isVirtualSection() const = 0;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSectionCOFF.h b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
new file mode 100644
index 0000000..d94682c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSectionCOFF.h
@@ -0,0 +1,79 @@
+//===- MCSectionCOFF.h - COFF Machine Code Sections -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionCOFF class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONCOFF_H
+#define LLVM_MC_MCSECTIONCOFF_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+
+namespace llvm {
+class MCSymbol;
+
+/// This represents a section on Windows
+class MCSectionCOFF final : public MCSection {
+ // The memory for this string is stored in the same MCContext as *this.
+ StringRef SectionName;
+
+ // FIXME: The following fields should not be mutable, but are for now so the
+ // asm parser can honor the .linkonce directive.
+
+ /// This is the Characteristics field of a section, drawn from the enums
+ /// below.
+ mutable unsigned Characteristics;
+
+ /// The COMDAT symbol of this section. Only valid if this is a COMDAT section.
+ /// Two COMDAT sections are merged if they have the same COMDAT symbol.
+ MCSymbol *COMDATSymbol;
+
+ /// This is the Selection field for the section symbol, if it is a COMDAT
+ /// section (Characteristics & IMAGE_SCN_LNK_COMDAT) != 0
+ mutable int Selection;
+
+private:
+ friend class MCContext;
+ MCSectionCOFF(StringRef Section, unsigned Characteristics,
+ MCSymbol *COMDATSymbol, int Selection, SectionKind K,
+ MCSymbol *Begin)
+ : MCSection(SV_COFF, K, Begin), SectionName(Section),
+ Characteristics(Characteristics), COMDATSymbol(COMDATSymbol),
+ Selection(Selection) {
+ assert((Characteristics & 0x00F00000) == 0 &&
+ "alignment must not be set upon section creation");
+ }
+
+public:
+ ~MCSectionCOFF();
+
+ /// Decides whether a '.section' directive should be printed before the
+ /// section name.
+ bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+ StringRef getSectionName() const { return SectionName; }
+ unsigned getCharacteristics() const { return Characteristics; }
+ MCSymbol *getCOMDATSymbol() const { return COMDATSymbol; }
+ int getSelection() const { return Selection; }
+
+ void setSelection(int Selection) const;
+
+ void PrintSwitchToSection(const MCAsmInfo &MAI, raw_ostream &OS,
+ const MCExpr *Subsection) const override;
+ bool UseCodeAlign() const override;
+ bool isVirtualSection() const override;
+
+ static bool classof(const MCSection *S) { return S->getVariant() == SV_COFF; }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSectionELF.h b/contrib/llvm/include/llvm/MC/MCSectionELF.h
new file mode 100644
index 0000000..b3bb3ad
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSectionELF.h
@@ -0,0 +1,97 @@
+//===- MCSectionELF.h - ELF Machine Code Sections ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionELF class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONELF_H
+#define LLVM_MC_MCSECTIONELF_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class MCSymbol;
+
+/// This represents a section on Linux, most Unix variants, and some
+/// bare-metal systems.
+class MCSectionELF final : public MCSection {
+ /// This is the name of the section. The referenced memory is owned by
+ /// TargetLoweringObjectFileELF's ELFUniqueMap.
+ StringRef SectionName;
+
+ /// This is the sh_type field of a section, drawn from the enums below.
+ unsigned Type;
+
+ /// This is the sh_flags field of a section, drawn from the enums below.
+ unsigned Flags;
+
+ unsigned UniqueID;
+
+ /// The size of each entry in this section. This size only makes sense for
+ /// sections that contain fixed-sized entries. If a section does not contain
+ /// fixed-sized entries 'EntrySize' will be 0.
+ unsigned EntrySize;
+
+ const MCSymbolELF *Group;
+
+ /// Depending on the type of the section this is sh_link or sh_info.
+ const MCSectionELF *Associated;
+
+private:
+ friend class MCContext;
+ MCSectionELF(StringRef Section, unsigned type, unsigned flags, SectionKind K,
+ unsigned entrySize, const MCSymbolELF *group, unsigned UniqueID,
+ MCSymbol *Begin, const MCSectionELF *Associated)
+ : MCSection(SV_ELF, K, Begin), SectionName(Section), Type(type),
+ Flags(flags), UniqueID(UniqueID), EntrySize(entrySize), Group(group),
+ Associated(Associated) {
+ if (Group)
+ Group->setIsSignature();
+ }
+
+ void setSectionName(StringRef Name) { SectionName = Name; }
+
+public:
+ ~MCSectionELF();
+
+ /// Decides whether a '.section' directive should be printed before the
+ /// section name.
+ bool ShouldOmitSectionDirective(StringRef Name, const MCAsmInfo &MAI) const;
+
+ StringRef getSectionName() const { return SectionName; }
+ unsigned getType() const { return Type; }
+ unsigned getFlags() const { return Flags; }
+ unsigned getEntrySize() const { return EntrySize; }
+ const MCSymbolELF *getGroup() const { return Group; }
+
+ void PrintSwitchToSection(const MCAsmInfo &MAI, raw_ostream &OS,
+ const MCExpr *Subsection) const override;
+ bool UseCodeAlign() const override;
+ bool isVirtualSection() const override;
+
+ bool isUnique() const { return UniqueID != ~0U; }
+ unsigned getUniqueID() const { return UniqueID; }
+
+ const MCSectionELF *getAssociatedSection() const { return Associated; }
+
+ static bool classof(const MCSection *S) {
+ return S->getVariant() == SV_ELF;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSectionMachO.h b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
new file mode 100644
index 0000000..658dfcd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSectionMachO.h
@@ -0,0 +1,91 @@
+//===- MCSectionMachO.h - MachO Machine Code Sections -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCSectionMachO class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSECTIONMACHO_H
+#define LLVM_MC_MCSECTIONMACHO_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/Support/MachO.h"
+
+namespace llvm {
+
+/// This represents a section on a Mach-O system (used by Mac OS X). On a Mac
+/// system, these are also described in /usr/include/mach-o/loader.h.
+class MCSectionMachO final : public MCSection {
+ char SegmentName[16]; // Not necessarily null terminated!
+ char SectionName[16]; // Not necessarily null terminated!
+
+ /// This is the SECTION_TYPE and SECTION_ATTRIBUTES field of a section, drawn
+ /// from the enums below.
+ unsigned TypeAndAttributes;
+
+ /// The 'reserved2' field of a section, used to represent the size of stubs,
+ /// for example.
+ unsigned Reserved2;
+
+ MCSectionMachO(StringRef Segment, StringRef Section, unsigned TAA,
+ unsigned reserved2, SectionKind K, MCSymbol *Begin);
+ friend class MCContext;
+public:
+
+ StringRef getSegmentName() const {
+ // SegmentName is not necessarily null terminated!
+ if (SegmentName[15])
+ return StringRef(SegmentName, 16);
+ return StringRef(SegmentName);
+ }
+ StringRef getSectionName() const {
+ // SectionName is not necessarily null terminated!
+ if (SectionName[15])
+ return StringRef(SectionName, 16);
+ return StringRef(SectionName);
+ }
+
+ unsigned getTypeAndAttributes() const { return TypeAndAttributes; }
+ unsigned getStubSize() const { return Reserved2; }
+
+ MachO::SectionType getType() const {
+ return static_cast<MachO::SectionType>(TypeAndAttributes &
+ MachO::SECTION_TYPE);
+ }
+ bool hasAttribute(unsigned Value) const {
+ return (TypeAndAttributes & Value) != 0;
+ }
+
+ /// Parse the section specifier indicated by "Spec". This is a string that can
+ /// appear after a .section directive in a mach-o flavored .s file. If
+ /// successful, this fills in the specified Out parameters and returns an
+ /// empty string. When an invalid section specifier is present, this returns
+ /// a string describing the problem. If no TAA was parsed, TAA is not altered,
+ /// and TAAParsed is set to false.
+ static std::string ParseSectionSpecifier(StringRef Spec, // In.
+ StringRef &Segment, // Out.
+ StringRef &Section, // Out.
+ unsigned &TAA, // Out.
+ bool &TAAParsed, // Out.
+ unsigned &StubSize); // Out.
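+
+ // Sketch of a call (local names invented):
+ //
+ //   StringRef Seg, Sec;
+ //   unsigned TAA = 0, StubSize = 0;
+ //   bool TAAParsed = false;
+ //   std::string Err = MCSectionMachO::ParseSectionSpecifier(
+ //       "__TEXT,__text,regular,pure_instructions", Seg, Sec, TAA, TAAParsed,
+ //       StubSize);
+ //   if (!Err.empty())
+ //     ; // Err describes the problem with the specifier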
+
+ void PrintSwitchToSection(const MCAsmInfo &MAI, raw_ostream &OS,
+ const MCExpr *Subsection) const override;
+ bool UseCodeAlign() const override;
+ bool isVirtualSection() const override;
+
+ static bool classof(const MCSection *S) {
+ return S->getVariant() == SV_MachO;
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCStreamer.h b/contrib/llvm/include/llvm/MC/MCStreamer.h
new file mode 100644
index 0000000..494f02d
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCStreamer.h
@@ -0,0 +1,750 @@
+//===- MCStreamer.h - High-level Streaming Machine Code Output --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MCStreamer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSTREAMER_H
+#define LLVM_MC_MCSTREAMER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCLinkerOptimizationHint.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCWinEH.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/SMLoc.h"
+#include <string>
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCExpr;
+class MCInst;
+class MCInstPrinter;
+class MCSection;
+class MCStreamer;
+class MCSymbolELF;
+class MCSymbolRefExpr;
+class MCSubtargetInfo;
+class StringRef;
+class Twine;
+class raw_ostream;
+class formatted_raw_ostream;
+class AssemblerConstantPools;
+
+typedef std::pair<MCSection *, const MCExpr *> MCSectionSubPair;
+
+/// Target specific streamer interface. This is used so that targets can
+/// implement support for target specific assembly directives.
+///
+/// If target foo wants to use this, it should implement 3 classes:
+/// * FooTargetStreamer : public MCTargetStreamer
+/// * FooTargetAsmStreamer : public FooTargetStreamer
+/// * FooTargetELFStreamer : public FooTargetStreamer
+///
+/// FooTargetStreamer should have a pure virtual method for each directive. For
+/// example, for a ".bar symbol_name" directive, it should have
+/// virtual void emitBar(const MCSymbol &Symbol) = 0;
+///
+/// The FooTargetAsmStreamer and FooTargetELFStreamer classes implement the
+/// method. The assembly streamer just prints ".bar symbol_name". The object
+/// streamer does whatever is needed to implement .bar in the object file.
+///
+/// In the assembly printer and parser the target streamer can be used by
+/// calling getTargetStreamer and casting it to FooTargetStreamer:
+///
+/// MCTargetStreamer &TS = OutStreamer.getTargetStreamer();
+/// FooTargetStreamer &ATS = static_cast<FooTargetStreamer &>(TS);
+///
+/// The derived classes FooTargetAsmStreamer and FooTargetELFStreamer should
+/// *never* be treated differently. Callers should always talk to a
+/// FooTargetStreamer.
+class MCTargetStreamer {
+protected:
+ MCStreamer &Streamer;
+
+public:
+ MCTargetStreamer(MCStreamer &S);
+ virtual ~MCTargetStreamer();
+
+ MCStreamer &getStreamer() { return Streamer; }
+
+ // Allow a target to add behavior to the EmitLabel of MCStreamer.
+ virtual void emitLabel(MCSymbol *Symbol);
+ // Allow a target to add behavior to the emitAssignment of MCStreamer.
+ virtual void emitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+
+ virtual void prettyPrintAsm(MCInstPrinter &InstPrinter, raw_ostream &OS,
+ const MCInst &Inst, const MCSubtargetInfo &STI);
+
+ virtual void finish();
+};
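+
+// A minimal sketch of the pattern described above (Foo is a stand-in target):
+//
+//   class FooTargetStreamer : public MCTargetStreamer {
+//   public:
+//     FooTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+//     virtual void emitBar(const MCSymbol &Symbol) = 0;
+//   };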
+
+// FIXME: declared here because it is used from
+// lib/CodeGen/AsmPrinter/ARMException.cpp.
+class ARMTargetStreamer : public MCTargetStreamer {
+public:
+ ARMTargetStreamer(MCStreamer &S);
+ ~ARMTargetStreamer() override;
+
+ virtual void emitFnStart();
+ virtual void emitFnEnd();
+ virtual void emitCantUnwind();
+ virtual void emitPersonality(const MCSymbol *Personality);
+ virtual void emitPersonalityIndex(unsigned Index);
+ virtual void emitHandlerData();
+ virtual void emitSetFP(unsigned FpReg, unsigned SpReg,
+ int64_t Offset = 0);
+ virtual void emitMovSP(unsigned Reg, int64_t Offset = 0);
+ virtual void emitPad(int64_t Offset);
+ virtual void emitRegSave(const SmallVectorImpl<unsigned> &RegList,
+ bool isVector);
+ virtual void emitUnwindRaw(int64_t StackOffset,
+ const SmallVectorImpl<uint8_t> &Opcodes);
+
+ virtual void switchVendor(StringRef Vendor);
+ virtual void emitAttribute(unsigned Attribute, unsigned Value);
+ virtual void emitTextAttribute(unsigned Attribute, StringRef String);
+ virtual void emitIntTextAttribute(unsigned Attribute, unsigned IntValue,
+ StringRef StringValue = "");
+ virtual void emitFPU(unsigned FPU);
+ virtual void emitArch(unsigned Arch);
+ virtual void emitArchExtension(unsigned ArchExt);
+ virtual void emitObjectArch(unsigned Arch);
+ virtual void finishAttributeSection();
+ virtual void emitInst(uint32_t Inst, char Suffix = '\0');
+
+ virtual void AnnotateTLSDescriptorSequence(const MCSymbolRefExpr *SRE);
+
+ virtual void emitThumbSet(MCSymbol *Symbol, const MCExpr *Value);
+
+ void finish() override;
+
+ /// Callback used to implement the ldr= pseudo.
+ /// Add a new entry to the constant pool for the current section and return an
+ /// MCExpr that can be used to refer to the constant pool location.
+ const MCExpr *addConstantPoolEntry(const MCExpr *, SMLoc Loc);
+
+ /// Callback used to implement the .ltorg directive.
+ /// Emit contents of constant pool for the current section.
+ void emitCurrentConstantPool();
+
+private:
+ std::unique_ptr<AssemblerConstantPools> ConstantPools;
+};
+
+/// \brief Streaming machine code generation interface.
+///
+/// This interface is intended to provide a programmatic interface that is very
+/// similar to the level that an assembler .s file provides. It has callbacks
+/// to emit bytes, handle directives, etc. The implementation of this interface
+/// retains state to know what the current section is etc.
+///
+/// There are multiple implementations of this interface: one for writing out
+/// a .s file, and implementations that write out .o files of various formats.
+///
+class MCStreamer {
+ MCContext &Context;
+ std::unique_ptr<MCTargetStreamer> TargetStreamer;
+
+ MCStreamer(const MCStreamer &) = delete;
+ MCStreamer &operator=(const MCStreamer &) = delete;
+
+ std::vector<MCDwarfFrameInfo> DwarfFrameInfos;
+ MCDwarfFrameInfo *getCurrentDwarfFrameInfo();
+ void EnsureValidDwarfFrame();
+
+ MCSymbol *EmitCFICommon();
+
+ std::vector<WinEH::FrameInfo *> WinFrameInfos;
+ WinEH::FrameInfo *CurrentWinFrameInfo;
+ void EnsureValidWinFrameInfo();
+
+ /// \brief Tracks an index to represent the order a symbol was emitted in.
+ /// Zero means we did not emit that symbol.
+ DenseMap<const MCSymbol *, unsigned> SymbolOrdering;
+
+ /// \brief This is the stack of current and previous section values saved by
+ /// PushSection.
+ SmallVector<std::pair<MCSectionSubPair, MCSectionSubPair>, 4> SectionStack;
+
+protected:
+ MCStreamer(MCContext &Ctx);
+
+ virtual void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame);
+ virtual void EmitCFIEndProcImpl(MCDwarfFrameInfo &CurFrame);
+
+ WinEH::FrameInfo *getCurrentWinFrameInfo() {
+ return CurrentWinFrameInfo;
+ }
+
+ virtual void EmitWindowsUnwindTables();
+
+ virtual void EmitRawTextImpl(StringRef String);
+
+public:
+ virtual ~MCStreamer();
+
+ void visitUsedExpr(const MCExpr &Expr);
+ virtual void visitUsedSymbol(const MCSymbol &Sym);
+
+ void setTargetStreamer(MCTargetStreamer *TS) {
+ TargetStreamer.reset(TS);
+ }
+
+ /// State management
+ ///
+ virtual void reset();
+
+ MCContext &getContext() const { return Context; }
+
+ MCTargetStreamer *getTargetStreamer() {
+ return TargetStreamer.get();
+ }
+
+ unsigned getNumFrameInfos() { return DwarfFrameInfos.size(); }
+ ArrayRef<MCDwarfFrameInfo> getDwarfFrameInfos() const {
+ return DwarfFrameInfos;
+ }
+
+ unsigned getNumWinFrameInfos() { return WinFrameInfos.size(); }
+ ArrayRef<WinEH::FrameInfo *> getWinFrameInfos() const {
+ return WinFrameInfos;
+ }
+
+ void generateCompactUnwindEncodings(MCAsmBackend *MAB);
+
+ /// \name Assembly File Formatting.
+ /// @{
+
+ /// \brief Return true if this streamer supports verbose assembly and if it is
+ /// enabled.
+ virtual bool isVerboseAsm() const { return false; }
+
+ /// \brief Return true if this asm streamer supports emitting unformatted text
+ /// to the .s file with EmitRawText.
+ virtual bool hasRawTextSupport() const { return false; }
+
+ /// \brief Is the integrated assembler required for this streamer to function
+ /// correctly?
+ virtual bool isIntegratedAssemblerRequired() const { return false; }
+
+ /// \brief Add a textual command.
+ ///
+ /// Typically used for comments that can be emitted to the generated .s file,
+ /// as a quality-of-implementation nicety, to make the output of the compiler
+ /// more readable. This only affects the MCAsmStreamer, and only when
+ /// verbose assembly output is enabled.
+ ///
+ /// If the comment includes embedded \n's, they will each get the comment
+ /// prefix as appropriate. The added comment should not end with a \n.
+ virtual void AddComment(const Twine &T) {}
+
+ /// \brief Return a raw_ostream that comments can be written to. Unlike
+ /// AddComment, you are required to terminate comments with \n if you use this
+ /// method.
+ virtual raw_ostream &GetCommentOS();
+
+ /// \brief Print T and prefix it with the comment string (normally #) and
+ /// optionally a tab. This prints the comment immediately, not at the end of
+ /// the current line. It is basically a safe version of EmitRawText: since it
+ /// only prints comments, the object streamer ignores it instead of asserting.
+ virtual void emitRawComment(const Twine &T, bool TabPrefix = true);
+
+ /// AddBlankLine - Emit a blank line to a .s file to pretty it up.
+ virtual void AddBlankLine() {}
+
+ /// @}
+
+ /// \name Symbol & Section Management
+ /// @{
+
+ /// \brief Return the current section that the streamer is emitting code to.
+ MCSectionSubPair getCurrentSection() const {
+ if (!SectionStack.empty())
+ return SectionStack.back().first;
+ return MCSectionSubPair();
+ }
+ MCSection *getCurrentSectionOnly() const { return getCurrentSection().first; }
+
+ /// \brief Return the previous section that the streamer is emitting code to.
+ MCSectionSubPair getPreviousSection() const {
+ if (!SectionStack.empty())
+ return SectionStack.back().second;
+ return MCSectionSubPair();
+ }
+
+ /// \brief Returns an index to represent the order a symbol was emitted in.
+ /// (zero if we did not emit that symbol)
+ unsigned GetSymbolOrder(const MCSymbol *Sym) const {
+ return SymbolOrdering.lookup(Sym);
+ }
+
+ /// \brief Update streamer for a new active section.
+ ///
+ /// This is called by PopSection and SwitchSection, if the current
+ /// section changes.
+ virtual void ChangeSection(MCSection *, const MCExpr *);
+
+ /// \brief Save the current and previous section on the section stack.
+ void PushSection() {
+ SectionStack.push_back(
+ std::make_pair(getCurrentSection(), getPreviousSection()));
+ }
+
+ /// \brief Restore the current and previous section from the section stack.
+ /// Calls ChangeSection as needed.
+ ///
+ /// Returns false if the stack was empty.
+ bool PopSection() {
+ if (SectionStack.size() <= 1)
+ return false;
+ auto I = SectionStack.end();
+ --I;
+ MCSectionSubPair OldSection = I->first;
+ --I;
+ MCSectionSubPair NewSection = I->first;
+
+ if (OldSection != NewSection)
+ ChangeSection(NewSection.first, NewSection.second);
+ SectionStack.pop_back();
+ return true;
+ }
+
+ bool SubSection(const MCExpr *Subsection) {
+ if (SectionStack.empty())
+ return false;
+
+ SwitchSection(SectionStack.back().first.first, Subsection);
+ return true;
+ }
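+
+ // Typical save/restore pattern (sketch; S is this streamer, Sec is some
+ // MCSection*):
+ //
+ //   S.PushSection();
+ //   S.SwitchSection(Sec); // ... emit into Sec ...
+ //   S.PopSection();       // restores the prior section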
+
+ /// Set the current section where code is being emitted to \p Section. This
+ /// is required to update CurSection.
+ ///
+ /// This corresponds to assembler directives like .section, .text, etc.
+ virtual void SwitchSection(MCSection *Section,
+ const MCExpr *Subsection = nullptr);
+
+ /// \brief Set the current section where code is being emitted to \p Section.
+ /// This is required to update CurSection. This version does not call
+ /// ChangeSection.
+ void SwitchSectionNoChange(MCSection *Section,
+ const MCExpr *Subsection = nullptr) {
+ assert(Section && "Cannot switch to a null section!");
+ MCSectionSubPair curSection = SectionStack.back().first;
+ SectionStack.back().second = curSection;
+ if (MCSectionSubPair(Section, Subsection) != curSection)
+ SectionStack.back().first = MCSectionSubPair(Section, Subsection);
+ }
+
+ /// \brief Create the default sections and set the initial one.
+ virtual void InitSections(bool NoExecStack);
+
+ MCSymbol *endSection(MCSection *Section);
+
+ /// \brief Sets the symbol's fragment.
+ ///
+ /// Each emitted symbol will be tracked in the ordering table,
+ /// so we can sort on them later.
+ void AssignFragment(MCSymbol *Symbol, MCFragment *Fragment);
+
+ /// \brief Emit a label for \p Symbol into the current section.
+ ///
+ /// This corresponds to an assembler statement such as:
+ /// foo:
+ ///
+ /// \param Symbol - The symbol to emit. A given symbol should only be
+ /// emitted as a label once, and symbols emitted as a label should never be
+ /// used in an assignment.
+ // FIXME: These emissions are non-const because we mutate the symbol to
+ // add the section we're emitting it to later.
+ virtual void EmitLabel(MCSymbol *Symbol);
+
+ virtual void EmitEHSymAttributes(const MCSymbol *Symbol, MCSymbol *EHSymbol);
+
+ /// \brief Note in the output the specified \p Flag.
+ virtual void EmitAssemblerFlag(MCAssemblerFlag Flag);
+
+ /// \brief Emit the given list \p Options of strings as linker
+ /// options into the output.
+ virtual void EmitLinkerOptions(ArrayRef<std::string> Kind) {}
+
+ /// \brief Note in the output the specified region \p Kind.
+ virtual void EmitDataRegion(MCDataRegionType Kind) {}
+
+ /// \brief Specify the MachO minimum deployment target version.
+ virtual void EmitVersionMin(MCVersionMinType, unsigned Major, unsigned Minor,
+ unsigned Update) {}
+
+ /// \brief Note in the output that the specified \p Func is a Thumb mode
+ /// function (ARM target only).
+ virtual void EmitThumbFunc(MCSymbol *Func);
+
+ /// \brief Emit an assignment of \p Value to \p Symbol.
+ ///
+ /// This corresponds to an assembler statement such as:
+ /// symbol = value
+ ///
+ /// The assignment generates no code, but has the side effect of binding the
+ /// value in the current context. For the assembly streamer, this prints the
+ /// binding into the .s file.
+ ///
+ /// \param Symbol - The symbol being assigned to.
+ /// \param Value - The value for the symbol.
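+ ///
+ /// A minimal sketch (symbol name and context/streamer variables assumed):
+ /// \code
+ ///   MCSymbol *Sym = Ctx.getOrCreateSymbol("answer");
+ ///   S.EmitAssignment(Sym, MCConstantExpr::create(42, Ctx));
+ ///   // The asm streamer would print: answer = 42
+ /// \endcode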
+ virtual void EmitAssignment(MCSymbol *Symbol, const MCExpr *Value);
+
+ /// \brief Emit a weak reference from \p Alias to \p Symbol.
+ ///
+ /// This corresponds to an assembler statement such as:
+ /// .weakref alias, symbol
+ ///
+ /// \param Alias - The alias that is being created.
+ /// \param Symbol - The symbol being aliased.
+ virtual void EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol);
+
+ /// \brief Add the given \p Attribute to \p Symbol.
+ virtual bool EmitSymbolAttribute(MCSymbol *Symbol,
+ MCSymbolAttr Attribute) = 0;
+
+ /// \brief Set the \p DescValue for the \p Symbol.
+ ///
+ /// \param Symbol - The symbol to have its n_desc field set.
+ /// \param DescValue - The value to set into the n_desc field.
+ virtual void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue);
+
+ /// \brief Start emitting COFF symbol definition
+ ///
+ /// \param Symbol - The symbol to have its External & Type fields set.
+ virtual void BeginCOFFSymbolDef(const MCSymbol *Symbol);
+
+ /// \brief Emit the storage class of the symbol.
+ ///
+ /// \param StorageClass - The storage class the symbol should have.
+ virtual void EmitCOFFSymbolStorageClass(int StorageClass);
+
+ /// \brief Emit the type of the symbol.
+ ///
+ /// \param Type - A COFF type identifier (see COFF::SymbolType in X86COFF.h)
+ virtual void EmitCOFFSymbolType(int Type);
+
+ /// \brief Marks the end of the symbol definition.
+ virtual void EndCOFFSymbolDef();
+
+ virtual void EmitCOFFSafeSEH(MCSymbol const *Symbol);
+
+ /// \brief Emits a COFF section index.
+ ///
+ /// \param Symbol - Symbol the section number relocation should point to.
+ virtual void EmitCOFFSectionIndex(MCSymbol const *Symbol);
+
+ /// \brief Emits a COFF section relative relocation.
+ ///
+ /// \param Symbol - Symbol the section relative relocation should point to.
+ virtual void EmitCOFFSecRel32(MCSymbol const *Symbol);
+
+ /// \brief Emit an ELF .size directive.
+ ///
+ /// This corresponds to an assembler statement such as:
+ /// .size symbol, expression
+ virtual void emitELFSize(MCSymbolELF *Symbol, const MCExpr *Value);
+
+ /// \brief Emit a Linker Optimization Hint (LOH) directive.
+ /// \param Args - Arguments of the LOH.
+ virtual void EmitLOHDirective(MCLOHType Kind, const MCLOHArgs &Args) {}
+
+ /// \brief Emit a common symbol.
+ ///
+ /// \param Symbol - The common symbol to emit.
+ /// \param Size - The size of the common symbol.
+ /// \param ByteAlignment - The alignment of the symbol if
+ /// non-zero. This must be a power of 2.
+ virtual void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) = 0;
+
+ /// \brief Emit a local common (.lcomm) symbol.
+ ///
+ /// \param Symbol - The common symbol to emit.
+ /// \param Size - The size of the common symbol.
+ /// \param ByteAlignment - The alignment of the common symbol in bytes.
+ virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment);
+
+ /// \brief Emit the zerofill section and an optional symbol.
+ ///
+ /// \param Section - The zerofill section to create and/or to put the symbol in.
+ /// \param Symbol - The zerofill symbol to emit, if non-NULL.
+ /// \param Size - The size of the zerofill symbol.
+ /// \param ByteAlignment - The alignment of the zerofill symbol if
+ /// non-zero. This must be a power of 2 on some targets.
+ virtual void EmitZerofill(MCSection *Section, MCSymbol *Symbol = nullptr,
+ uint64_t Size = 0, unsigned ByteAlignment = 0) = 0;
+
+ /// \brief Emit a thread local bss (.tbss) symbol.
+ ///
+ /// \param Section - The thread local common section.
+ /// \param Symbol - The thread local common symbol to emit.
+ /// \param Size - The size of the symbol.
+ /// \param ByteAlignment - The alignment of the thread local common symbol
+ /// if non-zero. This must be a power of 2 on some targets.
+ virtual void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol,
+ uint64_t Size, unsigned ByteAlignment = 0);
+
+ /// @}
+ /// \name Generating Data
+ /// @{
+
+ /// \brief Emit the bytes in \p Data into the output.
+ ///
+ /// This is used to implement assembler directives such as .byte, .ascii,
+ /// etc.
+ virtual void EmitBytes(StringRef Data);
+
+ /// \brief Emit the expression \p Value into the output as a native
+ /// integer of the given \p Size bytes.
+ ///
+ /// This is used to implement assembler directives such as .word, .quad,
+ /// etc.
+ ///
+ /// \param Value - The value to emit.
+ /// \param Size - The size of the integer (in bytes) to emit. This must
+ /// match a native machine width.
+ /// \param Loc - The location of the expression for error reporting.
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size,
+ SMLoc Loc = SMLoc());
+
+ void EmitValue(const MCExpr *Value, unsigned Size, SMLoc Loc = SMLoc());
+
+ /// \brief Special case of EmitValue that avoids the client having
+ /// to pass in an MCExpr for constant integers.
+ virtual void EmitIntValue(uint64_t Value, unsigned Size);
+
+ virtual void EmitULEB128Value(const MCExpr *Value);
+
+ virtual void EmitSLEB128Value(const MCExpr *Value);
+
+ /// \brief Special case of EmitULEB128Value that avoids the client having to
+ /// pass in an MCExpr for constant integers.
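+ /// For example, EmitULEB128IntValue(624485) emits the bytes 0xE5 0x8E 0x26
+ /// (the standard DWARF LEB128 example).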
+ void EmitULEB128IntValue(uint64_t Value, unsigned Padding = 0);
+
+ /// \brief Special case of EmitSLEB128Value that avoids the client having to
+ /// pass in an MCExpr for constant integers.
+ void EmitSLEB128IntValue(int64_t Value);
+
+ /// \brief Special case of EmitValue that avoids the client having to pass in
+ /// an MCExpr for MCSymbols.
+ void EmitSymbolValue(const MCSymbol *Sym, unsigned Size,
+ bool IsSectionRelative = false);
+
+ /// \brief Emit the expression \p Value into the output as a gprel64 (64-bit
+ /// GP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .gpdword on
+ /// targets that support them.
+ virtual void EmitGPRel64Value(const MCExpr *Value);
+
+ /// \brief Emit the expression \p Value into the output as a gprel32 (32-bit
+ /// GP relative) value.
+ ///
+ /// This is used to implement assembler directives such as .gprel32 on
+ /// targets that support them.
+ virtual void EmitGPRel32Value(const MCExpr *Value);
+
+ /// \brief Emit NumBytes bytes worth of the value specified by FillValue.
+ /// This implements directives such as '.space'.
+ virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue);
+
+ /// \brief Emit NumBytes worth of zeros.
+ /// This function properly handles data in virtual sections.
+ void EmitZeros(uint64_t NumBytes);
+
+ /// \brief Emit some number of copies of \p Value until the byte alignment \p
+ /// ByteAlignment is reached.
+ ///
+ /// If the number of bytes needed to reach the alignment is not a multiple
+ /// of \p ValueSize, then the contents of the emitted fill bytes are
+ /// undefined.
+ ///
+ /// This is used to implement the .align assembler directive.
+ ///
+ /// \param ByteAlignment - The alignment to reach. This must be a power of
+ /// two on some targets.
+ /// \param Value - The value to use when filling bytes.
+ /// \param ValueSize - The size of the integer (in bytes) to emit for
+ /// \p Value. This must match a native machine width.
+ /// \param MaxBytesToEmit - The maximum number of bytes to emit, or 0. If
+ /// the alignment cannot be reached in this many bytes, no bytes are
+ /// emitted.
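+ ///
+ /// For instance (illustrative), padding to a 16-byte boundary with 0x90
+ /// bytes, as an '.align 16, 0x90' directive would:
+ /// \code
+ ///   S.EmitValueToAlignment(16, 0x90, 1);
+ /// \endcode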
+ virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0,
+ unsigned ValueSize = 1,
+ unsigned MaxBytesToEmit = 0);
+
+ /// \brief Emit nops until the byte alignment \p ByteAlignment is reached.
+ ///
+ /// This is used to align code where the alignment bytes may be executed. It
+ /// can emit different bytes for different sizes to optimize execution.
+ ///
+ /// \param ByteAlignment - The alignment to reach. This must be a power of
+ /// two on some targets.
+ /// \param MaxBytesToEmit - The maximum number of bytes to emit, or 0. If
+ /// the alignment cannot be reached in this many bytes, no bytes are
+ /// emitted.
+ virtual void EmitCodeAlignment(unsigned ByteAlignment,
+ unsigned MaxBytesToEmit = 0);
+
+ /// \brief Emit some number of copies of \p Value until the byte offset \p
+ /// Offset is reached.
+ ///
+ /// This is used to implement assembler directives such as .org.
+ ///
+ /// \param Offset - The offset to reach. This may be an expression, but the
+ /// expression must be associated with the current section.
+ /// \param Value - The value to use when filling bytes.
+ virtual void emitValueToOffset(const MCExpr *Offset, unsigned char Value = 0);
+
+ /// @}
+
+ /// \brief Switch to a new logical file. This is used to implement the '.file
+ /// "foo.c"' assembler directive.
+ virtual void EmitFileDirective(StringRef Filename);
+
+ /// \brief Emit the "identifiers" directive. This implements the
+ /// '.ident "version foo"' assembler directive.
+ virtual void EmitIdent(StringRef IdentString) {}
+
+ /// \brief Associate a filename with a specified logical file number. This
+ /// implements the DWARF2 '.file 4 "foo.c"' assembler directive.
+ virtual unsigned EmitDwarfFileDirective(unsigned FileNo, StringRef Directory,
+ StringRef Filename,
+ unsigned CUID = 0);
+
+ /// \brief This implements the DWARF2 '.loc fileno lineno ...' assembler
+ /// directive.
+ virtual void EmitDwarfLocDirective(unsigned FileNo, unsigned Line,
+ unsigned Column, unsigned Flags,
+ unsigned Isa, unsigned Discriminator,
+ StringRef FileName);
+
+ /// Emit the absolute difference between two symbols.
+ ///
+ /// \pre Offset of \c Hi is greater than the offset of \c Lo.
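+ ///
+ /// For example (illustrative), a 4-byte DWARF length field could be written
+ /// as emitAbsoluteSymbolDiff(UnitEnd, UnitStart, 4).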
+ virtual void emitAbsoluteSymbolDiff(const MCSymbol *Hi, const MCSymbol *Lo,
+ unsigned Size);
+
+ virtual MCSymbol *getDwarfLineTableSymbol(unsigned CUID);
+ virtual void EmitCFISections(bool EH, bool Debug);
+ void EmitCFIStartProc(bool IsSimple);
+ void EmitCFIEndProc();
+ virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
+ virtual void EmitCFIDefCfaOffset(int64_t Offset);
+ virtual void EmitCFIDefCfaRegister(int64_t Register);
+ virtual void EmitCFIOffset(int64_t Register, int64_t Offset);
+ virtual void EmitCFIPersonality(const MCSymbol *Sym, unsigned Encoding);
+ virtual void EmitCFILsda(const MCSymbol *Sym, unsigned Encoding);
+ virtual void EmitCFIRememberState();
+ virtual void EmitCFIRestoreState();
+ virtual void EmitCFISameValue(int64_t Register);
+ virtual void EmitCFIRestore(int64_t Register);
+ virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
+ virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
+ virtual void EmitCFIEscape(StringRef Values);
+ virtual void EmitCFIGnuArgsSize(int64_t Size);
+ virtual void EmitCFISignalFrame();
+ virtual void EmitCFIUndefined(int64_t Register);
+ virtual void EmitCFIRegister(int64_t Register1, int64_t Register2);
+ virtual void EmitCFIWindowSave();
+
+ virtual void EmitWinCFIStartProc(const MCSymbol *Symbol);
+ virtual void EmitWinCFIEndProc();
+ virtual void EmitWinCFIStartChained();
+ virtual void EmitWinCFIEndChained();
+ virtual void EmitWinCFIPushReg(unsigned Register);
+ virtual void EmitWinCFISetFrame(unsigned Register, unsigned Offset);
+ virtual void EmitWinCFIAllocStack(unsigned Size);
+ virtual void EmitWinCFISaveReg(unsigned Register, unsigned Offset);
+ virtual void EmitWinCFISaveXMM(unsigned Register, unsigned Offset);
+ virtual void EmitWinCFIPushFrame(bool Code);
+ virtual void EmitWinCFIEndProlog();
+
+ virtual void EmitWinEHHandler(const MCSymbol *Sym, bool Unwind, bool Except);
+ virtual void EmitWinEHHandlerData();
+
+ virtual void EmitSyntaxDirective();
+
+ /// \brief Emit a .reloc directive.
+ /// Returns true if the relocation could not be emitted because Name is not
+ /// known.
+ virtual bool EmitRelocDirective(const MCExpr &Offset, StringRef Name,
+ const MCExpr *Expr, SMLoc Loc) {
+ return true;
+ }
+
+ /// \brief Emit the given \p Instruction into the current section.
+ virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI);
+
+ /// \brief Set the bundle alignment mode from now on in the section.
+ /// The argument is the power of 2 to which the alignment is set. The
+ /// value 0 means turn the bundle alignment off.
+ virtual void EmitBundleAlignMode(unsigned AlignPow2);
+
+ /// \brief The following instructions are a bundle-locked group.
+ ///
+ /// \param AlignToEnd - If true, the bundle-locked group will be aligned to
+ /// the end of a bundle.
+ virtual void EmitBundleLock(bool AlignToEnd);
+
+ /// \brief Ends a bundle-locked group.
+ virtual void EmitBundleUnlock();
+
+ /// \brief If this file is backed by an assembly streamer, this dumps the
+ /// specified string in the output .s file. This capability is indicated by
+ /// the hasRawTextSupport() predicate. By default this aborts.
+ void EmitRawText(const Twine &String);
+
+ /// \brief Streamer specific finalization.
+ virtual void FinishImpl();
+ /// \brief Finish emission of machine code.
+ void Finish();
+
+ virtual bool mayHaveInstructions(MCSection &Sec) const { return true; }
+};
+
+/// Create a dummy machine code streamer, which does nothing. This is useful for
+/// timing the assembler front end.
+MCStreamer *createNullStreamer(MCContext &Ctx);
+
+/// Create a machine code streamer which will print out assembly for the native
+/// target, suitable for compiling with a native assembler.
+///
+ /// \param InstPrint - If given, the instruction printer to use. If not given,
+/// the MCInst representation will be printed. This method takes ownership of
+/// InstPrint.
+///
+/// \param CE - If given, a code emitter to use to show the instruction
+/// encoding inline with the assembly. This method takes ownership of \p CE.
+///
+/// \param TAB - If given, a target asm backend to use to show the fixup
+/// information in conjunction with encoding information. This method takes
+/// ownership of \p TAB.
+///
+/// \param ShowInst - Whether to show the MCInst representation inline with
+/// the assembly.
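+ ///
+ /// A typical call might look like this (a sketch; the printer, emitter and
+ /// backend arguments may be null):
+ /// \code
+ ///   MCStreamer *S = createAsmStreamer(Ctx, std::move(OS),
+ ///                                     /*isVerboseAsm=*/true,
+ ///                                     /*useDwarfDirectory=*/false,
+ ///                                     InstPrinter, CodeEmitter, AsmBackend,
+ ///                                     /*ShowInst=*/false);
+ /// \endcode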
+MCStreamer *createAsmStreamer(MCContext &Ctx,
+ std::unique_ptr<formatted_raw_ostream> OS,
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst);
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
new file mode 100644
index 0000000..446feef
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSubtargetInfo.h
@@ -0,0 +1,170 @@
+//==-- llvm/MC/MCSubtargetInfo.h - Subtarget Information ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the subtarget options of a Target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSUBTARGETINFO_H
+#define LLVM_MC_MCSUBTARGETINFO_H
+
+#include "llvm/MC/MCInstrItineraries.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include <string>
+
+namespace llvm {
+
+class StringRef;
+
+//===----------------------------------------------------------------------===//
+///
+/// MCSubtargetInfo - Generic base class for all target subtargets.
+///
+class MCSubtargetInfo {
+ Triple TargetTriple; // Target triple
+ std::string CPU; // CPU being targeted.
+ ArrayRef<SubtargetFeatureKV> ProcFeatures; // Processor feature list
+ ArrayRef<SubtargetFeatureKV> ProcDesc; // Processor descriptions
+
+ // Scheduler machine model
+ const SubtargetInfoKV *ProcSchedModels;
+ const MCWriteProcResEntry *WriteProcResTable;
+ const MCWriteLatencyEntry *WriteLatencyTable;
+ const MCReadAdvanceEntry *ReadAdvanceTable;
+ const MCSchedModel *CPUSchedModel;
+
+ const InstrStage *Stages; // Instruction itinerary stages
+ const unsigned *OperandCycles; // Itinerary operand cycles
+ const unsigned *ForwardingPaths; // Forwarding paths
+ FeatureBitset FeatureBits; // Feature bits for current CPU + FS
+
+ MCSubtargetInfo() = delete;
+ MCSubtargetInfo &operator=(MCSubtargetInfo &&) = delete;
+ MCSubtargetInfo &operator=(const MCSubtargetInfo &) = delete;
+
+public:
+ MCSubtargetInfo(const MCSubtargetInfo &) = default;
+ MCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
+ ArrayRef<SubtargetFeatureKV> PF,
+ ArrayRef<SubtargetFeatureKV> PD,
+ const SubtargetInfoKV *ProcSched,
+ const MCWriteProcResEntry *WPR, const MCWriteLatencyEntry *WL,
+ const MCReadAdvanceEntry *RA, const InstrStage *IS,
+ const unsigned *OC, const unsigned *FP);
+
+ /// getTargetTriple - Return the target triple.
+ const Triple &getTargetTriple() const { return TargetTriple; }
+
+ /// getCPU - Return the CPU string.
+ StringRef getCPU() const {
+ return CPU;
+ }
+
+ /// getFeatureBits - Return the feature bits.
+ ///
+ const FeatureBitset& getFeatureBits() const {
+ return FeatureBits;
+ }
+
+ /// setFeatureBits - Set the feature bits.
+ ///
+ void setFeatureBits(const FeatureBitset &FeatureBits_) {
+ FeatureBits = FeatureBits_;
+ }
+
+protected:
+ /// Initialize the scheduling model and feature bits.
+ ///
+ /// FIXME: Find a way to stick this in the constructor, since it should only
+ /// be called during initialization.
+ void InitMCProcessorInfo(StringRef CPU, StringRef FS);
+
+public:
+ /// Set the features to the default for the given CPU with an appended feature
+ /// string.
+ void setDefaultFeatures(StringRef CPU, StringRef FS);
+
+ /// ToggleFeature - Toggle a feature and return the re-computed feature
+ /// bits. This version does not change the implied bits.
+ FeatureBitset ToggleFeature(uint64_t FB);
+
+ /// ToggleFeature - Toggle a feature and return the re-computed feature
+ /// bits. This version does not change the implied bits.
+ FeatureBitset ToggleFeature(const FeatureBitset& FB);
+
+ /// ToggleFeature - Toggle a set of features and return the re-computed
+ /// feature bits. This version will also change all implied bits.
+ FeatureBitset ToggleFeature(StringRef FS);
+
+ /// Apply a feature flag and return the re-computed feature bits, including
+ /// all feature bits implied by the flag.
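+ ///
+ /// For illustration (the feature name is a target-specific assumption):
+ /// \code
+ ///   FeatureBitset FB = STI.ApplyFeatureFlag("+sse2"); // SSE2 plus implied
+ /// \endcode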
+ FeatureBitset ApplyFeatureFlag(StringRef FS);
+
+ /// getSchedModelForCPU - Get the machine model of a CPU.
+ ///
+ const MCSchedModel &getSchedModelForCPU(StringRef CPU) const;
+
+ /// Get the machine model for this subtarget's CPU.
+ const MCSchedModel &getSchedModel() const { return *CPUSchedModel; }
+
+ /// Return an iterator at the first processor resource consumed by the given
+ /// scheduling class.
+ const MCWriteProcResEntry *getWriteProcResBegin(
+ const MCSchedClassDesc *SC) const {
+ return &WriteProcResTable[SC->WriteProcResIdx];
+ }
+ const MCWriteProcResEntry *getWriteProcResEnd(
+ const MCSchedClassDesc *SC) const {
+ return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries;
+ }
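+
+ // Illustrative iteration over the resources of a sched class (a sketch;
+ // 'SC' is assumed to be a valid MCSchedClassDesc):
+ //   for (const MCWriteProcResEntry *PR = STI.getWriteProcResBegin(SC),
+ //        *PRE = STI.getWriteProcResEnd(SC); PR != PRE; ++PR)
+ //     TotalCycles += PR->Cycles;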
+
+ const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC,
+ unsigned DefIdx) const {
+ assert(DefIdx < SC->NumWriteLatencyEntries &&
+ "MachineModel does not specify a WriteResource for DefIdx");
+
+ return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx];
+ }
+
+ int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx,
+ unsigned WriteResID) const {
+ // TODO: The number of read advance entries in a class can be significant
+ // (~50). Consider compressing the WriteID into a dense ID of those that are
+ // used by ReadAdvance and representing them as a bitset.
+ for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx],
+ *E = I + SC->NumReadAdvanceEntries; I != E; ++I) {
+ if (I->UseIdx < UseIdx)
+ continue;
+ if (I->UseIdx > UseIdx)
+ break;
+ // Find the first WriteResIdx match, which has the highest cycle count.
+ if (!I->WriteResourceID || I->WriteResourceID == WriteResID) {
+ return I->Cycles;
+ }
+ }
+ return 0;
+ }
+
+ /// getInstrItineraryForCPU - Get scheduling itinerary of a CPU.
+ ///
+ InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const;
+
+ /// Initialize an InstrItineraryData instance.
+ void initInstrItins(InstrItineraryData &InstrItins) const;
+
+ /// Check whether the CPU string is valid.
+ bool isCPUStringValid(StringRef CPU) const {
+ auto Found = std::lower_bound(ProcDesc.begin(), ProcDesc.end(), CPU);
+ return Found != ProcDesc.end() && StringRef(Found->Key) == CPU;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSymbol.h b/contrib/llvm/include/llvm/MC/MCSymbol.h
new file mode 100644
index 0000000..c51ecfc
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSymbol.h
@@ -0,0 +1,421 @@
+//===- MCSymbol.h - Machine Code Symbols ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCSymbol class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSYMBOL_H
+#define LLVM_MC_MCSYMBOL_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+class MCAsmInfo;
+class MCExpr;
+class MCSymbol;
+class MCFragment;
+class MCSection;
+class MCContext;
+class raw_ostream;
+
+/// MCSymbol - Instances of this class represent a symbol name in the MC file,
+/// and MCSymbols are created and uniqued by the MCContext class. MCSymbols
+/// should only be constructed with valid names for the object file.
+///
+/// If the symbol is defined/emitted into the current translation unit, the
+/// Section member is set to indicate what section it lives in. Otherwise, if
+/// it is a reference to an external entity, it has a null section.
+class MCSymbol {
+protected:
+ /// The kind of the symbol. If it is any value other than unset then this
+ /// class is actually one of the appropriate subclasses of MCSymbol.
+ enum SymbolKind {
+ SymbolKindUnset,
+ SymbolKindCOFF,
+ SymbolKindELF,
+ SymbolKindMachO,
+ };
+
+ /// A symbol can contain an Offset, or Value, or be Common, but never more
+ /// than one of these.
+ enum Contents : uint8_t {
+ SymContentsUnset,
+ SymContentsOffset,
+ SymContentsVariable,
+ SymContentsCommon,
+ };
+
+ // Special sentinel value for the absolute pseudo fragment.
+ static MCFragment *AbsolutePseudoFragment;
+
+ /// If a symbol has a Fragment, the section is implied, so we only need
+ /// one pointer.
+ /// The special AbsolutePseudoFragment value is for absolute symbols.
+ /// If this is a variable symbol, this caches the variable value's fragment.
+ /// FIXME: We might be able to simplify this by having the asm streamer create
+ /// dummy fragments.
+ /// For a defined symbol, the fragment determines the section the symbol is
+ /// defined in; the fragment is null for undefined symbols.
+ ///
+ /// The fragment is also what this symbol's value is relative to, if any.
+ ///
+ /// For the 'HasName' integer, this is true if this symbol is named.
+ /// A named symbol will have a pointer to the name allocated in the bytes
+ /// immediately prior to the MCSymbol.
+ mutable PointerIntPair<MCFragment *, 1> FragmentAndHasName;
+
+ /// IsTemporary - True if this is an assembler temporary label, which
+ /// typically does not survive in the .o file's symbol table. Usually
+ /// "Lfoo" or ".foo".
+ unsigned IsTemporary : 1;
+
+ /// \brief True if this symbol can be redefined.
+ unsigned IsRedefinable : 1;
+
+ /// IsUsed - True if this symbol has been used.
+ mutable unsigned IsUsed : 1;
+
+ mutable bool IsRegistered : 1;
+
+ /// This symbol is visible outside this translation unit.
+ mutable unsigned IsExternal : 1;
+
+ /// This symbol is private extern.
+ mutable unsigned IsPrivateExtern : 1;
+
+ /// LLVM RTTI discriminator. This is actually a SymbolKind enumerator, but is
+ /// unsigned to avoid sign extension and achieve better bitpacking with MSVC.
+ unsigned Kind : 2;
+
+ /// True if we have created a relocation that uses this symbol.
+ mutable unsigned IsUsedInReloc : 1;
+
+ /// This is actually a Contents enumerator, but is unsigned to avoid sign
+ /// extension and achieve better bitpacking with MSVC.
+ unsigned SymbolContents : 2;
+
+ /// The alignment of the symbol, if it is 'common', or -1.
+ ///
+ /// The alignment is stored as log2(align) + 1. This allows all values from
+ /// 0 to 2^31 to be stored which is every power of 2 representable by an
+ /// unsigned.
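+ /// For example, a 16-byte alignment is stored as log2(16) + 1 = 5, while a
+ /// stored value of 0 means the alignment is unset.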
+ enum : unsigned { NumCommonAlignmentBits = 5 };
+ unsigned CommonAlignLog2 : NumCommonAlignmentBits;
+
+ /// The Flags field is used by object file implementations to store
+ /// additional per symbol information which is not easily classified.
+ enum : unsigned { NumFlagsBits = 16 };
+ mutable uint32_t Flags : NumFlagsBits;
+
+ /// Index field, for use by the object file implementation.
+ mutable uint32_t Index = 0;
+
+ union {
+ /// The offset to apply to the fragment address to form this symbol's value.
+ uint64_t Offset;
+
+ /// The size of the symbol, if it is 'common'.
+ uint64_t CommonSize;
+
+ /// If non-null, the value for a variable symbol.
+ const MCExpr *Value;
+ };
+
+protected: // MCContext creates and uniques these.
+ friend class MCExpr;
+ friend class MCContext;
+
+ /// \brief The name for a symbol.
+ /// MCSymbol contains a uint64_t so is probably aligned to 8. On a 32-bit
+ /// system, the name is a pointer so isn't going to satisfy the 8 byte
+ /// alignment of uint64_t. Account for that here.
+ typedef union {
+ const StringMapEntry<bool> *NameEntry;
+ uint64_t AlignmentPadding;
+ } NameEntryStorageTy;
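+
+ // Layout sketch: a named symbol is allocated as
+ //   [NameEntryStorageTy][MCSymbol...]
+ // so getNameEntryPtr() can recover the name slot at ((this) - 1).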
+
+ MCSymbol(SymbolKind Kind, const StringMapEntry<bool> *Name, bool isTemporary)
+ : IsTemporary(isTemporary), IsRedefinable(false), IsUsed(false),
+ IsRegistered(false), IsExternal(false), IsPrivateExtern(false),
+ Kind(Kind), IsUsedInReloc(false), SymbolContents(SymContentsUnset),
+ CommonAlignLog2(0), Flags(0) {
+ Offset = 0;
+ FragmentAndHasName.setInt(!!Name);
+ if (Name)
+ getNameEntryPtr() = Name;
+ }
+
+ // Provide custom new/delete as we will only allocate space for a name
+ // if we need one.
+ void *operator new(size_t s, const StringMapEntry<bool> *Name,
+ MCContext &Ctx);
+
+private:
+
+ void operator delete(void *);
+ /// \brief Placement delete - required by std, but never called.
+ void operator delete(void*, unsigned) {
+ llvm_unreachable("Constructor throws?");
+ }
+ /// \brief Placement delete - required by std, but never called.
+ void operator delete(void*, unsigned, bool) {
+ llvm_unreachable("Constructor throws?");
+ }
+
+ MCSymbol(const MCSymbol &) = delete;
+ void operator=(const MCSymbol &) = delete;
+ MCSection *getSectionPtr(bool SetUsed = true) const {
+ if (MCFragment *F = getFragment(SetUsed)) {
+ assert(F != AbsolutePseudoFragment);
+ return F->getParent();
+ }
+ return nullptr;
+ }
+
+ /// \brief Get a reference to the name field. Requires that we have a name.
+ const StringMapEntry<bool> *&getNameEntryPtr() {
+ assert(FragmentAndHasName.getInt() && "Name is required");
+ NameEntryStorageTy *Name = reinterpret_cast<NameEntryStorageTy *>(this);
+ return (*(Name - 1)).NameEntry;
+ }
+ const StringMapEntry<bool> *&getNameEntryPtr() const {
+ return const_cast<MCSymbol*>(this)->getNameEntryPtr();
+ }
+
+public:
+ /// getName - Get the symbol name.
+ StringRef getName() const {
+ if (!FragmentAndHasName.getInt())
+ return StringRef();
+
+ return getNameEntryPtr()->first();
+ }
+
+ bool isRegistered() const { return IsRegistered; }
+ void setIsRegistered(bool Value) const { IsRegistered = Value; }
+
+ void setUsedInReloc() const { IsUsedInReloc = true; }
+ bool isUsedInReloc() const { return IsUsedInReloc; }
+
+ /// \name Accessors
+ /// @{
+
+ /// isTemporary - Check if this is an assembler temporary symbol.
+ bool isTemporary() const { return IsTemporary; }
+
+ /// isUsed - Check if this is used.
+ bool isUsed() const { return IsUsed; }
+ void setUsed(bool Value) const { IsUsed |= Value; }
+
+ /// \brief Check if this symbol is redefinable.
+ bool isRedefinable() const { return IsRedefinable; }
+ /// \brief Mark this symbol as redefinable.
+ void setRedefinable(bool Value) { IsRedefinable = Value; }
+ /// \brief Prepare this symbol to be redefined.
+ void redefineIfPossible() {
+ if (IsRedefinable) {
+ if (SymbolContents == SymContentsVariable) {
+ Value = nullptr;
+ SymbolContents = SymContentsUnset;
+ }
+ setUndefined();
+ IsRedefinable = false;
+ }
+ }
+
+ /// @}
+ /// \name Associated Sections
+ /// @{
+
+ /// isDefined - Check if this symbol is defined (i.e., it has an address).
+ ///
+ /// Defined symbols are either absolute or in some section.
+ bool isDefined(bool SetUsed = true) const {
+ return getFragment(SetUsed) != nullptr;
+ }
+
+ /// isInSection - Check if this symbol is defined in some section (i.e., it
+ /// is defined but not absolute).
+ bool isInSection(bool SetUsed = true) const {
+ return isDefined(SetUsed) && !isAbsolute(SetUsed);
+ }
+
+ /// isUndefined - Check if this symbol is undefined (i.e., implicitly defined).
+ bool isUndefined(bool SetUsed = true) const { return !isDefined(SetUsed); }
+
+ /// isAbsolute - Check if this is an absolute symbol.
+ bool isAbsolute(bool SetUsed = true) const {
+ return getFragment(SetUsed) == AbsolutePseudoFragment;
+ }
+
+ /// Get the section associated with a defined, non-absolute symbol.
+ MCSection &getSection(bool SetUsed = true) const {
+ assert(isInSection(SetUsed) && "Invalid accessor!");
+ return *getSectionPtr(SetUsed);
+ }
+
+ /// Mark the symbol as defined in the fragment \p F.
+ void setFragment(MCFragment *F) const {
+ assert(!isVariable() && "Cannot set fragment of variable");
+ FragmentAndHasName.setPointer(F);
+ }
+
+ /// Mark the symbol as undefined.
+ void setUndefined() { FragmentAndHasName.setPointer(nullptr); }
+
+ bool isELF() const { return Kind == SymbolKindELF; }
+
+ bool isCOFF() const { return Kind == SymbolKindCOFF; }
+
+ bool isMachO() const { return Kind == SymbolKindMachO; }
+
+ /// @}
+ /// \name Variable Symbols
+ /// @{
+
+ /// isVariable - Check if this is a variable symbol.
+ bool isVariable() const {
+ return SymbolContents == SymContentsVariable;
+ }
+
+ /// getVariableValue - Get the value for variable symbols.
+ const MCExpr *getVariableValue(bool SetUsed = true) const {
+ assert(isVariable() && "Invalid accessor!");
+ IsUsed |= SetUsed;
+ return Value;
+ }
+
+ void setVariableValue(const MCExpr *Value);
+
+ /// @}
+
+ /// Get the (implementation defined) index.
+ uint32_t getIndex() const {
+ return Index;
+ }
+
+ /// Set the (implementation defined) index.
+ void setIndex(uint32_t Value) const {
+ Index = Value;
+ }
+
+ uint64_t getOffset() const {
+ assert((SymbolContents == SymContentsUnset ||
+ SymbolContents == SymContentsOffset) &&
+ "Cannot get offset for a common/variable symbol");
+ return Offset;
+ }
+ void setOffset(uint64_t Value) {
+ assert((SymbolContents == SymContentsUnset ||
+ SymbolContents == SymContentsOffset) &&
+ "Cannot set offset for a common/variable symbol");
+ Offset = Value;
+ SymbolContents = SymContentsOffset;
+ }
+
+ /// Return the size of a 'common' symbol.
+ uint64_t getCommonSize() const {
+ assert(isCommon() && "Not a 'common' symbol!");
+ return CommonSize;
+ }
+
+ /// Mark this symbol as being 'common'.
+ ///
+ /// \param Size - The size of the symbol.
+ /// \param Align - The alignment of the symbol.
+ void setCommon(uint64_t Size, unsigned Align) {
+ assert(getOffset() == 0);
+ CommonSize = Size;
+ SymbolContents = SymContentsCommon;
+
+ assert((!Align || isPowerOf2_32(Align)) &&
+ "Alignment must be a power of 2");
+ unsigned Log2Align = Log2_32(Align) + 1;
+ assert(Log2Align < (1U << NumCommonAlignmentBits) &&
+ "Out of range alignment");
+ CommonAlignLog2 = Log2Align;
+ }
+
+ /// Return the alignment of a 'common' symbol.
+ unsigned getCommonAlignment() const {
+ assert(isCommon() && "Not a 'common' symbol!");
+ return CommonAlignLog2 ? (1U << (CommonAlignLog2 - 1)) : 0;
+ }
+
+ /// Declare this symbol as being 'common'.
+ ///
+ /// \param Size - The size of the symbol.
+ /// \param Align - The alignment of the symbol.
+ /// \return True if the symbol was already declared as a different type.
+ bool declareCommon(uint64_t Size, unsigned Align) {
+ assert(isCommon() || getOffset() == 0);
+ if (isCommon()) {
+ if (CommonSize != Size || getCommonAlignment() != Align)
+ return true;
+ } else
+ setCommon(Size, Align);
+ return false;
+ }
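+
+ // Illustrative (a sketch): the first declaration succeeds, a conflicting
+ // redeclaration reports a mismatch.
+ //   Sym->declareCommon(4, 4);  // returns false: now common, size 4, align 4
+ //   Sym->declareCommon(8, 4);  // returns true: size conflicts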
+
+ /// Is this a 'common' symbol?
+ bool isCommon() const {
+ return SymbolContents == SymContentsCommon;
+ }
+
+ MCFragment *getFragment(bool SetUsed = true) const {
+ MCFragment *Fragment = FragmentAndHasName.getPointer();
+ if (Fragment || !isVariable())
+ return Fragment;
+ Fragment = getVariableValue(SetUsed)->findAssociatedFragment();
+ FragmentAndHasName.setPointer(Fragment);
+ return Fragment;
+ }
+
+ bool isExternal() const { return IsExternal; }
+ void setExternal(bool Value) const { IsExternal = Value; }
+
+ bool isPrivateExtern() const { return IsPrivateExtern; }
+ void setPrivateExtern(bool Value) { IsPrivateExtern = Value; }
+
+ /// print - Print the value to the stream \p OS.
+ void print(raw_ostream &OS, const MCAsmInfo *MAI) const;
+
+ /// dump - Print the value to stderr.
+ void dump() const;
+
+protected:
+ /// Get the (implementation defined) symbol flags.
+ uint32_t getFlags() const { return Flags; }
+
+ /// Set the (implementation defined) symbol flags.
+ void setFlags(uint32_t Value) const {
+ assert(Value < (1U << NumFlagsBits) && "Out of range flags");
+ Flags = Value;
+ }
+
+ /// Modify the flags via a mask
+ void modifyFlags(uint32_t Value, uint32_t Mask) const {
+ assert(Value < (1U << NumFlagsBits) && "Out of range flags");
+ Flags = (Flags & ~Mask) | Value;
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MCSymbol &Sym) {
+ Sym.print(OS, nullptr);
+ return OS;
+}
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSymbolCOFF.h b/contrib/llvm/include/llvm/MC/MCSymbolCOFF.h
new file mode 100644
index 0000000..2172c67
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSymbolCOFF.h
@@ -0,0 +1,64 @@
+//===- MCSymbolCOFF.h - ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLCOFF_H
+#define LLVM_MC_MCSYMBOLCOFF_H
+
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class MCSymbolCOFF : public MCSymbol {
+
+ /// This corresponds to the Type field of the COFF symbol record.
+ mutable uint16_t Type;
+
+ enum SymbolFlags : uint16_t {
+ SF_ClassMask = 0x00FF,
+ SF_ClassShift = 0,
+
+ SF_WeakExternal = 0x0100,
+ SF_SafeSEH = 0x0200,
+ };
+
+public:
+ MCSymbolCOFF(const StringMapEntry<bool> *Name, bool isTemporary)
+ : MCSymbol(SymbolKindCOFF, Name, isTemporary), Type(0) {}
+
+ uint16_t getType() const {
+ return Type;
+ }
+ void setType(uint16_t Ty) const {
+ Type = Ty;
+ }
+
+ uint16_t getClass() const {
+ return (getFlags() & SF_ClassMask) >> SF_ClassShift;
+ }
+ void setClass(uint16_t StorageClass) const {
+ modifyFlags(StorageClass << SF_ClassShift, SF_ClassMask);
+ }
+
+ bool isWeakExternal() const {
+ return getFlags() & SF_WeakExternal;
+ }
+ void setIsWeakExternal() const {
+ modifyFlags(SF_WeakExternal, SF_WeakExternal);
+ }
+
+ bool isSafeSEH() const {
+ return getFlags() & SF_SafeSEH;
+ }
+ void setIsSafeSEH() const {
+ modifyFlags(SF_SafeSEH, SF_SafeSEH);
+ }
+
+ static bool classof(const MCSymbol *S) { return S->isCOFF(); }
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSymbolELF.h b/contrib/llvm/include/llvm/MC/MCSymbolELF.h
new file mode 100644
index 0000000..bbcd22e
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSymbolELF.h
@@ -0,0 +1,54 @@
+//===- MCSymbolELF.h - -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLELF_H
+#define LLVM_MC_MCSYMBOLELF_H
+
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class MCSymbolELF : public MCSymbol {
+ /// An expression describing how to calculate the size of a symbol. If a
+ /// symbol has no size this field will be NULL.
+ const MCExpr *SymbolSize = nullptr;
+
+public:
+ MCSymbolELF(const StringMapEntry<bool> *Name, bool isTemporary)
+ : MCSymbol(SymbolKindELF, Name, isTemporary) {}
+ void setSize(const MCExpr *SS) { SymbolSize = SS; }
+
+ const MCExpr *getSize() const { return SymbolSize; }
+
+ void setVisibility(unsigned Visibility);
+ unsigned getVisibility() const;
+
+ void setOther(unsigned Other);
+ unsigned getOther() const;
+
+ void setType(unsigned Type) const;
+ unsigned getType() const;
+
+ void setBinding(unsigned Binding) const;
+ unsigned getBinding() const;
+
+ bool isBindingSet() const;
+
+ void setIsWeakrefUsedInReloc() const;
+ bool isWeakrefUsedInReloc() const;
+
+ void setIsSignature() const;
+ bool isSignature() const;
+
+ static bool classof(const MCSymbol *S) { return S->isELF(); }
+
+private:
+ void setIsBindingSet() const;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSymbolMachO.h b/contrib/llvm/include/llvm/MC/MCSymbolMachO.h
new file mode 100644
index 0000000..5b0321f
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSymbolMachO.h
@@ -0,0 +1,123 @@
+//===- MCSymbolMachO.h - ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_MC_MCSYMBOLMACHO_H
+#define LLVM_MC_MCSYMBOLMACHO_H
+
+#include "llvm/MC/MCSymbol.h"
+
+namespace llvm {
+class MCSymbolMachO : public MCSymbol {
+ /// \brief We store the value for the 'desc' symbol field in the
+ /// lowest 16 bits of the implementation defined flags.
+ enum MachOSymbolFlags : uint16_t { // See <mach-o/nlist.h>.
+ SF_DescFlagsMask = 0xFFFF,
+
+ // Reference type flags.
+ SF_ReferenceTypeMask = 0x0007,
+ SF_ReferenceTypeUndefinedNonLazy = 0x0000,
+ SF_ReferenceTypeUndefinedLazy = 0x0001,
+ SF_ReferenceTypeDefined = 0x0002,
+ SF_ReferenceTypePrivateDefined = 0x0003,
+ SF_ReferenceTypePrivateUndefinedNonLazy = 0x0004,
+ SF_ReferenceTypePrivateUndefinedLazy = 0x0005,
+
+ // Other 'desc' flags.
+ SF_ThumbFunc = 0x0008,
+ SF_NoDeadStrip = 0x0020,
+ SF_WeakReference = 0x0040,
+ SF_WeakDefinition = 0x0080,
+ SF_SymbolResolver = 0x0100,
+
+ // Common alignment
+ SF_CommonAlignmentMask = 0xF0FF,
+ SF_CommonAlignmentShift = 8
+ };
+
+public:
+ MCSymbolMachO(const StringMapEntry<bool> *Name, bool isTemporary)
+ : MCSymbol(SymbolKindMachO, Name, isTemporary) {}
+
+ // Reference type methods.
+
+ void clearReferenceType() const {
+ modifyFlags(0, SF_ReferenceTypeMask);
+ }
+
+ void setReferenceTypeUndefinedLazy(bool Value) const {
+ modifyFlags(Value ? SF_ReferenceTypeUndefinedLazy : 0,
+ SF_ReferenceTypeUndefinedLazy);
+ }
+
+ // Other 'desc' methods.
+
+ void setThumbFunc() const {
+ modifyFlags(SF_ThumbFunc, SF_ThumbFunc);
+ }
+
+ bool isNoDeadStrip() const {
+ return getFlags() & SF_NoDeadStrip;
+ }
+ void setNoDeadStrip() const {
+ modifyFlags(SF_NoDeadStrip, SF_NoDeadStrip);
+ }
+
+ bool isWeakReference() const {
+ return getFlags() & SF_WeakReference;
+ }
+ void setWeakReference() const {
+ modifyFlags(SF_WeakReference, SF_WeakReference);
+ }
+
+ bool isWeakDefinition() const {
+ return getFlags() & SF_WeakDefinition;
+ }
+ void setWeakDefinition() const {
+ modifyFlags(SF_WeakDefinition, SF_WeakDefinition);
+ }
+
+ bool isSymbolResolver() const {
+ return getFlags() & SF_SymbolResolver;
+ }
+ void setSymbolResolver() const {
+ modifyFlags(SF_SymbolResolver, SF_SymbolResolver);
+ }
+
+ void setDesc(unsigned Value) const {
+ assert(Value == (Value & SF_DescFlagsMask) &&
+ "Invalid .desc value!");
+ setFlags(Value & SF_DescFlagsMask);
+ }
+
+ /// \brief Get the encoded value of the flags as they will be emitted in to
+ /// the MachO binary
+ uint16_t getEncodedFlags() const {
+ uint16_t Flags = getFlags();
+
+ // Common alignment is packed into the 'desc' bits.
+ if (isCommon()) {
+ if (unsigned Align = getCommonAlignment()) {
+ unsigned Log2Size = Log2_32(Align);
+ assert((1U << Log2Size) == Align && "Invalid 'common' alignment!");
+ if (Log2Size > 15)
+ report_fatal_error("invalid 'common' alignment '" +
+ Twine(Align) + "' for '" + getName() + "'",
+ false);
+ Flags = (Flags & SF_CommonAlignmentMask) |
+ (Log2Size << SF_CommonAlignmentShift);
+ }
+ }
+
+ return Flags;
+ }
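+
+ // For example (illustrative): a common symbol with 16-byte alignment packs
+ // log2(16) == 4 into the desc alignment bits, so flags of 0x0040
+ // (SF_WeakReference) encode as 0x0440.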
+
+ static bool classof(const MCSymbol *S) { return S->isMachO(); }
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCSymbolizer.h b/contrib/llvm/include/llvm/MC/MCSymbolizer.h
new file mode 100644
index 0000000..2ef1767
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCSymbolizer.h
@@ -0,0 +1,85 @@
+//===-- llvm/MC/MCSymbolizer.h - MCSymbolizer class -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCSymbolizer class, which is used
+// to symbolize instructions decoded from an object, that is, transform their
+// immediate operands to MCExprs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCSYMBOLIZER_H
+#define LLVM_MC_MCSYMBOLIZER_H
+
+#include "llvm/MC/MCRelocationInfo.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <memory>
+
+namespace llvm {
+
+class MCContext;
+class MCInst;
+class raw_ostream;
+
+/// \brief Symbolize and annotate disassembled instructions.
+///
+ /// For now this mimics the old symbolization logic (from both ARM and x86),
+ /// which relied on user-provided (C API) callbacks to do the actual symbol
+ /// lookup in the object file. This was moved to MCExternalSymbolizer.
+/// A better API would not rely on actually calling the two methods here from
+/// inside each disassembler, but would use the instr info to determine what
+/// operands are actually symbolizable, and in what way. I don't think this
+/// information exists right now.
+class MCSymbolizer {
+ MCSymbolizer(const MCSymbolizer &) = delete;
+ void operator=(const MCSymbolizer &) = delete;
+
+protected:
+ MCContext &Ctx;
+ std::unique_ptr<MCRelocationInfo> RelInfo;
+
+public:
+ /// \brief Construct an MCSymbolizer, taking ownership of \p RelInfo.
+ MCSymbolizer(MCContext &Ctx, std::unique_ptr<MCRelocationInfo> RelInfo)
+ : Ctx(Ctx), RelInfo(std::move(RelInfo)) {
+ }
+
+ virtual ~MCSymbolizer();
+
+ /// \brief Try to add a symbolic operand instead of \p Value to the MCInst.
+ ///
+ /// Instead of having a difficult to read immediate, a symbolic operand would
+ /// represent this immediate in a more understandable way, for instance as a
+ /// symbol or an offset from a symbol. Relocations can also be used to enrich
+ /// the symbolic expression.
+ /// \param Inst - The MCInst where to insert the symbolic operand.
+ /// \param cStream - Stream to print comments and annotations on.
+ /// \param Value - Operand value, pc-adjusted by the caller if necessary.
+ /// \param Address - Load address of the instruction.
+ /// \param IsBranch - Is the instruction a branch?
+ /// \param Offset - Byte offset of the operand inside the inst.
+ /// \param InstSize - Size of the instruction in bytes.
+ /// \return Whether a symbolic operand was added.
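+ ///
+ /// For instance (illustrative), a symbolizer may turn the raw target of
+ /// "callq 0x100000f60" into an MCSymbolRefExpr so the disassembly reads
+ /// "callq _printf".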
+ virtual bool tryAddingSymbolicOperand(MCInst &Inst, raw_ostream &cStream,
+ int64_t Value, uint64_t Address,
+ bool IsBranch, uint64_t Offset,
+ uint64_t InstSize) = 0;
+
+ /// \brief Try to add a comment on the PC-relative load.
+ /// For instance, in Mach-O, this is used to add annotations to instructions
+ /// that use C string literals, as found in __cstring.
+ virtual void tryAddingPcLoadReferenceComment(raw_ostream &cStream,
+ int64_t Value,
+ uint64_t Address) = 0;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
new file mode 100644
index 0000000..03b2dc9
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCTargetAsmParser.h
@@ -0,0 +1,222 @@
+//===-- llvm/MC/MCTargetAsmParser.h - Target Assembly Parser ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETASMPARSER_H
+#define LLVM_MC_MCTARGETASMPARSER_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include <memory>
+
+namespace llvm {
+class AsmToken;
+class MCInst;
+class MCParsedAsmOperand;
+class MCStreamer;
+class MCSubtargetInfo;
+class SMLoc;
+class StringRef;
+template <typename T> class SmallVectorImpl;
+
+typedef SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>> OperandVector;
+
+enum AsmRewriteKind {
+ AOK_Delete = 0, // Rewrite should be ignored.
+ AOK_Align, // Rewrite align as .align.
+ AOK_EVEN, // Rewrite even as .even.
+ AOK_DotOperator, // Rewrite a dot operator expression as an immediate.
+ // E.g., [eax].foo.bar -> [eax].8
+ AOK_Emit, // Rewrite _emit as .byte.
+ AOK_Imm, // Rewrite as $$N.
+ AOK_ImmPrefix, // Add $$ before a parsed Imm.
+ AOK_Input, // Rewrite in terms of $N.
+ AOK_Output, // Rewrite in terms of $N.
+ AOK_SizeDirective, // Add a sizing directive (e.g., dword ptr).
+ AOK_Label, // Rewrite local labels.
+ AOK_Skip // Skip emission (e.g., offset/type operators).
+};
+
+ const char AsmRewritePrecedence[] = {
+ 0, // AOK_Delete
+ 2, // AOK_Align
+ 2, // AOK_EVEN
+ 2, // AOK_DotOperator
+ 2, // AOK_Emit
+ 4, // AOK_Imm
+ 4, // AOK_ImmPrefix
+ 3, // AOK_Input
+ 3, // AOK_Output
+ 5, // AOK_SizeDirective
+ 1, // AOK_Label
+ 2 // AOK_Skip
+};
+
+struct AsmRewrite {
+ AsmRewriteKind Kind;
+ SMLoc Loc;
+ unsigned Len;
+ unsigned Val;
+ StringRef Label;
+public:
+ AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, unsigned val = 0)
+ : Kind(kind), Loc(loc), Len(len), Val(val) {}
+ AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len, StringRef label)
+ : Kind(kind), Loc(loc), Len(len), Val(0), Label(label) {}
+};
+
+struct ParseInstructionInfo {
+
+ SmallVectorImpl<AsmRewrite> *AsmRewrites;
+
+ ParseInstructionInfo() : AsmRewrites(nullptr) {}
+ ParseInstructionInfo(SmallVectorImpl<AsmRewrite> *rewrites)
+ : AsmRewrites(rewrites) {}
+};
+
+/// MCTargetAsmParser - Generic interface to target specific assembly parsers.
+class MCTargetAsmParser : public MCAsmParserExtension {
+public:
+ enum MatchResultTy {
+ Match_InvalidOperand,
+ Match_MissingFeature,
+ Match_MnemonicFail,
+ Match_Success,
+ FIRST_TARGET_MATCH_RESULT_TY
+ };
+
+private:
+ MCTargetAsmParser(const MCTargetAsmParser &) = delete;
+ void operator=(const MCTargetAsmParser &) = delete;
+protected: // Can only create subclasses.
+ MCTargetAsmParser(MCTargetOptions const &, const MCSubtargetInfo &STI);
+
+ /// Create a copy of STI and return a non-const reference to it.
+ MCSubtargetInfo &copySTI();
+
+ /// AvailableFeatures - The current set of available features.
+ uint64_t AvailableFeatures;
+
+ /// ParsingInlineAsm - Are we parsing ms-style inline assembly?
+ bool ParsingInlineAsm;
+
+ /// SemaCallback - The Sema callback implementation. Must be set when parsing
+ /// ms-style inline assembly.
+ MCAsmParserSemaCallback *SemaCallback;
+
+ /// Set of options which affects instrumentation of inline assembly.
+ MCTargetOptions MCOptions;
+
+ /// Current STI.
+ const MCSubtargetInfo *STI;
+
+public:
+ ~MCTargetAsmParser() override;
+
+ const MCSubtargetInfo &getSTI() const;
+
+ uint64_t getAvailableFeatures() const { return AvailableFeatures; }
+ void setAvailableFeatures(uint64_t Value) { AvailableFeatures = Value; }
+
+ bool isParsingInlineAsm () { return ParsingInlineAsm; }
+ void setParsingInlineAsm (bool Value) { ParsingInlineAsm = Value; }
+
+ MCTargetOptions getTargetOptions() const { return MCOptions; }
+
+ void setSemaCallback(MCAsmParserSemaCallback *Callback) {
+ SemaCallback = Callback;
+ }
+
+ virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) = 0;
+
+ /// Sets frame register corresponding to the current MachineFunction.
+ virtual void SetFrameRegister(unsigned RegNo) {}
+
+ /// ParseInstruction - Parse one assembly instruction.
+ ///
+ /// The parser is positioned following the instruction name. The target
+ /// specific instruction parser should parse the entire instruction and
+ /// construct the appropriate MCInst, or emit an error. On success, the entire
+ /// line should be parsed up to and including the end-of-statement token. On
+ /// failure, the parser is not required to read to the end of the line.
+ ///
+ /// \param Name - The instruction name.
+ /// \param NameLoc - The source location of the name.
+ /// \param Operands [out] - The list of parsed operands, this returns
+ /// ownership of them to the caller.
+ /// \return True on failure.
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) = 0;
+ virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ AsmToken Token, OperandVector &Operands) {
+ return ParseInstruction(Info, Name, Token.getLoc(), Operands);
+ }
+
+ /// ParseDirective - Parse a target specific assembler directive.
+ ///
+ /// The parser is positioned following the directive name. The target
+ /// specific directive parser should parse the entire directive doing or
+ /// recording any target specific work, or return true and do nothing if the
+ /// directive is not target specific. If the directive is specific for
+ /// the target, the entire line is parsed up to and including the
+ /// end-of-statement token and false is returned.
+ ///
+ /// \param DirectiveID - the identifier token of the directive.
+ virtual bool ParseDirective(AsmToken DirectiveID) = 0;
+
+ /// MatchAndEmitInstruction - Recognize a series of operands of a parsed
+ /// instruction as an actual MCInst and emit it to the specified MCStreamer.
+ /// This returns false on success and returns true on failure to match.
+ ///
+ /// On failure, the target parser is responsible for emitting a diagnostic
+ /// explaining the match failure.
+ virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) = 0;
+
+ /// Allows targets to let registers opt out of clobber lists.
+ virtual bool OmitRegisterFromClobberLists(unsigned RegNo) { return false; }
+
+ /// Allow a target to add special case operand matching for things that
+ /// tblgen doesn't/can't handle effectively. For example, literal
+ /// immediates on ARM. TableGen expects a token operand, but the parser
+ /// will recognize them as immediates.
+ virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) {
+ return Match_InvalidOperand;
+ }
+
+ /// checkTargetMatchPredicate - Validate the instruction match against
+ /// any complex target predicates not expressible via match classes.
+ virtual unsigned checkTargetMatchPredicate(MCInst &Inst) {
+ return Match_Success;
+ }
+
+ virtual void convertToMapAndConstraints(unsigned Kind,
+ const OperandVector &Operands) = 0;
+
+ // Return whether this parser uses assignment statements with equals tokens.
+ virtual bool equalIsAsmAssignment() { return true; }
+ // Return whether this start-of-statement identifier is a label.
+ virtual bool isLabel(AsmToken &Token) { return true; }
+
+ virtual const MCExpr *applyModifierToExpr(const MCExpr *E,
+ MCSymbolRefExpr::VariantKind,
+ MCContext &Ctx) {
+ return nullptr;
+ }
+
+ virtual void onLabelParsed(MCSymbol *Symbol) { }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/MC/MCTargetOptions.h b/contrib/llvm/include/llvm/MC/MCTargetOptions.h
new file mode 100644
index 0000000..4b66a75
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCTargetOptions.h
@@ -0,0 +1,72 @@
+//===- MCTargetOptions.h - MC Target Options -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONS_H
+#define LLVM_MC_MCTARGETOPTIONS_H
+
+#include <string>
+
+namespace llvm {
+
+class StringRef;
+
+class MCTargetOptions {
+public:
+ enum AsmInstrumentation {
+ AsmInstrumentationNone,
+ AsmInstrumentationAddress
+ };
+
+ /// Enables AddressSanitizer instrumentation at machine level.
+ bool SanitizeAddress : 1;
+
+ bool MCRelaxAll : 1;
+ bool MCNoExecStack : 1;
+ bool MCFatalWarnings : 1;
+ bool MCNoWarn : 1;
+ bool MCSaveTempLabels : 1;
+ bool MCUseDwarfDirectory : 1;
+ bool MCIncrementalLinkerCompatible : 1;
+ bool ShowMCEncoding : 1;
+ bool ShowMCInst : 1;
+ bool AsmVerbose : 1;
+ int DwarfVersion;
+ /// getABIName - If this returns a non-empty string, it is the textual name
+ /// of the ABI that we want the backend to use, e.g. o32 or aapcs-linux.
+ StringRef getABIName() const;
+ std::string ABIName;
+ MCTargetOptions();
+};
+
+inline bool operator==(const MCTargetOptions &LHS, const MCTargetOptions &RHS) {
+#define ARE_EQUAL(X) LHS.X == RHS.X
+ return (ARE_EQUAL(SanitizeAddress) &&
+ ARE_EQUAL(MCRelaxAll) &&
+ ARE_EQUAL(MCNoExecStack) &&
+ ARE_EQUAL(MCFatalWarnings) &&
+ ARE_EQUAL(MCNoWarn) &&
+ ARE_EQUAL(MCSaveTempLabels) &&
+ ARE_EQUAL(MCUseDwarfDirectory) &&
+ ARE_EQUAL(MCIncrementalLinkerCompatible) &&
+ ARE_EQUAL(ShowMCEncoding) &&
+ ARE_EQUAL(ShowMCInst) &&
+ ARE_EQUAL(AsmVerbose) &&
+ ARE_EQUAL(DwarfVersion) &&
+ ARE_EQUAL(ABIName));
+#undef ARE_EQUAL
+}
+
+inline bool operator!=(const MCTargetOptions &LHS, const MCTargetOptions &RHS) {
+ return !(LHS == RHS);
+}
+
+} // end namespace llvm
+
+#endif
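
A minimal usage sketch for the options class above; needsReassembly is a hypothetical helper, and only members declared in this header are assumed.

#include "llvm/MC/MCTargetOptions.h"
using namespace llvm;

// Decide whether cached MC output is stale relative to a fresh configuration.
bool needsReassembly(const MCTargetOptions &Cached) {
  MCTargetOptions Fresh;       // default-constructed baseline
  Fresh.MCRelaxAll = true;     // relax all fixups in the emitted object
  Fresh.DwarfVersion = 4;      // request DWARF version 4
  return Fresh != Cached;      // field-wise comparison, including ABIName
}
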
diff --git a/contrib/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h b/contrib/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h
new file mode 100644
index 0000000..5180208
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCTargetOptionsCommandFlags.h
@@ -0,0 +1,74 @@
+//===-- MCTargetOptionsCommandFlags.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains machine code-specific flags that are shared between
+// different command line tools.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+#define LLVM_MC_MCTARGETOPTIONSCOMMANDFLAGS_H
+
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/Support/CommandLine.h"
+using namespace llvm;
+
+cl::opt<MCTargetOptions::AsmInstrumentation> AsmInstrumentation(
+ "asm-instrumentation", cl::desc("Instrumentation of inline assembly and "
+ "assembly source files"),
+ cl::init(MCTargetOptions::AsmInstrumentationNone),
+ cl::values(clEnumValN(MCTargetOptions::AsmInstrumentationNone, "none",
+ "no instrumentation at all"),
+ clEnumValN(MCTargetOptions::AsmInstrumentationAddress, "address",
+ "instrument instructions with memory arguments"),
+ clEnumValEnd));
+
+cl::opt<bool> RelaxAll("mc-relax-all",
+ cl::desc("When used with filetype=obj, "
+ "relax all fixups in the emitted object file"));
+
+cl::opt<bool> IncrementalLinkerCompatible(
+ "incremental-linker-compatible",
+ cl::desc(
+ "When used with filetype=obj, "
+ "emit an object file which can be used with an incremental linker"));
+
+cl::opt<int> DwarfVersion("dwarf-version", cl::desc("Dwarf version"),
+ cl::init(0));
+
+cl::opt<bool> ShowMCInst("asm-show-inst",
+ cl::desc("Emit internal instruction representation to "
+ "assembly file"));
+
+cl::opt<bool> FatalWarnings("fatal-warnings",
+ cl::desc("Treat warnings as errors"));
+
+cl::opt<bool> NoWarn("no-warn", cl::desc("Suppress all warnings"));
+cl::alias NoWarnW("W", cl::desc("Alias for --no-warn"), cl::aliasopt(NoWarn));
+
+cl::opt<std::string>
+ABIName("target-abi", cl::Hidden,
+ cl::desc("The name of the ABI to be targeted from the backend."),
+ cl::init(""));
+
+static inline MCTargetOptions InitMCTargetOptionsFromFlags() {
+ MCTargetOptions Options;
+ Options.SanitizeAddress =
+ (AsmInstrumentation == MCTargetOptions::AsmInstrumentationAddress);
+ Options.MCRelaxAll = RelaxAll;
+ Options.MCIncrementalLinkerCompatible = IncrementalLinkerCompatible;
+ Options.DwarfVersion = DwarfVersion;
+ Options.ShowMCInst = ShowMCInst;
+ Options.ABIName = ABIName;
+ Options.MCFatalWarnings = FatalWarnings;
+ Options.MCNoWarn = NoWarn;
+ return Options;
+}
+
+#endif
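
A hypothetical tool entry point showing the intended flow: cl::ParseCommandLineOptions populates the flag globals above, after which InitMCTargetOptionsFromFlags snapshots them.

#include "llvm/MC/MCTargetOptionsCommandFlags.h"

int main(int argc, char **argv) {
  // Fills RelaxAll, DwarfVersion, ABIName, etc. from argv.
  cl::ParseCommandLineOptions(argc, argv, "MC options demo\n");
  MCTargetOptions Options = InitMCTargetOptionsFromFlags();
  return Options.MCNoWarn ? 1 : 0; // e.g. react to -no-warn
}
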
diff --git a/contrib/llvm/include/llvm/MC/MCValue.h b/contrib/llvm/include/llvm/MC/MCValue.h
new file mode 100644
index 0000000..ead08fd
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCValue.h
@@ -0,0 +1,86 @@
+//===-- llvm/MC/MCValue.h - MCValue class -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MCValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCVALUE_H
+#define LLVM_MC_MCVALUE_H
+
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+class MCAsmInfo;
+class raw_ostream;
+
+/// \brief This represents an "assembler immediate".
+///
+/// In its most general form, this can hold ":Kind:(SymbolA - SymbolB +
+/// imm64)". Not all targets supports relocations of this general form, but we
+/// need to represent this anyway.
+///
+/// In general both SymbolA and SymbolB will also have a modifier
+/// analogous to the top-level Kind. Current targets are not expected
+/// to make use of both though. The choice comes down to whether
+/// relocation modifiers apply to the closest symbol or the whole
+/// expression.
+///
+/// Note that this class must remain a simple POD value class, because we need
+/// it to live in unions etc.
+class MCValue {
+ const MCSymbolRefExpr *SymA, *SymB;
+ int64_t Cst;
+ uint32_t RefKind;
+public:
+
+ int64_t getConstant() const { return Cst; }
+ const MCSymbolRefExpr *getSymA() const { return SymA; }
+ const MCSymbolRefExpr *getSymB() const { return SymB; }
+ uint32_t getRefKind() const { return RefKind; }
+
+ /// \brief Is this an absolute (as opposed to relocatable) value.
+ bool isAbsolute() const { return !SymA && !SymB; }
+
+ /// \brief Print the value to the stream \p OS.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Print the value to stderr.
+ void dump() const;
+
+ MCSymbolRefExpr::VariantKind getAccessVariant() const;
+
+ static MCValue get(const MCSymbolRefExpr *SymA,
+ const MCSymbolRefExpr *SymB = nullptr,
+ int64_t Val = 0, uint32_t RefKind = 0) {
+ MCValue R;
+ R.Cst = Val;
+ R.SymA = SymA;
+ R.SymB = SymB;
+ R.RefKind = RefKind;
+ return R;
+ }
+
+ static MCValue get(int64_t Val) {
+ MCValue R;
+ R.Cst = Val;
+ R.SymA = nullptr;
+ R.SymB = nullptr;
+ R.RefKind = 0;
+ return R;
+ }
+
+};
+
+} // end namespace llvm
+
+#endif
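
Since MCValue is a POD built through the static get() factories, client code typically branches on isAbsolute(); foldIfAbsolute is a hypothetical helper.

#include "llvm/MC/MCValue.h"
using namespace llvm;

// Fold a constant only when the value carries no symbolic parts.
int64_t foldIfAbsolute(const MCValue &V, int64_t Addend) {
  if (V.isAbsolute())                 // neither SymA nor SymB is set
    return V.getConstant() + Addend;  // safe to resolve immediately
  return Addend;                      // relocatable: defer to the writer
}

MCValue makeImm(int64_t Imm) {
  return MCValue::get(Imm);           // SymA = SymB = nullptr, RefKind = 0
}
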
diff --git a/contrib/llvm/include/llvm/MC/MCWin64EH.h b/contrib/llvm/include/llvm/MC/MCWin64EH.h
new file mode 100644
index 0000000..0e81a19
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCWin64EH.h
@@ -0,0 +1,63 @@
+//===- MCWin64EH.h - Machine Code Win64 EH support --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains declarations to support the Win64 Exception Handling
+// scheme in MC.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWIN64EH_H
+#define LLVM_MC_MCWIN64EH_H
+
+#include "llvm/MC/MCWinEH.h"
+#include "llvm/Support/Win64EH.h"
+#include <vector>
+
+namespace llvm {
+class MCStreamer;
+class MCSymbol;
+
+namespace Win64EH {
+struct Instruction {
+ static WinEH::Instruction PushNonVol(MCSymbol *L, unsigned Reg) {
+ return WinEH::Instruction(Win64EH::UOP_PushNonVol, L, Reg, -1);
+ }
+ static WinEH::Instruction Alloc(MCSymbol *L, unsigned Size) {
+ return WinEH::Instruction(Size > 128 ? UOP_AllocLarge : UOP_AllocSmall, L,
+ -1, Size);
+ }
+ static WinEH::Instruction PushMachFrame(MCSymbol *L, bool Code) {
+ return WinEH::Instruction(UOP_PushMachFrame, L, -1, Code ? 1 : 0);
+ }
+ static WinEH::Instruction SaveNonVol(MCSymbol *L, unsigned Reg,
+ unsigned Offset) {
+ return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveNonVolBig
+ : UOP_SaveNonVol,
+ L, Reg, Offset);
+ }
+ static WinEH::Instruction SaveXMM(MCSymbol *L, unsigned Reg,
+ unsigned Offset) {
+ return WinEH::Instruction(Offset > 512 * 1024 - 8 ? UOP_SaveXMM128Big
+ : UOP_SaveXMM128,
+ L, Reg, Offset);
+ }
+ static WinEH::Instruction SetFPReg(MCSymbol *L, unsigned Reg, unsigned Off) {
+ return WinEH::Instruction(UOP_SetFPReg, L, Reg, Off);
+ }
+};
+
+class UnwindEmitter : public WinEH::UnwindEmitter {
+public:
+ void Emit(MCStreamer &Streamer) const override;
+ void EmitUnwindInfo(MCStreamer &Streamer, WinEH::FrameInfo *FI) const override;
+};
+}
+} // end namespace llvm
+
+#endif
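
The static helpers above select the compact or large encodings from the operand values. A sketch of collecting a prologue's unwind codes; the label and register number are placeholders supplied by the caller.

#include "llvm/MC/MCWin64EH.h"
#include <vector>
using namespace llvm;

std::vector<WinEH::Instruction> makePrologue(MCSymbol *Label,
                                             unsigned FrameReg) {
  std::vector<WinEH::Instruction> Codes;
  Codes.push_back(Win64EH::Instruction::PushNonVol(Label, FrameReg));
  Codes.push_back(Win64EH::Instruction::Alloc(Label, 40)); // <=128: AllocSmall
  Codes.push_back(Win64EH::Instruction::SetFPReg(Label, FrameReg, 0));
  return Codes;
}
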
diff --git a/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h b/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h
new file mode 100644
index 0000000..e2e95c7
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCWinCOFFObjectWriter.h
@@ -0,0 +1,47 @@
+//===-- llvm/MC/MCWinCOFFObjectWriter.h - Win COFF Object Writer *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFOBJECTWRITER_H
+#define LLVM_MC_MCWINCOFFOBJECTWRITER_H
+
+namespace llvm {
+class MCAsmBackend;
+class MCFixup;
+class MCObjectWriter;
+class MCValue;
+class raw_ostream;
+class raw_pwrite_stream;
+
+ class MCWinCOFFObjectTargetWriter {
+ virtual void anchor();
+ const unsigned Machine;
+
+ protected:
+ MCWinCOFFObjectTargetWriter(unsigned Machine_);
+
+ public:
+ virtual ~MCWinCOFFObjectTargetWriter() {}
+
+ unsigned getMachine() const { return Machine; }
+ virtual unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
+ bool IsCrossSection,
+ const MCAsmBackend &MAB) const = 0;
+ virtual bool recordRelocation(const MCFixup &) const { return true; }
+ };
+
+ /// \brief Construct a new Win COFF writer instance.
+ ///
+ /// \param MOTW - The target specific WinCOFF writer subclass.
+ /// \param OS - The stream to write to.
+ /// \returns The constructed object writer.
+ MCObjectWriter *createWinCOFFObjectWriter(MCWinCOFFObjectTargetWriter *MOTW,
+ raw_pwrite_stream &OS);
+} // End llvm namespace
+
+#endif
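
A skeletal target writer under stated assumptions: the machine and relocation constants come from llvm/Support/COFF.h, and the single-relocation policy is a placeholder rather than any real target's logic.

#include "llvm/MC/MCWinCOFFObjectWriter.h"
#include "llvm/Support/COFF.h"
using namespace llvm;

namespace {
class DemoWinCOFFWriter : public MCWinCOFFObjectTargetWriter {
public:
  DemoWinCOFFWriter()
      : MCWinCOFFObjectTargetWriter(COFF::IMAGE_FILE_MACHINE_AMD64) {}

  unsigned getRelocType(const MCValue &Target, const MCFixup &Fixup,
                        bool IsCrossSection,
                        const MCAsmBackend &MAB) const override {
    // Placeholder: a real target dispatches on the fixup kind and modifier.
    return COFF::IMAGE_REL_AMD64_ADDR64;
  }
};
} // end anonymous namespace
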
diff --git a/contrib/llvm/include/llvm/MC/MCWinCOFFStreamer.h b/contrib/llvm/include/llvm/MC/MCWinCOFFStreamer.h
new file mode 100644
index 0000000..fe1ada9
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCWinCOFFStreamer.h
@@ -0,0 +1,81 @@
+//===- MCWinCOFFStreamer.h - COFF Object File Interface ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINCOFFSTREAMER_H
+#define LLVM_MC_MCWINCOFFSTREAMER_H
+
+#include "llvm/MC/MCDirectives.h"
+#include "llvm/MC/MCObjectStreamer.h"
+
+namespace llvm {
+class MCAsmBackend;
+class MCContext;
+class MCCodeEmitter;
+class MCExpr;
+class MCInst;
+class MCSection;
+class MCSubtargetInfo;
+class MCSymbol;
+class StringRef;
+class raw_ostream;
+class raw_pwrite_stream;
+
+class MCWinCOFFStreamer : public MCObjectStreamer {
+public:
+ MCWinCOFFStreamer(MCContext &Context, MCAsmBackend &MAB, MCCodeEmitter &CE,
+ raw_pwrite_stream &OS);
+
+ /// state management
+ void reset() override {
+ CurSymbol = nullptr;
+ MCObjectStreamer::reset();
+ }
+
+ /// \name MCStreamer interface
+ /// \{
+
+ void InitSections(bool NoExecStack) override;
+ void EmitLabel(MCSymbol *Symbol) override;
+ void EmitAssemblerFlag(MCAssemblerFlag Flag) override;
+ void EmitThumbFunc(MCSymbol *Func) override;
+ bool EmitSymbolAttribute(MCSymbol *Symbol, MCSymbolAttr Attribute) override;
+ void EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) override;
+ void BeginCOFFSymbolDef(MCSymbol const *Symbol) override;
+ void EmitCOFFSymbolStorageClass(int StorageClass) override;
+ void EmitCOFFSymbolType(int Type) override;
+ void EndCOFFSymbolDef() override;
+ void EmitCOFFSafeSEH(MCSymbol const *Symbol) override;
+ void EmitCOFFSectionIndex(MCSymbol const *Symbol) override;
+ void EmitCOFFSecRel32(MCSymbol const *Symbol) override;
+ void EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitZerofill(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol, uint64_t Size,
+ unsigned ByteAlignment) override;
+ void EmitFileDirective(StringRef Filename) override;
+ void EmitIdent(StringRef IdentString) override;
+ void EmitWinEHHandlerData() override;
+ void FinishImpl() override;
+
+ /// \}
+
+protected:
+ const MCSymbol *CurSymbol;
+ void EmitInstToData(const MCInst &Inst, const MCSubtargetInfo &STI) override;
+
+private:
+ void Error(const Twine &Msg) const;
+};
+}
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/MC/MCWinEH.h b/contrib/llvm/include/llvm/MC/MCWinEH.h
new file mode 100644
index 0000000..723d7a3
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MCWinEH.h
@@ -0,0 +1,84 @@
+//===- MCWinEH.h - Windows Unwinding Support --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_MCWINEH_H
+#define LLVM_MC_MCWINEH_H
+
+#include <vector>
+
+namespace llvm {
+class MCContext;
+class MCSection;
+class MCStreamer;
+class MCSymbol;
+class StringRef;
+
+namespace WinEH {
+struct Instruction {
+ const MCSymbol *Label;
+ const unsigned Offset;
+ const unsigned Register;
+ const unsigned Operation;
+
+ Instruction(unsigned Op, MCSymbol *L, unsigned Reg, unsigned Off)
+ : Label(L), Offset(Off), Register(Reg), Operation(Op) {}
+};
+
+struct FrameInfo {
+ const MCSymbol *Begin;
+ const MCSymbol *End;
+ const MCSymbol *ExceptionHandler;
+ const MCSymbol *Function;
+ const MCSymbol *PrologEnd;
+ const MCSymbol *Symbol;
+
+ bool HandlesUnwind;
+ bool HandlesExceptions;
+
+ int LastFrameInst;
+ const FrameInfo *ChainedParent;
+ std::vector<Instruction> Instructions;
+
+ FrameInfo()
+ : Begin(nullptr), End(nullptr), ExceptionHandler(nullptr),
+ Function(nullptr), PrologEnd(nullptr), Symbol(nullptr),
+ HandlesUnwind(false), HandlesExceptions(false), LastFrameInst(-1),
+ ChainedParent(nullptr), Instructions() {}
+ FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel)
+ : Begin(BeginFuncEHLabel), End(nullptr), ExceptionHandler(nullptr),
+ Function(Function), PrologEnd(nullptr), Symbol(nullptr),
+ HandlesUnwind(false), HandlesExceptions(false), LastFrameInst(-1),
+ ChainedParent(nullptr), Instructions() {}
+ FrameInfo(const MCSymbol *Function, const MCSymbol *BeginFuncEHLabel,
+ const FrameInfo *ChainedParent)
+ : Begin(BeginFuncEHLabel), End(nullptr), ExceptionHandler(nullptr),
+ Function(Function), PrologEnd(nullptr), Symbol(nullptr),
+ HandlesUnwind(false), HandlesExceptions(false), LastFrameInst(-1),
+ ChainedParent(ChainedParent), Instructions() {}
+};
+
+class UnwindEmitter {
+public:
+ static MCSection *getPDataSection(const MCSymbol *Function,
+ MCContext &Context);
+ static MCSection *getXDataSection(const MCSymbol *Function,
+ MCContext &Context);
+
+ virtual ~UnwindEmitter() { }
+
+  /// This emits the unwind info sections (.pdata and .xdata in PE/COFF).
+ virtual void Emit(MCStreamer &Streamer) const = 0;
+ virtual void EmitUnwindInfo(MCStreamer &Streamer, FrameInfo *FI) const = 0;
+};
+}
+}
+
+#endif
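
A sketch of opening a frame with the constructors above; the symbols are placeholders created elsewhere (e.g. through MCContext).

#include "llvm/MC/MCWinEH.h"
using namespace llvm;

WinEH::FrameInfo beginFrame(const MCSymbol *Func, const MCSymbol *BeginLabel) {
  WinEH::FrameInfo FI(Func, BeginLabel); // End and PrologEnd are set later
  FI.HandlesExceptions = true;           // this frame has a handler attached
  return FI;
}
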
diff --git a/contrib/llvm/include/llvm/MC/MachineLocation.h b/contrib/llvm/include/llvm/MC/MachineLocation.h
new file mode 100644
index 0000000..4b5cf43
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/MachineLocation.h
@@ -0,0 +1,79 @@
+//===-- llvm/MC/MachineLocation.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// The MachineLocation class is used to represent a simple location in a machine
+// frame. Locations will be one of two forms: a register or an address formed
+// from a base address plus an offset. Register indirection can be specified by
+// explicitly passing an offset to the constructor.
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_MC_MACHINELOCATION_H
+#define LLVM_MC_MACHINELOCATION_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ class MCSymbol;
+
+class MachineLocation {
+private:
+ bool IsRegister; // True if location is a register.
+ unsigned Register; // gcc/gdb register number.
+ int Offset; // Displacement if not register.
+public:
+ enum : uint32_t {
+ // The target register number for an abstract frame pointer. The value is
+ // an arbitrary value that doesn't collide with any real target register.
+ VirtualFP = ~0U
+ };
+ MachineLocation()
+ : IsRegister(false), Register(0), Offset(0) {}
+ /// Create a direct register location.
+ explicit MachineLocation(unsigned R)
+ : IsRegister(true), Register(R), Offset(0) {}
+ /// Create a register-indirect location with an offset.
+ MachineLocation(unsigned R, int O)
+ : IsRegister(false), Register(R), Offset(O) {}
+
+ bool operator==(const MachineLocation &Other) const {
+ return IsRegister == Other.IsRegister && Register == Other.Register &&
+ Offset == Other.Offset;
+ }
+
+ // Accessors.
+ /// \return true iff this is a register-indirect location.
+ bool isIndirect() const { return !IsRegister; }
+ bool isReg() const { return IsRegister; }
+ unsigned getReg() const { return Register; }
+ int getOffset() const { return Offset; }
+ void setIsRegister(bool Is) { IsRegister = Is; }
+ void setRegister(unsigned R) { Register = R; }
+ void setOffset(int O) { Offset = O; }
+ /// Make this location a direct register location.
+ void set(unsigned R) {
+ IsRegister = true;
+ Register = R;
+ Offset = 0;
+ }
+ /// Make this location a register-indirect+offset location.
+ void set(unsigned R, int O) {
+ IsRegister = false;
+ Register = R;
+ Offset = O;
+ }
+};
+
+inline bool operator!=(const MachineLocation &LHS, const MachineLocation &RHS) {
+ return !(LHS == RHS);
+}
+
+} // End llvm namespace
+
+#endif
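
A short sketch of the two forms; the register numbers stand in for target (gcc/gdb) register numbers.

#include "llvm/MC/MachineLocation.h"
using namespace llvm;

MachineLocation describeVariable(bool InRegister, unsigned Reg,
                                 int SpillOffset) {
  if (InRegister)
    return MachineLocation(Reg);            // direct: isReg() == true
  return MachineLocation(Reg, SpillOffset); // indirect: isIndirect() == true
}
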
diff --git a/contrib/llvm/include/llvm/MC/SectionKind.h b/contrib/llvm/include/llvm/MC/SectionKind.h
new file mode 100644
index 0000000..b09b93c
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/SectionKind.h
@@ -0,0 +1,195 @@
+//===-- llvm/MC/SectionKind.h - Classification of sections ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_SECTIONKIND_H
+#define LLVM_MC_SECTIONKIND_H
+
+namespace llvm {
+
+/// SectionKind - This is a simple POD value that classifies the properties of
+/// a section. A section is classified into the deepest possible
+/// classification, and then the target maps them onto their sections based on
+/// what capabilities they have.
+///
+/// The comments below describe these as if they were an inheritance hierarchy
+/// in order to explain the predicates below.
+///
+class SectionKind {
+ enum Kind {
+ /// Metadata - Debug info sections or other metadata.
+ Metadata,
+
+ /// Text - Text section, used for functions and other executable code.
+ Text,
+
+ /// ReadOnly - Data that is never written to at program runtime by the
+ /// program or the dynamic linker. Things in the top-level readonly
+ /// SectionKind are not mergeable.
+ ReadOnly,
+
+    /// MergeableCString - Any null-terminated string which allows merging.
+    /// These values are known to end in a nul value of the specified size,
+    /// not otherwise contain a nul value, and be mergeable. This allows the
+    /// linker to unique the strings if it so desires.
+
+    /// Mergeable1ByteCString - 1-byte mergeable, null-terminated string.
+ Mergeable1ByteCString,
+
+    /// Mergeable2ByteCString - 2-byte mergeable, null-terminated string.
+ Mergeable2ByteCString,
+
+    /// Mergeable4ByteCString - 4-byte mergeable, null-terminated string.
+ Mergeable4ByteCString,
+
+ /// MergeableConst - These are sections for merging fixed-length
+ /// constants together. For example, this can be used to unique
+ /// constant pool entries etc.
+
+ /// MergeableConst4 - This is a section used by 4-byte constants,
+ /// for example, floats.
+ MergeableConst4,
+
+ /// MergeableConst8 - This is a section used by 8-byte constants,
+ /// for example, doubles.
+ MergeableConst8,
+
+ /// MergeableConst16 - This is a section used by 16-byte constants,
+ /// for example, vectors.
+ MergeableConst16,
+
+ /// Writeable - This is the base of all segments that need to be written
+ /// to during program runtime.
+
+ /// ThreadLocal - This is the base of all TLS segments. All TLS
+ /// objects must be writeable, otherwise there is no reason for them to
+ /// be thread local!
+
+ /// ThreadBSS - Zero-initialized TLS data objects.
+ ThreadBSS,
+
+ /// ThreadData - Initialized TLS data objects.
+ ThreadData,
+
+ /// GlobalWriteableData - Writeable data that is global (not thread
+ /// local).
+
+ /// BSS - Zero initialized writeable data.
+ BSS,
+
+ /// BSSLocal - This is BSS (zero initialized and writable) data
+ /// which has local linkage.
+ BSSLocal,
+
+ /// BSSExtern - This is BSS data with normal external linkage.
+ BSSExtern,
+
+ /// Common - Data with common linkage. These represent tentative
+ /// definitions, which always have a zero initializer and are never
+ /// marked 'constant'.
+ Common,
+
+ /// This is writeable data that has a non-zero initializer.
+ Data,
+
+ /// ReadOnlyWithRel - These are global variables that are never
+ /// written to by the program, but that have relocations, so they
+ /// must be stuck in a writeable section so that the dynamic linker
+ /// can write to them. If it chooses to, the dynamic linker can
+ /// mark the pages these globals end up on as read-only after it is
+ /// done with its relocation phase.
+ ReadOnlyWithRel
+ } K : 8;
+public:
+
+ bool isMetadata() const { return K == Metadata; }
+ bool isText() const { return K == Text; }
+
+ bool isReadOnly() const {
+ return K == ReadOnly || isMergeableCString() ||
+ isMergeableConst();
+ }
+
+ bool isMergeableCString() const {
+ return K == Mergeable1ByteCString || K == Mergeable2ByteCString ||
+ K == Mergeable4ByteCString;
+ }
+ bool isMergeable1ByteCString() const { return K == Mergeable1ByteCString; }
+ bool isMergeable2ByteCString() const { return K == Mergeable2ByteCString; }
+ bool isMergeable4ByteCString() const { return K == Mergeable4ByteCString; }
+
+ bool isMergeableConst() const {
+ return K == MergeableConst4 || K == MergeableConst8 ||
+ K == MergeableConst16;
+ }
+ bool isMergeableConst4() const { return K == MergeableConst4; }
+ bool isMergeableConst8() const { return K == MergeableConst8; }
+ bool isMergeableConst16() const { return K == MergeableConst16; }
+
+ bool isWriteable() const {
+ return isThreadLocal() || isGlobalWriteableData();
+ }
+
+ bool isThreadLocal() const {
+ return K == ThreadData || K == ThreadBSS;
+ }
+
+ bool isThreadBSS() const { return K == ThreadBSS; }
+ bool isThreadData() const { return K == ThreadData; }
+
+ bool isGlobalWriteableData() const {
+ return isBSS() || isCommon() || isData() || isReadOnlyWithRel();
+ }
+
+ bool isBSS() const { return K == BSS || K == BSSLocal || K == BSSExtern; }
+ bool isBSSLocal() const { return K == BSSLocal; }
+ bool isBSSExtern() const { return K == BSSExtern; }
+
+ bool isCommon() const { return K == Common; }
+
+ bool isData() const { return K == Data; }
+
+ bool isReadOnlyWithRel() const {
+ return K == ReadOnlyWithRel;
+ }
+private:
+ static SectionKind get(Kind K) {
+ SectionKind Res;
+ Res.K = K;
+ return Res;
+ }
+public:
+
+ static SectionKind getMetadata() { return get(Metadata); }
+ static SectionKind getText() { return get(Text); }
+ static SectionKind getReadOnly() { return get(ReadOnly); }
+ static SectionKind getMergeable1ByteCString() {
+ return get(Mergeable1ByteCString);
+ }
+ static SectionKind getMergeable2ByteCString() {
+ return get(Mergeable2ByteCString);
+ }
+ static SectionKind getMergeable4ByteCString() {
+ return get(Mergeable4ByteCString);
+ }
+ static SectionKind getMergeableConst4() { return get(MergeableConst4); }
+ static SectionKind getMergeableConst8() { return get(MergeableConst8); }
+ static SectionKind getMergeableConst16() { return get(MergeableConst16); }
+ static SectionKind getThreadBSS() { return get(ThreadBSS); }
+ static SectionKind getThreadData() { return get(ThreadData); }
+ static SectionKind getBSS() { return get(BSS); }
+ static SectionKind getBSSLocal() { return get(BSSLocal); }
+ static SectionKind getBSSExtern() { return get(BSSExtern); }
+ static SectionKind getCommon() { return get(Common); }
+ static SectionKind getData() { return get(Data); }
+ static SectionKind getReadOnlyWithRel() { return get(ReadOnlyWithRel); }
+};
+
+} // end namespace llvm
+
+#endif
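
A hypothetical classifier showing how front-end properties map onto the factory methods above:

#include "llvm/MC/SectionKind.h"
using namespace llvm;

SectionKind classifyGlobal(bool IsZeroInit, bool IsConst, bool IsThreadLocal) {
  if (IsThreadLocal)
    return IsZeroInit ? SectionKind::getThreadBSS()
                      : SectionKind::getThreadData();
  if (IsConst)
    return SectionKind::getReadOnly(); // top-level readonly, not mergeable
  return IsZeroInit ? SectionKind::getBSS() : SectionKind::getData();
}
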
diff --git a/contrib/llvm/include/llvm/MC/StringTableBuilder.h b/contrib/llvm/include/llvm/MC/StringTableBuilder.h
new file mode 100644
index 0000000..adde86b
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/StringTableBuilder.h
@@ -0,0 +1,65 @@
+//===-- StringTableBuilder.h - String table building utility ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_STRINGTABLEBUILDER_H
+#define LLVM_MC_STRINGTABLEBUILDER_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/DenseMap.h"
+#include <cassert>
+
+namespace llvm {
+
+/// \brief Utility for building string tables with deduplicated suffixes.
+class StringTableBuilder {
+public:
+ enum Kind { ELF, WinCOFF, MachO, RAW };
+
+private:
+ SmallString<256> StringTable;
+ DenseMap<StringRef, size_t> StringIndexMap;
+ size_t Size = 0;
+ Kind K;
+
+public:
+ StringTableBuilder(Kind K);
+
+ /// \brief Add a string to the builder. Returns the position of S in the
+ /// table. The position will be changed if finalize is used.
+ /// Can only be used before the table is finalized.
+ size_t add(StringRef S);
+
+ /// \brief Analyze the strings and build the final table. No more strings can
+ /// be added after this point.
+ void finalize();
+
+ /// \brief Retrieve the string table data. Can only be used after the table
+ /// is finalized.
+ StringRef data() const {
+ assert(isFinalized());
+ return StringTable;
+ }
+
+  /// \brief Get the offset of a string in the string table. Can only be used
+ /// after the table is finalized.
+ size_t getOffset(StringRef S) const;
+
+ const DenseMap<StringRef, size_t> &getMap() const { return StringIndexMap; }
+ size_t getSize() const { return Size; }
+ void clear();
+
+private:
+ bool isFinalized() const {
+ return !StringTable.empty();
+ }
+};
+
+} // end llvm namespace
+
+#endif
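
The add/finalize/getOffset ordering matters because finalize() may move strings to share suffixes. A minimal sketch:

#include "llvm/MC/StringTableBuilder.h"
using namespace llvm;

void buildStringTable() {
  StringTableBuilder Builder(StringTableBuilder::ELF);
  Builder.add("main");    // position returned here may still change...
  Builder.add("domain");  // ..."main" can be stored as a suffix of "domain"...
  Builder.finalize();     // ...so query offsets only after finalizing
  size_t MainOffset = Builder.getOffset("main");
  StringRef Table = Builder.data();
  (void)MainOffset;
  (void)Table;
}
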
diff --git a/contrib/llvm/include/llvm/MC/SubtargetFeature.h b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
new file mode 100644
index 0000000..0d97b22
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/SubtargetFeature.h
@@ -0,0 +1,127 @@
+//===-- llvm/MC/SubtargetFeature.h - CPU characteristics --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and manages user or tool specified CPU characteristics.
+// The intent is to be able to package specific features that should or should
+// not be used on a specific target processor. A tool, such as llc, could, for
+// example, gather chip info from the command line along with features that
+// should be used on that chip.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_MC_SUBTARGETFEATURE_H
+#define LLVM_MC_SUBTARGETFEATURE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/DataTypes.h"
+#include <bitset>
+
+namespace llvm {
+ class raw_ostream;
+ class StringRef;
+
+// A container class for subtarget features.
+// This is convenient because std::bitset does not have a constructor
+// with an initializer list of set bits.
+const unsigned MAX_SUBTARGET_FEATURES = 128;
+class FeatureBitset : public std::bitset<MAX_SUBTARGET_FEATURES> {
+public:
+  // Cannot inherit constructors because it's not supported by VC++.
+ FeatureBitset() : bitset() {}
+
+ FeatureBitset(const bitset<MAX_SUBTARGET_FEATURES>& B) : bitset(B) {}
+
+ FeatureBitset(std::initializer_list<unsigned> Init) : bitset() {
+ for (auto I = Init.begin() , E = Init.end(); I != E; ++I)
+ set(*I);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// SubtargetFeatureKV - Used to provide key value pairs for feature and
+/// CPU bit flags.
+//
+struct SubtargetFeatureKV {
+ const char *Key; // K-V key string
+ const char *Desc; // Help descriptor
+ FeatureBitset Value; // K-V integer value
+ FeatureBitset Implies; // K-V bit mask
+
+ // Compare routine for std::lower_bound
+ bool operator<(StringRef S) const {
+ return StringRef(Key) < S;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// SubtargetInfoKV - Used to provide key value pairs for CPU and arbitrary
+/// pointers.
+//
+struct SubtargetInfoKV {
+ const char *Key; // K-V key string
+ const void *Value; // K-V pointer value
+
+ // Compare routine for std::lower_bound
+ bool operator<(StringRef S) const {
+ return StringRef(Key) < S;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+///
+/// SubtargetFeatures - Manages the enabling and disabling of subtarget
+/// specific features. Features are encoded as a string of the form
+/// "+attr1,+attr2,-attr3,...,+attrN"
+/// A comma separates each feature from the next (all lowercase).
+/// Each feature is prefixed with + or -, indicating whether it should be
+/// enabled or disabled contrary to the CPU specification.
+///
+
+class SubtargetFeatures {
+ std::vector<std::string> Features; // Subtarget features as a vector
+public:
+ explicit SubtargetFeatures(StringRef Initial = "");
+
+ /// Features string accessors.
+ std::string getString() const;
+
+ /// Adding Features.
+ void AddFeature(StringRef String, bool Enable = true);
+
+ /// ToggleFeature - Toggle a feature and returns the newly updated feature
+ /// bits.
+ FeatureBitset ToggleFeature(FeatureBitset Bits, StringRef String,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+ /// Apply the feature flag and return the newly updated feature bits.
+ FeatureBitset ApplyFeatureFlag(FeatureBitset Bits, StringRef Feature,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+ /// Get feature bits of a CPU.
+ FeatureBitset getFeatureBits(StringRef CPU,
+ ArrayRef<SubtargetFeatureKV> CPUTable,
+ ArrayRef<SubtargetFeatureKV> FeatureTable);
+
+ /// Print feature string.
+ void print(raw_ostream &OS) const;
+
+ // Dump feature info.
+ void dump() const;
+
+ /// Adds the default features for the specified target triple.
+ void getDefaultSubtargetFeatures(const Triple& Triple);
+};
+
+} // End namespace llvm
+
+#endif
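
A sketch of composing the documented "+attr,-attr" string form; the feature names are placeholders for whatever the target defines.

#include "llvm/MC/SubtargetFeature.h"
#include <string>
using namespace llvm;

std::string demoFeatureString() {
  SubtargetFeatures Features("+sse2"); // initial specification
  Features.AddFeature("avx");          // appended enabled: "+avx"
  Features.AddFeature("fma", false);   // appended disabled: "-fma"
  return Features.getString();         // "+sse2,+avx,-fma"
}
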
diff --git a/contrib/llvm/include/llvm/MC/YAML.h b/contrib/llvm/include/llvm/MC/YAML.h
new file mode 100644
index 0000000..383cdc6
--- /dev/null
+++ b/contrib/llvm/include/llvm/MC/YAML.h
@@ -0,0 +1,94 @@
+#ifndef LLVM_MC_YAML_H
+#define LLVM_MC_YAML_H
+
+#include "llvm/Support/YAMLTraits.h"
+
+namespace llvm {
+namespace yaml {
+/// \brief Specialized YAMLIO scalar type for representing a binary blob.
+///
+/// A typical use case would be to represent the content of a section in a
+/// binary file.
+/// This class has custom YAMLIO traits for convenient reading and writing.
+/// It renders as a string of hex digits in a YAML file.
+/// For example, it might render as `DEADBEEFCAFEBABE` (YAML does not
+/// require the quotation marks, so for simplicity when outputting they are
+/// omitted).
+/// When reading, any string whose content is an even number of hex digits
+/// will be accepted.
+/// For example, all of the following are acceptable:
+/// `DEADBEEF`, `"DeADbEeF"`, `"\x44EADBEEF"` (Note: '\x44' == 'D')
+///
+/// A significant advantage of using this class is that it never allocates
+/// temporary strings or buffers for any of its functionality.
+///
+/// Example:
+///
+/// The YAML mapping:
+/// \code
+/// Foo: DEADBEEFCAFEBABE
+/// \endcode
+///
+/// Could be modeled in YAMLIO by the struct:
+/// \code
+/// struct FooHolder {
+/// BinaryRef Foo;
+/// };
+/// namespace llvm {
+/// namespace yaml {
+/// template <>
+/// struct MappingTraits<FooHolder> {
+/// static void mapping(IO &IO, FooHolder &FH) {
+/// IO.mapRequired("Foo", FH.Foo);
+/// }
+/// };
+/// } // end namespace yaml
+/// } // end namespace llvm
+/// \endcode
+class BinaryRef {
+ friend bool operator==(const BinaryRef &LHS, const BinaryRef &RHS);
+ /// \brief Either raw binary data, or a string of hex bytes (must always
+ /// be an even number of characters).
+ ArrayRef<uint8_t> Data;
+ /// \brief Discriminator between the two states of the `Data` member.
+ bool DataIsHexString;
+
+public:
+ BinaryRef(ArrayRef<uint8_t> Data) : Data(Data), DataIsHexString(false) {}
+ BinaryRef(StringRef Data)
+ : Data(reinterpret_cast<const uint8_t *>(Data.data()), Data.size()),
+ DataIsHexString(true) {}
+ BinaryRef() : DataIsHexString(true) {}
+ /// \brief The number of bytes that are represented by this BinaryRef.
+ /// This is the number of bytes that writeAsBinary() will write.
+ ArrayRef<uint8_t>::size_type binary_size() const {
+ if (DataIsHexString)
+ return Data.size() / 2;
+ return Data.size();
+ }
+ /// \brief Write the contents (regardless of whether it is binary or a
+ /// hex string) as binary to the given raw_ostream.
+ void writeAsBinary(raw_ostream &OS) const;
+ /// \brief Write the contents (regardless of whether it is binary or a
+ /// hex string) as hex to the given raw_ostream.
+ ///
+ /// For example, a possible output could be `DEADBEEFCAFEBABE`.
+ void writeAsHex(raw_ostream &OS) const;
+};
+
+inline bool operator==(const BinaryRef &LHS, const BinaryRef &RHS) {
+ // Special case for default constructed BinaryRef.
+ if (LHS.Data.empty() && RHS.Data.empty())
+ return true;
+
+ return LHS.DataIsHexString == RHS.DataIsHexString && LHS.Data == RHS.Data;
+}
+
+template <> struct ScalarTraits<BinaryRef> {
+ static void output(const BinaryRef &, void *, llvm::raw_ostream &);
+ static StringRef input(StringRef, void *, BinaryRef &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
+};
+}
+}
+#endif
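
A sketch of using BinaryRef outside of YAMLIO, based only on the interface above:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/YAML.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void dumpSectionContents(ArrayRef<uint8_t> Bytes) {
  yaml::BinaryRef Blob(Bytes);              // binary-backed state
  outs() << Blob.binary_size() << " bytes: ";
  Blob.writeAsHex(outs());                  // e.g. DEADBEEFCAFEBABE
  outs() << "\n";
}
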
diff --git a/contrib/llvm/include/llvm/Object/Archive.h b/contrib/llvm/include/llvm/Object/Archive.h
new file mode 100644
index 0000000..8dd042a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/Archive.h
@@ -0,0 +1,231 @@
+//===- Archive.h - ar archive file format -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ar archive file format class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ARCHIVE_H
+#define LLVM_OBJECT_ARCHIVE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace object {
+struct ArchiveMemberHeader {
+ char Name[16];
+ char LastModified[12];
+ char UID[6];
+ char GID[6];
+ char AccessMode[8];
+ char Size[10]; ///< Size of data, not including header or padding.
+ char Terminator[2];
+
+ /// Get the name without looking up long names.
+ llvm::StringRef getName() const;
+
+ /// Members are not larger than 4GB.
+ ErrorOr<uint32_t> getSize() const;
+
+ sys::fs::perms getAccessMode() const;
+ sys::TimeValue getLastModified() const;
+ llvm::StringRef getRawLastModified() const {
+ return StringRef(LastModified, sizeof(LastModified)).rtrim(" ");
+ }
+ unsigned getUID() const;
+ unsigned getGID() const;
+};
+
+class Archive : public Binary {
+ virtual void anchor();
+public:
+ class Child {
+ friend Archive;
+ const Archive *Parent;
+ /// \brief Includes header but not padding byte.
+ StringRef Data;
+ /// \brief Offset from Data to the start of the file.
+ uint16_t StartOfFile;
+
+ const ArchiveMemberHeader *getHeader() const {
+ return reinterpret_cast<const ArchiveMemberHeader *>(Data.data());
+ }
+
+ bool isThinMember() const;
+
+ public:
+ Child(const Archive *Parent, const char *Start, std::error_code *EC);
+ Child(const Archive *Parent, StringRef Data, uint16_t StartOfFile);
+
+ bool operator ==(const Child &other) const {
+ assert(Parent == other.Parent);
+ return Data.begin() == other.Data.begin();
+ }
+
+ const Archive *getParent() const { return Parent; }
+ ErrorOr<Child> getNext() const;
+
+ ErrorOr<StringRef> getName() const;
+ StringRef getRawName() const { return getHeader()->getName(); }
+ sys::TimeValue getLastModified() const {
+ return getHeader()->getLastModified();
+ }
+ StringRef getRawLastModified() const {
+ return getHeader()->getRawLastModified();
+ }
+ unsigned getUID() const { return getHeader()->getUID(); }
+ unsigned getGID() const { return getHeader()->getGID(); }
+ sys::fs::perms getAccessMode() const {
+ return getHeader()->getAccessMode();
+ }
+ /// \return the size of the archive member without the header or padding.
+ ErrorOr<uint64_t> getSize() const;
+ /// \return the size in the archive header for this member.
+ ErrorOr<uint64_t> getRawSize() const;
+
+ ErrorOr<StringRef> getBuffer() const;
+ uint64_t getChildOffset() const;
+
+ ErrorOr<MemoryBufferRef> getMemoryBufferRef() const;
+
+ ErrorOr<std::unique_ptr<Binary>>
+ getAsBinary(LLVMContext *Context = nullptr) const;
+ };
+
+ class child_iterator {
+ ErrorOr<Child> child;
+
+ public:
+ child_iterator() : child(Child(nullptr, nullptr, nullptr)) {}
+ child_iterator(const Child &c) : child(c) {}
+ child_iterator(std::error_code EC) : child(EC) {}
+ const ErrorOr<Child> *operator->() const { return &child; }
+ const ErrorOr<Child> &operator*() const { return child; }
+
+ bool operator==(const child_iterator &other) const {
+      // We ignore error states so that comparisons with end() work, which
+ // allows range loops.
+ if (child.getError() || other.child.getError())
+ return false;
+ return *child == *other.child;
+ }
+
+ bool operator!=(const child_iterator &other) const {
+ return !(*this == other);
+ }
+
+  // Code in loops with child_iterators must check for errors on each
+  // iteration and break out of the loop if an error occurs.
+ child_iterator &operator++() { // Preincrement
+ assert(child && "Can't increment iterator with error");
+ child = child->getNext();
+ return *this;
+ }
+ };
+
+ class Symbol {
+ const Archive *Parent;
+ uint32_t SymbolIndex;
+ uint32_t StringIndex; // Extra index to the string.
+
+ public:
+ bool operator ==(const Symbol &other) const {
+ return (Parent == other.Parent) && (SymbolIndex == other.SymbolIndex);
+ }
+
+ Symbol(const Archive *p, uint32_t symi, uint32_t stri)
+ : Parent(p)
+ , SymbolIndex(symi)
+ , StringIndex(stri) {}
+ StringRef getName() const;
+ ErrorOr<Child> getMember() const;
+ Symbol getNext() const;
+ };
+
+ class symbol_iterator {
+ Symbol symbol;
+ public:
+ symbol_iterator(const Symbol &s) : symbol(s) {}
+ const Symbol *operator->() const { return &symbol; }
+ const Symbol &operator*() const { return symbol; }
+
+ bool operator==(const symbol_iterator &other) const {
+ return symbol == other.symbol;
+ }
+
+ bool operator!=(const symbol_iterator &other) const {
+ return !(*this == other);
+ }
+
+ symbol_iterator& operator++() { // Preincrement
+ symbol = symbol.getNext();
+ return *this;
+ }
+ };
+
+ Archive(MemoryBufferRef Source, std::error_code &EC);
+ static ErrorOr<std::unique_ptr<Archive>> create(MemoryBufferRef Source);
+
+ enum Kind {
+ K_GNU,
+ K_MIPS64,
+ K_BSD,
+ K_COFF
+ };
+
+ Kind kind() const { return (Kind)Format; }
+ bool isThin() const { return IsThin; }
+
+ child_iterator child_begin(bool SkipInternal = true) const;
+ child_iterator child_end() const;
+ iterator_range<child_iterator> children(bool SkipInternal = true) const {
+ return make_range(child_begin(SkipInternal), child_end());
+ }
+
+ symbol_iterator symbol_begin() const;
+ symbol_iterator symbol_end() const;
+ iterator_range<symbol_iterator> symbols() const {
+ return make_range(symbol_begin(), symbol_end());
+ }
+
+ // Cast methods.
+ static inline bool classof(Binary const *v) {
+ return v->isArchive();
+ }
+
+  // Check if a symbol is in the archive.
+ child_iterator findSym(StringRef name) const;
+
+ bool hasSymbolTable() const;
+ StringRef getSymbolTable() const { return SymbolTable; }
+ uint32_t getNumberOfSymbols() const;
+
+private:
+ StringRef SymbolTable;
+ StringRef StringTable;
+
+ StringRef FirstRegularData;
+ uint16_t FirstRegularStartOfFile = -1;
+ void setFirstRegular(const Child &C);
+
+ unsigned Format : 2;
+ unsigned IsThin : 1;
+ mutable std::vector<std::unique_ptr<MemoryBuffer>> ThinBuffers;
+};
+
+}
+}
+
+#endif
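
Given the error-checking contract spelled out on child_iterator, a careful member walk looks like this sketch:

#include "llvm/Object/Archive.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace object;

std::error_code listMembers(const Archive &A, raw_ostream &OS) {
  for (const ErrorOr<Archive::Child> &C : A.children()) {
    if (std::error_code EC = C.getError())
      return EC;                      // break out of the loop on error
    ErrorOr<StringRef> Name = C->getName();
    if (Name)
      OS << *Name << "\n";
  }
  return std::error_code();
}
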
diff --git a/contrib/llvm/include/llvm/Object/ArchiveWriter.h b/contrib/llvm/include/llvm/Object/ArchiveWriter.h
new file mode 100644
index 0000000..b5d2ba3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ArchiveWriter.h
@@ -0,0 +1,48 @@
+//===- ArchiveWriter.h - ar archive file format writer ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the writeArchive function for writing an archive file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ARCHIVEWRITER_H
+#define LLVM_OBJECT_ARCHIVEWRITER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+
+class NewArchiveIterator {
+ bool IsNewMember;
+ StringRef Name;
+
+ object::Archive::Child OldMember;
+
+public:
+ NewArchiveIterator(const object::Archive::Child &OldMember, StringRef Name);
+ NewArchiveIterator(StringRef FileName);
+ bool isNewMember() const;
+ StringRef getName() const;
+
+ const object::Archive::Child &getOld() const;
+
+ StringRef getNew() const;
+ llvm::ErrorOr<int> getFD(sys::fs::file_status &NewStatus) const;
+ const sys::fs::file_status &getStatus() const;
+};
+
+std::pair<StringRef, std::error_code>
+writeArchive(StringRef ArcName, std::vector<NewArchiveIterator> &NewMembers,
+ bool WriteSymtab, object::Archive::Kind Kind, bool Deterministic,
+ bool Thin);
+}
+
+#endif
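
A sketch of driving writeArchive; the file names are placeholders and error handling is reduced to returning the status.

#include "llvm/Object/ArchiveWriter.h"
#include <vector>
using namespace llvm;

std::error_code makeDemoArchive() {
  std::vector<NewArchiveIterator> Members;
  Members.push_back(NewArchiveIterator("a.o")); // new members come from disk
  Members.push_back(NewArchiveIterator("b.o"));
  auto Result = writeArchive("demo.a", Members, /*WriteSymtab=*/true,
                             object::Archive::K_GNU, /*Deterministic=*/true,
                             /*Thin=*/false);
  return Result.second; // .second carries the error status
}
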
diff --git a/contrib/llvm/include/llvm/Object/Binary.h b/contrib/llvm/include/llvm/Object/Binary.h
new file mode 100644
index 0000000..a0d1127
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/Binary.h
@@ -0,0 +1,192 @@
+//===- Binary.h - A generic binary file -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Binary class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_BINARY_H
+#define LLVM_OBJECT_BINARY_H
+
+#include "llvm/Object/Error.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+
+class LLVMContext;
+class StringRef;
+
+namespace object {
+
+class Binary {
+private:
+ Binary() = delete;
+ Binary(const Binary &other) = delete;
+
+ unsigned int TypeID;
+
+protected:
+ MemoryBufferRef Data;
+
+ Binary(unsigned int Type, MemoryBufferRef Source);
+
+ enum {
+ ID_Archive,
+ ID_MachOUniversalBinary,
+ ID_COFFImportFile,
+ ID_IR, // LLVM IR
+ ID_FunctionIndex, // Function summary index
+
+ // Object and children.
+ ID_StartObjects,
+ ID_COFF,
+
+ ID_ELF32L, // ELF 32-bit, little endian
+ ID_ELF32B, // ELF 32-bit, big endian
+ ID_ELF64L, // ELF 64-bit, little endian
+ ID_ELF64B, // ELF 64-bit, big endian
+
+ ID_MachO32L, // MachO 32-bit, little endian
+ ID_MachO32B, // MachO 32-bit, big endian
+ ID_MachO64L, // MachO 64-bit, little endian
+ ID_MachO64B, // MachO 64-bit, big endian
+
+ ID_EndObjects
+ };
+
+ static inline unsigned int getELFType(bool isLE, bool is64Bits) {
+ if (isLE)
+ return is64Bits ? ID_ELF64L : ID_ELF32L;
+ else
+ return is64Bits ? ID_ELF64B : ID_ELF32B;
+ }
+
+ static unsigned int getMachOType(bool isLE, bool is64Bits) {
+ if (isLE)
+ return is64Bits ? ID_MachO64L : ID_MachO32L;
+ else
+ return is64Bits ? ID_MachO64B : ID_MachO32B;
+ }
+
+public:
+ virtual ~Binary();
+
+ StringRef getData() const;
+ StringRef getFileName() const;
+ MemoryBufferRef getMemoryBufferRef() const;
+
+ // Cast methods.
+ unsigned int getType() const { return TypeID; }
+
+ // Convenience methods
+ bool isObject() const {
+ return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
+ }
+
+ bool isSymbolic() const {
+ return isIR() || isObject();
+ }
+
+ bool isArchive() const {
+ return TypeID == ID_Archive;
+ }
+
+ bool isMachOUniversalBinary() const {
+ return TypeID == ID_MachOUniversalBinary;
+ }
+
+ bool isELF() const {
+ return TypeID >= ID_ELF32L && TypeID <= ID_ELF64B;
+ }
+
+ bool isMachO() const {
+ return TypeID >= ID_MachO32L && TypeID <= ID_MachO64B;
+ }
+
+ bool isCOFF() const {
+ return TypeID == ID_COFF;
+ }
+
+ bool isCOFFImportFile() const {
+ return TypeID == ID_COFFImportFile;
+ }
+
+ bool isIR() const {
+ return TypeID == ID_IR;
+ }
+
+ bool isFunctionIndex() const { return TypeID == ID_FunctionIndex; }
+
+ bool isLittleEndian() const {
+ return !(TypeID == ID_ELF32B || TypeID == ID_ELF64B ||
+ TypeID == ID_MachO32B || TypeID == ID_MachO64B);
+ }
+};
+
+/// @brief Create a Binary from Source, autodetecting the file type.
+///
+/// @param Source The data to create the Binary from.
+ErrorOr<std::unique_ptr<Binary>> createBinary(MemoryBufferRef Source,
+ LLVMContext *Context = nullptr);
+
+template <typename T> class OwningBinary {
+ std::unique_ptr<T> Bin;
+ std::unique_ptr<MemoryBuffer> Buf;
+
+public:
+ OwningBinary();
+ OwningBinary(std::unique_ptr<T> Bin, std::unique_ptr<MemoryBuffer> Buf);
+ OwningBinary(OwningBinary<T>&& Other);
+ OwningBinary<T> &operator=(OwningBinary<T> &&Other);
+
+ std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>> takeBinary();
+
+ T* getBinary();
+ const T* getBinary() const;
+};
+
+template <typename T>
+OwningBinary<T>::OwningBinary(std::unique_ptr<T> Bin,
+ std::unique_ptr<MemoryBuffer> Buf)
+ : Bin(std::move(Bin)), Buf(std::move(Buf)) {}
+
+template <typename T> OwningBinary<T>::OwningBinary() {}
+
+template <typename T>
+OwningBinary<T>::OwningBinary(OwningBinary &&Other)
+ : Bin(std::move(Other.Bin)), Buf(std::move(Other.Buf)) {}
+
+template <typename T>
+OwningBinary<T> &OwningBinary<T>::operator=(OwningBinary &&Other) {
+ Bin = std::move(Other.Bin);
+ Buf = std::move(Other.Buf);
+ return *this;
+}
+
+template <typename T>
+std::pair<std::unique_ptr<T>, std::unique_ptr<MemoryBuffer>>
+OwningBinary<T>::takeBinary() {
+ return std::make_pair(std::move(Bin), std::move(Buf));
+}
+
+template <typename T> T* OwningBinary<T>::getBinary() {
+ return Bin.get();
+}
+
+template <typename T> const T* OwningBinary<T>::getBinary() const {
+ return Bin.get();
+}
+
+ErrorOr<OwningBinary<Binary>> createBinary(StringRef Path);
+}
+}
+
+#endif
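
A sketch of the autodetecting factory plus the convenience predicates:

#include "llvm/Object/Binary.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace object;

std::error_code inspect(StringRef Path) {
  ErrorOr<OwningBinary<Binary>> BinOrErr = createBinary(Path);
  if (std::error_code EC = BinOrErr.getError())
    return EC;
  Binary *Bin = BinOrErr->getBinary(); // owner keeps the buffer alive
  if (Bin->isArchive())
    outs() << Path << ": archive\n";
  else if (Bin->isObject())
    outs() << Path << ": object, "
           << (Bin->isLittleEndian() ? "little" : "big") << "-endian\n";
  return std::error_code();
}
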
diff --git a/contrib/llvm/include/llvm/Object/COFF.h b/contrib/llvm/include/llvm/Object/COFF.h
new file mode 100644
index 0000000..1b0e2e3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/COFF.h
@@ -0,0 +1,912 @@
+//===- COFF.h - COFF object file implementation -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the COFFObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFF_H
+#define LLVM_OBJECT_COFF_H
+
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/COFF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorOr.h"
+
+namespace llvm {
+template <typename T> class ArrayRef;
+
+namespace object {
+class ImportDirectoryEntryRef;
+class DelayImportDirectoryEntryRef;
+class ExportDirectoryEntryRef;
+class ImportedSymbolRef;
+class BaseRelocRef;
+typedef content_iterator<ImportDirectoryEntryRef> import_directory_iterator;
+typedef content_iterator<DelayImportDirectoryEntryRef>
+ delay_import_directory_iterator;
+typedef content_iterator<ExportDirectoryEntryRef> export_directory_iterator;
+typedef content_iterator<ImportedSymbolRef> imported_symbol_iterator;
+typedef content_iterator<BaseRelocRef> base_reloc_iterator;
+
+/// The DOS compatible header at the front of all PE/COFF executables.
+struct dos_header {
+ char Magic[2];
+ support::ulittle16_t UsedBytesInTheLastPage;
+ support::ulittle16_t FileSizeInPages;
+ support::ulittle16_t NumberOfRelocationItems;
+ support::ulittle16_t HeaderSizeInParagraphs;
+ support::ulittle16_t MinimumExtraParagraphs;
+ support::ulittle16_t MaximumExtraParagraphs;
+ support::ulittle16_t InitialRelativeSS;
+ support::ulittle16_t InitialSP;
+ support::ulittle16_t Checksum;
+ support::ulittle16_t InitialIP;
+ support::ulittle16_t InitialRelativeCS;
+ support::ulittle16_t AddressOfRelocationTable;
+ support::ulittle16_t OverlayNumber;
+ support::ulittle16_t Reserved[4];
+ support::ulittle16_t OEMid;
+ support::ulittle16_t OEMinfo;
+ support::ulittle16_t Reserved2[10];
+ support::ulittle32_t AddressOfNewExeHeader;
+};
+
+struct coff_file_header {
+ support::ulittle16_t Machine;
+ support::ulittle16_t NumberOfSections;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle32_t PointerToSymbolTable;
+ support::ulittle32_t NumberOfSymbols;
+ support::ulittle16_t SizeOfOptionalHeader;
+ support::ulittle16_t Characteristics;
+
+ bool isImportLibrary() const { return NumberOfSections == 0xffff; }
+};
+
+struct coff_bigobj_file_header {
+ support::ulittle16_t Sig1;
+ support::ulittle16_t Sig2;
+ support::ulittle16_t Version;
+ support::ulittle16_t Machine;
+ support::ulittle32_t TimeDateStamp;
+ uint8_t UUID[16];
+ support::ulittle32_t unused1;
+ support::ulittle32_t unused2;
+ support::ulittle32_t unused3;
+ support::ulittle32_t unused4;
+ support::ulittle32_t NumberOfSections;
+ support::ulittle32_t PointerToSymbolTable;
+ support::ulittle32_t NumberOfSymbols;
+};
+
+/// The 32-bit PE header that follows the COFF header.
+struct pe32_header {
+ support::ulittle16_t Magic;
+ uint8_t MajorLinkerVersion;
+ uint8_t MinorLinkerVersion;
+ support::ulittle32_t SizeOfCode;
+ support::ulittle32_t SizeOfInitializedData;
+ support::ulittle32_t SizeOfUninitializedData;
+ support::ulittle32_t AddressOfEntryPoint;
+ support::ulittle32_t BaseOfCode;
+ support::ulittle32_t BaseOfData;
+ support::ulittle32_t ImageBase;
+ support::ulittle32_t SectionAlignment;
+ support::ulittle32_t FileAlignment;
+ support::ulittle16_t MajorOperatingSystemVersion;
+ support::ulittle16_t MinorOperatingSystemVersion;
+ support::ulittle16_t MajorImageVersion;
+ support::ulittle16_t MinorImageVersion;
+ support::ulittle16_t MajorSubsystemVersion;
+ support::ulittle16_t MinorSubsystemVersion;
+ support::ulittle32_t Win32VersionValue;
+ support::ulittle32_t SizeOfImage;
+ support::ulittle32_t SizeOfHeaders;
+ support::ulittle32_t CheckSum;
+ support::ulittle16_t Subsystem;
+ // FIXME: This should be DllCharacteristics.
+ support::ulittle16_t DLLCharacteristics;
+ support::ulittle32_t SizeOfStackReserve;
+ support::ulittle32_t SizeOfStackCommit;
+ support::ulittle32_t SizeOfHeapReserve;
+ support::ulittle32_t SizeOfHeapCommit;
+ support::ulittle32_t LoaderFlags;
+ // FIXME: This should be NumberOfRvaAndSizes.
+ support::ulittle32_t NumberOfRvaAndSize;
+};
+
+/// The 64-bit PE header that follows the COFF header.
+struct pe32plus_header {
+ support::ulittle16_t Magic;
+ uint8_t MajorLinkerVersion;
+ uint8_t MinorLinkerVersion;
+ support::ulittle32_t SizeOfCode;
+ support::ulittle32_t SizeOfInitializedData;
+ support::ulittle32_t SizeOfUninitializedData;
+ support::ulittle32_t AddressOfEntryPoint;
+ support::ulittle32_t BaseOfCode;
+ support::ulittle64_t ImageBase;
+ support::ulittle32_t SectionAlignment;
+ support::ulittle32_t FileAlignment;
+ support::ulittle16_t MajorOperatingSystemVersion;
+ support::ulittle16_t MinorOperatingSystemVersion;
+ support::ulittle16_t MajorImageVersion;
+ support::ulittle16_t MinorImageVersion;
+ support::ulittle16_t MajorSubsystemVersion;
+ support::ulittle16_t MinorSubsystemVersion;
+ support::ulittle32_t Win32VersionValue;
+ support::ulittle32_t SizeOfImage;
+ support::ulittle32_t SizeOfHeaders;
+ support::ulittle32_t CheckSum;
+ support::ulittle16_t Subsystem;
+ support::ulittle16_t DLLCharacteristics;
+ support::ulittle64_t SizeOfStackReserve;
+ support::ulittle64_t SizeOfStackCommit;
+ support::ulittle64_t SizeOfHeapReserve;
+ support::ulittle64_t SizeOfHeapCommit;
+ support::ulittle32_t LoaderFlags;
+ support::ulittle32_t NumberOfRvaAndSize;
+};
+
+struct data_directory {
+ support::ulittle32_t RelativeVirtualAddress;
+ support::ulittle32_t Size;
+};
+
+struct import_directory_table_entry {
+ support::ulittle32_t ImportLookupTableRVA;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle32_t ForwarderChain;
+ support::ulittle32_t NameRVA;
+ support::ulittle32_t ImportAddressTableRVA;
+};
+
+template <typename IntTy>
+struct import_lookup_table_entry {
+ IntTy Data;
+
+ bool isOrdinal() const { return Data < 0; }
+
+ uint16_t getOrdinal() const {
+ assert(isOrdinal() && "ILT entry is not an ordinal!");
+ return Data & 0xFFFF;
+ }
+
+ uint32_t getHintNameRVA() const {
+ assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
+ return Data & 0xFFFFFFFF;
+ }
+};
+
+typedef import_lookup_table_entry<support::little32_t>
+ import_lookup_table_entry32;
+typedef import_lookup_table_entry<support::little64_t>
+ import_lookup_table_entry64;
+
+struct delay_import_directory_table_entry {
+ // dumpbin reports this field as "Characteristics" instead of "Attributes".
+ support::ulittle32_t Attributes;
+ support::ulittle32_t Name;
+ support::ulittle32_t ModuleHandle;
+ support::ulittle32_t DelayImportAddressTable;
+ support::ulittle32_t DelayImportNameTable;
+ support::ulittle32_t BoundDelayImportTable;
+ support::ulittle32_t UnloadDelayImportTable;
+ support::ulittle32_t TimeStamp;
+};
+
+struct export_directory_table_entry {
+ support::ulittle32_t ExportFlags;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle16_t MajorVersion;
+ support::ulittle16_t MinorVersion;
+ support::ulittle32_t NameRVA;
+ support::ulittle32_t OrdinalBase;
+ support::ulittle32_t AddressTableEntries;
+ support::ulittle32_t NumberOfNamePointers;
+ support::ulittle32_t ExportAddressTableRVA;
+ support::ulittle32_t NamePointerRVA;
+ support::ulittle32_t OrdinalTableRVA;
+};
+
+union export_address_table_entry {
+ support::ulittle32_t ExportRVA;
+ support::ulittle32_t ForwarderRVA;
+};
+
+typedef support::ulittle32_t export_name_pointer_table_entry;
+typedef support::ulittle16_t export_ordinal_table_entry;
+
+struct StringTableOffset {
+ support::ulittle32_t Zeroes;
+ support::ulittle32_t Offset;
+};
+
+template <typename SectionNumberType>
+struct coff_symbol {
+ union {
+ char ShortName[COFF::NameSize];
+ StringTableOffset Offset;
+ } Name;
+
+ support::ulittle32_t Value;
+ SectionNumberType SectionNumber;
+
+ support::ulittle16_t Type;
+
+ uint8_t StorageClass;
+ uint8_t NumberOfAuxSymbols;
+};
+
+typedef coff_symbol<support::ulittle16_t> coff_symbol16;
+typedef coff_symbol<support::ulittle32_t> coff_symbol32;
+
+// Contains only common parts of coff_symbol16 and coff_symbol32.
+struct coff_symbol_generic {
+ union {
+ char ShortName[COFF::NameSize];
+ StringTableOffset Offset;
+ } Name;
+ support::ulittle32_t Value;
+};
+
+class COFFSymbolRef {
+public:
+ COFFSymbolRef(const coff_symbol16 *CS) : CS16(CS), CS32(nullptr) {}
+ COFFSymbolRef(const coff_symbol32 *CS) : CS16(nullptr), CS32(CS) {}
+ COFFSymbolRef() : CS16(nullptr), CS32(nullptr) {}
+
+ const void *getRawPtr() const {
+ return CS16 ? static_cast<const void *>(CS16) : CS32;
+ }
+
+ const coff_symbol_generic *getGeneric() const {
+ if (CS16)
+ return reinterpret_cast<const coff_symbol_generic *>(CS16);
+ return reinterpret_cast<const coff_symbol_generic *>(CS32);
+ }
+
+ friend bool operator<(COFFSymbolRef A, COFFSymbolRef B) {
+ return A.getRawPtr() < B.getRawPtr();
+ }
+
+ bool isBigObj() const {
+ if (CS16)
+ return false;
+ if (CS32)
+ return true;
+ llvm_unreachable("COFFSymbolRef points to nothing!");
+ }
+
+ const char *getShortName() const {
+ return CS16 ? CS16->Name.ShortName : CS32->Name.ShortName;
+ }
+
+ const StringTableOffset &getStringTableOffset() const {
+ assert(isSet() && "COFFSymbolRef points to nothing!");
+ return CS16 ? CS16->Name.Offset : CS32->Name.Offset;
+ }
+
+ uint32_t getValue() const { return CS16 ? CS16->Value : CS32->Value; }
+
+ int32_t getSectionNumber() const {
+ assert(isSet() && "COFFSymbolRef points to nothing!");
+ if (CS16) {
+ // Reserved sections are returned as negative numbers.
+ if (CS16->SectionNumber <= COFF::MaxNumberOfSections16)
+ return CS16->SectionNumber;
+ return static_cast<int16_t>(CS16->SectionNumber);
+ }
+ return static_cast<int32_t>(CS32->SectionNumber);
+ }
+
+ uint16_t getType() const {
+ assert(isSet() && "COFFSymbolRef points to nothing!");
+ return CS16 ? CS16->Type : CS32->Type;
+ }
+
+ uint8_t getStorageClass() const {
+ assert(isSet() && "COFFSymbolRef points to nothing!");
+ return CS16 ? CS16->StorageClass : CS32->StorageClass;
+ }
+
+ uint8_t getNumberOfAuxSymbols() const {
+ assert(isSet() && "COFFSymbolRef points to nothing!");
+ return CS16 ? CS16->NumberOfAuxSymbols : CS32->NumberOfAuxSymbols;
+ }
+
+ uint8_t getBaseType() const { return getType() & 0x0F; }
+
+ uint8_t getComplexType() const {
+ return (getType() & 0xF0) >> COFF::SCT_COMPLEX_TYPE_SHIFT;
+ }
+
+ bool isAbsolute() const {
+ return getSectionNumber() == -1;
+ }
+
+ bool isExternal() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL;
+ }
+
+ bool isCommon() const {
+ return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
+ getValue() != 0;
+ }
+
+ bool isUndefined() const {
+ return isExternal() && getSectionNumber() == COFF::IMAGE_SYM_UNDEFINED &&
+ getValue() == 0;
+ }
+
+ bool isWeakExternal() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_WEAK_EXTERNAL;
+ }
+
+ bool isFunctionDefinition() const {
+ return isExternal() && getBaseType() == COFF::IMAGE_SYM_TYPE_NULL &&
+ getComplexType() == COFF::IMAGE_SYM_DTYPE_FUNCTION &&
+ !COFF::isReservedSectionNumber(getSectionNumber());
+ }
+
+ bool isFunctionLineInfo() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_FUNCTION;
+ }
+
+ bool isAnyUndefined() const {
+ return isUndefined() || isWeakExternal();
+ }
+
+ bool isFileRecord() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_FILE;
+ }
+
+ bool isSection() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_SECTION;
+ }
+
+ bool isSectionDefinition() const {
+ // C++/CLI creates external ABS symbols for non-const appdomain globals.
+ // These are also followed by an auxiliary section definition.
+ bool isAppdomainGlobal =
+ getStorageClass() == COFF::IMAGE_SYM_CLASS_EXTERNAL &&
+ getSectionNumber() == COFF::IMAGE_SYM_ABSOLUTE;
+ bool isOrdinarySection = getStorageClass() == COFF::IMAGE_SYM_CLASS_STATIC;
+ if (!getNumberOfAuxSymbols())
+ return false;
+ return isAppdomainGlobal || isOrdinarySection;
+ }
+
+ bool isCLRToken() const {
+ return getStorageClass() == COFF::IMAGE_SYM_CLASS_CLR_TOKEN;
+ }
+
+private:
+ bool isSet() const { return CS16 || CS32; }
+
+ const coff_symbol16 *CS16;
+ const coff_symbol32 *CS32;
+};
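+
+// Illustrative sketch, not part of the original header: COFFSymbolRef wraps
+// whichever of the two symbol layouts is present, so the same code handles
+// regular and bigobj symbol tables, e.g. via COFFObjectFile::getSymbol
+// (declared further below):
+//
+//   ErrorOr<COFFSymbolRef> SymOrErr = Obj.getSymbol(Index);
+//   if (SymOrErr && SymOrErr->isExternal())
+//     outs() << "external, section " << SymOrErr->getSectionNumber() << "\n";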
+
+struct coff_section {
+ char Name[COFF::NameSize];
+ support::ulittle32_t VirtualSize;
+ support::ulittle32_t VirtualAddress;
+ support::ulittle32_t SizeOfRawData;
+ support::ulittle32_t PointerToRawData;
+ support::ulittle32_t PointerToRelocations;
+ support::ulittle32_t PointerToLinenumbers;
+ support::ulittle16_t NumberOfRelocations;
+ support::ulittle16_t NumberOfLinenumbers;
+ support::ulittle32_t Characteristics;
+
+ // Returns true if the actual number of relocations is stored in the
+ // VirtualAddress field of the first relocation table entry.
+ bool hasExtendedRelocations() const {
+ return (Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) &&
+ NumberOfRelocations == UINT16_MAX;
+ }
+};
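+
+// Illustrative sketch, not part of the original header: when
+// hasExtendedRelocations() is true, NumberOfRelocations has overflowed and
+// the real count is stored in the first relocation entry, roughly:
+//
+//   uint32_t relocCount(const coff_section &Sec,
+//                       const coff_relocation *FirstReloc) {
+//     return Sec.hasExtendedRelocations() ? FirstReloc->VirtualAddress
+//                                         : Sec.NumberOfRelocations;
+//   }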
+
+struct coff_relocation {
+ support::ulittle32_t VirtualAddress;
+ support::ulittle32_t SymbolTableIndex;
+ support::ulittle16_t Type;
+};
+
+struct coff_aux_function_definition {
+ support::ulittle32_t TagIndex;
+ support::ulittle32_t TotalSize;
+ support::ulittle32_t PointerToLinenumber;
+ support::ulittle32_t PointerToNextFunction;
+};
+
+struct coff_aux_bf_and_ef_symbol {
+ char Unused1[4];
+ support::ulittle16_t Linenumber;
+ char Unused2[6];
+ support::ulittle32_t PointerToNextFunction;
+};
+
+struct coff_aux_weak_external {
+ support::ulittle32_t TagIndex;
+ support::ulittle32_t Characteristics;
+};
+
+struct coff_aux_section_definition {
+ support::ulittle32_t Length;
+ support::ulittle16_t NumberOfRelocations;
+ support::ulittle16_t NumberOfLinenumbers;
+ support::ulittle32_t CheckSum;
+ support::ulittle16_t NumberLowPart;
+ uint8_t Selection;
+ uint8_t Unused;
+ support::ulittle16_t NumberHighPart;
+ int32_t getNumber(bool IsBigObj) const {
+ uint32_t Number = static_cast<uint32_t>(NumberLowPart);
+ if (IsBigObj)
+ Number |= static_cast<uint32_t>(NumberHighPart) << 16;
+ return static_cast<int32_t>(Number);
+ }
+};
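+
+// Illustrative note, not part of the original header: in a bigobj the
+// associated COMDAT section number is split across NumberLowPart and
+// NumberHighPart, and getNumber(/*IsBigObj=*/true) reassembles it; e.g.
+// Low = 0x0001 and High = 0x0002 yield 0x00020001.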
+
+struct coff_aux_clr_token {
+ uint8_t AuxType;
+ uint8_t Reserved;
+ support::ulittle32_t SymbolTableIndex;
+};
+
+struct coff_import_header {
+ support::ulittle16_t Sig1;
+ support::ulittle16_t Sig2;
+ support::ulittle16_t Version;
+ support::ulittle16_t Machine;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle32_t SizeOfData;
+ support::ulittle16_t OrdinalHint;
+ support::ulittle16_t TypeInfo;
+ int getType() const { return TypeInfo & 0x3; }
+ int getNameType() const { return (TypeInfo >> 2) & 0x7; }
+};
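+
+// Illustrative note, not part of the original header: TypeInfo packs the
+// import type in bits 0-1 and the name type in bits 2-4, so TypeInfo == 0x6
+// decodes as getType() == 2 and getNameType() == 1.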
+
+struct coff_import_directory_table_entry {
+ support::ulittle32_t ImportLookupTableRVA;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle32_t ForwarderChain;
+ support::ulittle32_t NameRVA;
+ support::ulittle32_t ImportAddressTableRVA;
+};
+
+struct coff_load_configuration32 {
+ support::ulittle32_t Characteristics;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle16_t MajorVersion;
+ support::ulittle16_t MinorVersion;
+ support::ulittle32_t GlobalFlagsClear;
+ support::ulittle32_t GlobalFlagsSet;
+ support::ulittle32_t CriticalSectionDefaultTimeout;
+ support::ulittle32_t DeCommitFreeBlockThreshold;
+ support::ulittle32_t DeCommitTotalFreeThreshold;
+ support::ulittle32_t LockPrefixTable;
+ support::ulittle32_t MaximumAllocationSize;
+ support::ulittle32_t VirtualMemoryThreshold;
+ support::ulittle32_t ProcessAffinityMask;
+ support::ulittle32_t ProcessHeapFlags;
+ support::ulittle16_t CSDVersion;
+ support::ulittle16_t Reserved;
+ support::ulittle32_t EditList;
+ support::ulittle32_t SecurityCookie;
+ support::ulittle32_t SEHandlerTable;
+ support::ulittle32_t SEHandlerCount;
+};
+
+struct coff_load_configuration64 {
+ support::ulittle32_t Characteristics;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle16_t MajorVersion;
+ support::ulittle16_t MinorVersion;
+ support::ulittle32_t GlobalFlagsClear;
+ support::ulittle32_t GlobalFlagsSet;
+ support::ulittle32_t CriticalSectionDefaultTimeout;
+ support::ulittle32_t DeCommitFreeBlockThreshold;
+ support::ulittle32_t DeCommitTotalFreeThreshold;
+ support::ulittle32_t LockPrefixTable;
+ support::ulittle32_t MaximumAllocationSize;
+ support::ulittle32_t VirtualMemoryThreshold;
+ support::ulittle32_t ProcessAffinityMask;
+ support::ulittle32_t ProcessHeapFlags;
+ support::ulittle16_t CSDVersion;
+ support::ulittle16_t Reserved;
+ support::ulittle32_t EditList;
+ support::ulittle64_t SecurityCookie;
+ support::ulittle64_t SEHandlerTable;
+ support::ulittle64_t SEHandlerCount;
+};
+
+struct coff_runtime_function_x64 {
+ support::ulittle32_t BeginAddress;
+ support::ulittle32_t EndAddress;
+ support::ulittle32_t UnwindInformation;
+};
+
+struct coff_base_reloc_block_header {
+ support::ulittle32_t PageRVA;
+ support::ulittle32_t BlockSize;
+};
+
+struct coff_base_reloc_block_entry {
+ support::ulittle16_t Data;
+ int getType() const { return Data >> 12; }
+ int getOffset() const { return Data & ((1 << 12) - 1); }
+};
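+
+// Illustrative sketch, not part of the original header: each base relocation
+// block covers one page, and BlockSize includes the 8-byte header, so the
+// number of 16-bit entries following a header H is:
+//
+//   size_t N = (H.BlockSize - sizeof(coff_base_reloc_block_header)) /
+//              sizeof(coff_base_reloc_block_entry);
+//
+// and each entry patches the address H.PageRVA + Entry.getOffset().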
+
+class COFFObjectFile : public ObjectFile {
+private:
+ friend class ImportDirectoryEntryRef;
+ friend class ExportDirectoryEntryRef;
+ const coff_file_header *COFFHeader;
+ const coff_bigobj_file_header *COFFBigObjHeader;
+ const pe32_header *PE32Header;
+ const pe32plus_header *PE32PlusHeader;
+ const data_directory *DataDirectory;
+ const coff_section *SectionTable;
+ const coff_symbol16 *SymbolTable16;
+ const coff_symbol32 *SymbolTable32;
+ const char *StringTable;
+ uint32_t StringTableSize;
+ const import_directory_table_entry *ImportDirectory;
+ uint32_t NumberOfImportDirectory;
+ const delay_import_directory_table_entry *DelayImportDirectory;
+ uint32_t NumberOfDelayImportDirectory;
+ const export_directory_table_entry *ExportDirectory;
+ const coff_base_reloc_block_header *BaseRelocHeader;
+ const coff_base_reloc_block_header *BaseRelocEnd;
+
+ std::error_code getString(uint32_t offset, StringRef &Res) const;
+
+ template <typename coff_symbol_type>
+ const coff_symbol_type *toSymb(DataRefImpl Symb) const;
+ const coff_section *toSec(DataRefImpl Sec) const;
+ const coff_relocation *toRel(DataRefImpl Rel) const;
+
+ std::error_code initSymbolTablePtr();
+ std::error_code initImportTablePtr();
+ std::error_code initDelayImportTablePtr();
+ std::error_code initExportTablePtr();
+ std::error_code initBaseRelocPtr();
+
+public:
+ uintptr_t getSymbolTable() const {
+ if (SymbolTable16)
+ return reinterpret_cast<uintptr_t>(SymbolTable16);
+ if (SymbolTable32)
+ return reinterpret_cast<uintptr_t>(SymbolTable32);
+ return uintptr_t(0);
+ }
+ uint16_t getMachine() const {
+ if (COFFHeader)
+ return COFFHeader->Machine;
+ if (COFFBigObjHeader)
+ return COFFBigObjHeader->Machine;
+ llvm_unreachable("no COFF header!");
+ }
+ uint16_t getSizeOfOptionalHeader() const {
+ if (COFFHeader)
+ return COFFHeader->isImportLibrary() ? 0
+ : COFFHeader->SizeOfOptionalHeader;
+ // bigobj doesn't have this field.
+ if (COFFBigObjHeader)
+ return 0;
+ llvm_unreachable("no COFF header!");
+ }
+ uint16_t getCharacteristics() const {
+ if (COFFHeader)
+ return COFFHeader->isImportLibrary() ? 0 : COFFHeader->Characteristics;
+ // bigobj doesn't have characteristics to speak of;
+ // editbin will silently lie to you if you attempt to set any.
+ if (COFFBigObjHeader)
+ return 0;
+ llvm_unreachable("no COFF header!");
+ }
+ uint32_t getTimeDateStamp() const {
+ if (COFFHeader)
+ return COFFHeader->TimeDateStamp;
+ if (COFFBigObjHeader)
+ return COFFBigObjHeader->TimeDateStamp;
+ llvm_unreachable("no COFF header!");
+ }
+ uint32_t getNumberOfSections() const {
+ if (COFFHeader)
+ return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSections;
+ if (COFFBigObjHeader)
+ return COFFBigObjHeader->NumberOfSections;
+ llvm_unreachable("no COFF header!");
+ }
+ uint32_t getPointerToSymbolTable() const {
+ if (COFFHeader)
+ return COFFHeader->isImportLibrary() ? 0
+ : COFFHeader->PointerToSymbolTable;
+ if (COFFBigObjHeader)
+ return COFFBigObjHeader->PointerToSymbolTable;
+ llvm_unreachable("no COFF header!");
+ }
+ uint32_t getNumberOfSymbols() const {
+ if (COFFHeader)
+ return COFFHeader->isImportLibrary() ? 0 : COFFHeader->NumberOfSymbols;
+ if (COFFBigObjHeader)
+ return COFFBigObjHeader->NumberOfSymbols;
+ llvm_unreachable("no COFF header!");
+ }
+protected:
+ void moveSymbolNext(DataRefImpl &Symb) const override;
+ ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
+ ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+ uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+ uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+ SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
+ ErrorOr<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+ void moveSectionNext(DataRefImpl &Sec) const override;
+ std::error_code getSectionName(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionSize(DataRefImpl Sec) const override;
+ std::error_code getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+ bool isSectionText(DataRefImpl Sec) const override;
+ bool isSectionData(DataRefImpl Sec) const override;
+ bool isSectionBSS(DataRefImpl Sec) const override;
+ bool isSectionVirtual(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+
+ void moveRelocationNext(DataRefImpl &Rel) const override;
+ uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+ symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+ uint64_t getRelocationType(DataRefImpl Rel) const override;
+ void getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const override;
+
+public:
+ COFFObjectFile(MemoryBufferRef Object, std::error_code &EC);
+ basic_symbol_iterator symbol_begin_impl() const override;
+ basic_symbol_iterator symbol_end_impl() const override;
+ section_iterator section_begin() const override;
+ section_iterator section_end() const override;
+
+ const coff_section *getCOFFSection(const SectionRef &Section) const;
+ COFFSymbolRef getCOFFSymbol(const DataRefImpl &Ref) const;
+ COFFSymbolRef getCOFFSymbol(const SymbolRef &Symbol) const;
+ const coff_relocation *getCOFFRelocation(const RelocationRef &Reloc) const;
+ unsigned getSectionID(SectionRef Sec) const;
+ unsigned getSymbolSectionID(SymbolRef Sym) const;
+
+ uint8_t getBytesInAddress() const override;
+ StringRef getFileFormatName() const override;
+ unsigned getArch() const override;
+
+ import_directory_iterator import_directory_begin() const;
+ import_directory_iterator import_directory_end() const;
+ delay_import_directory_iterator delay_import_directory_begin() const;
+ delay_import_directory_iterator delay_import_directory_end() const;
+ export_directory_iterator export_directory_begin() const;
+ export_directory_iterator export_directory_end() const;
+ base_reloc_iterator base_reloc_begin() const;
+ base_reloc_iterator base_reloc_end() const;
+
+ iterator_range<import_directory_iterator> import_directories() const;
+ iterator_range<delay_import_directory_iterator>
+ delay_import_directories() const;
+ iterator_range<export_directory_iterator> export_directories() const;
+ iterator_range<base_reloc_iterator> base_relocs() const;
+
+ const dos_header *getDOSHeader() const {
+ if (!PE32Header && !PE32PlusHeader)
+ return nullptr;
+ return reinterpret_cast<const dos_header *>(base());
+ }
+ std::error_code getPE32Header(const pe32_header *&Res) const;
+ std::error_code getPE32PlusHeader(const pe32plus_header *&Res) const;
+ std::error_code getDataDirectory(uint32_t index,
+ const data_directory *&Res) const;
+ std::error_code getSection(int32_t index, const coff_section *&Res) const;
+ template <typename coff_symbol_type>
+ std::error_code getSymbol(uint32_t Index,
+ const coff_symbol_type *&Res) const {
+ if (Index >= getNumberOfSymbols())
+ return object_error::parse_failed;
+
+ Res = reinterpret_cast<coff_symbol_type *>(getSymbolTable()) + Index;
+ return std::error_code();
+ }
+ ErrorOr<COFFSymbolRef> getSymbol(uint32_t index) const {
+ if (SymbolTable16) {
+ const coff_symbol16 *Symb = nullptr;
+ if (std::error_code EC = getSymbol(index, Symb))
+ return EC;
+ return COFFSymbolRef(Symb);
+ }
+ if (SymbolTable32) {
+ const coff_symbol32 *Symb = nullptr;
+ if (std::error_code EC = getSymbol(index, Symb))
+ return EC;
+ return COFFSymbolRef(Symb);
+ }
+ return object_error::parse_failed;
+ }
+ template <typename T>
+ std::error_code getAuxSymbol(uint32_t index, const T *&Res) const {
+ ErrorOr<COFFSymbolRef> s = getSymbol(index);
+ if (std::error_code EC = s.getError())
+ return EC;
+ Res = reinterpret_cast<const T *>(s->getRawPtr());
+ return std::error_code();
+ }
+ std::error_code getSymbolName(COFFSymbolRef Symbol, StringRef &Res) const;
+ std::error_code getSymbolName(const coff_symbol_generic *Symbol,
+ StringRef &Res) const;
+
+ ArrayRef<uint8_t> getSymbolAuxData(COFFSymbolRef Symbol) const;
+
+ size_t getSymbolTableEntrySize() const {
+ if (COFFHeader)
+ return sizeof(coff_symbol16);
+ if (COFFBigObjHeader)
+ return sizeof(coff_symbol32);
+ llvm_unreachable("null symbol table pointer!");
+ }
+
+ iterator_range<const coff_relocation *>
+ getRelocations(const coff_section *Sec) const;
+
+ std::error_code getSectionName(const coff_section *Sec, StringRef &Res) const;
+ uint64_t getSectionSize(const coff_section *Sec) const;
+ std::error_code getSectionContents(const coff_section *Sec,
+ ArrayRef<uint8_t> &Res) const;
+
+ uint64_t getImageBase() const;
+ std::error_code getVaPtr(uint64_t VA, uintptr_t &Res) const;
+ std::error_code getRvaPtr(uint32_t Rva, uintptr_t &Res) const;
+ std::error_code getHintName(uint32_t Rva, uint16_t &Hint,
+ StringRef &Name) const;
+
+ bool isRelocatableObject() const override;
+ bool is64() const { return PE32PlusHeader; }
+
+ static inline bool classof(const Binary *v) { return v->isCOFF(); }
+};
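+
+// Illustrative usage sketch, not part of the original header, assuming Obj
+// is a successfully constructed COFFObjectFile:
+//
+//   for (const ImportDirectoryEntryRef &I : Obj.import_directories()) {
+//     StringRef DLLName;
+//     if (!I.getName(DLLName)) // std::error_code is falsy on success
+//       outs() << "imports from " << DLLName << "\n";
+//   }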
+
+// The iterator for the import directory table.
+class ImportDirectoryEntryRef {
+public:
+ ImportDirectoryEntryRef() : OwningObject(nullptr) {}
+ ImportDirectoryEntryRef(const import_directory_table_entry *Table, uint32_t I,
+ const COFFObjectFile *Owner)
+ : ImportTable(Table), Index(I), OwningObject(Owner) {}
+
+ bool operator==(const ImportDirectoryEntryRef &Other) const;
+ void moveNext();
+
+ imported_symbol_iterator imported_symbol_begin() const;
+ imported_symbol_iterator imported_symbol_end() const;
+ iterator_range<imported_symbol_iterator> imported_symbols() const;
+
+ std::error_code getName(StringRef &Result) const;
+ std::error_code getImportLookupTableRVA(uint32_t &Result) const;
+ std::error_code getImportAddressTableRVA(uint32_t &Result) const;
+
+ std::error_code
+ getImportTableEntry(const import_directory_table_entry *&Result) const;
+
+ std::error_code
+ getImportLookupEntry(const import_lookup_table_entry32 *&Result) const;
+
+private:
+ const import_directory_table_entry *ImportTable;
+ uint32_t Index;
+ const COFFObjectFile *OwningObject;
+};
+
+class DelayImportDirectoryEntryRef {
+public:
+ DelayImportDirectoryEntryRef() : OwningObject(nullptr) {}
+ DelayImportDirectoryEntryRef(const delay_import_directory_table_entry *T,
+ uint32_t I, const COFFObjectFile *Owner)
+ : Table(T), Index(I), OwningObject(Owner) {}
+
+ bool operator==(const DelayImportDirectoryEntryRef &Other) const;
+ void moveNext();
+
+ imported_symbol_iterator imported_symbol_begin() const;
+ imported_symbol_iterator imported_symbol_end() const;
+ iterator_range<imported_symbol_iterator> imported_symbols() const;
+
+ std::error_code getName(StringRef &Result) const;
+ std::error_code getDelayImportTable(
+ const delay_import_directory_table_entry *&Result) const;
+ std::error_code getImportAddress(int AddrIndex, uint64_t &Result) const;
+
+private:
+ const delay_import_directory_table_entry *Table;
+ uint32_t Index;
+ const COFFObjectFile *OwningObject;
+};
+
+// The iterator for the export directory table entry.
+class ExportDirectoryEntryRef {
+public:
+ ExportDirectoryEntryRef() : OwningObject(nullptr) {}
+ ExportDirectoryEntryRef(const export_directory_table_entry *Table, uint32_t I,
+ const COFFObjectFile *Owner)
+ : ExportTable(Table), Index(I), OwningObject(Owner) {}
+
+ bool operator==(const ExportDirectoryEntryRef &Other) const;
+ void moveNext();
+
+ std::error_code getDllName(StringRef &Result) const;
+ std::error_code getOrdinalBase(uint32_t &Result) const;
+ std::error_code getOrdinal(uint32_t &Result) const;
+ std::error_code getExportRVA(uint32_t &Result) const;
+ std::error_code getSymbolName(StringRef &Result) const;
+
+private:
+ const export_directory_table_entry *ExportTable;
+ uint32_t Index;
+ const COFFObjectFile *OwningObject;
+};
+
+class ImportedSymbolRef {
+public:
+ ImportedSymbolRef() : OwningObject(nullptr) {}
+ ImportedSymbolRef(const import_lookup_table_entry32 *Entry, uint32_t I,
+ const COFFObjectFile *Owner)
+ : Entry32(Entry), Entry64(nullptr), Index(I), OwningObject(Owner) {}
+ ImportedSymbolRef(const import_lookup_table_entry64 *Entry, uint32_t I,
+ const COFFObjectFile *Owner)
+ : Entry32(nullptr), Entry64(Entry), Index(I), OwningObject(Owner) {}
+
+ bool operator==(const ImportedSymbolRef &Other) const;
+ void moveNext();
+
+ std::error_code getSymbolName(StringRef &Result) const;
+ std::error_code getOrdinal(uint16_t &Result) const;
+
+private:
+ const import_lookup_table_entry32 *Entry32;
+ const import_lookup_table_entry64 *Entry64;
+ uint32_t Index;
+ const COFFObjectFile *OwningObject;
+};
+
+class BaseRelocRef {
+public:
+ BaseRelocRef() : OwningObject(nullptr) {}
+ BaseRelocRef(const coff_base_reloc_block_header *Header,
+ const COFFObjectFile *Owner)
+ : Header(Header), Index(0), OwningObject(Owner) {}
+
+ bool operator==(const BaseRelocRef &Other) const;
+ void moveNext();
+
+ std::error_code getType(uint8_t &Type) const;
+ std::error_code getRVA(uint32_t &Result) const;
+
+private:
+ const coff_base_reloc_block_header *Header;
+ uint32_t Index;
+ const COFFObjectFile *OwningObject;
+};
+
+} // end namespace object
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/COFFImportFile.h b/contrib/llvm/include/llvm/Object/COFFImportFile.h
new file mode 100644
index 0000000..b04a44e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/COFFImportFile.h
@@ -0,0 +1,74 @@
+//===- COFFImportFile.h - COFF short import file implementation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A COFF short import file is a special kind of file that contains
+// only symbol names for DLL-exported symbols. This class implements
+// the SymbolicFile interface for such files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFF_IMPORT_FILE_H
+#define LLVM_OBJECT_COFF_IMPORT_FILE_H
+
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace object {
+
+class COFFImportFile : public SymbolicFile {
+public:
+ COFFImportFile(MemoryBufferRef Source)
+ : SymbolicFile(ID_COFFImportFile, Source) {}
+
+ static inline bool classof(Binary const *V) { return V->isCOFFImportFile(); }
+
+ void moveSymbolNext(DataRefImpl &Symb) const override { ++Symb.p; }
+
+ std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const override {
+ if (Symb.p == 0)
+ OS << "__imp_";
+ OS << StringRef(Data.getBufferStart() + sizeof(coff_import_header));
+ return std::error_code();
+ }
+
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override {
+ return SymbolRef::SF_Global;
+ }
+
+ basic_symbol_iterator symbol_begin_impl() const override {
+ return BasicSymbolRef(DataRefImpl(), this);
+ }
+
+ basic_symbol_iterator symbol_end_impl() const override {
+ DataRefImpl Symb;
+ Symb.p = isCode() ? 2 : 1;
+ return BasicSymbolRef(Symb, this);
+ }
+
+ const coff_import_header *getCOFFImportHeader() const {
+ return reinterpret_cast<const object::coff_import_header *>(
+ Data.getBufferStart());
+ }
+
+private:
+ bool isCode() const {
+ return getCOFFImportHeader()->getType() == COFF::IMPORT_CODE;
+ }
+};
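+
+// Illustrative note, not part of the original header: the symbol table of a
+// short import file is synthetic. Symbol 0 is printed with the "__imp_"
+// prefix (the import-address-table slot); for code imports only, symbol 1 is
+// the plain name (the thunk), hence the `isCode() ? 2 : 1` bound above.
+// A minimal dump:
+//
+//   COFFImportFile File(Buffer);
+//   for (const BasicSymbolRef &S : File.symbols()) {
+//     S.printName(outs());
+//     outs() << "\n";
+//   }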
+
+} // namespace object
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/COFFYAML.h b/contrib/llvm/include/llvm/Object/COFFYAML.h
new file mode 100644
index 0000000..12a2522
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/COFFYAML.h
@@ -0,0 +1,223 @@
+//===- COFFYAML.h - COFF YAMLIO implementation ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares classes for handling the YAML representation of COFF.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_COFFYAML_H
+#define LLVM_OBJECT_COFFYAML_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/MC/YAML.h"
+#include "llvm/Support/COFF.h"
+
+namespace llvm {
+
+namespace COFF {
+inline Characteristics operator|(Characteristics a, Characteristics b) {
+ uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
+ return static_cast<Characteristics>(Ret);
+}
+
+inline SectionCharacteristics operator|(SectionCharacteristics a,
+ SectionCharacteristics b) {
+ uint32_t Ret = static_cast<uint32_t>(a) | static_cast<uint32_t>(b);
+ return static_cast<SectionCharacteristics>(Ret);
+}
+
+inline DLLCharacteristics operator|(DLLCharacteristics a,
+ DLLCharacteristics b) {
+ uint16_t Ret = static_cast<uint16_t>(a) | static_cast<uint16_t>(b);
+ return static_cast<DLLCharacteristics>(Ret);
+}
+}
+
+// The structure of the YAML files is not an exact 1:1 match to COFF. In order
+// to use yaml::IO, we use these structures, which are closer to the source.
+namespace COFFYAML {
+ LLVM_YAML_STRONG_TYPEDEF(uint8_t, COMDATType)
+ LLVM_YAML_STRONG_TYPEDEF(uint32_t, WeakExternalCharacteristics)
+ LLVM_YAML_STRONG_TYPEDEF(uint8_t, AuxSymbolType)
+
+ struct Relocation {
+ uint32_t VirtualAddress;
+ uint16_t Type;
+ StringRef SymbolName;
+ };
+
+ struct Section {
+ COFF::section Header;
+ unsigned Alignment;
+ yaml::BinaryRef SectionData;
+ std::vector<Relocation> Relocations;
+ StringRef Name;
+ Section();
+ };
+
+ struct Symbol {
+ COFF::symbol Header;
+ COFF::SymbolBaseType SimpleType;
+ COFF::SymbolComplexType ComplexType;
+ Optional<COFF::AuxiliaryFunctionDefinition> FunctionDefinition;
+ Optional<COFF::AuxiliarybfAndefSymbol> bfAndefSymbol;
+ Optional<COFF::AuxiliaryWeakExternal> WeakExternal;
+ StringRef File;
+ Optional<COFF::AuxiliarySectionDefinition> SectionDefinition;
+ Optional<COFF::AuxiliaryCLRToken> CLRToken;
+ StringRef Name;
+ Symbol();
+ };
+
+ struct PEHeader {
+ COFF::PE32Header Header;
+ Optional<COFF::DataDirectory> DataDirectories[COFF::NUM_DATA_DIRECTORIES];
+ };
+
+ struct Object {
+ Optional<PEHeader> OptionalHeader;
+ COFF::header Header;
+ std::vector<Section> Sections;
+ std::vector<Symbol> Symbols;
+ Object();
+ };
+}
+}
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Section)
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Symbol)
+LLVM_YAML_IS_SEQUENCE_VECTOR(COFFYAML::Relocation)
+
+namespace llvm {
+namespace yaml {
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics> {
+ static void enumeration(IO &IO, COFFYAML::WeakExternalCharacteristics &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::AuxSymbolType> {
+ static void enumeration(IO &IO, COFFYAML::AuxSymbolType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFFYAML::COMDATType> {
+ static void enumeration(IO &IO, COFFYAML::COMDATType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::MachineTypes> {
+ static void enumeration(IO &IO, COFF::MachineTypes &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolBaseType> {
+ static void enumeration(IO &IO, COFF::SymbolBaseType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolStorageClass> {
+ static void enumeration(IO &IO, COFF::SymbolStorageClass &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::SymbolComplexType> {
+ static void enumeration(IO &IO, COFF::SymbolComplexType &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypeI386> {
+ static void enumeration(IO &IO, COFF::RelocationTypeI386 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::RelocationTypeAMD64> {
+ static void enumeration(IO &IO, COFF::RelocationTypeAMD64 &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<COFF::WindowsSubsystem> {
+ static void enumeration(IO &IO, COFF::WindowsSubsystem &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::Characteristics> {
+ static void bitset(IO &IO, COFF::Characteristics &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::SectionCharacteristics> {
+ static void bitset(IO &IO, COFF::SectionCharacteristics &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<COFF::DLLCharacteristics> {
+ static void bitset(IO &IO, COFF::DLLCharacteristics &Value);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Relocation> {
+ static void mapping(IO &IO, COFFYAML::Relocation &Rel);
+};
+
+template <>
+struct MappingTraits<COFFYAML::PEHeader> {
+ static void mapping(IO &IO, COFFYAML::PEHeader &PH);
+};
+
+template <>
+struct MappingTraits<COFF::DataDirectory> {
+ static void mapping(IO &IO, COFF::DataDirectory &DD);
+};
+
+template <>
+struct MappingTraits<COFF::header> {
+ static void mapping(IO &IO, COFF::header &H);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryFunctionDefinition> {
+ static void mapping(IO &IO, COFF::AuxiliaryFunctionDefinition &AFD);
+};
+
+template <> struct MappingTraits<COFF::AuxiliarybfAndefSymbol> {
+ static void mapping(IO &IO, COFF::AuxiliarybfAndefSymbol &AAS);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryWeakExternal> {
+ static void mapping(IO &IO, COFF::AuxiliaryWeakExternal &AWE);
+};
+
+template <> struct MappingTraits<COFF::AuxiliarySectionDefinition> {
+ static void mapping(IO &IO, COFF::AuxiliarySectionDefinition &ASD);
+};
+
+template <> struct MappingTraits<COFF::AuxiliaryCLRToken> {
+ static void mapping(IO &IO, COFF::AuxiliaryCLRToken &ACT);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Symbol> {
+ static void mapping(IO &IO, COFFYAML::Symbol &S);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Section> {
+ static void mapping(IO &IO, COFFYAML::Section &Sec);
+};
+
+template <>
+struct MappingTraits<COFFYAML::Object> {
+ static void mapping(IO &IO, COFFYAML::Object &Obj);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/ELF.h b/contrib/llvm/include/llvm/Object/ELF.h
new file mode 100644
index 0000000..b0eaa3f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ELF.h
@@ -0,0 +1,542 @@
+//===- ELF.h - ELF object file implementation -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ELFFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELF_H
+#define LLVM_OBJECT_ELF_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace object {
+
+StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
+
+// Subclasses of ELFFile may need this for template instantiation.
+inline std::pair<unsigned char, unsigned char>
+getElfArchType(StringRef Object) {
+ if (Object.size() < ELF::EI_NIDENT)
+ return std::make_pair((uint8_t)ELF::ELFCLASSNONE,
+ (uint8_t)ELF::ELFDATANONE);
+ return std::make_pair((uint8_t)Object[ELF::EI_CLASS],
+ (uint8_t)Object[ELF::EI_DATA]);
+}
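+
+// Illustrative sketch, not part of the original header: picking an ELFFile
+// instantiation from the returned (class, data) pair:
+//
+//   auto Ident = getElfArchType(Buf);
+//   bool Is64 = Ident.first == ELF::ELFCLASS64;
+//   bool IsLE = Ident.second == ELF::ELFDATA2LSB;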
+
+template <class ELFT>
+class ELFFile {
+public:
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ typedef typename std::conditional<ELFT::Is64Bits,
+ uint64_t, uint32_t>::type uintX_t;
+
+ typedef Elf_Ehdr_Impl<ELFT> Elf_Ehdr;
+ typedef Elf_Shdr_Impl<ELFT> Elf_Shdr;
+ typedef Elf_Sym_Impl<ELFT> Elf_Sym;
+ typedef Elf_Dyn_Impl<ELFT> Elf_Dyn;
+ typedef Elf_Phdr_Impl<ELFT> Elf_Phdr;
+ typedef Elf_Rel_Impl<ELFT, false> Elf_Rel;
+ typedef Elf_Rel_Impl<ELFT, true> Elf_Rela;
+ typedef Elf_Verdef_Impl<ELFT> Elf_Verdef;
+ typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
+ typedef Elf_Verneed_Impl<ELFT> Elf_Verneed;
+ typedef Elf_Vernaux_Impl<ELFT> Elf_Vernaux;
+ typedef Elf_Versym_Impl<ELFT> Elf_Versym;
+ typedef Elf_Hash_Impl<ELFT> Elf_Hash;
+ typedef Elf_GnuHash_Impl<ELFT> Elf_GnuHash;
+ typedef iterator_range<const Elf_Dyn *> Elf_Dyn_Range;
+ typedef iterator_range<const Elf_Shdr *> Elf_Shdr_Range;
+ typedef iterator_range<const Elf_Sym *> Elf_Sym_Range;
+
+ const uint8_t *base() const {
+ return reinterpret_cast<const uint8_t *>(Buf.data());
+ }
+
+private:
+
+ StringRef Buf;
+
+ const Elf_Ehdr *Header;
+ const Elf_Shdr *SectionHeaderTable = nullptr;
+ StringRef DotShstrtab; // Section header string table.
+
+public:
+ template<typename T>
+ const T *getEntry(uint32_t Section, uint32_t Entry) const;
+ template <typename T>
+ const T *getEntry(const Elf_Shdr *Section, uint32_t Entry) const;
+
+ ErrorOr<StringRef> getStringTable(const Elf_Shdr *Section) const;
+ ErrorOr<StringRef> getStringTableForSymtab(const Elf_Shdr &Section) const;
+
+ ErrorOr<ArrayRef<Elf_Word>> getSHNDXTable(const Elf_Shdr &Section) const;
+
+ void VerifyStrTab(const Elf_Shdr *sh) const;
+
+ StringRef getRelocationTypeName(uint32_t Type) const;
+ void getRelocationTypeName(uint32_t Type,
+ SmallVectorImpl<char> &Result) const;
+
+ /// \brief Get the symbol for a given relocation.
+ const Elf_Sym *getRelocationSymbol(const Elf_Rel *Rel,
+ const Elf_Shdr *SymTab) const;
+
+ ELFFile(StringRef Object, std::error_code &EC);
+
+ bool isMipsELF64() const {
+ return Header->e_machine == ELF::EM_MIPS &&
+ Header->getFileClass() == ELF::ELFCLASS64;
+ }
+
+ bool isMips64EL() const {
+ return Header->e_machine == ELF::EM_MIPS &&
+ Header->getFileClass() == ELF::ELFCLASS64 &&
+ Header->getDataEncoding() == ELF::ELFDATA2LSB;
+ }
+
+ ErrorOr<const Elf_Dyn *> dynamic_table_begin(const Elf_Phdr *Phdr) const;
+ ErrorOr<const Elf_Dyn *> dynamic_table_end(const Elf_Phdr *Phdr) const;
+ ErrorOr<Elf_Dyn_Range> dynamic_table(const Elf_Phdr *Phdr) const {
+ ErrorOr<const Elf_Dyn *> Begin = dynamic_table_begin(Phdr);
+ if (std::error_code EC = Begin.getError())
+ return EC;
+ ErrorOr<const Elf_Dyn *> End = dynamic_table_end(Phdr);
+ if (std::error_code EC = End.getError())
+ return EC;
+ return make_range(*Begin, *End);
+ }
+
+ const Elf_Shdr *section_begin() const;
+ const Elf_Shdr *section_end() const;
+ Elf_Shdr_Range sections() const {
+ return make_range(section_begin(), section_end());
+ }
+
+ const Elf_Sym *symbol_begin(const Elf_Shdr *Sec) const {
+ if (!Sec)
+ return nullptr;
+ if (Sec->sh_entsize != sizeof(Elf_Sym))
+ report_fatal_error("Invalid symbol size");
+ return reinterpret_cast<const Elf_Sym *>(base() + Sec->sh_offset);
+ }
+ const Elf_Sym *symbol_end(const Elf_Shdr *Sec) const {
+ if (!Sec)
+ return nullptr;
+ uint64_t Size = Sec->sh_size;
+ if (Size % sizeof(Elf_Sym))
+ report_fatal_error("Invalid symbol table size");
+ return symbol_begin(Sec) + Size / sizeof(Elf_Sym);
+ }
+ Elf_Sym_Range symbols(const Elf_Shdr *Sec) const {
+ return make_range(symbol_begin(Sec), symbol_end(Sec));
+ }
+
+ typedef iterator_range<const Elf_Rela *> Elf_Rela_Range;
+
+ const Elf_Rela *rela_begin(const Elf_Shdr *sec) const {
+ if (sec->sh_entsize != sizeof(Elf_Rela))
+ report_fatal_error("Invalid relocation entry size");
+ return reinterpret_cast<const Elf_Rela *>(base() + sec->sh_offset);
+ }
+
+ const Elf_Rela *rela_end(const Elf_Shdr *sec) const {
+ uint64_t Size = sec->sh_size;
+ if (Size % sizeof(Elf_Rela))
+ report_fatal_error("Invalid relocation table size");
+ return rela_begin(sec) + Size / sizeof(Elf_Rela);
+ }
+
+ Elf_Rela_Range relas(const Elf_Shdr *Sec) const {
+ return make_range(rela_begin(Sec), rela_end(Sec));
+ }
+
+ const Elf_Rel *rel_begin(const Elf_Shdr *sec) const {
+ if (sec->sh_entsize != sizeof(Elf_Rel))
+ report_fatal_error("Invalid relocation entry size");
+ return reinterpret_cast<const Elf_Rel *>(base() + sec->sh_offset);
+ }
+
+ const Elf_Rel *rel_end(const Elf_Shdr *sec) const {
+ uint64_t Size = sec->sh_size;
+ if (Size % sizeof(Elf_Rel))
+ report_fatal_error("Invalid relocation table size");
+ return rel_begin(sec) + Size / sizeof(Elf_Rel);
+ }
+
+ typedef iterator_range<const Elf_Rel *> Elf_Rel_Range;
+ Elf_Rel_Range rels(const Elf_Shdr *Sec) const {
+ return make_range(rel_begin(Sec), rel_end(Sec));
+ }
+
+ /// \brief Iterate over program header table.
+ const Elf_Phdr *program_header_begin() const {
+ if (Header->e_phnum && Header->e_phentsize != sizeof(Elf_Phdr))
+ report_fatal_error("Invalid program header size");
+ return reinterpret_cast<const Elf_Phdr *>(base() + Header->e_phoff);
+ }
+
+ const Elf_Phdr *program_header_end() const {
+ return program_header_begin() + Header->e_phnum;
+ }
+
+ typedef iterator_range<const Elf_Phdr *> Elf_Phdr_Range;
+
+ const Elf_Phdr_Range program_headers() const {
+ return make_range(program_header_begin(), program_header_end());
+ }
+
+ uint64_t getNumSections() const;
+ uintX_t getStringTableIndex() const;
+ uint32_t getExtendedSymbolTableIndex(const Elf_Sym *Sym,
+ const Elf_Shdr *SymTab,
+ ArrayRef<Elf_Word> ShndxTable) const;
+ const Elf_Ehdr *getHeader() const { return Header; }
+ ErrorOr<const Elf_Shdr *> getSection(const Elf_Sym *Sym,
+ const Elf_Shdr *SymTab,
+ ArrayRef<Elf_Word> ShndxTable) const;
+ ErrorOr<const Elf_Shdr *> getSection(uint32_t Index) const;
+
+ const Elf_Sym *getSymbol(const Elf_Shdr *Sec, uint32_t Index) const {
+ return &*(symbol_begin(Sec) + Index);
+ }
+
+ ErrorOr<StringRef> getSectionName(const Elf_Shdr *Section) const;
+ template <typename T>
+ ErrorOr<ArrayRef<T>> getSectionContentsAsArray(const Elf_Shdr *Sec) const;
+ ErrorOr<ArrayRef<uint8_t> > getSectionContents(const Elf_Shdr *Sec) const;
+};
+
+typedef ELFFile<ELFType<support::little, false>> ELF32LEFile;
+typedef ELFFile<ELFType<support::little, true>> ELF64LEFile;
+typedef ELFFile<ELFType<support::big, false>> ELF32BEFile;
+typedef ELFFile<ELFType<support::big, true>> ELF64BEFile;
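+
+// Illustrative usage sketch, not part of the original header: constructing an
+// ELFFile over a raw buffer and naming its sections.
+//
+//   std::error_code EC;
+//   ELF64LEFile File(Contents, EC); // Contents is a StringRef over the file
+//   if (!EC)
+//     for (const auto &Shdr : File.sections())
+//       if (ErrorOr<StringRef> Name = File.getSectionName(&Shdr))
+//         outs() << *Name << "\n";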
+
+template <class ELFT>
+uint32_t ELFFile<ELFT>::getExtendedSymbolTableIndex(
+ const Elf_Sym *Sym, const Elf_Shdr *SymTab,
+ ArrayRef<Elf_Word> ShndxTable) const {
+ assert(Sym->st_shndx == ELF::SHN_XINDEX);
+ unsigned Index = Sym - symbol_begin(SymTab);
+
+ // The size of the table was checked in getSHNDXTable.
+ return ShndxTable[Index];
+}
+
+template <class ELFT>
+ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
+ELFFile<ELFT>::getSection(const Elf_Sym *Sym, const Elf_Shdr *SymTab,
+ ArrayRef<Elf_Word> ShndxTable) const {
+ uint32_t Index = Sym->st_shndx;
+ if (Index == ELF::SHN_XINDEX)
+ return getSection(getExtendedSymbolTableIndex(Sym, SymTab, ShndxTable));
+
+ if (Index == ELF::SHN_UNDEF || Index >= ELF::SHN_LORESERVE)
+ return nullptr;
+ return getSection(Sym->st_shndx);
+}
+
+template <class ELFT>
+template <typename T>
+ErrorOr<ArrayRef<T>>
+ELFFile<ELFT>::getSectionContentsAsArray(const Elf_Shdr *Sec) const {
+ uintX_t Offset = Sec->sh_offset;
+ uintX_t Size = Sec->sh_size;
+
+ if (Size % sizeof(T))
+ return object_error::parse_failed;
+ if (Offset + Size > Buf.size())
+ return object_error::parse_failed;
+
+ const T *Start = reinterpret_cast<const T *>(base() + Offset);
+ return makeArrayRef(Start, Size / sizeof(T));
+}
+
+template <class ELFT>
+ErrorOr<ArrayRef<uint8_t>>
+ELFFile<ELFT>::getSectionContents(const Elf_Shdr *Sec) const {
+ return getSectionContentsAsArray<uint8_t>(Sec);
+}
+
+template <class ELFT>
+StringRef ELFFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
+ return getELFRelocationTypeName(Header->e_machine, Type);
+}
+
+template <class ELFT>
+void ELFFile<ELFT>::getRelocationTypeName(uint32_t Type,
+ SmallVectorImpl<char> &Result) const {
+ if (!isMipsELF64()) {
+ StringRef Name = getRelocationTypeName(Type);
+ Result.append(Name.begin(), Name.end());
+ } else {
+ // The Mips N64 ABI allows up to three operations to be specified per
+ // relocation record. Unfortunately there's no easy way to test for the
+ // presence of N64 ELFs as they have no special flag that identifies them
+ // as being N64. We can safely assume at the moment that all Mips
+ // ELFCLASS64 ELFs are N64. New Mips64 ABIs should provide enough
+ // information to disambiguate between the old and new ABIs.
+ uint8_t Type1 = (Type >> 0) & 0xFF;
+ uint8_t Type2 = (Type >> 8) & 0xFF;
+ uint8_t Type3 = (Type >> 16) & 0xFF;
+
+ // Concatenate all three relocation type names.
+ StringRef Name = getRelocationTypeName(Type1);
+ Result.append(Name.begin(), Name.end());
+
+ Name = getRelocationTypeName(Type2);
+ Result.append(1, '/');
+ Result.append(Name.begin(), Name.end());
+
+ Name = getRelocationTypeName(Type3);
+ Result.append(1, '/');
+ Result.append(Name.begin(), Name.end());
+ }
+}
+
+template <class ELFT>
+const typename ELFFile<ELFT>::Elf_Sym *
+ELFFile<ELFT>::getRelocationSymbol(const Elf_Rel *Rel,
+ const Elf_Shdr *SymTab) const {
+ uint32_t Index = Rel->getSymbol(isMips64EL());
+ if (Index == 0)
+ return nullptr;
+ return getEntry<Elf_Sym>(SymTab, Index);
+}
+
+template <class ELFT>
+uint64_t ELFFile<ELFT>::getNumSections() const {
+ assert(Header && "Header not initialized!");
+ if (Header->e_shnum == ELF::SHN_UNDEF && Header->e_shoff > 0) {
+ assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
+ return SectionHeaderTable->sh_size;
+ }
+ return Header->e_shnum;
+}
+
+template <class ELFT>
+typename ELFFile<ELFT>::uintX_t ELFFile<ELFT>::getStringTableIndex() const {
+ if (Header->e_shnum == ELF::SHN_UNDEF) {
+ if (Header->e_shstrndx == ELF::SHN_HIRESERVE)
+ return SectionHeaderTable->sh_link;
+ if (Header->e_shstrndx >= getNumSections())
+ return 0;
+ }
+ return Header->e_shstrndx;
+}
+
+template <class ELFT>
+ELFFile<ELFT>::ELFFile(StringRef Object, std::error_code &EC)
+ : Buf(Object) {
+ const uint64_t FileSize = Buf.size();
+
+ if (sizeof(Elf_Ehdr) > FileSize) {
+ // File too short!
+ EC = object_error::parse_failed;
+ return;
+ }
+
+ Header = reinterpret_cast<const Elf_Ehdr *>(base());
+
+ if (Header->e_shoff == 0)
+ return;
+
+ const uint64_t SectionTableOffset = Header->e_shoff;
+
+ if (SectionTableOffset + sizeof(Elf_Shdr) > FileSize) {
+ // Section header table goes past end of file!
+ EC = object_error::parse_failed;
+ return;
+ }
+
+ // The getNumSections() call below depends on SectionHeaderTable being set.
+ SectionHeaderTable =
+ reinterpret_cast<const Elf_Shdr *>(base() + SectionTableOffset);
+ const uint64_t SectionTableSize = getNumSections() * Header->e_shentsize;
+
+ if (SectionTableOffset + SectionTableSize > FileSize) {
+ // Section table goes past end of file!
+ EC = object_error::parse_failed;
+ return;
+ }
+
+ // Get string table sections.
+ uintX_t StringTableIndex = getStringTableIndex();
+ if (StringTableIndex) {
+ ErrorOr<const Elf_Shdr *> StrTabSecOrErr = getSection(StringTableIndex);
+ if ((EC = StrTabSecOrErr.getError()))
+ return;
+
+ ErrorOr<StringRef> StringTableOrErr = getStringTable(*StrTabSecOrErr);
+ if ((EC = StringTableOrErr.getError()))
+ return;
+ DotShstrtab = *StringTableOrErr;
+ }
+
+ EC = std::error_code();
+}
+
+template <class ELFT>
+static bool compareAddr(uint64_t VAddr, const Elf_Phdr_Impl<ELFT> *Phdr) {
+ return VAddr < Phdr->p_vaddr;
+}
+
+template <class ELFT>
+const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_begin() const {
+ if (Header->e_shentsize != sizeof(Elf_Shdr))
+ report_fatal_error(
+ "Invalid section header entry size (e_shentsize) in ELF header");
+ return reinterpret_cast<const Elf_Shdr *>(base() + Header->e_shoff);
+}
+
+template <class ELFT>
+const typename ELFFile<ELFT>::Elf_Shdr *ELFFile<ELFT>::section_end() const {
+ return section_begin() + getNumSections();
+}
+
+template <class ELFT>
+ErrorOr<const typename ELFFile<ELFT>::Elf_Dyn *>
+ELFFile<ELFT>::dynamic_table_begin(const Elf_Phdr *Phdr) const {
+ if (!Phdr)
+ return nullptr;
+ assert(Phdr->p_type == ELF::PT_DYNAMIC && "Got the wrong program header");
+ uintX_t Offset = Phdr->p_offset;
+ if (Offset > Buf.size())
+ return object_error::parse_failed;
+ return reinterpret_cast<const Elf_Dyn *>(base() + Offset);
+}
+
+template <class ELFT>
+ErrorOr<const typename ELFFile<ELFT>::Elf_Dyn *>
+ELFFile<ELFT>::dynamic_table_end(const Elf_Phdr *Phdr) const {
+ if (!Phdr)
+ return nullptr;
+ assert(Phdr->p_type == ELF::PT_DYNAMIC && "Got the wrong program header");
+ uintX_t Size = Phdr->p_filesz;
+ if (Size % sizeof(Elf_Dyn))
+ return object_error::elf_invalid_dynamic_table_size;
+ // FIXME: Check for overflow?
+ uintX_t End = Phdr->p_offset + Size;
+ if (End > Buf.size())
+ return object_error::parse_failed;
+ return reinterpret_cast<const Elf_Dyn *>(base() + End);
+}
+
+template <class ELFT>
+template <typename T>
+const T *ELFFile<ELFT>::getEntry(uint32_t Section, uint32_t Entry) const {
+ ErrorOr<const Elf_Shdr *> Sec = getSection(Section);
+ if (std::error_code EC = Sec.getError())
+ report_fatal_error(EC.message());
+ return getEntry<T>(*Sec, Entry);
+}
+
+template <class ELFT>
+template <typename T>
+const T *ELFFile<ELFT>::getEntry(const Elf_Shdr *Section,
+ uint32_t Entry) const {
+ return reinterpret_cast<const T *>(base() + Section->sh_offset +
+ (Entry * Section->sh_entsize));
+}
+
+template <class ELFT>
+ErrorOr<const typename ELFFile<ELFT>::Elf_Shdr *>
+ELFFile<ELFT>::getSection(uint32_t Index) const {
+ assert(SectionHeaderTable && "SectionHeaderTable not initialized!");
+ if (Index >= getNumSections())
+ return object_error::invalid_section_index;
+
+ return reinterpret_cast<const Elf_Shdr *>(
+ reinterpret_cast<const char *>(SectionHeaderTable) +
+ (Index * Header->e_shentsize));
+}
+
+template <class ELFT>
+ErrorOr<StringRef>
+ELFFile<ELFT>::getStringTable(const Elf_Shdr *Section) const {
+ if (Section->sh_type != ELF::SHT_STRTAB)
+ return object_error::parse_failed;
+ uint64_t Offset = Section->sh_offset;
+ uint64_t Size = Section->sh_size;
+ if (Offset + Size > Buf.size())
+ return object_error::parse_failed;
+ StringRef Data((const char *)base() + Section->sh_offset, Size);
+ if (Data[Size - 1] != '\0')
+ return object_error::string_table_non_null_end;
+ return Data;
+}
+
+template <class ELFT>
+ErrorOr<ArrayRef<typename ELFFile<ELFT>::Elf_Word>>
+ELFFile<ELFT>::getSHNDXTable(const Elf_Shdr &Section) const {
+ assert(Section.sh_type == ELF::SHT_SYMTAB_SHNDX);
+ const Elf_Word *ShndxTableBegin =
+ reinterpret_cast<const Elf_Word *>(base() + Section.sh_offset);
+ uintX_t Size = Section.sh_size;
+ if (Size % sizeof(uint32_t))
+ return object_error::parse_failed;
+ uintX_t NumSymbols = Size / sizeof(uint32_t);
+ const Elf_Word *ShndxTableEnd = ShndxTableBegin + NumSymbols;
+ if (reinterpret_cast<const char *>(ShndxTableEnd) > Buf.end())
+ return object_error::parse_failed;
+ ErrorOr<const Elf_Shdr *> SymTableOrErr = getSection(Section.sh_link);
+ if (std::error_code EC = SymTableOrErr.getError())
+ return EC;
+ const Elf_Shdr &SymTable = **SymTableOrErr;
+ if (SymTable.sh_type != ELF::SHT_SYMTAB &&
+ SymTable.sh_type != ELF::SHT_DYNSYM)
+ return object_error::parse_failed;
+ if (NumSymbols != (SymTable.sh_size / sizeof(Elf_Sym)))
+ return object_error::parse_failed;
+ return makeArrayRef(ShndxTableBegin, ShndxTableEnd);
+}
+
+template <class ELFT>
+ErrorOr<StringRef>
+ELFFile<ELFT>::getStringTableForSymtab(const Elf_Shdr &Sec) const {
+ if (Sec.sh_type != ELF::SHT_SYMTAB && Sec.sh_type != ELF::SHT_DYNSYM)
+ return object_error::parse_failed;
+ ErrorOr<const Elf_Shdr *> SectionOrErr = getSection(Sec.sh_link);
+ if (std::error_code EC = SectionOrErr.getError())
+ return EC;
+ return getStringTable(*SectionOrErr);
+}
+
+template <class ELFT>
+ErrorOr<StringRef>
+ELFFile<ELFT>::getSectionName(const Elf_Shdr *Section) const {
+ uint32_t Offset = Section->sh_name;
+ if (Offset == 0)
+ return StringRef();
+ if (Offset >= DotShstrtab.size())
+ return object_error::parse_failed;
+ return StringRef(DotShstrtab.data() + Offset);
+}
+
+/// This function returns the hash value for a symbol in the .dynsym section.
+/// The name of the API remains consistent with the one specified in libelf.
+/// REF : http://www.sco.com/developers/gabi/latest/ch5.dynamic.html#hash
+static inline unsigned elf_hash(StringRef &symbolName) {
+ unsigned h = 0, g;
+ for (unsigned i = 0, j = symbolName.size(); i < j; i++) {
+ h = (h << 4) + symbolName[i];
+ g = h & 0xf0000000L;
+ if (g != 0)
+ h ^= g >> 24;
+ h &= ~g;
+ }
+ return h;
+}
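+
+// Illustrative note, not part of the original header: this is the classic
+// SysV ELF hash, used to index the bucket array of a SHT_HASH section:
+//
+//   StringRef Name = "printf";
+//   unsigned Bucket = elf_hash(Name) % NumBuckets; // NumBuckets from .hash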
+} // end namespace object
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/ELFObjectFile.h b/contrib/llvm/include/llvm/Object/ELFObjectFile.h
new file mode 100644
index 0000000..5823848
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ELFObjectFile.h
@@ -0,0 +1,930 @@
+//===- ELFObjectFile.h - ELF object file implementation ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the ELFObjectFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELFOBJECTFILE_H
+#define LLVM_OBJECT_ELFOBJECTFILE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cctype>
+#include <limits>
+#include <utility>
+
+namespace llvm {
+namespace object {
+
+class elf_symbol_iterator;
+class ELFSymbolRef;
+class ELFRelocationRef;
+
+class ELFObjectFileBase : public ObjectFile {
+ friend class ELFSymbolRef;
+ friend class ELFSectionRef;
+ friend class ELFRelocationRef;
+
+protected:
+ ELFObjectFileBase(unsigned int Type, MemoryBufferRef Source);
+
+ virtual uint64_t getSymbolSize(DataRefImpl Symb) const = 0;
+ virtual uint8_t getSymbolOther(DataRefImpl Symb) const = 0;
+ virtual uint8_t getSymbolELFType(DataRefImpl Symb) const = 0;
+
+ virtual uint32_t getSectionType(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionFlags(DataRefImpl Sec) const = 0;
+
+ virtual ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const = 0;
+public:
+
+ typedef iterator_range<elf_symbol_iterator> elf_symbol_iterator_range;
+ virtual elf_symbol_iterator_range getDynamicSymbolIterators() const = 0;
+
+ elf_symbol_iterator_range symbols() const;
+
+ static inline bool classof(const Binary *v) { return v->isELF(); }
+};
+
+class ELFSectionRef : public SectionRef {
+public:
+ ELFSectionRef(const SectionRef &B) : SectionRef(B) {
+ assert(isa<ELFObjectFileBase>(SectionRef::getObject()));
+ }
+
+ const ELFObjectFileBase *getObject() const {
+ return cast<ELFObjectFileBase>(SectionRef::getObject());
+ }
+
+ uint32_t getType() const {
+ return getObject()->getSectionType(getRawDataRefImpl());
+ }
+
+ uint64_t getFlags() const {
+ return getObject()->getSectionFlags(getRawDataRefImpl());
+ }
+};
+
+class elf_section_iterator : public section_iterator {
+public:
+ elf_section_iterator(const section_iterator &B) : section_iterator(B) {
+ assert(isa<ELFObjectFileBase>(B->getObject()));
+ }
+
+ const ELFSectionRef *operator->() const {
+ return static_cast<const ELFSectionRef *>(section_iterator::operator->());
+ }
+
+ const ELFSectionRef &operator*() const {
+ return static_cast<const ELFSectionRef &>(section_iterator::operator*());
+ }
+};
+
+class ELFSymbolRef : public SymbolRef {
+public:
+ ELFSymbolRef(const SymbolRef &B) : SymbolRef(B) {
+ assert(isa<ELFObjectFileBase>(SymbolRef::getObject()));
+ }
+
+ const ELFObjectFileBase *getObject() const {
+ return cast<ELFObjectFileBase>(BasicSymbolRef::getObject());
+ }
+
+ uint64_t getSize() const {
+ return getObject()->getSymbolSize(getRawDataRefImpl());
+ }
+
+ uint8_t getOther() const {
+ return getObject()->getSymbolOther(getRawDataRefImpl());
+ }
+
+ uint8_t getELFType() const {
+ return getObject()->getSymbolELFType(getRawDataRefImpl());
+ }
+};
+
+class elf_symbol_iterator : public symbol_iterator {
+public:
+ elf_symbol_iterator(const basic_symbol_iterator &B)
+ : symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
+ cast<ELFObjectFileBase>(B->getObject()))) {}
+
+ const ELFSymbolRef *operator->() const {
+ return static_cast<const ELFSymbolRef *>(symbol_iterator::operator->());
+ }
+
+ const ELFSymbolRef &operator*() const {
+ return static_cast<const ELFSymbolRef &>(symbol_iterator::operator*());
+ }
+};
+
+class ELFRelocationRef : public RelocationRef {
+public:
+ ELFRelocationRef(const RelocationRef &B) : RelocationRef(B) {
+ assert(isa<ELFObjectFileBase>(RelocationRef::getObject()));
+ }
+
+ const ELFObjectFileBase *getObject() const {
+ return cast<ELFObjectFileBase>(RelocationRef::getObject());
+ }
+
+ ErrorOr<int64_t> getAddend() const {
+ return getObject()->getRelocationAddend(getRawDataRefImpl());
+ }
+};
+
+class elf_relocation_iterator : public relocation_iterator {
+public:
+ elf_relocation_iterator(const relocation_iterator &B)
+ : relocation_iterator(RelocationRef(
+ B->getRawDataRefImpl(), cast<ELFObjectFileBase>(B->getObject()))) {}
+
+ const ELFRelocationRef *operator->() const {
+ return static_cast<const ELFRelocationRef *>(
+ relocation_iterator::operator->());
+ }
+
+ const ELFRelocationRef &operator*() const {
+ return static_cast<const ELFRelocationRef &>(
+ relocation_iterator::operator*());
+ }
+};
+
+inline ELFObjectFileBase::elf_symbol_iterator_range
+ELFObjectFileBase::symbols() const {
+ return elf_symbol_iterator_range(symbol_begin(), symbol_end());
+}
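+
+// Illustrative sketch, not part of the original header: the ELF*Ref wrappers
+// layer the ELF-only queries on top of the generic Object interfaces, e.g.:
+//
+//   for (const ELFSymbolRef &Sym : Obj.symbols()) // Obj: ELFObjectFileBase
+//     outs() << Sym.getSize() << "\n";            // ELF-specific st_size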
+
+template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
+ uint64_t getSymbolSize(DataRefImpl Sym) const override;
+
+public:
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+ typedef typename ELFFile<ELFT>::uintX_t uintX_t;
+
+ typedef typename ELFFile<ELFT>::Elf_Sym Elf_Sym;
+ typedef typename ELFFile<ELFT>::Elf_Shdr Elf_Shdr;
+ typedef typename ELFFile<ELFT>::Elf_Ehdr Elf_Ehdr;
+ typedef typename ELFFile<ELFT>::Elf_Rel Elf_Rel;
+ typedef typename ELFFile<ELFT>::Elf_Rela Elf_Rela;
+ typedef typename ELFFile<ELFT>::Elf_Dyn Elf_Dyn;
+
+protected:
+ ELFFile<ELFT> EF;
+
+ const Elf_Shdr *DotDynSymSec = nullptr; // Dynamic symbol table section.
+ const Elf_Shdr *DotSymtabSec = nullptr; // Symbol table section.
+ ArrayRef<Elf_Word> ShndxTable;
+
+ void moveSymbolNext(DataRefImpl &Symb) const override;
+ ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
+ ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+ uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+ uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+ uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+ uint8_t getSymbolOther(DataRefImpl Symb) const override;
+ uint8_t getSymbolELFType(DataRefImpl Symb) const override;
+ SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
+ ErrorOr<section_iterator> getSymbolSection(const Elf_Sym *Symb,
+ const Elf_Shdr *SymTab) const;
+ ErrorOr<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+
+ void moveSectionNext(DataRefImpl &Sec) const override;
+ std::error_code getSectionName(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionSize(DataRefImpl Sec) const override;
+ std::error_code getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+ bool isSectionText(DataRefImpl Sec) const override;
+ bool isSectionData(DataRefImpl Sec) const override;
+ bool isSectionBSS(DataRefImpl Sec) const override;
+ bool isSectionVirtual(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+ section_iterator getRelocatedSection(DataRefImpl Sec) const override;
+
+ void moveRelocationNext(DataRefImpl &Rel) const override;
+ uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+ symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+ uint64_t getRelocationType(DataRefImpl Rel) const override;
+ void getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const override;
+
+ uint32_t getSectionType(DataRefImpl Sec) const override;
+ uint64_t getSectionFlags(DataRefImpl Sec) const override;
+ StringRef getRelocationTypeName(uint32_t Type) const;
+
+ /// \brief Get the relocation section that contains \a Rel.
+ const Elf_Shdr *getRelSection(DataRefImpl Rel) const {
+ return *EF.getSection(Rel.d.a);
+ }
+
+ DataRefImpl toDRI(const Elf_Shdr *SymTable, unsigned SymbolNum) const {
+ DataRefImpl DRI;
+ if (!SymTable) {
+ DRI.d.a = 0;
+ DRI.d.b = 0;
+ return DRI;
+ }
+ assert(SymTable->sh_type == ELF::SHT_SYMTAB ||
+ SymTable->sh_type == ELF::SHT_DYNSYM);
+
+ uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
+ unsigned SymTableIndex =
+ (reinterpret_cast<uintptr_t>(SymTable) - SHT) / sizeof(Elf_Shdr);
+
+ DRI.d.a = SymTableIndex;
+ DRI.d.b = SymbolNum;
+ return DRI;
+ }
+
+ const Elf_Shdr *toELFShdrIter(DataRefImpl Sec) const {
+ return reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ }
+
+ DataRefImpl toDRI(const Elf_Shdr *Sec) const {
+ DataRefImpl DRI;
+ DRI.p = reinterpret_cast<uintptr_t>(Sec);
+ return DRI;
+ }
+
+ DataRefImpl toDRI(const Elf_Dyn *Dyn) const {
+ DataRefImpl DRI;
+ DRI.p = reinterpret_cast<uintptr_t>(Dyn);
+ return DRI;
+ }
+
+ bool isExportedToOtherDSO(const Elf_Sym *ESym) const {
+ unsigned char Binding = ESym->getBinding();
+ unsigned char Visibility = ESym->getVisibility();
+
+ // A symbol is exported if its binding is either GLOBAL or WEAK, and its
+ // visibility is either DEFAULT or PROTECTED. All other symbols are not
+ // exported.
+ if ((Binding == ELF::STB_GLOBAL || Binding == ELF::STB_WEAK) &&
+ (Visibility == ELF::STV_DEFAULT || Visibility == ELF::STV_PROTECTED))
+ return true;
+
+ return false;
+ }
+
+  // This flag is used for classof, to distinguish ELFObjectFile from
+  // its subclass. If more subclasses are created, this flag will
+  // have to become an enum.
+ bool isDyldELFObject;
+
+public:
+ ELFObjectFile(MemoryBufferRef Object, std::error_code &EC);
+
+ const Elf_Rel *getRel(DataRefImpl Rel) const;
+ const Elf_Rela *getRela(DataRefImpl Rela) const;
+
+ const Elf_Sym *getSymbol(DataRefImpl Sym) const {
+ return EF.template getEntry<Elf_Sym>(Sym.d.a, Sym.d.b);
+ }
+
+ const Elf_Shdr *getSection(DataRefImpl Sec) const {
+ return reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ }
+
+ basic_symbol_iterator symbol_begin_impl() const override;
+ basic_symbol_iterator symbol_end_impl() const override;
+
+ elf_symbol_iterator dynamic_symbol_begin() const;
+ elf_symbol_iterator dynamic_symbol_end() const;
+
+ section_iterator section_begin() const override;
+ section_iterator section_end() const override;
+
+ ErrorOr<int64_t> getRelocationAddend(DataRefImpl Rel) const override;
+
+ uint8_t getBytesInAddress() const override;
+ StringRef getFileFormatName() const override;
+ unsigned getArch() const override;
+
+ std::error_code getPlatformFlags(unsigned &Result) const override {
+ Result = EF.getHeader()->e_flags;
+ return std::error_code();
+ }
+
+ const ELFFile<ELFT> *getELFFile() const { return &EF; }
+
+ bool isDyldType() const { return isDyldELFObject; }
+ static inline bool classof(const Binary *v) {
+ return v->getType() == getELFType(ELFT::TargetEndianness == support::little,
+ ELFT::Is64Bits);
+ }
+
+ elf_symbol_iterator_range getDynamicSymbolIterators() const override;
+
+ bool isRelocatableObject() const override;
+};
+
+typedef ELFObjectFile<ELFType<support::little, false>> ELF32LEObjectFile;
+typedef ELFObjectFile<ELFType<support::little, true>> ELF64LEObjectFile;
+typedef ELFObjectFile<ELFType<support::big, false>> ELF32BEObjectFile;
+typedef ELFObjectFile<ELFType<support::big, true>> ELF64BEObjectFile;
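+
+// These concrete instantiations are what callers typically dispatch over.
+// A sketch of the usual pattern (hypothetical caller; `Obj` is a Binary *):
+//
+//   if (auto *O = dyn_cast<ELF64LEObjectFile>(Obj)) {
+//     const ELFFile<ELF64LE> *EF = O->getELFFile();
+//     // ... 64-bit little-endian specific processing ...
+//   }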
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveSymbolNext(DataRefImpl &Sym) const {
+ ++Sym.d.b;
+}
+
+template <class ELFT>
+ErrorOr<StringRef> ELFObjectFile<ELFT>::getSymbolName(DataRefImpl Sym) const {
+ const Elf_Sym *ESym = getSymbol(Sym);
+ const Elf_Shdr *SymTableSec = *EF.getSection(Sym.d.a);
+ const Elf_Shdr *StringTableSec = *EF.getSection(SymTableSec->sh_link);
+ StringRef SymTable = *EF.getStringTable(StringTableSec);
+ return ESym->getName(SymTable);
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionFlags(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_flags;
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSectionType(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_type;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSymbolValueImpl(DataRefImpl Symb) const {
+ const Elf_Sym *ESym = getSymbol(Symb);
+ uint64_t Ret = ESym->st_value;
+ if (ESym->st_shndx == ELF::SHN_ABS)
+ return Ret;
+
+ const Elf_Ehdr *Header = EF.getHeader();
+ // Clear the ARM/Thumb or microMIPS indicator flag.
+ if ((Header->e_machine == ELF::EM_ARM || Header->e_machine == ELF::EM_MIPS) &&
+ ESym->getType() == ELF::STT_FUNC)
+ Ret &= ~1;
+
+ return Ret;
+}
+
+template <class ELFT>
+ErrorOr<uint64_t>
+ELFObjectFile<ELFT>::getSymbolAddress(DataRefImpl Symb) const {
+ uint64_t Result = getSymbolValue(Symb);
+ const Elf_Sym *ESym = getSymbol(Symb);
+ switch (ESym->st_shndx) {
+ case ELF::SHN_COMMON:
+ case ELF::SHN_UNDEF:
+ case ELF::SHN_ABS:
+ return Result;
+ }
+
+ const Elf_Ehdr *Header = EF.getHeader();
+ const Elf_Shdr *SymTab = *EF.getSection(Symb.d.a);
+
+ if (Header->e_type == ELF::ET_REL) {
+ ErrorOr<const Elf_Shdr *> SectionOrErr =
+ EF.getSection(ESym, SymTab, ShndxTable);
+ if (std::error_code EC = SectionOrErr.getError())
+ return EC;
+ const Elf_Shdr *Section = *SectionOrErr;
+ if (Section)
+ Result += Section->sh_addr;
+ }
+
+ return Result;
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSymbolAlignment(DataRefImpl Symb) const {
+ const Elf_Sym *Sym = getSymbol(Symb);
+ if (Sym->st_shndx == ELF::SHN_COMMON)
+ return Sym->st_value;
+ return 0;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSymbolSize(DataRefImpl Sym) const {
+ return getSymbol(Sym)->st_size;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getCommonSymbolSizeImpl(DataRefImpl Symb) const {
+ return getSymbol(Symb)->st_size;
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getSymbolOther(DataRefImpl Symb) const {
+ return getSymbol(Symb)->st_other;
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getSymbolELFType(DataRefImpl Symb) const {
+ return getSymbol(Symb)->getType();
+}
+
+template <class ELFT>
+SymbolRef::Type ELFObjectFile<ELFT>::getSymbolType(DataRefImpl Symb) const {
+ const Elf_Sym *ESym = getSymbol(Symb);
+
+ switch (ESym->getType()) {
+ case ELF::STT_NOTYPE:
+ return SymbolRef::ST_Unknown;
+ case ELF::STT_SECTION:
+ return SymbolRef::ST_Debug;
+ case ELF::STT_FILE:
+ return SymbolRef::ST_File;
+ case ELF::STT_FUNC:
+ return SymbolRef::ST_Function;
+ case ELF::STT_OBJECT:
+ case ELF::STT_COMMON:
+ case ELF::STT_TLS:
+ return SymbolRef::ST_Data;
+ default:
+ return SymbolRef::ST_Other;
+ }
+}
+
+template <class ELFT>
+uint32_t ELFObjectFile<ELFT>::getSymbolFlags(DataRefImpl Sym) const {
+ const Elf_Sym *ESym = getSymbol(Sym);
+
+ uint32_t Result = SymbolRef::SF_None;
+
+ if (ESym->getBinding() != ELF::STB_LOCAL)
+ Result |= SymbolRef::SF_Global;
+
+ if (ESym->getBinding() == ELF::STB_WEAK)
+ Result |= SymbolRef::SF_Weak;
+
+ if (ESym->st_shndx == ELF::SHN_ABS)
+ Result |= SymbolRef::SF_Absolute;
+
+ if (ESym->getType() == ELF::STT_FILE || ESym->getType() == ELF::STT_SECTION ||
+ ESym == EF.symbol_begin(DotSymtabSec) ||
+ ESym == EF.symbol_begin(DotDynSymSec))
+ Result |= SymbolRef::SF_FormatSpecific;
+
+ if (EF.getHeader()->e_machine == ELF::EM_ARM) {
+ if (ErrorOr<StringRef> NameOrErr = getSymbolName(Sym)) {
+ StringRef Name = *NameOrErr;
+ if (Name.startswith("$d") || Name.startswith("$t") ||
+ Name.startswith("$a"))
+ Result |= SymbolRef::SF_FormatSpecific;
+ }
+ }
+
+ if (ESym->st_shndx == ELF::SHN_UNDEF)
+ Result |= SymbolRef::SF_Undefined;
+
+ if (ESym->getType() == ELF::STT_COMMON || ESym->st_shndx == ELF::SHN_COMMON)
+ Result |= SymbolRef::SF_Common;
+
+ if (isExportedToOtherDSO(ESym))
+ Result |= SymbolRef::SF_Exported;
+
+ if (ESym->getVisibility() == ELF::STV_HIDDEN)
+ Result |= SymbolRef::SF_Hidden;
+
+ return Result;
+}
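+
+// Usage sketch (hypothetical caller): the result is a plain bitmask, so
+// individual properties combine with bitwise tests. `Sym` is a SymbolRef.
+//
+//   uint32_t Flags = Sym.getFlags();
+//   bool IsWeakUndef = (Flags & SymbolRef::SF_Weak) &&
+//                      (Flags & SymbolRef::SF_Undefined);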
+
+template <class ELFT>
+ErrorOr<section_iterator>
+ELFObjectFile<ELFT>::getSymbolSection(const Elf_Sym *ESym,
+ const Elf_Shdr *SymTab) const {
+ ErrorOr<const Elf_Shdr *> ESecOrErr = EF.getSection(ESym, SymTab, ShndxTable);
+ if (std::error_code EC = ESecOrErr.getError())
+ return EC;
+
+ const Elf_Shdr *ESec = *ESecOrErr;
+ if (!ESec)
+ return section_end();
+
+ DataRefImpl Sec;
+ Sec.p = reinterpret_cast<intptr_t>(ESec);
+ return section_iterator(SectionRef(Sec, this));
+}
+
+template <class ELFT>
+ErrorOr<section_iterator>
+ELFObjectFile<ELFT>::getSymbolSection(DataRefImpl Symb) const {
+ const Elf_Sym *Sym = getSymbol(Symb);
+ const Elf_Shdr *SymTab = *EF.getSection(Symb.d.a);
+ return getSymbolSection(Sym, SymTab);
+}
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveSectionNext(DataRefImpl &Sec) const {
+ const Elf_Shdr *ESec = getSection(Sec);
+ Sec = toDRI(++ESec);
+}
+
+template <class ELFT>
+std::error_code ELFObjectFile<ELFT>::getSectionName(DataRefImpl Sec,
+ StringRef &Result) const {
+ ErrorOr<StringRef> Name = EF.getSectionName(&*getSection(Sec));
+ if (!Name)
+ return Name.getError();
+ Result = *Name;
+ return std::error_code();
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_addr;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_size;
+}
+
+template <class ELFT>
+std::error_code
+ELFObjectFile<ELFT>::getSectionContents(DataRefImpl Sec,
+ StringRef &Result) const {
+ const Elf_Shdr *EShdr = getSection(Sec);
+ Result = StringRef((const char *)base() + EShdr->sh_offset, EShdr->sh_size);
+ return std::error_code();
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getSectionAlignment(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_addralign;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionText(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_flags & ELF::SHF_EXECINSTR;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionData(DataRefImpl Sec) const {
+ const Elf_Shdr *EShdr = getSection(Sec);
+ return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
+ EShdr->sh_type == ELF::SHT_PROGBITS;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionBSS(DataRefImpl Sec) const {
+ const Elf_Shdr *EShdr = getSection(Sec);
+ return EShdr->sh_flags & (ELF::SHF_ALLOC | ELF::SHF_WRITE) &&
+ EShdr->sh_type == ELF::SHT_NOBITS;
+}
+
+template <class ELFT>
+bool ELFObjectFile<ELFT>::isSectionVirtual(DataRefImpl Sec) const {
+ return getSection(Sec)->sh_type == ELF::SHT_NOBITS;
+}
+
+template <class ELFT>
+relocation_iterator
+ELFObjectFile<ELFT>::section_rel_begin(DataRefImpl Sec) const {
+ DataRefImpl RelData;
+ uintptr_t SHT = reinterpret_cast<uintptr_t>(EF.section_begin());
+ RelData.d.a = (Sec.p - SHT) / EF.getHeader()->e_shentsize;
+ RelData.d.b = 0;
+
+ const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
+ return relocation_iterator(RelocationRef(RelData, this));
+
+ const Elf_Shdr *RelSec = getRelSection(RelData);
+ ErrorOr<const Elf_Shdr *> SymSecOrErr = EF.getSection(RelSec->sh_link);
+ if (std::error_code EC = SymSecOrErr.getError())
+ report_fatal_error(EC.message());
+ const Elf_Shdr *SymSec = *SymSecOrErr;
+ uint32_t SymSecType = SymSec->sh_type;
+ if (SymSecType != ELF::SHT_SYMTAB && SymSecType != ELF::SHT_DYNSYM)
+ report_fatal_error("Invalid symbol table section type!");
+ if (SymSecType == ELF::SHT_DYNSYM)
+ RelData.d.b = 1;
+
+ return relocation_iterator(RelocationRef(RelData, this));
+}
+
+template <class ELFT>
+relocation_iterator
+ELFObjectFile<ELFT>::section_rel_end(DataRefImpl Sec) const {
+ const Elf_Shdr *S = reinterpret_cast<const Elf_Shdr *>(Sec.p);
+ relocation_iterator Begin = section_rel_begin(Sec);
+ if (S->sh_type != ELF::SHT_RELA && S->sh_type != ELF::SHT_REL)
+ return Begin;
+ DataRefImpl RelData = Begin->getRawDataRefImpl();
+ RelData.d.b += (S->sh_size / S->sh_entsize) << 1;
+ return relocation_iterator(RelocationRef(RelData, this));
+}
+
+template <class ELFT>
+section_iterator
+ELFObjectFile<ELFT>::getRelocatedSection(DataRefImpl Sec) const {
+ if (EF.getHeader()->e_type != ELF::ET_REL)
+ return section_end();
+
+ const Elf_Shdr *EShdr = getSection(Sec);
+ uintX_t Type = EShdr->sh_type;
+ if (Type != ELF::SHT_REL && Type != ELF::SHT_RELA)
+ return section_end();
+
+ ErrorOr<const Elf_Shdr *> R = EF.getSection(EShdr->sh_info);
+ if (std::error_code EC = R.getError())
+ report_fatal_error(EC.message());
+ return section_iterator(SectionRef(toDRI(*R), this));
+}
+
+// Relocations
+template <class ELFT>
+void ELFObjectFile<ELFT>::moveRelocationNext(DataRefImpl &Rel) const {
+ Rel.d.b += 2;
+}
+
+template <class ELFT>
+symbol_iterator
+ELFObjectFile<ELFT>::getRelocationSymbol(DataRefImpl Rel) const {
+ uint32_t symbolIdx;
+ const Elf_Shdr *sec = getRelSection(Rel);
+ if (sec->sh_type == ELF::SHT_REL)
+ symbolIdx = getRel(Rel)->getSymbol(EF.isMips64EL());
+ else
+ symbolIdx = getRela(Rel)->getSymbol(EF.isMips64EL());
+ if (!symbolIdx)
+ return symbol_end();
+
+ bool IsDyn = Rel.d.b & 1;
+ DataRefImpl SymbolData;
+ if (IsDyn)
+ SymbolData = toDRI(DotDynSymSec, symbolIdx);
+ else
+ SymbolData = toDRI(DotSymtabSec, symbolIdx);
+ return symbol_iterator(SymbolRef(SymbolData, this));
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getRelocationOffset(DataRefImpl Rel) const {
+ assert(EF.getHeader()->e_type == ELF::ET_REL &&
+ "Only relocatable object files have relocation offsets");
+ const Elf_Shdr *sec = getRelSection(Rel);
+ if (sec->sh_type == ELF::SHT_REL)
+ return getRel(Rel)->r_offset;
+
+ return getRela(Rel)->r_offset;
+}
+
+template <class ELFT>
+uint64_t ELFObjectFile<ELFT>::getRelocationType(DataRefImpl Rel) const {
+ const Elf_Shdr *sec = getRelSection(Rel);
+ if (sec->sh_type == ELF::SHT_REL)
+ return getRel(Rel)->getType(EF.isMips64EL());
+ else
+ return getRela(Rel)->getType(EF.isMips64EL());
+}
+
+template <class ELFT>
+StringRef ELFObjectFile<ELFT>::getRelocationTypeName(uint32_t Type) const {
+ return getELFRelocationTypeName(EF.getHeader()->e_machine, Type);
+}
+
+template <class ELFT>
+void ELFObjectFile<ELFT>::getRelocationTypeName(
+ DataRefImpl Rel, SmallVectorImpl<char> &Result) const {
+ uint32_t type = getRelocationType(Rel);
+ EF.getRelocationTypeName(type, Result);
+}
+
+template <class ELFT>
+ErrorOr<int64_t>
+ELFObjectFile<ELFT>::getRelocationAddend(DataRefImpl Rel) const {
+ if (getRelSection(Rel)->sh_type != ELF::SHT_RELA)
+ return object_error::parse_failed;
+ return (int64_t)getRela(Rel)->r_addend;
+}
+
+template <class ELFT>
+const typename ELFObjectFile<ELFT>::Elf_Rel *
+ELFObjectFile<ELFT>::getRel(DataRefImpl Rel) const {
+ assert(getRelSection(Rel)->sh_type == ELF::SHT_REL);
+ return EF.template getEntry<Elf_Rel>(Rel.d.a, Rel.d.b >> 1);
+}
+
+template <class ELFT>
+const typename ELFObjectFile<ELFT>::Elf_Rela *
+ELFObjectFile<ELFT>::getRela(DataRefImpl Rela) const {
+ assert(getRelSection(Rela)->sh_type == ELF::SHT_RELA);
+ return EF.template getEntry<Elf_Rela>(Rela.d.a, Rela.d.b >> 1);
+}
+
+template <class ELFT>
+ELFObjectFile<ELFT>::ELFObjectFile(MemoryBufferRef Object, std::error_code &EC)
+ : ELFObjectFileBase(
+ getELFType(ELFT::TargetEndianness == support::little, ELFT::Is64Bits),
+ Object),
+ EF(Data.getBuffer(), EC) {
+ if (EC)
+ return;
+ for (const Elf_Shdr &Sec : EF.sections()) {
+ switch (Sec.sh_type) {
+ case ELF::SHT_DYNSYM: {
+ if (DotDynSymSec) {
+ // More than one .dynsym!
+ EC = object_error::parse_failed;
+ return;
+ }
+ DotDynSymSec = &Sec;
+ break;
+ }
+ case ELF::SHT_SYMTAB: {
+ if (DotSymtabSec) {
+        // More than one .symtab!
+ EC = object_error::parse_failed;
+ return;
+ }
+ DotSymtabSec = &Sec;
+ break;
+ }
+ case ELF::SHT_SYMTAB_SHNDX: {
+ ErrorOr<ArrayRef<Elf_Word>> TableOrErr = EF.getSHNDXTable(Sec);
+ if ((EC = TableOrErr.getError()))
+ return;
+ ShndxTable = *TableOrErr;
+ break;
+ }
+ }
+ }
+}
+
+template <class ELFT>
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_begin_impl() const {
+ DataRefImpl Sym = toDRI(DotSymtabSec, 0);
+ return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+basic_symbol_iterator ELFObjectFile<ELFT>::symbol_end_impl() const {
+ const Elf_Shdr *SymTab = DotSymtabSec;
+ if (!SymTab)
+ return symbol_begin_impl();
+ DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
+ return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_begin() const {
+ DataRefImpl Sym = toDRI(DotDynSymSec, 0);
+ return symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+elf_symbol_iterator ELFObjectFile<ELFT>::dynamic_symbol_end() const {
+  const Elf_Shdr *SymTab = DotDynSymSec;
+  // Guard against objects without a dynamic symbol table, mirroring
+  // symbol_end_impl() above.
+  if (!SymTab)
+    return dynamic_symbol_begin();
+  DataRefImpl Sym = toDRI(SymTab, SymTab->sh_size / sizeof(Elf_Sym));
+ return basic_symbol_iterator(SymbolRef(Sym, this));
+}
+
+template <class ELFT>
+section_iterator ELFObjectFile<ELFT>::section_begin() const {
+ return section_iterator(SectionRef(toDRI(EF.section_begin()), this));
+}
+
+template <class ELFT>
+section_iterator ELFObjectFile<ELFT>::section_end() const {
+ return section_iterator(SectionRef(toDRI(EF.section_end()), this));
+}
+
+template <class ELFT>
+uint8_t ELFObjectFile<ELFT>::getBytesInAddress() const {
+ return ELFT::Is64Bits ? 8 : 4;
+}
+
+template <class ELFT>
+StringRef ELFObjectFile<ELFT>::getFileFormatName() const {
+ bool IsLittleEndian = ELFT::TargetEndianness == support::little;
+ switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ switch (EF.getHeader()->e_machine) {
+ case ELF::EM_386:
+ return "ELF32-i386";
+ case ELF::EM_IAMCU:
+ return "ELF32-iamcu";
+ case ELF::EM_X86_64:
+ return "ELF32-x86-64";
+ case ELF::EM_ARM:
+ return (IsLittleEndian ? "ELF32-arm-little" : "ELF32-arm-big");
+ case ELF::EM_AVR:
+ return "ELF32-avr";
+ case ELF::EM_HEXAGON:
+ return "ELF32-hexagon";
+ case ELF::EM_MIPS:
+ return "ELF32-mips";
+ case ELF::EM_PPC:
+ return "ELF32-ppc";
+ case ELF::EM_SPARC:
+ case ELF::EM_SPARC32PLUS:
+ return "ELF32-sparc";
+ default:
+ return "ELF32-unknown";
+ }
+ case ELF::ELFCLASS64:
+ switch (EF.getHeader()->e_machine) {
+ case ELF::EM_386:
+ return "ELF64-i386";
+ case ELF::EM_X86_64:
+ return "ELF64-x86-64";
+ case ELF::EM_AARCH64:
+ return (IsLittleEndian ? "ELF64-aarch64-little" : "ELF64-aarch64-big");
+ case ELF::EM_PPC64:
+ return "ELF64-ppc64";
+ case ELF::EM_S390:
+ return "ELF64-s390";
+ case ELF::EM_SPARCV9:
+ return "ELF64-sparc";
+ case ELF::EM_MIPS:
+ return "ELF64-mips";
+ default:
+ return "ELF64-unknown";
+ }
+ default:
+ // FIXME: Proper error handling.
+ report_fatal_error("Invalid ELFCLASS!");
+ }
+}
+
+template <class ELFT>
+unsigned ELFObjectFile<ELFT>::getArch() const {
+ bool IsLittleEndian = ELFT::TargetEndianness == support::little;
+ switch (EF.getHeader()->e_machine) {
+ case ELF::EM_386:
+ case ELF::EM_IAMCU:
+ return Triple::x86;
+ case ELF::EM_X86_64:
+ return Triple::x86_64;
+ case ELF::EM_AARCH64:
+ return Triple::aarch64;
+ case ELF::EM_ARM:
+ return Triple::arm;
+ case ELF::EM_AVR:
+ return Triple::avr;
+ case ELF::EM_HEXAGON:
+ return Triple::hexagon;
+ case ELF::EM_MIPS:
+ switch (EF.getHeader()->e_ident[ELF::EI_CLASS]) {
+ case ELF::ELFCLASS32:
+ return IsLittleEndian ? Triple::mipsel : Triple::mips;
+ case ELF::ELFCLASS64:
+ return IsLittleEndian ? Triple::mips64el : Triple::mips64;
+ default:
+ report_fatal_error("Invalid ELFCLASS!");
+ }
+ case ELF::EM_PPC:
+ return Triple::ppc;
+ case ELF::EM_PPC64:
+ return IsLittleEndian ? Triple::ppc64le : Triple::ppc64;
+ case ELF::EM_S390:
+ return Triple::systemz;
+
+ case ELF::EM_SPARC:
+ case ELF::EM_SPARC32PLUS:
+ return IsLittleEndian ? Triple::sparcel : Triple::sparc;
+ case ELF::EM_SPARCV9:
+ return Triple::sparcv9;
+
+ default:
+ return Triple::UnknownArch;
+ }
+}
+
+template <class ELFT>
+ELFObjectFileBase::elf_symbol_iterator_range
+ELFObjectFile<ELFT>::getDynamicSymbolIterators() const {
+ return make_range(dynamic_symbol_begin(), dynamic_symbol_end());
+}
+
+template <class ELFT> bool ELFObjectFile<ELFT>::isRelocatableObject() const {
+ return EF.getHeader()->e_type == ELF::ET_REL;
+}
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/ELFTypes.h b/contrib/llvm/include/llvm/Object/ELFTypes.h
new file mode 100644
index 0000000..07b312a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ELFTypes.h
@@ -0,0 +1,567 @@
+//===- ELFTypes.h - Endian specific types for ELF ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELFTYPES_H
+#define LLVM_OBJECT_ELFTYPES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Object/Error.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorOr.h"
+
+namespace llvm {
+namespace object {
+
+using support::endianness;
+
+template <endianness target_endianness, bool is64Bits> struct ELFType {
+ static const endianness TargetEndianness = target_endianness;
+ static const bool Is64Bits = is64Bits;
+};
+
+typedef ELFType<support::little, false> ELF32LE;
+typedef ELFType<support::big, false> ELF32BE;
+typedef ELFType<support::little, true> ELF64LE;
+typedef ELFType<support::big, true> ELF64BE;
+
+// Use an alignment of 2 for the typedefs since that is the worst case for
+// ELF files in archives.
+
+// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
+template <endianness target_endianness> struct ELFDataTypeTypedefHelperCommon {
+ typedef support::detail::packed_endian_specific_integral<
+ uint16_t, target_endianness, 2> Elf_Half;
+ typedef support::detail::packed_endian_specific_integral<
+ uint32_t, target_endianness, 2> Elf_Word;
+ typedef support::detail::packed_endian_specific_integral<
+ int32_t, target_endianness, 2> Elf_Sword;
+ typedef support::detail::packed_endian_specific_integral<
+ uint64_t, target_endianness, 2> Elf_Xword;
+ typedef support::detail::packed_endian_specific_integral<
+ int64_t, target_endianness, 2> Elf_Sxword;
+};
+
+template <class ELFT> struct ELFDataTypeTypedefHelper;
+
+/// ELF 32bit types.
+template <endianness TargetEndianness>
+struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, false>>
+ : ELFDataTypeTypedefHelperCommon<TargetEndianness> {
+ typedef uint32_t value_type;
+ typedef support::detail::packed_endian_specific_integral<
+ value_type, TargetEndianness, 2> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral<
+ value_type, TargetEndianness, 2> Elf_Off;
+};
+
+/// ELF 64bit types.
+template <endianness TargetEndianness>
+struct ELFDataTypeTypedefHelper<ELFType<TargetEndianness, true>>
+ : ELFDataTypeTypedefHelperCommon<TargetEndianness> {
+ typedef uint64_t value_type;
+ typedef support::detail::packed_endian_specific_integral<
+ value_type, TargetEndianness, 2> Elf_Addr;
+ typedef support::detail::packed_endian_specific_integral<
+ value_type, TargetEndianness, 2> Elf_Off;
+};
+
+// I really don't like doing this, but the alternative is copypasta.
+#define LLVM_ELF_IMPORT_TYPES(E, W) \
+ typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Addr Elf_Addr; \
+ typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Off Elf_Off; \
+ typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Half Elf_Half; \
+ typedef typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Word Elf_Word; \
+ typedef \
+ typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sword Elf_Sword; \
+ typedef \
+ typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Xword Elf_Xword; \
+ typedef \
+ typename ELFDataTypeTypedefHelper<ELFType<E, W>>::Elf_Sxword Elf_Sxword;
+
+#define LLVM_ELF_IMPORT_TYPES_ELFT(ELFT) \
+ LLVM_ELF_IMPORT_TYPES(ELFT::TargetEndianness, ELFT::Is64Bits)
+
+// Section header.
+template <class ELFT> struct Elf_Shdr_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Shdr_Base<ELFType<TargetEndianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Word sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Word sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Word sh_addralign; // Section address alignment
+ Elf_Word sh_entsize; // Size of records contained within the section
+};
+
+template <endianness TargetEndianness>
+struct Elf_Shdr_Base<ELFType<TargetEndianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Word sh_name; // Section name (index into string table)
+ Elf_Word sh_type; // Section type (SHT_*)
+ Elf_Xword sh_flags; // Section flags (SHF_*)
+ Elf_Addr sh_addr; // Address where section is to be loaded
+ Elf_Off sh_offset; // File offset of section data, in bytes
+ Elf_Xword sh_size; // Size of section, in bytes
+ Elf_Word sh_link; // Section type-specific header table index link
+ Elf_Word sh_info; // Section type-specific extra information
+ Elf_Xword sh_addralign; // Section address alignment
+ Elf_Xword sh_entsize; // Size of records contained within the section
+};
+
+template <class ELFT>
+struct Elf_Shdr_Impl : Elf_Shdr_Base<ELFT> {
+ using Elf_Shdr_Base<ELFT>::sh_entsize;
+ using Elf_Shdr_Base<ELFT>::sh_size;
+
+ /// @brief Get the number of entities this section contains if it has any.
+ unsigned getEntityCount() const {
+ if (sh_entsize == 0)
+ return 0;
+ return sh_size / sh_entsize;
+ }
+};
+
+template <class ELFT> struct Elf_Sym_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Sym_Base<ELFType<TargetEndianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Word st_name; // Symbol name (index into string table)
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Word st_size; // Size of the symbol
+ unsigned char st_info; // Symbol's type and binding attributes
+  unsigned char st_other; // Visibility (STV_*) in low 2 bits; rest reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+};
+
+template <endianness TargetEndianness>
+struct Elf_Sym_Base<ELFType<TargetEndianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+  unsigned char st_other; // Visibility (STV_*) in low 2 bits; rest reserved
+ Elf_Half st_shndx; // Which section (header table index) it's defined in
+ Elf_Addr st_value; // Value or address associated with the symbol
+ Elf_Xword st_size; // Size of the symbol
+};
+
+template <class ELFT>
+struct Elf_Sym_Impl : Elf_Sym_Base<ELFT> {
+ using Elf_Sym_Base<ELFT>::st_info;
+ using Elf_Sym_Base<ELFT>::st_shndx;
+ using Elf_Sym_Base<ELFT>::st_other;
+ using Elf_Sym_Base<ELFT>::st_value;
+
+ // These accessors and mutators correspond to the ELF32_ST_BIND,
+ // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ uint64_t getValue() const { return st_value; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+
+ /// Access to the STV_xxx flag stored in the first two bits of st_other.
+ /// STV_DEFAULT: 0
+ /// STV_INTERNAL: 1
+ /// STV_HIDDEN: 2
+ /// STV_PROTECTED: 3
+ unsigned char getVisibility() const { return st_other & 0x3; }
+ void setVisibility(unsigned char v) {
+ assert(v < 4 && "Invalid value for visibility");
+ st_other = (st_other & ~0x3) | v;
+ }
+
+ bool isAbsolute() const { return st_shndx == ELF::SHN_ABS; }
+ bool isCommon() const {
+ return getType() == ELF::STT_COMMON || st_shndx == ELF::SHN_COMMON;
+ }
+ bool isDefined() const { return !isUndefined(); }
+ bool isProcessorSpecific() const {
+ return st_shndx >= ELF::SHN_LOPROC && st_shndx <= ELF::SHN_HIPROC;
+ }
+ bool isOSSpecific() const {
+ return st_shndx >= ELF::SHN_LOOS && st_shndx <= ELF::SHN_HIOS;
+ }
+ bool isReserved() const {
+ // ELF::SHN_HIRESERVE is 0xffff so st_shndx <= ELF::SHN_HIRESERVE is always
+ // true and some compilers warn about it.
+ return st_shndx >= ELF::SHN_LORESERVE;
+ }
+ bool isUndefined() const { return st_shndx == ELF::SHN_UNDEF; }
+ bool isExternal() const {
+ return getBinding() != ELF::STB_LOCAL;
+ }
+
+ ErrorOr<StringRef> getName(StringRef StrTab) const;
+};
+
+template <class ELFT>
+ErrorOr<StringRef> Elf_Sym_Impl<ELFT>::getName(StringRef StrTab) const {
+ uint32_t Offset = this->st_name;
+ if (Offset >= StrTab.size())
+ return object_error::parse_failed;
+ return StringRef(StrTab.data() + Offset);
+}
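+
+// A short worked example of the st_info packing (a sketch; `Sym` is any
+// Elf_Sym_Impl instance):
+//
+//   Sym.setBindingAndType(ELF::STB_GLOBAL, ELF::STT_FUNC);
+//   // st_info is now (STB_GLOBAL << 4) | STT_FUNC == (1 << 4) | 2 == 0x12,
+//   // so getBinding() yields STB_GLOBAL and getType() yields STT_FUNC.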
+
+/// Elf_Versym: This is the structure of entries in the SHT_GNU_versym section
+/// (.gnu.version). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Versym_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Half vs_index; // Version index with flags (e.g. VERSYM_HIDDEN)
+};
+
+template <class ELFT> struct Elf_Verdaux_Impl;
+
+/// Elf_Verdef: This is the structure of entries in the SHT_GNU_verdef section
+/// (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verdef_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ typedef Elf_Verdaux_Impl<ELFT> Elf_Verdaux;
+ Elf_Half vd_version; // Version of this structure (e.g. VER_DEF_CURRENT)
+ Elf_Half vd_flags; // Bitwise flags (VER_DEF_*)
+ Elf_Half vd_ndx; // Version index, used in .gnu.version entries
+ Elf_Half vd_cnt; // Number of Verdaux entries
+ Elf_Word vd_hash; // Hash of name
+ Elf_Word vd_aux; // Offset to the first Verdaux entry (in bytes)
+ Elf_Word vd_next; // Offset to the next Verdef entry (in bytes)
+
+ /// Get the first Verdaux entry for this Verdef.
+ const Elf_Verdaux *getAux() const {
+ return reinterpret_cast<const Elf_Verdaux *>((const char *)this + vd_aux);
+ }
+};
+
+/// Elf_Verdaux: This is the structure of auxiliary data in the SHT_GNU_verdef
+/// section (.gnu.version_d). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verdaux_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Word vda_name; // Version name (offset in string table)
+ Elf_Word vda_next; // Offset to next Verdaux entry (in bytes)
+};
+
+/// Elf_Verneed: This is the structure of entries in the SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Verneed_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Half vn_version; // Version of this structure (e.g. VER_NEED_CURRENT)
+ Elf_Half vn_cnt; // Number of associated Vernaux entries
+ Elf_Word vn_file; // Library name (string table offset)
+ Elf_Word vn_aux; // Offset to first Vernaux entry (in bytes)
+ Elf_Word vn_next; // Offset to next Verneed entry (in bytes)
+};
+
+/// Elf_Vernaux: This is the structure of auxiliary data in SHT_GNU_verneed
+/// section (.gnu.version_r). This structure is identical for ELF32 and ELF64.
+template <class ELFT>
+struct Elf_Vernaux_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Word vna_hash; // Hash of dependency name
+ Elf_Half vna_flags; // Bitwise Flags (VER_FLAG_*)
+ Elf_Half vna_other; // Version index, used in .gnu.version entries
+ Elf_Word vna_name; // Dependency name
+ Elf_Word vna_next; // Offset to next Vernaux entry (in bytes)
+};
+
+/// Elf_Dyn_Base: This structure matches the form of entries in the dynamic
+/// table section (.dynamic).
+template <class ELFT> struct Elf_Dyn_Base;
+
+template <endianness TargetEndianness>
+struct Elf_Dyn_Base<ELFType<TargetEndianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Sword d_tag;
+ union {
+ Elf_Word d_val;
+ Elf_Addr d_ptr;
+ } d_un;
+};
+
+template <endianness TargetEndianness>
+struct Elf_Dyn_Base<ELFType<TargetEndianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Sxword d_tag;
+ union {
+ Elf_Xword d_val;
+ Elf_Addr d_ptr;
+ } d_un;
+};
+
+/// Elf_Dyn_Impl: This inherits from Elf_Dyn_Base, adding getters.
+template <class ELFT>
+struct Elf_Dyn_Impl : Elf_Dyn_Base<ELFT> {
+ using Elf_Dyn_Base<ELFT>::d_tag;
+ using Elf_Dyn_Base<ELFT>::d_un;
+ typedef typename std::conditional<ELFT::Is64Bits,
+ int64_t, int32_t>::type intX_t;
+ typedef typename std::conditional<ELFT::Is64Bits,
+ uint64_t, uint32_t>::type uintX_t;
+ intX_t getTag() const { return d_tag; }
+ uintX_t getVal() const { return d_un.d_val; }
+ uintX_t getPtr() const { return d_un.d_ptr; }
+};
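+
+// Usage sketch (hypothetical caller; `Dyn` points at a .dynamic entry):
+//
+//   if (Dyn->getTag() == ELF::DT_NEEDED) {
+//     // getVal() is an offset into the dynamic string table naming the
+//     // required shared library.
+//     auto NameOff = Dyn->getVal();
+//     // ... resolve NameOff against the string table ...
+//   }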
+
+// Elf_Rel: Elf Relocation
+template <class ELFT, bool isRela> struct Elf_Rel_Impl;
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Word r_info; // Symbol table index and type of relocation to apply
+
+ uint32_t getRInfo(bool isMips64EL) const {
+ assert(!isMips64EL);
+ return r_info;
+ }
+ void setRInfo(uint32_t R, bool IsMips64EL) {
+ assert(!IsMips64EL);
+ r_info = R;
+ }
+
+ // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+ // and ELF32_R_INFO macros defined in the ELF specification:
+ uint32_t getSymbol(bool isMips64EL) const {
+ return this->getRInfo(isMips64EL) >> 8;
+ }
+ unsigned char getType(bool isMips64EL) const {
+ return (unsigned char)(this->getRInfo(isMips64EL) & 0x0ff);
+ }
+ void setSymbol(uint32_t s, bool IsMips64EL) {
+ setSymbolAndType(s, getType(), IsMips64EL);
+ }
+ void setType(unsigned char t, bool IsMips64EL) {
+ setSymbolAndType(getSymbol(), t, IsMips64EL);
+ }
+ void setSymbolAndType(uint32_t s, unsigned char t, bool IsMips64EL) {
+ this->setRInfo((s << 8) + t, IsMips64EL);
+ }
+};
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, false>, true>
+ : public Elf_Rel_Impl<ELFType<TargetEndianness, false>, false> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Sword r_addend; // Compute value for relocatable field by adding this
+};
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf_Xword r_info; // Symbol table index and type of relocation to apply
+
+ uint64_t getRInfo(bool isMips64EL) const {
+ uint64_t t = r_info;
+ if (!isMips64EL)
+ return t;
+ // Mips64 little endian has a "special" encoding of r_info. Instead of one
+ // 64 bit little endian number, it is a little endian 32 bit number followed
+ // by a 32 bit big endian number.
+ return (t << 32) | ((t >> 8) & 0xff000000) | ((t >> 24) & 0x00ff0000) |
+ ((t >> 40) & 0x0000ff00) | ((t >> 56) & 0x000000ff);
+ }
+ void setRInfo(uint64_t R, bool IsMips64EL) {
+ if (IsMips64EL)
+ r_info = (R >> 32) | ((R & 0xff000000) << 8) | ((R & 0x00ff0000) << 24) |
+ ((R & 0x0000ff00) << 40) | ((R & 0x000000ff) << 56);
+ else
+ r_info = R;
+ }
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ uint32_t getSymbol(bool isMips64EL) const {
+ return (uint32_t)(this->getRInfo(isMips64EL) >> 32);
+ }
+ uint32_t getType(bool isMips64EL) const {
+ return (uint32_t)(this->getRInfo(isMips64EL) & 0xffffffffL);
+ }
+ void setSymbol(uint32_t s, bool IsMips64EL) {
+ setSymbolAndType(s, getType(), IsMips64EL);
+ }
+ void setType(uint32_t t, bool IsMips64EL) {
+ setSymbolAndType(getSymbol(), t, IsMips64EL);
+ }
+ void setSymbolAndType(uint32_t s, uint32_t t, bool IsMips64EL) {
+ this->setRInfo(((uint64_t)s << 32) + (t & 0xffffffffL), IsMips64EL);
+ }
+};
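+
+// A worked example of the MIPS64EL re-encoding above: the logical value for
+// symbol 5, type 3 is 0x0000000500000003. setRInfo(..., /*IsMips64EL=*/true)
+// stores it as 0x0300000000000005 (a little endian 32-bit symbol word
+// followed by a big endian 32-bit type word), and getRInfo(true) reassembles
+// the logical value.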
+
+template <endianness TargetEndianness>
+struct Elf_Rel_Impl<ELFType<TargetEndianness, true>, true>
+ : public Elf_Rel_Impl<ELFType<TargetEndianness, true>, false> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Sxword r_addend; // Compute value for relocatable field by adding this.
+};
+
+template <class ELFT>
+struct Elf_Ehdr_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes
+ Elf_Half e_type; // Type of file (see ET_*)
+ Elf_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf_Word e_version; // Must be equal to 1
+ Elf_Addr e_entry; // Address to jump to in order to start program
+ Elf_Off e_phoff; // Program header table's file offset, in bytes
+ Elf_Off e_shoff; // Section header table's file offset, in bytes
+ Elf_Word e_flags; // Processor-specific flags
+ Elf_Half e_ehsize; // Size of ELF header, in bytes
+ Elf_Half e_phentsize; // Size of an entry in the program header table
+ Elf_Half e_phnum; // Number of entries in the program header table
+ Elf_Half e_shentsize; // Size of an entry in the section header table
+ Elf_Half e_shnum; // Number of entries in the section header table
+ Elf_Half e_shstrndx; // Section header table index of section name
+ // string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; }
+};
+
+template <class ELFT> struct Elf_Phdr_Impl;
+
+template <endianness TargetEndianness>
+struct Elf_Phdr_Impl<ELFType<TargetEndianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Word p_type; // Type of segment
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Word p_flags; // Segment flags
+ Elf_Word p_align; // Segment alignment constraint
+};
+
+template <endianness TargetEndianness>
+struct Elf_Phdr_Impl<ELFType<TargetEndianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Word p_type; // Type of segment
+ Elf_Word p_flags; // Segment flags
+ Elf_Off p_offset; // FileOffset where segment is located, in bytes
+ Elf_Addr p_vaddr; // Virtual Address of beginning of segment
+ Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf_Xword p_align; // Segment alignment constraint
+};
+
+// ELFT needed for endianness.
+template <class ELFT>
+struct Elf_Hash_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Word nbucket;
+ Elf_Word nchain;
+
+ ArrayRef<Elf_Word> buckets() const {
+ return ArrayRef<Elf_Word>(&nbucket + 2, &nbucket + 2 + nbucket);
+ }
+
+ ArrayRef<Elf_Word> chains() const {
+ return ArrayRef<Elf_Word>(&nbucket + 2 + nbucket,
+ &nbucket + 2 + nbucket + nchain);
+ }
+};
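+
+// Lookup sketch for the classic SysV hash table (hypothetical helpers:
+// `elfHash` computes the SysV ELF hash of a name, `nameMatches` compares the
+// name against the .dynsym entry at the given index):
+//
+//   uint32_t H = elfHash(Name);
+//   for (Elf_Word I = HashTab.buckets()[H % HashTab.nbucket];
+//        I != ELF::STN_UNDEF; I = HashTab.chains()[I])
+//     if (nameMatches(I))
+//       return I; // index into .dynsym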
+
+// .gnu.hash section
+template <class ELFT>
+struct Elf_GnuHash_Impl {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Word nbuckets;
+ Elf_Word symndx;
+ Elf_Word maskwords;
+ Elf_Word shift2;
+
+ ArrayRef<Elf_Off> filter() const {
+ return ArrayRef<Elf_Off>(reinterpret_cast<const Elf_Off *>(&shift2 + 1),
+ maskwords);
+ }
+
+ ArrayRef<Elf_Word> buckets() const {
+ return ArrayRef<Elf_Word>(
+ reinterpret_cast<const Elf_Word *>(filter().end()), nbuckets);
+ }
+
+ ArrayRef<Elf_Word> values(unsigned DynamicSymCount) const {
+ return ArrayRef<Elf_Word>(buckets().end(), DynamicSymCount - symndx);
+ }
+};
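+
+// The .gnu.hash layout these accessors mirror, in file order (a sketch):
+//   [ nbuckets, symndx, maskwords, shift2 ]
+//   [ Bloom filter: maskwords words of Elf_Off ]
+//   [ buckets: nbuckets words ]
+//   [ values: one word per dynamic symbol from symndx onward ]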
+
+// MIPS .reginfo section
+template <class ELFT>
+struct Elf_Mips_RegInfo;
+
+template <llvm::support::endianness TargetEndianness>
+struct Elf_Mips_RegInfo<ELFType<TargetEndianness, false>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, false)
+ Elf_Word ri_gprmask; // bit-mask of used general registers
+ Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
+ Elf_Addr ri_gp_value; // gp register value
+};
+
+template <llvm::support::endianness TargetEndianness>
+struct Elf_Mips_RegInfo<ELFType<TargetEndianness, true>> {
+ LLVM_ELF_IMPORT_TYPES(TargetEndianness, true)
+ Elf_Word ri_gprmask; // bit-mask of used general registers
+ Elf_Word ri_pad; // unused padding field
+ Elf_Word ri_cprmask[4]; // bit-mask of used co-processor registers
+ Elf_Addr ri_gp_value; // gp register value
+};
+
+// .MIPS.options section
+template <class ELFT> struct Elf_Mips_Options {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ uint8_t kind; // Determines interpretation of variable part of descriptor
+ uint8_t size; // Byte size of descriptor, including this header
+ Elf_Half section; // Section header index of section affected,
+ // or 0 for global options
+ Elf_Word info; // Kind-specific information
+
+ const Elf_Mips_RegInfo<ELFT> &getRegInfo() const {
+ assert(kind == llvm::ELF::ODK_REGINFO);
+ return *reinterpret_cast<const Elf_Mips_RegInfo<ELFT> *>(
+ (const uint8_t *)this + sizeof(Elf_Mips_Options));
+ }
+};
+
+// .MIPS.abiflags section content
+template <class ELFT> struct Elf_Mips_ABIFlags {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+ Elf_Half version; // Version of the structure
+ uint8_t isa_level; // ISA level: 1-5, 32, and 64
+ uint8_t isa_rev; // ISA revision (0 for MIPS I - MIPS V)
+ uint8_t gpr_size; // General purpose registers size
+ uint8_t cpr1_size; // Co-processor 1 registers size
+ uint8_t cpr2_size; // Co-processor 2 registers size
+ uint8_t fp_abi; // Floating-point ABI flag
+ Elf_Word isa_ext; // Processor-specific extension
+ Elf_Word ases; // ASEs flags
+ Elf_Word flags1; // General flags
+ Elf_Word flags2; // General flags
+};
+
+} // end namespace object.
+} // end namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/ELFYAML.h b/contrib/llvm/include/llvm/Object/ELFYAML.h
new file mode 100644
index 0000000..df0aa50
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ELFYAML.h
@@ -0,0 +1,319 @@
+//===- ELFYAML.h - ELF YAMLIO implementation --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief This file declares classes for handling the YAML representation
+/// of ELF.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ELFYAML_H
+#define LLVM_OBJECT_ELFYAML_H
+
+#include "llvm/MC/YAML.h"
+#include "llvm/Support/ELF.h"
+
+namespace llvm {
+namespace ELFYAML {
+
+// These types are invariant across 32/64-bit ELF, so for simplicity just
+// directly give them their exact sizes. We don't need to worry about
+// endianness because these are just the types in the YAMLIO structures,
+// and are appropriately converted to the necessary endianness when
+// reading/generating binary object files.
+// The naming of these types is intended to be ELF_PREFIX, where PREFIX is
+// the common prefix of the respective constants. E.g. ELF_EM corresponds
+// to the `e_machine` constants, like `EM_X86_64`.
+// In the future, these would probably be better represented as C++11 enum
+// classes with an appropriate fixed underlying type.
+LLVM_YAML_STRONG_TYPEDEF(uint16_t, ELF_ET)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_EM)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFCLASS)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFDATA)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_ELFOSABI)
+// Just use 64, since it can hold 32-bit values too.
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_EF)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_SHT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, ELF_REL)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_RSS)
+// Just use 64, since it can hold 32-bit values too.
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, ELF_SHF)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STT)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STV)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, ELF_STO)
+
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_ISA)
+
+// For now, hardcode 64 bits everywhere that 32 or 64 would be needed
+// since 64-bit can hold 32-bit values too.
+struct FileHeader {
+ ELF_ELFCLASS Class;
+ ELF_ELFDATA Data;
+ ELF_ELFOSABI OSABI;
+ ELF_ET Type;
+ ELF_EM Machine;
+ ELF_EF Flags;
+ llvm::yaml::Hex64 Entry;
+};
+struct Symbol {
+ StringRef Name;
+ ELF_STT Type;
+ StringRef Section;
+ llvm::yaml::Hex64 Value;
+ llvm::yaml::Hex64 Size;
+ uint8_t Other;
+};
+struct LocalGlobalWeakSymbols {
+ std::vector<Symbol> Local;
+ std::vector<Symbol> Global;
+ std::vector<Symbol> Weak;
+};
+
+struct SectionOrType {
+ StringRef sectionNameOrType;
+};
+
+struct Section {
+ enum class SectionKind {
+ Group,
+ RawContent,
+ Relocation,
+ NoBits,
+ MipsABIFlags
+ };
+ SectionKind Kind;
+ StringRef Name;
+ ELF_SHT Type;
+ ELF_SHF Flags;
+ llvm::yaml::Hex64 Address;
+ StringRef Link;
+ StringRef Info;
+ llvm::yaml::Hex64 AddressAlign;
+ Section(SectionKind Kind) : Kind(Kind) {}
+ virtual ~Section();
+};
+struct RawContentSection : Section {
+ yaml::BinaryRef Content;
+ llvm::yaml::Hex64 Size;
+ RawContentSection() : Section(SectionKind::RawContent) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::RawContent;
+ }
+};
+
+struct NoBitsSection : Section {
+ llvm::yaml::Hex64 Size;
+ NoBitsSection() : Section(SectionKind::NoBits) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::NoBits;
+ }
+};
+
+struct Group : Section {
+ // Members of a group contain a flag and a list of section indices
+ // that are part of the group.
+ std::vector<SectionOrType> Members;
+ Group() : Section(SectionKind::Group) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::Group;
+ }
+};
+
+struct Relocation {
+ llvm::yaml::Hex64 Offset;
+ int64_t Addend;
+ ELF_REL Type;
+ StringRef Symbol;
+};
+struct RelocationSection : Section {
+ std::vector<Relocation> Relocations;
+ RelocationSection() : Section(SectionKind::Relocation) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::Relocation;
+ }
+};
+
+// Represents .MIPS.abiflags section
+struct MipsABIFlags : Section {
+ llvm::yaml::Hex16 Version;
+ MIPS_ISA ISALevel;
+ llvm::yaml::Hex8 ISARevision;
+ MIPS_AFL_REG GPRSize;
+ MIPS_AFL_REG CPR1Size;
+ MIPS_AFL_REG CPR2Size;
+ MIPS_ABI_FP FpABI;
+ MIPS_AFL_EXT ISAExtension;
+ MIPS_AFL_ASE ASEs;
+ MIPS_AFL_FLAGS1 Flags1;
+ llvm::yaml::Hex32 Flags2;
+ MipsABIFlags() : Section(SectionKind::MipsABIFlags) {}
+ static bool classof(const Section *S) {
+ return S->Kind == SectionKind::MipsABIFlags;
+ }
+};
+
+struct Object {
+ FileHeader Header;
+ std::vector<std::unique_ptr<Section>> Sections;
+ // Although in reality the symbols reside in a section, it is a lot
+ // cleaner and nicer if we read them from the YAML as a separate
+ // top-level key, which automatically ensures that invariants like there
+ // being a single SHT_SYMTAB section are upheld.
+ LocalGlobalWeakSymbols Symbols;
+};
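+
+// A minimal YAML document that maps onto these structures (a sketch of the
+// yaml2obj input format; the field values are illustrative only):
+//
+//   --- !ELF
+//   FileHeader:
+//     Class:   ELFCLASS64
+//     Data:    ELFDATA2LSB
+//     Type:    ET_REL
+//     Machine: EM_X86_64
+//   Symbols:
+//     Global:
+//       - Name: foo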
+
+} // end namespace ELFYAML
+} // end namespace llvm
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::unique_ptr<llvm::ELFYAML::Section>)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Symbol)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::Relocation)
+LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::ELFYAML::SectionOrType)
+
+namespace llvm {
+namespace yaml {
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ET> {
+ static void enumeration(IO &IO, ELFYAML::ELF_ET &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_EM> {
+ static void enumeration(IO &IO, ELFYAML::ELF_EM &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS> {
+ static void enumeration(IO &IO, ELFYAML::ELF_ELFCLASS &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA> {
+ static void enumeration(IO &IO, ELFYAML::ELF_ELFDATA &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI> {
+ static void enumeration(IO &IO, ELFYAML::ELF_ELFOSABI &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_EF> {
+ static void bitset(IO &IO, ELFYAML::ELF_EF &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_SHT> {
+ static void enumeration(IO &IO, ELFYAML::ELF_SHT &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_SHF> {
+ static void bitset(IO &IO, ELFYAML::ELF_SHF &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_STT> {
+ static void enumeration(IO &IO, ELFYAML::ELF_STT &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_STV> {
+ static void enumeration(IO &IO, ELFYAML::ELF_STV &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::ELF_STO> {
+ static void bitset(IO &IO, ELFYAML::ELF_STO &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_REL> {
+ static void enumeration(IO &IO, ELFYAML::ELF_REL &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::ELF_RSS> {
+ static void enumeration(IO &IO, ELFYAML::ELF_RSS &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG> {
+ static void enumeration(IO &IO, ELFYAML::MIPS_AFL_REG &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP> {
+ static void enumeration(IO &IO, ELFYAML::MIPS_ABI_FP &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT> {
+ static void enumeration(IO &IO, ELFYAML::MIPS_AFL_EXT &Value);
+};
+
+template <>
+struct ScalarEnumerationTraits<ELFYAML::MIPS_ISA> {
+ static void enumeration(IO &IO, ELFYAML::MIPS_ISA &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE> {
+ static void bitset(IO &IO, ELFYAML::MIPS_AFL_ASE &Value);
+};
+
+template <>
+struct ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1> {
+ static void bitset(IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value);
+};
+
+template <>
+struct MappingTraits<ELFYAML::FileHeader> {
+ static void mapping(IO &IO, ELFYAML::FileHeader &FileHdr);
+};
+
+template <>
+struct MappingTraits<ELFYAML::Symbol> {
+ static void mapping(IO &IO, ELFYAML::Symbol &Symbol);
+};
+
+template <>
+struct MappingTraits<ELFYAML::LocalGlobalWeakSymbols> {
+ static void mapping(IO &IO, ELFYAML::LocalGlobalWeakSymbols &Symbols);
+};
+
+template <> struct MappingTraits<ELFYAML::Relocation> {
+ static void mapping(IO &IO, ELFYAML::Relocation &Rel);
+};
+
+template <>
+struct MappingTraits<std::unique_ptr<ELFYAML::Section>> {
+ static void mapping(IO &IO, std::unique_ptr<ELFYAML::Section> &Section);
+ static StringRef validate(IO &io, std::unique_ptr<ELFYAML::Section> &Section);
+};
+
+template <>
+struct MappingTraits<ELFYAML::Object> {
+ static void mapping(IO &IO, ELFYAML::Object &Object);
+};
+
+template <> struct MappingTraits<ELFYAML::SectionOrType> {
+ static void mapping(IO &IO, ELFYAML::SectionOrType &sectionOrType);
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/Error.h b/contrib/llvm/include/llvm/Object/Error.h
new file mode 100644
index 0000000..0f79a6e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/Error.h
@@ -0,0 +1,52 @@
+//===- Error.h - system_error extensions for Object -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This declares a new error_category for the Object library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_ERROR_H
+#define LLVM_OBJECT_ERROR_H
+
+#include <system_error>
+
+namespace llvm {
+namespace object {
+
+const std::error_category &object_category();
+
+enum class object_error {
+ // Error code 0 is absent. Use std::error_code() instead.
+ arch_not_found = 1,
+ invalid_file_type,
+ parse_failed,
+ unexpected_eof,
+ string_table_non_null_end,
+ invalid_section_index,
+ bitcode_section_not_found,
+ elf_invalid_dynamic_table_size,
+ macho_small_load_command,
+ macho_load_segment_too_many_sections,
+ macho_load_segment_too_small,
+};
+
+inline std::error_code make_error_code(object_error e) {
+ return std::error_code(static_cast<int>(e), object_category());
+}
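+
+// Usage sketch: thanks to the is_error_code_enum specialization below,
+// object_error values convert to and compare against std::error_code
+// directly (hypothetical caller):
+//
+//   std::error_code EC = object_error::parse_failed; // implicit conversion
+//   if (EC == object_error::parse_failed)
+//     errs() << EC.message() << "\n";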
+
+} // end namespace object.
+
+} // end namespace llvm.
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::object::object_error> : std::true_type {};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/FunctionIndexObjectFile.h b/contrib/llvm/include/llvm/Object/FunctionIndexObjectFile.h
new file mode 100644
index 0000000..74b461d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/FunctionIndexObjectFile.h
@@ -0,0 +1,110 @@
+//===- FunctionIndexObjectFile.h - Function index file implementation -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the FunctionIndexObjectFile template class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_FUNCTIONINDEXOBJECTFILE_H
+#define LLVM_OBJECT_FUNCTIONINDEXOBJECTFILE_H
+
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/Object/SymbolicFile.h"
+
+namespace llvm {
+class FunctionInfoIndex;
+class Module;
+
+namespace object {
+class ObjectFile;
+
+/// This class is used to read just the function summary index related
+/// sections out of the given object (which may contain a single module's
+/// bitcode or be a combined index bitcode file). It builds a FunctionInfoIndex
+/// object.
+class FunctionIndexObjectFile : public SymbolicFile {
+ std::unique_ptr<FunctionInfoIndex> Index;
+
+public:
+ FunctionIndexObjectFile(MemoryBufferRef Object,
+ std::unique_ptr<FunctionInfoIndex> I);
+ ~FunctionIndexObjectFile() override;
+
+ // TODO: Walk through FunctionMap entries for function symbols.
+ // However, currently these interfaces are not used by any consumers.
+ void moveSymbolNext(DataRefImpl &Symb) const override {
+ llvm_unreachable("not implemented");
+ }
+ std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const override {
+ llvm_unreachable("not implemented");
+ return std::error_code();
+ }
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override {
+ llvm_unreachable("not implemented");
+ return 0;
+ }
+ basic_symbol_iterator symbol_begin_impl() const override {
+ llvm_unreachable("not implemented");
+ return basic_symbol_iterator(BasicSymbolRef());
+ }
+ basic_symbol_iterator symbol_end_impl() const override {
+ llvm_unreachable("not implemented");
+ return basic_symbol_iterator(BasicSymbolRef());
+ }
+
+ const FunctionInfoIndex &getIndex() const {
+ return const_cast<FunctionIndexObjectFile *>(this)->getIndex();
+ }
+ FunctionInfoIndex &getIndex() { return *Index; }
+ std::unique_ptr<FunctionInfoIndex> takeIndex();
+
+ static inline bool classof(const Binary *v) { return v->isFunctionIndex(); }
+
+ /// \brief Finds and returns bitcode embedded in the given object file, or an
+ /// error code if not found.
+ static ErrorOr<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
+
+ /// \brief Finds and returns bitcode in the given memory buffer (which may
+ /// be either a bitcode file or a native object file with embedded bitcode),
+ /// or an error code if not found.
+ static ErrorOr<MemoryBufferRef>
+ findBitcodeInMemBuffer(MemoryBufferRef Object);
+
+ /// \brief Looks for function summary in the given memory buffer,
+ /// returns true if found, else false.
+ static bool
+ hasFunctionSummaryInMemBuffer(MemoryBufferRef Object,
+ DiagnosticHandlerFunction DiagnosticHandler);
+
+ /// \brief Parse function index in the given memory buffer.
+ /// Return new FunctionIndexObjectFile instance containing parsed function
+ /// summary/index.
+ static ErrorOr<std::unique_ptr<FunctionIndexObjectFile>>
+ create(MemoryBufferRef Object, DiagnosticHandlerFunction DiagnosticHandler,
+ bool IsLazy = false);
+
+ /// \brief Parse the function summary information for function with the
+ /// given name out of the given buffer. Parsed information is
+ /// stored on the index object saved in this object.
+ std::error_code
+ findFunctionSummaryInMemBuffer(MemoryBufferRef Object,
+ DiagnosticHandlerFunction DiagnosticHandler,
+ StringRef FunctionName);
+};
+}
+
+/// Parse the function index out of an IR file and return the function
+/// index object if found, or nullptr if not.
+ErrorOr<std::unique_ptr<FunctionInfoIndex>>
+getFunctionIndexForFile(StringRef Path,
+ DiagnosticHandlerFunction DiagnosticHandler);
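+
+// Example usage (an illustrative sketch, not part of this header; the
+// diagnostic handler lambda is an assumption and error handling is minimal):
+//
+//   auto DiagHandler = [](const DiagnosticInfo &DI) {
+//     DiagnosticPrinterRawOStream DP(errs());
+//     DI.print(DP);
+//   };
+//   ErrorOr<std::unique_ptr<FunctionInfoIndex>> IndexOrErr =
+//       getFunctionIndexForFile("input.o", DiagHandler);
+//   if (std::error_code EC = IndexOrErr.getError())
+//     report_fatal_error(EC.message());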
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/IRObjectFile.h b/contrib/llvm/include/llvm/Object/IRObjectFile.h
new file mode 100644
index 0000000..ef65528
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/IRObjectFile.h
@@ -0,0 +1,74 @@
+//===- IRObjectFile.h - LLVM IR object file implementation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the IRObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_IROBJECTFILE_H
+#define LLVM_OBJECT_IROBJECTFILE_H
+
+#include "llvm/Object/SymbolicFile.h"
+
+namespace llvm {
+class Mangler;
+class Module;
+class GlobalValue;
+
+namespace object {
+class ObjectFile;
+
+class IRObjectFile : public SymbolicFile {
+ std::unique_ptr<Module> M;
+ std::unique_ptr<Mangler> Mang;
+ std::vector<std::pair<std::string, uint32_t>> AsmSymbols;
+
+public:
+ IRObjectFile(MemoryBufferRef Object, std::unique_ptr<Module> M);
+ ~IRObjectFile() override;
+ void moveSymbolNext(DataRefImpl &Symb) const override;
+ std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const override;
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+ GlobalValue *getSymbolGV(DataRefImpl Symb);
+ const GlobalValue *getSymbolGV(DataRefImpl Symb) const {
+ return const_cast<IRObjectFile *>(this)->getSymbolGV(Symb);
+ }
+ basic_symbol_iterator symbol_begin_impl() const override;
+ basic_symbol_iterator symbol_end_impl() const override;
+
+ const Module &getModule() const {
+ return const_cast<IRObjectFile*>(this)->getModule();
+ }
+ Module &getModule() {
+ return *M;
+ }
+ std::unique_ptr<Module> takeModule();
+
+ static inline bool classof(const Binary *v) {
+ return v->isIR();
+ }
+
+ /// \brief Finds and returns bitcode embedded in the given object file, or an
+ /// error code if not found.
+ static ErrorOr<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
+
+ /// \brief Finds and returns bitcode in the given memory buffer (which may
+ /// be either a bitcode file or a native object file with embedded bitcode),
+ /// or an error code if not found.
+ static ErrorOr<MemoryBufferRef>
+ findBitcodeInMemBuffer(MemoryBufferRef Object);
+
+ static ErrorOr<std::unique_ptr<IRObjectFile>> create(MemoryBufferRef Object,
+ LLVMContext &Context);
+};
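+
+// Example usage (an illustrative sketch; assumes Buffer is a MemoryBufferRef
+// over valid bitcode and elides most error handling):
+//
+//   LLVMContext Ctx;
+//   ErrorOr<std::unique_ptr<IRObjectFile>> ObjOrErr =
+//       IRObjectFile::create(Buffer, Ctx);
+//   if (ObjOrErr)
+//     for (const BasicSymbolRef &Sym : (*ObjOrErr)->symbols()) {
+//       Sym.printName(outs());
+//       outs() << "\n";
+//     }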
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/MachO.h b/contrib/llvm/include/llvm/Object/MachO.h
new file mode 100644
index 0000000..e02ce3b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/MachO.h
@@ -0,0 +1,523 @@
+//===- MachO.h - MachO object file implementation ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MachOObjectFile class, which implements the
+// ObjectFile interface for Mach-O files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHO_H
+#define LLVM_OBJECT_MACHO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/MachO.h"
+
+namespace llvm {
+namespace object {
+
+/// DiceRef - This is a value type class that represents a single
+/// data-in-code entry in the data-in-code table of a Mach-O object file.
+class DiceRef {
+ DataRefImpl DicePimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ DiceRef() : OwningObject(nullptr) { }
+
+ DiceRef(DataRefImpl DiceP, const ObjectFile *Owner);
+
+ bool operator==(const DiceRef &Other) const;
+ bool operator<(const DiceRef &Other) const;
+
+ void moveNext();
+
+ std::error_code getOffset(uint32_t &Result) const;
+ std::error_code getLength(uint16_t &Result) const;
+ std::error_code getKind(uint16_t &Result) const;
+
+ DataRefImpl getRawDataRefImpl() const;
+ const ObjectFile *getObjectFile() const;
+};
+typedef content_iterator<DiceRef> dice_iterator;
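+
+// Example usage (an illustrative sketch; assumes MachOObj is a
+// MachOObjectFile* whose object contains a data-in-code table):
+//
+//   for (dice_iterator I = MachOObj->begin_dices(),
+//                      E = MachOObj->end_dices();
+//        I != E; ++I) {
+//     uint32_t Offset;
+//     uint16_t Length;
+//     if (!I->getOffset(Offset) && !I->getLength(Length))
+//       outs() << "data in code at " << Offset << ", " << Length << " bytes\n";
+//   }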
+
+/// ExportEntry encapsulates the current state of the walk used when doing a
+/// non-recursive walk of the trie data structure. This allows you to iterate
+/// across all exported symbols using:
+/// for (const llvm::object::ExportEntry &AnExport : Obj->exports()) {
+/// }
+class ExportEntry {
+public:
+ ExportEntry(ArrayRef<uint8_t> Trie);
+
+ StringRef name() const;
+ uint64_t flags() const;
+ uint64_t address() const;
+ uint64_t other() const;
+ StringRef otherName() const;
+ uint32_t nodeOffset() const;
+
+ bool operator==(const ExportEntry &) const;
+
+ void moveNext();
+
+private:
+ friend class MachOObjectFile;
+ void moveToFirst();
+ void moveToEnd();
+ uint64_t readULEB128(const uint8_t *&p);
+ void pushDownUntilBottom();
+ void pushNode(uint64_t Offset);
+
+ // Represents a node in the mach-o exports trie.
+ struct NodeState {
+ NodeState(const uint8_t *Ptr);
+ const uint8_t *Start;
+ const uint8_t *Current;
+ uint64_t Flags;
+ uint64_t Address;
+ uint64_t Other;
+ const char *ImportName;
+ unsigned ChildCount;
+ unsigned NextChildIndex;
+ unsigned ParentStringLength;
+ bool IsExportNode;
+ };
+
+ ArrayRef<uint8_t> Trie;
+ SmallString<256> CumulativeString;
+ SmallVector<NodeState, 16> Stack;
+ bool Malformed;
+ bool Done;
+};
+typedef content_iterator<ExportEntry> export_iterator;
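+
+// Example usage (an illustrative sketch; assumes Obj is a MachOObjectFile*):
+//
+//   for (const ExportEntry &E : Obj->exports())
+//     outs() << E.name() << " at address " << E.address() << "\n";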
+
+/// MachORebaseEntry encapsulates the current state in the decompression of
+/// rebasing opcodes. This allows you to iterate through the compressed table
+/// of rebase entries using:
+/// for (const llvm::object::MachORebaseEntry &Entry : Obj->rebaseTable()) {
+/// }
+class MachORebaseEntry {
+public:
+ MachORebaseEntry(ArrayRef<uint8_t> opcodes, bool is64Bit);
+
+ uint32_t segmentIndex() const;
+ uint64_t segmentOffset() const;
+ StringRef typeName() const;
+
+ bool operator==(const MachORebaseEntry &) const;
+
+ void moveNext();
+
+private:
+ friend class MachOObjectFile;
+ void moveToFirst();
+ void moveToEnd();
+ uint64_t readULEB128();
+
+ ArrayRef<uint8_t> Opcodes;
+ const uint8_t *Ptr;
+ uint64_t SegmentOffset;
+ uint32_t SegmentIndex;
+ uint64_t RemainingLoopCount;
+ uint64_t AdvanceAmount;
+ uint8_t RebaseType;
+ uint8_t PointerSize;
+ bool Malformed;
+ bool Done;
+};
+typedef content_iterator<MachORebaseEntry> rebase_iterator;
+
+/// MachOBindEntry encapsulates the current state in the decompression of
+/// binding opcodes. This allows you to iterate through the compressed table of
+/// bindings using:
+/// for (const llvm::object::MachOBindEntry &Entry : Obj->bindTable()) {
+/// }
+class MachOBindEntry {
+public:
+ enum class Kind { Regular, Lazy, Weak };
+
+ MachOBindEntry(ArrayRef<uint8_t> Opcodes, bool is64Bit, MachOBindEntry::Kind);
+
+ uint32_t segmentIndex() const;
+ uint64_t segmentOffset() const;
+ StringRef typeName() const;
+ StringRef symbolName() const;
+ uint32_t flags() const;
+ int64_t addend() const;
+ int ordinal() const;
+
+ bool operator==(const MachOBindEntry &) const;
+
+ void moveNext();
+
+private:
+ friend class MachOObjectFile;
+ void moveToFirst();
+ void moveToEnd();
+ uint64_t readULEB128();
+ int64_t readSLEB128();
+
+ ArrayRef<uint8_t> Opcodes;
+ const uint8_t *Ptr;
+ uint64_t SegmentOffset;
+ uint32_t SegmentIndex;
+ StringRef SymbolName;
+ int Ordinal;
+ uint32_t Flags;
+ int64_t Addend;
+ uint64_t RemainingLoopCount;
+ uint64_t AdvanceAmount;
+ uint8_t BindType;
+ uint8_t PointerSize;
+ Kind TableKind;
+ bool Malformed;
+ bool Done;
+};
+typedef content_iterator<MachOBindEntry> bind_iterator;
+
+class MachOObjectFile : public ObjectFile {
+public:
+ struct LoadCommandInfo {
+ const char *Ptr; // Where in memory the load command is.
+ MachO::load_command C; // The command itself.
+ };
+ typedef SmallVector<LoadCommandInfo, 4> LoadCommandList;
+ typedef LoadCommandList::const_iterator load_command_iterator;
+
+ MachOObjectFile(MemoryBufferRef Object, bool IsLittleEndian, bool Is64Bits,
+ std::error_code &EC);
+
+ void moveSymbolNext(DataRefImpl &Symb) const override;
+
+ uint64_t getNValue(DataRefImpl Sym) const;
+ ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const override;
+
+ // MachO specific.
+ std::error_code getIndirectName(DataRefImpl Symb, StringRef &Res) const;
+ unsigned getSectionType(SectionRef Sec) const;
+
+ ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const override;
+ uint32_t getSymbolAlignment(DataRefImpl Symb) const override;
+ uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const override;
+ SymbolRef::Type getSymbolType(DataRefImpl Symb) const override;
+ uint32_t getSymbolFlags(DataRefImpl Symb) const override;
+ ErrorOr<section_iterator> getSymbolSection(DataRefImpl Symb) const override;
+ unsigned getSymbolSectionID(SymbolRef Symb) const;
+ unsigned getSectionID(SectionRef Sec) const;
+
+ void moveSectionNext(DataRefImpl &Sec) const override;
+ std::error_code getSectionName(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAddress(DataRefImpl Sec) const override;
+ uint64_t getSectionSize(DataRefImpl Sec) const override;
+ std::error_code getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const override;
+ uint64_t getSectionAlignment(DataRefImpl Sec) const override;
+ bool isSectionText(DataRefImpl Sec) const override;
+ bool isSectionData(DataRefImpl Sec) const override;
+ bool isSectionBSS(DataRefImpl Sec) const override;
+ bool isSectionVirtual(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_begin(DataRefImpl Sec) const override;
+ relocation_iterator section_rel_end(DataRefImpl Sec) const override;
+
+ void moveRelocationNext(DataRefImpl &Rel) const override;
+ uint64_t getRelocationOffset(DataRefImpl Rel) const override;
+ symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
+ section_iterator getRelocationSection(DataRefImpl Rel) const;
+ uint64_t getRelocationType(DataRefImpl Rel) const override;
+ void getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const override;
+ uint8_t getRelocationLength(DataRefImpl Rel) const;
+
+ // MachO specific.
+ std::error_code getLibraryShortNameByIndex(unsigned Index, StringRef &) const;
+
+ section_iterator getRelocationRelocatedSection(relocation_iterator Rel) const;
+
+ // TODO: Would be useful to have an iterator based version
+ // of the load command interface too.
+
+ basic_symbol_iterator symbol_begin_impl() const override;
+ basic_symbol_iterator symbol_end_impl() const override;
+
+ // MachO specific.
+ basic_symbol_iterator getSymbolByIndex(unsigned Index) const;
+
+ section_iterator section_begin() const override;
+ section_iterator section_end() const override;
+
+ uint8_t getBytesInAddress() const override;
+
+ StringRef getFileFormatName() const override;
+ unsigned getArch() const override;
+ Triple getArch(const char **McpuDefault, Triple *ThumbTriple) const;
+
+ relocation_iterator section_rel_begin(unsigned Index) const;
+ relocation_iterator section_rel_end(unsigned Index) const;
+
+ dice_iterator begin_dices() const;
+ dice_iterator end_dices() const;
+
+ load_command_iterator begin_load_commands() const;
+ load_command_iterator end_load_commands() const;
+ iterator_range<load_command_iterator> load_commands() const;
+
+ /// For use iterating over all exported symbols.
+ iterator_range<export_iterator> exports() const;
+
+ /// For use examining a trie not in a MachOObjectFile.
+ static iterator_range<export_iterator> exports(ArrayRef<uint8_t> Trie);
+
+ /// For use iterating over all rebase table entries.
+ iterator_range<rebase_iterator> rebaseTable() const;
+
+ /// For use examining rebase opcodes not in a MachOObjectFile.
+ static iterator_range<rebase_iterator> rebaseTable(ArrayRef<uint8_t> Opcodes,
+ bool is64);
+
+ /// For use iterating over all bind table entries.
+ iterator_range<bind_iterator> bindTable() const;
+
+ /// For use iterating over all lazy bind table entries.
+ iterator_range<bind_iterator> lazyBindTable() const;
+
+  /// For use iterating over all weak bind table entries.
+ iterator_range<bind_iterator> weakBindTable() const;
+
+ /// For use examining bind opcodes not in a MachOObjectFile.
+ static iterator_range<bind_iterator> bindTable(ArrayRef<uint8_t> Opcodes,
+ bool is64,
+ MachOBindEntry::Kind);
+
+  // In a Mach-O file, sections have a segment name. This matters in .o
+  // files: they contain a single segment, but this field records which
+  // segment each section should be placed in when the final object is linked.
+ StringRef getSectionFinalSegmentName(DataRefImpl Sec) const;
+
+  // Names are stored as 16 bytes. These return the raw 16 bytes without
+ // interpreting them as a C string.
+ ArrayRef<char> getSectionRawName(DataRefImpl Sec) const;
+ ArrayRef<char> getSectionRawFinalSegmentName(DataRefImpl Sec) const;
+
+ // MachO specific Info about relocations.
+ bool isRelocationScattered(const MachO::any_relocation_info &RE) const;
+ unsigned getPlainRelocationSymbolNum(
+ const MachO::any_relocation_info &RE) const;
+ bool getPlainRelocationExternal(const MachO::any_relocation_info &RE) const;
+ bool getScatteredRelocationScattered(
+ const MachO::any_relocation_info &RE) const;
+ uint32_t getScatteredRelocationValue(
+ const MachO::any_relocation_info &RE) const;
+ uint32_t getScatteredRelocationType(
+ const MachO::any_relocation_info &RE) const;
+ unsigned getAnyRelocationAddress(const MachO::any_relocation_info &RE) const;
+ unsigned getAnyRelocationPCRel(const MachO::any_relocation_info &RE) const;
+ unsigned getAnyRelocationLength(const MachO::any_relocation_info &RE) const;
+ unsigned getAnyRelocationType(const MachO::any_relocation_info &RE) const;
+ SectionRef getAnyRelocationSection(const MachO::any_relocation_info &RE) const;
+
+ // MachO specific structures.
+ MachO::section getSection(DataRefImpl DRI) const;
+ MachO::section_64 getSection64(DataRefImpl DRI) const;
+ MachO::section getSection(const LoadCommandInfo &L, unsigned Index) const;
+ MachO::section_64 getSection64(const LoadCommandInfo &L,unsigned Index) const;
+ MachO::nlist getSymbolTableEntry(DataRefImpl DRI) const;
+ MachO::nlist_64 getSymbol64TableEntry(DataRefImpl DRI) const;
+
+ MachO::linkedit_data_command
+ getLinkeditDataLoadCommand(const LoadCommandInfo &L) const;
+ MachO::segment_command
+ getSegmentLoadCommand(const LoadCommandInfo &L) const;
+ MachO::segment_command_64
+ getSegment64LoadCommand(const LoadCommandInfo &L) const;
+ MachO::linker_option_command
+ getLinkerOptionLoadCommand(const LoadCommandInfo &L) const;
+ MachO::version_min_command
+ getVersionMinLoadCommand(const LoadCommandInfo &L) const;
+ MachO::dylib_command
+ getDylibIDLoadCommand(const LoadCommandInfo &L) const;
+ MachO::dyld_info_command
+ getDyldInfoLoadCommand(const LoadCommandInfo &L) const;
+ MachO::dylinker_command
+ getDylinkerCommand(const LoadCommandInfo &L) const;
+ MachO::uuid_command
+ getUuidCommand(const LoadCommandInfo &L) const;
+ MachO::rpath_command
+ getRpathCommand(const LoadCommandInfo &L) const;
+ MachO::source_version_command
+ getSourceVersionCommand(const LoadCommandInfo &L) const;
+ MachO::entry_point_command
+ getEntryPointCommand(const LoadCommandInfo &L) const;
+ MachO::encryption_info_command
+ getEncryptionInfoCommand(const LoadCommandInfo &L) const;
+ MachO::encryption_info_command_64
+ getEncryptionInfoCommand64(const LoadCommandInfo &L) const;
+ MachO::sub_framework_command
+ getSubFrameworkCommand(const LoadCommandInfo &L) const;
+ MachO::sub_umbrella_command
+ getSubUmbrellaCommand(const LoadCommandInfo &L) const;
+ MachO::sub_library_command
+ getSubLibraryCommand(const LoadCommandInfo &L) const;
+ MachO::sub_client_command
+ getSubClientCommand(const LoadCommandInfo &L) const;
+ MachO::routines_command
+ getRoutinesCommand(const LoadCommandInfo &L) const;
+ MachO::routines_command_64
+ getRoutinesCommand64(const LoadCommandInfo &L) const;
+ MachO::thread_command
+ getThreadCommand(const LoadCommandInfo &L) const;
+
+ MachO::any_relocation_info getRelocation(DataRefImpl Rel) const;
+ MachO::data_in_code_entry getDice(DataRefImpl Rel) const;
+ const MachO::mach_header &getHeader() const;
+ const MachO::mach_header_64 &getHeader64() const;
+ uint32_t
+ getIndirectSymbolTableEntry(const MachO::dysymtab_command &DLC,
+ unsigned Index) const;
+ MachO::data_in_code_entry getDataInCodeTableEntry(uint32_t DataOffset,
+ unsigned Index) const;
+ MachO::symtab_command getSymtabLoadCommand() const;
+ MachO::dysymtab_command getDysymtabLoadCommand() const;
+ MachO::linkedit_data_command getDataInCodeLoadCommand() const;
+ MachO::linkedit_data_command getLinkOptHintsLoadCommand() const;
+ ArrayRef<uint8_t> getDyldInfoRebaseOpcodes() const;
+ ArrayRef<uint8_t> getDyldInfoBindOpcodes() const;
+ ArrayRef<uint8_t> getDyldInfoWeakBindOpcodes() const;
+ ArrayRef<uint8_t> getDyldInfoLazyBindOpcodes() const;
+ ArrayRef<uint8_t> getDyldInfoExportsTrie() const;
+ ArrayRef<uint8_t> getUuid() const;
+
+ StringRef getStringTableData() const;
+ bool is64Bit() const;
+ void ReadULEB128s(uint64_t Index, SmallVectorImpl<uint64_t> &Out) const;
+
+ static StringRef guessLibraryShortName(StringRef Name, bool &isFramework,
+ StringRef &Suffix);
+
+ static Triple::ArchType getArch(uint32_t CPUType);
+ static Triple getArch(uint32_t CPUType, uint32_t CPUSubType,
+ const char **McpuDefault = nullptr);
+ static Triple getThumbArch(uint32_t CPUType, uint32_t CPUSubType,
+ const char **McpuDefault = nullptr);
+ static Triple getArch(uint32_t CPUType, uint32_t CPUSubType,
+ const char **McpuDefault, Triple *ThumbTriple);
+ static bool isValidArch(StringRef ArchFlag);
+ static Triple getHostArch();
+
+ bool isRelocatableObject() const override;
+
+ bool hasPageZeroSegment() const { return HasPageZeroSegment; }
+
+ static bool classof(const Binary *v) {
+ return v->isMachO();
+ }
+
+ static uint32_t
+ getVersionMinMajor(MachO::version_min_command &C, bool SDK) {
+ uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+ return (VersionOrSDK >> 16) & 0xffff;
+ }
+
+ static uint32_t
+ getVersionMinMinor(MachO::version_min_command &C, bool SDK) {
+ uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+ return (VersionOrSDK >> 8) & 0xff;
+ }
+
+ static uint32_t
+ getVersionMinUpdate(MachO::version_min_command &C, bool SDK) {
+ uint32_t VersionOrSDK = (SDK) ? C.sdk : C.version;
+ return VersionOrSDK & 0xff;
+ }
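+
+  // Worked example (illustrative): for a version_min_command C with
+  // C.version == 0x000A0B02, the packed xxxx.yy.zz encoding above decodes to
+  // getVersionMinMajor(C, false) == 10, getVersionMinMinor(C, false) == 11,
+  // and getVersionMinUpdate(C, false) == 2, i.e. version 10.11.2.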
+
+private:
+ uint64_t getSymbolValueImpl(DataRefImpl Symb) const override;
+
+ union {
+ MachO::mach_header_64 Header64;
+ MachO::mach_header Header;
+ };
+ typedef SmallVector<const char*, 1> SectionList;
+ SectionList Sections;
+ typedef SmallVector<const char*, 1> LibraryList;
+ LibraryList Libraries;
+ LoadCommandList LoadCommands;
+ typedef SmallVector<StringRef, 1> LibraryShortName;
+ mutable LibraryShortName LibrariesShortNames;
+ const char *SymtabLoadCmd;
+ const char *DysymtabLoadCmd;
+ const char *DataInCodeLoadCmd;
+ const char *LinkOptHintsLoadCmd;
+ const char *DyldInfoLoadCmd;
+ const char *UuidLoadCmd;
+ bool HasPageZeroSegment;
+};
+
+/// DiceRef
+inline DiceRef::DiceRef(DataRefImpl DiceP, const ObjectFile *Owner)
+ : DicePimpl(DiceP) , OwningObject(Owner) {}
+
+inline bool DiceRef::operator==(const DiceRef &Other) const {
+ return DicePimpl == Other.DicePimpl;
+}
+
+inline bool DiceRef::operator<(const DiceRef &Other) const {
+ return DicePimpl < Other.DicePimpl;
+}
+
+inline void DiceRef::moveNext() {
+ const MachO::data_in_code_entry *P =
+ reinterpret_cast<const MachO::data_in_code_entry *>(DicePimpl.p);
+ DicePimpl.p = reinterpret_cast<uintptr_t>(P + 1);
+}
+
+// Since a Mach-O data-in-code reference (a DiceRef) can only be created when
+// the OwningObject ObjectFile is a MachOObjectFile, a static_cast<> is used
+// in the methods that read the fields of the reference.
+
+inline std::error_code DiceRef::getOffset(uint32_t &Result) const {
+ const MachOObjectFile *MachOOF =
+ static_cast<const MachOObjectFile *>(OwningObject);
+ MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+ Result = Dice.offset;
+ return std::error_code();
+}
+
+inline std::error_code DiceRef::getLength(uint16_t &Result) const {
+ const MachOObjectFile *MachOOF =
+ static_cast<const MachOObjectFile *>(OwningObject);
+ MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+ Result = Dice.length;
+ return std::error_code();
+}
+
+inline std::error_code DiceRef::getKind(uint16_t &Result) const {
+ const MachOObjectFile *MachOOF =
+ static_cast<const MachOObjectFile *>(OwningObject);
+ MachO::data_in_code_entry Dice = MachOOF->getDice(DicePimpl);
+ Result = Dice.kind;
+ return std::error_code();
+}
+
+inline DataRefImpl DiceRef::getRawDataRefImpl() const {
+ return DicePimpl;
+}
+
+inline const ObjectFile *DiceRef::getObjectFile() const {
+ return OwningObject;
+}
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/MachOUniversal.h b/contrib/llvm/include/llvm/Object/MachOUniversal.h
new file mode 100644
index 0000000..a11d381
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/MachOUniversal.h
@@ -0,0 +1,118 @@
+//===- MachOUniversal.h - Mach-O universal binaries -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares Mach-O fat/universal binaries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_MACHOUNIVERSAL_H
+#define LLVM_OBJECT_MACHOUNIVERSAL_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MachO.h"
+
+namespace llvm {
+namespace object {
+
+class MachOUniversalBinary : public Binary {
+ virtual void anchor();
+
+ uint32_t NumberOfObjects;
+public:
+ class ObjectForArch {
+ const MachOUniversalBinary *Parent;
+ /// \brief Index of object in the universal binary.
+ uint32_t Index;
+ /// \brief Descriptor of the object.
+ MachO::fat_arch Header;
+
+ public:
+ ObjectForArch(const MachOUniversalBinary *Parent, uint32_t Index);
+
+ void clear() {
+ Parent = nullptr;
+ Index = 0;
+ }
+
+ bool operator==(const ObjectForArch &Other) const {
+ return (Parent == Other.Parent) && (Index == Other.Index);
+ }
+
+ ObjectForArch getNext() const { return ObjectForArch(Parent, Index + 1); }
+ uint32_t getCPUType() const { return Header.cputype; }
+ uint32_t getCPUSubType() const { return Header.cpusubtype; }
+ uint32_t getOffset() const { return Header.offset; }
+ uint32_t getSize() const { return Header.size; }
+ uint32_t getAlign() const { return Header.align; }
+ std::string getArchTypeName() const {
+ Triple T = MachOObjectFile::getArch(Header.cputype, Header.cpusubtype);
+ return T.getArchName();
+ }
+
+ ErrorOr<std::unique_ptr<MachOObjectFile>> getAsObjectFile() const;
+
+ ErrorOr<std::unique_ptr<Archive>> getAsArchive() const;
+ };
+
+ class object_iterator {
+ ObjectForArch Obj;
+ public:
+ object_iterator(const ObjectForArch &Obj) : Obj(Obj) {}
+ const ObjectForArch *operator->() const { return &Obj; }
+ const ObjectForArch &operator*() const { return Obj; }
+
+ bool operator==(const object_iterator &Other) const {
+ return Obj == Other.Obj;
+ }
+ bool operator!=(const object_iterator &Other) const {
+ return !(*this == Other);
+ }
+
+ object_iterator& operator++() { // Preincrement
+ Obj = Obj.getNext();
+ return *this;
+ }
+ };
+
+  MachOUniversalBinary(MemoryBufferRef Source, std::error_code &EC);
+ static ErrorOr<std::unique_ptr<MachOUniversalBinary>>
+ create(MemoryBufferRef Source);
+
+ object_iterator begin_objects() const {
+ return ObjectForArch(this, 0);
+ }
+ object_iterator end_objects() const {
+ return ObjectForArch(nullptr, 0);
+ }
+
+ iterator_range<object_iterator> objects() const {
+ return make_range(begin_objects(), end_objects());
+ }
+
+ uint32_t getNumberOfObjects() const { return NumberOfObjects; }
+
+ // Cast methods.
+ static inline bool classof(Binary const *V) {
+ return V->isMachOUniversalBinary();
+ }
+
+ ErrorOr<std::unique_ptr<MachOObjectFile>>
+ getObjectForArch(StringRef ArchName) const;
+};
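+
+// Example usage (an illustrative sketch; assumes UB is a
+// MachOUniversalBinary* and elides error handling):
+//
+//   for (const MachOUniversalBinary::ObjectForArch &O : UB->objects()) {
+//     outs() << O.getArchTypeName() << "\n";
+//     if (ErrorOr<std::unique_ptr<MachOObjectFile>> SliceOrErr =
+//             O.getAsObjectFile())
+//       outs() << "  " << (*SliceOrErr)->getFileFormatName() << "\n";
+//   }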
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/ObjectFile.h b/contrib/llvm/include/llvm/Object/ObjectFile.h
new file mode 100644
index 0000000..ce0c891
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/ObjectFile.h
@@ -0,0 +1,459 @@
+//===- ObjectFile.h - File format independent object file -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares a file format independent ObjectFile class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_OBJECTFILE_H
+#define LLVM_OBJECT_OBJECTFILE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/SymbolicFile.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+#include <vector>
+
+namespace llvm {
+namespace object {
+
+class ObjectFile;
+class COFFObjectFile;
+class MachOObjectFile;
+
+class SymbolRef;
+class symbol_iterator;
+class SectionRef;
+typedef content_iterator<SectionRef> section_iterator;
+
+/// This is a value type class that represents a single relocation in the list
+/// of relocations in the object file.
+class RelocationRef {
+ DataRefImpl RelocationPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ RelocationRef() : OwningObject(nullptr) { }
+
+ RelocationRef(DataRefImpl RelocationP, const ObjectFile *Owner);
+
+ bool operator==(const RelocationRef &Other) const;
+
+ void moveNext();
+
+ uint64_t getOffset() const;
+ symbol_iterator getSymbol() const;
+ uint64_t getType() const;
+
+ /// @brief Get a string that represents the type of this relocation.
+ ///
+ /// This is for display purposes only.
+ void getTypeName(SmallVectorImpl<char> &Result) const;
+
+ DataRefImpl getRawDataRefImpl() const;
+ const ObjectFile *getObject() const;
+};
+typedef content_iterator<RelocationRef> relocation_iterator;
+
+/// This is a value type class that represents a single section in the list of
+/// sections in the object file.
+class SectionRef {
+ friend class SymbolRef;
+ DataRefImpl SectionPimpl;
+ const ObjectFile *OwningObject;
+
+public:
+ SectionRef() : OwningObject(nullptr) { }
+
+ SectionRef(DataRefImpl SectionP, const ObjectFile *Owner);
+
+ bool operator==(const SectionRef &Other) const;
+ bool operator!=(const SectionRef &Other) const;
+ bool operator<(const SectionRef &Other) const;
+
+ void moveNext();
+
+ std::error_code getName(StringRef &Result) const;
+ uint64_t getAddress() const;
+ uint64_t getSize() const;
+ std::error_code getContents(StringRef &Result) const;
+
+ /// @brief Get the alignment of this section as the actual value (not log 2).
+ uint64_t getAlignment() const;
+
+ bool isText() const;
+ bool isData() const;
+ bool isBSS() const;
+ bool isVirtual() const;
+
+ bool containsSymbol(SymbolRef S) const;
+
+ relocation_iterator relocation_begin() const;
+ relocation_iterator relocation_end() const;
+ iterator_range<relocation_iterator> relocations() const {
+ return make_range(relocation_begin(), relocation_end());
+ }
+ section_iterator getRelocatedSection() const;
+
+ DataRefImpl getRawDataRefImpl() const;
+ const ObjectFile *getObject() const;
+};
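+
+// Example usage (an illustrative sketch; assumes Sec is a SectionRef):
+//
+//   for (const RelocationRef &Rel : Sec.relocations())
+//     outs() << "relocation at offset " << Rel.getOffset() << "\n";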
+
+/// This is a value type class that represents a single symbol in the list of
+/// symbols in the object file.
+class SymbolRef : public BasicSymbolRef {
+ friend class SectionRef;
+
+public:
+ SymbolRef() : BasicSymbolRef() {}
+
+ enum Type {
+ ST_Unknown, // Type not specified
+ ST_Data,
+ ST_Debug,
+ ST_File,
+ ST_Function,
+ ST_Other
+ };
+
+ SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner);
+ SymbolRef(const BasicSymbolRef &B) : BasicSymbolRef(B) {
+ assert(isa<ObjectFile>(BasicSymbolRef::getObject()));
+ }
+
+ ErrorOr<StringRef> getName() const;
+ /// Returns the symbol virtual address (i.e. address at which it will be
+ /// mapped).
+ ErrorOr<uint64_t> getAddress() const;
+
+  /// Return the value of the symbol; depending on the object, this can be an
+  /// offset or a virtual address.
+ uint64_t getValue() const;
+
+ /// @brief Get the alignment of this symbol as the actual value (not log 2).
+ uint32_t getAlignment() const;
+ uint64_t getCommonSize() const;
+ SymbolRef::Type getType() const;
+
+  /// @brief Get the section this symbol is defined in. The result is
+  /// section_end() if the symbol is undefined or absolute.
+ ErrorOr<section_iterator> getSection() const;
+
+ const ObjectFile *getObject() const;
+};
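+
+// Example usage (an illustrative sketch; assumes Obj is an ObjectFile&):
+//
+//   for (const SymbolRef &Sym : Obj.symbols()) {
+//     ErrorOr<StringRef> NameOrErr = Sym.getName();
+//     if (NameOrErr)
+//       outs() << *NameOrErr << " = " << Sym.getValue() << "\n";
+//   }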
+
+class symbol_iterator : public basic_symbol_iterator {
+public:
+ symbol_iterator(SymbolRef Sym) : basic_symbol_iterator(Sym) {}
+ symbol_iterator(const basic_symbol_iterator &B)
+ : basic_symbol_iterator(SymbolRef(B->getRawDataRefImpl(),
+ cast<ObjectFile>(B->getObject()))) {}
+
+ const SymbolRef *operator->() const {
+ const BasicSymbolRef &P = basic_symbol_iterator::operator *();
+ return static_cast<const SymbolRef*>(&P);
+ }
+
+ const SymbolRef &operator*() const {
+ const BasicSymbolRef &P = basic_symbol_iterator::operator *();
+ return static_cast<const SymbolRef&>(P);
+ }
+};
+
+/// This class is the base class for all object file types. Concrete instances
+/// of this object are created by createObjectFile, which figures out which type
+/// to create.
+class ObjectFile : public SymbolicFile {
+ virtual void anchor();
+ ObjectFile() = delete;
+ ObjectFile(const ObjectFile &other) = delete;
+
+protected:
+ ObjectFile(unsigned int Type, MemoryBufferRef Source);
+
+ const uint8_t *base() const {
+ return reinterpret_cast<const uint8_t *>(Data.getBufferStart());
+ }
+
+ // These functions are for SymbolRef to call internally. The main goal of
+ // this is to allow SymbolRef::SymbolPimpl to point directly to the symbol
+ // entry in the memory mapped object file. SymbolPimpl cannot contain any
+ // virtual functions because then it could not point into the memory mapped
+ // file.
+ //
+ // Implementations assume that the DataRefImpl is valid and has not been
+ // modified externally. It's UB otherwise.
+ friend class SymbolRef;
+ virtual ErrorOr<StringRef> getSymbolName(DataRefImpl Symb) const = 0;
+ std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const override;
+ virtual ErrorOr<uint64_t> getSymbolAddress(DataRefImpl Symb) const = 0;
+ virtual uint64_t getSymbolValueImpl(DataRefImpl Symb) const = 0;
+ virtual uint32_t getSymbolAlignment(DataRefImpl Symb) const;
+ virtual uint64_t getCommonSymbolSizeImpl(DataRefImpl Symb) const = 0;
+ virtual SymbolRef::Type getSymbolType(DataRefImpl Symb) const = 0;
+ virtual ErrorOr<section_iterator>
+ getSymbolSection(DataRefImpl Symb) const = 0;
+
+ // Same as above for SectionRef.
+ friend class SectionRef;
+ virtual void moveSectionNext(DataRefImpl &Sec) const = 0;
+ virtual std::error_code getSectionName(DataRefImpl Sec,
+ StringRef &Res) const = 0;
+ virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
+ virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
+ virtual std::error_code getSectionContents(DataRefImpl Sec,
+ StringRef &Res) const = 0;
+ virtual uint64_t getSectionAlignment(DataRefImpl Sec) const = 0;
+ virtual bool isSectionText(DataRefImpl Sec) const = 0;
+ virtual bool isSectionData(DataRefImpl Sec) const = 0;
+ virtual bool isSectionBSS(DataRefImpl Sec) const = 0;
+ // A section is 'virtual' if its contents aren't present in the object image.
+ virtual bool isSectionVirtual(DataRefImpl Sec) const = 0;
+ virtual relocation_iterator section_rel_begin(DataRefImpl Sec) const = 0;
+ virtual relocation_iterator section_rel_end(DataRefImpl Sec) const = 0;
+ virtual section_iterator getRelocatedSection(DataRefImpl Sec) const;
+
+ // Same as above for RelocationRef.
+ friend class RelocationRef;
+ virtual void moveRelocationNext(DataRefImpl &Rel) const = 0;
+ virtual uint64_t getRelocationOffset(DataRefImpl Rel) const = 0;
+ virtual symbol_iterator getRelocationSymbol(DataRefImpl Rel) const = 0;
+ virtual uint64_t getRelocationType(DataRefImpl Rel) const = 0;
+ virtual void getRelocationTypeName(DataRefImpl Rel,
+ SmallVectorImpl<char> &Result) const = 0;
+
+ uint64_t getSymbolValue(DataRefImpl Symb) const;
+
+public:
+ uint64_t getCommonSymbolSize(DataRefImpl Symb) const {
+ assert(getSymbolFlags(Symb) & SymbolRef::SF_Common);
+ return getCommonSymbolSizeImpl(Symb);
+ }
+
+ typedef iterator_range<symbol_iterator> symbol_iterator_range;
+ symbol_iterator_range symbols() const {
+ return symbol_iterator_range(symbol_begin(), symbol_end());
+ }
+
+ virtual section_iterator section_begin() const = 0;
+ virtual section_iterator section_end() const = 0;
+
+ typedef iterator_range<section_iterator> section_iterator_range;
+ section_iterator_range sections() const {
+ return section_iterator_range(section_begin(), section_end());
+ }
+
+ /// @brief The number of bytes used to represent an address in this object
+ /// file format.
+ virtual uint8_t getBytesInAddress() const = 0;
+
+ virtual StringRef getFileFormatName() const = 0;
+ virtual /* Triple::ArchType */ unsigned getArch() const = 0;
+
+ /// Returns platform-specific object flags, if any.
+ virtual std::error_code getPlatformFlags(unsigned &Result) const {
+ Result = 0;
+ return object_error::invalid_file_type;
+ }
+
+ /// True if this is a relocatable object (.o/.obj).
+ virtual bool isRelocatableObject() const = 0;
+
+  /// @returns Pointer to an ObjectFile subclass to handle this type of object.
+  /// @param ObjectPath The path to the object file. The file at ObjectPath
+  /// must be a valid object file.
+  /// @brief Create an ObjectFile from a path.
+ static ErrorOr<OwningBinary<ObjectFile>>
+ createObjectFile(StringRef ObjectPath);
+
+ static ErrorOr<std::unique_ptr<ObjectFile>>
+ createObjectFile(MemoryBufferRef Object, sys::fs::file_magic Type);
+ static ErrorOr<std::unique_ptr<ObjectFile>>
+ createObjectFile(MemoryBufferRef Object) {
+ return createObjectFile(Object, sys::fs::file_magic::unknown);
+ }
+
+ static inline bool classof(const Binary *v) {
+ return v->isObject();
+ }
+
+ static ErrorOr<std::unique_ptr<COFFObjectFile>>
+ createCOFFObjectFile(MemoryBufferRef Object);
+
+ static ErrorOr<std::unique_ptr<ObjectFile>>
+ createELFObjectFile(MemoryBufferRef Object);
+
+ static ErrorOr<std::unique_ptr<MachOObjectFile>>
+ createMachOObjectFile(MemoryBufferRef Object);
+};
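+
+// Example usage (an illustrative sketch; error handling is abbreviated):
+//
+//   ErrorOr<OwningBinary<ObjectFile>> BinOrErr =
+//       ObjectFile::createObjectFile("a.out");
+//   if (std::error_code EC = BinOrErr.getError())
+//     report_fatal_error(EC.message());
+//   ObjectFile &Obj = *BinOrErr->getBinary();
+//   for (const SectionRef &Sec : Obj.sections()) {
+//     StringRef Name;
+//     if (!Sec.getName(Name))
+//       outs() << Name << "\n";
+//   }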
+
+// Inline function definitions.
+inline SymbolRef::SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner)
+ : BasicSymbolRef(SymbolP, Owner) {}
+
+inline ErrorOr<StringRef> SymbolRef::getName() const {
+ return getObject()->getSymbolName(getRawDataRefImpl());
+}
+
+inline ErrorOr<uint64_t> SymbolRef::getAddress() const {
+ return getObject()->getSymbolAddress(getRawDataRefImpl());
+}
+
+inline uint64_t SymbolRef::getValue() const {
+ return getObject()->getSymbolValue(getRawDataRefImpl());
+}
+
+inline uint32_t SymbolRef::getAlignment() const {
+ return getObject()->getSymbolAlignment(getRawDataRefImpl());
+}
+
+inline uint64_t SymbolRef::getCommonSize() const {
+ return getObject()->getCommonSymbolSize(getRawDataRefImpl());
+}
+
+inline ErrorOr<section_iterator> SymbolRef::getSection() const {
+ return getObject()->getSymbolSection(getRawDataRefImpl());
+}
+
+inline SymbolRef::Type SymbolRef::getType() const {
+ return getObject()->getSymbolType(getRawDataRefImpl());
+}
+
+inline const ObjectFile *SymbolRef::getObject() const {
+ const SymbolicFile *O = BasicSymbolRef::getObject();
+ return cast<ObjectFile>(O);
+}
+
+/// SectionRef
+inline SectionRef::SectionRef(DataRefImpl SectionP,
+ const ObjectFile *Owner)
+ : SectionPimpl(SectionP)
+ , OwningObject(Owner) {}
+
+inline bool SectionRef::operator==(const SectionRef &Other) const {
+ return SectionPimpl == Other.SectionPimpl;
+}
+
+inline bool SectionRef::operator!=(const SectionRef &Other) const {
+ return SectionPimpl != Other.SectionPimpl;
+}
+
+inline bool SectionRef::operator<(const SectionRef &Other) const {
+ return SectionPimpl < Other.SectionPimpl;
+}
+
+inline void SectionRef::moveNext() {
+ return OwningObject->moveSectionNext(SectionPimpl);
+}
+
+inline std::error_code SectionRef::getName(StringRef &Result) const {
+ return OwningObject->getSectionName(SectionPimpl, Result);
+}
+
+inline uint64_t SectionRef::getAddress() const {
+ return OwningObject->getSectionAddress(SectionPimpl);
+}
+
+inline uint64_t SectionRef::getSize() const {
+ return OwningObject->getSectionSize(SectionPimpl);
+}
+
+inline std::error_code SectionRef::getContents(StringRef &Result) const {
+ return OwningObject->getSectionContents(SectionPimpl, Result);
+}
+
+inline uint64_t SectionRef::getAlignment() const {
+ return OwningObject->getSectionAlignment(SectionPimpl);
+}
+
+inline bool SectionRef::isText() const {
+ return OwningObject->isSectionText(SectionPimpl);
+}
+
+inline bool SectionRef::isData() const {
+ return OwningObject->isSectionData(SectionPimpl);
+}
+
+inline bool SectionRef::isBSS() const {
+ return OwningObject->isSectionBSS(SectionPimpl);
+}
+
+inline bool SectionRef::isVirtual() const {
+ return OwningObject->isSectionVirtual(SectionPimpl);
+}
+
+inline relocation_iterator SectionRef::relocation_begin() const {
+ return OwningObject->section_rel_begin(SectionPimpl);
+}
+
+inline relocation_iterator SectionRef::relocation_end() const {
+ return OwningObject->section_rel_end(SectionPimpl);
+}
+
+inline section_iterator SectionRef::getRelocatedSection() const {
+ return OwningObject->getRelocatedSection(SectionPimpl);
+}
+
+inline DataRefImpl SectionRef::getRawDataRefImpl() const {
+ return SectionPimpl;
+}
+
+inline const ObjectFile *SectionRef::getObject() const {
+ return OwningObject;
+}
+
+/// RelocationRef
+inline RelocationRef::RelocationRef(DataRefImpl RelocationP,
+ const ObjectFile *Owner)
+ : RelocationPimpl(RelocationP)
+ , OwningObject(Owner) {}
+
+inline bool RelocationRef::operator==(const RelocationRef &Other) const {
+ return RelocationPimpl == Other.RelocationPimpl;
+}
+
+inline void RelocationRef::moveNext() {
+ return OwningObject->moveRelocationNext(RelocationPimpl);
+}
+
+inline uint64_t RelocationRef::getOffset() const {
+ return OwningObject->getRelocationOffset(RelocationPimpl);
+}
+
+inline symbol_iterator RelocationRef::getSymbol() const {
+ return OwningObject->getRelocationSymbol(RelocationPimpl);
+}
+
+inline uint64_t RelocationRef::getType() const {
+ return OwningObject->getRelocationType(RelocationPimpl);
+}
+
+inline void RelocationRef::getTypeName(SmallVectorImpl<char> &Result) const {
+ return OwningObject->getRelocationTypeName(RelocationPimpl, Result);
+}
+
+inline DataRefImpl RelocationRef::getRawDataRefImpl() const {
+ return RelocationPimpl;
+}
+
+inline const ObjectFile *RelocationRef::getObject() const {
+ return OwningObject;
+}
+
+} // end namespace object
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Object/RelocVisitor.h b/contrib/llvm/include/llvm/Object/RelocVisitor.h
new file mode 100644
index 0000000..d5e4258
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/RelocVisitor.h
@@ -0,0 +1,420 @@
+//===-- RelocVisitor.h - Visitor for object file relocations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a wrapper around all the different types of relocations
+// in different file formats, such that a client can handle them in a unified
+// manner by only implementing a minimal number of functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_RELOCVISITOR_H
+#define LLVM_OBJECT_RELOCVISITOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ELF.h"
+#include "llvm/Support/MachO.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace object {
+
+struct RelocToApply {
+ // The computed value after applying the relevant relocations.
+ int64_t Value;
+
+ // The width of the value; how many bytes to touch when applying the
+ // relocation.
+ char Width;
+ RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {}
+ RelocToApply() : Value(0), Width(0) {}
+};
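+
+// Example usage (an illustrative sketch; assumes Obj is an ObjectFile& and R
+// is a RelocationRef whose resolved symbol value SymVal has already been
+// computed by the caller):
+//
+//   RelocVisitor V(Obj);
+//   RelocToApply RA = V.visit(R.getType(), R, SymVal);
+//   if (V.error())
+//     report_fatal_error("unsupported relocation type");
+//   // RA.Value is the computed value; RA.Width is the number of bytes to
+//   // patch at the relocation's offset.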
+
+/// @brief Base class for object file relocation visitors.
+class RelocVisitor {
+public:
+ explicit RelocVisitor(const ObjectFile &Obj)
+ : ObjToVisit(Obj), HasError(false) {}
+
+ // TODO: Should handle multiple applied relocations via either passing in the
+ // previously computed value or just count paired relocations as a single
+ // visit.
+ RelocToApply visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
+ if (isa<ELFObjectFileBase>(ObjToVisit))
+ return visitELF(RelocType, R, Value);
+ if (isa<COFFObjectFile>(ObjToVisit))
+ return visitCOFF(RelocType, R, Value);
+ if (isa<MachOObjectFile>(ObjToVisit))
+ return visitMachO(RelocType, R, Value);
+
+ HasError = true;
+ return RelocToApply();
+ }
+
+ bool error() { return HasError; }
+
+private:
+ const ObjectFile &ObjToVisit;
+ bool HasError;
+
+ RelocToApply visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
+ if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
+ switch (ObjToVisit.getArch()) {
+ case Triple::x86_64:
+ switch (RelocType) {
+ case llvm::ELF::R_X86_64_NONE:
+ return visitELF_X86_64_NONE(R);
+ case llvm::ELF::R_X86_64_64:
+ return visitELF_X86_64_64(R, Value);
+ case llvm::ELF::R_X86_64_PC32:
+ return visitELF_X86_64_PC32(R, Value);
+ case llvm::ELF::R_X86_64_32:
+ return visitELF_X86_64_32(R, Value);
+ case llvm::ELF::R_X86_64_32S:
+ return visitELF_X86_64_32S(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::aarch64:
+ switch (RelocType) {
+ case llvm::ELF::R_AARCH64_ABS32:
+ return visitELF_AARCH64_ABS32(R, Value);
+ case llvm::ELF::R_AARCH64_ABS64:
+ return visitELF_AARCH64_ABS64(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::mips64el:
+ case Triple::mips64:
+ switch (RelocType) {
+ case llvm::ELF::R_MIPS_32:
+ return visitELF_MIPS64_32(R, Value);
+ case llvm::ELF::R_MIPS_64:
+ return visitELF_MIPS64_64(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::ppc64le:
+ case Triple::ppc64:
+ switch (RelocType) {
+ case llvm::ELF::R_PPC64_ADDR32:
+ return visitELF_PPC64_ADDR32(R, Value);
+ case llvm::ELF::R_PPC64_ADDR64:
+ return visitELF_PPC64_ADDR64(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::systemz:
+ switch (RelocType) {
+ case llvm::ELF::R_390_32:
+ return visitELF_390_32(R, Value);
+ case llvm::ELF::R_390_64:
+ return visitELF_390_64(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::sparcv9:
+ switch (RelocType) {
+ case llvm::ELF::R_SPARC_32:
+ case llvm::ELF::R_SPARC_UA32:
+ return visitELF_SPARCV9_32(R, Value);
+ case llvm::ELF::R_SPARC_64:
+ case llvm::ELF::R_SPARC_UA64:
+ return visitELF_SPARCV9_64(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ } else if (ObjToVisit.getBytesInAddress() == 4) { // 32-bit object file
+ switch (ObjToVisit.getArch()) {
+ case Triple::x86:
+ switch (RelocType) {
+ case llvm::ELF::R_386_NONE:
+ return visitELF_386_NONE(R);
+ case llvm::ELF::R_386_32:
+ return visitELF_386_32(R, Value);
+ case llvm::ELF::R_386_PC32:
+ return visitELF_386_PC32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::ppc:
+ switch (RelocType) {
+ case llvm::ELF::R_PPC_ADDR32:
+ return visitELF_PPC_ADDR32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::arm:
+ case Triple::armeb:
+ switch (RelocType) {
+ default:
+ HasError = true;
+ return RelocToApply();
+ case llvm::ELF::R_ARM_ABS32:
+ return visitELF_ARM_ABS32(R, Value);
+ }
+ case Triple::mipsel:
+ case Triple::mips:
+ switch (RelocType) {
+ case llvm::ELF::R_MIPS_32:
+ return visitELF_MIPS_32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ case Triple::sparc:
+ switch (RelocType) {
+ case llvm::ELF::R_SPARC_32:
+ case llvm::ELF::R_SPARC_UA32:
+ return visitELF_SPARC_32(R, Value);
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ default:
+ HasError = true;
+ return RelocToApply();
+ }
+ } else {
+ report_fatal_error("Invalid word size in object file");
+ }
+ }
+
+ RelocToApply visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
+ switch (ObjToVisit.getArch()) {
+ case Triple::x86:
+ switch (RelocType) {
+ case COFF::IMAGE_REL_I386_SECREL:
+ return visitCOFF_I386_SECREL(R, Value);
+ case COFF::IMAGE_REL_I386_DIR32:
+ return visitCOFF_I386_DIR32(R, Value);
+ }
+ break;
+ case Triple::x86_64:
+ switch (RelocType) {
+ case COFF::IMAGE_REL_AMD64_SECREL:
+ return visitCOFF_AMD64_SECREL(R, Value);
+ case COFF::IMAGE_REL_AMD64_ADDR64:
+ return visitCOFF_AMD64_ADDR64(R, Value);
+ }
+ break;
+ }
+ HasError = true;
+ return RelocToApply();
+ }
+
+ RelocToApply visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
+ switch (ObjToVisit.getArch()) {
+ default: break;
+ case Triple::x86_64:
+ switch (RelocType) {
+ default: break;
+ case MachO::X86_64_RELOC_UNSIGNED:
+ return visitMACHO_X86_64_UNSIGNED(R, Value);
+ }
+ }
+ HasError = true;
+ return RelocToApply();
+ }
+
+ int64_t getELFAddend(RelocationRef R) {
+ ErrorOr<int64_t> AddendOrErr = ELFRelocationRef(R).getAddend();
+ if (std::error_code EC = AddendOrErr.getError())
+ report_fatal_error(EC.message());
+ return *AddendOrErr;
+ }
+
+ uint8_t getLengthMachO64(RelocationRef R) {
+ const MachOObjectFile *Obj = cast<MachOObjectFile>(R.getObject());
+ return Obj->getRelocationLength(R.getRawDataRefImpl());
+ }
+
+ /// Operations
+
+ /// 386-ELF
+ RelocToApply visitELF_386_NONE(RelocationRef R) {
+ return RelocToApply(0, 0);
+ }
+
+  // Ideally the addend here would be the addend stored in the data being
+  // relocated; that is not actually the case for Rel-style relocations, whose
+  // implicit addend is not read by this visitor.
+ RelocToApply visitELF_386_32(RelocationRef R, uint64_t Value) {
+ return RelocToApply(Value, 4);
+ }
+
+ RelocToApply visitELF_386_PC32(RelocationRef R, uint64_t Value) {
+ uint64_t Address = R.getOffset();
+ return RelocToApply(Value - Address, 4);
+ }
+
+ /// X86-64 ELF
+ RelocToApply visitELF_X86_64_NONE(RelocationRef R) {
+ return RelocToApply(0, 0);
+ }
+ RelocToApply visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 8);
+ }
+ RelocToApply visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint64_t Address = R.getOffset();
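+    // Worked example (illustrative): with symbol Value 0x2000, Addend -4,
+    // and the relocation applied at Address 0x1000, the stored 32-bit field
+    // becomes 0x2000 + (-4) - 0x1000 = 0xFFC.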
+ return RelocToApply(Value + Addend - Address, 4);
+ }
+ RelocToApply visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+ RelocToApply visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ int32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+
+ /// PPC64 ELF
+ RelocToApply visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+ RelocToApply visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 8);
+ }
+
+ /// PPC32 ELF
+ RelocToApply visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+
+ /// MIPS ELF
+ RelocToApply visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
+ uint32_t Res = Value & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+
+ /// MIPS64 ELF
+ RelocToApply visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint32_t Res = (Value + Addend) & 0xFFFFFFFF;
+ return RelocToApply(Res, 4);
+ }
+
+ RelocToApply visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ uint64_t Res = (Value + Addend);
+ return RelocToApply(Res, 8);
+ }
+
+ // AArch64 ELF
+ RelocToApply visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ int64_t Res = Value + Addend;
+
+ // Overflow check allows for both signed and unsigned interpretation.
+ if (Res < INT32_MIN || Res > UINT32_MAX)
+ HasError = true;
+
+ return RelocToApply(static_cast<uint32_t>(Res), 4);
+ }
+
+ RelocToApply visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 8);
+ }
+
+ // SystemZ ELF
+ RelocToApply visitELF_390_32(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ int64_t Res = Value + Addend;
+
+ // Overflow check allows for both signed and unsigned interpretation.
+ if (Res < INT32_MIN || Res > UINT32_MAX)
+ HasError = true;
+
+ return RelocToApply(static_cast<uint32_t>(Res), 4);
+ }
+
+ RelocToApply visitELF_390_64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 8);
+ }
+
+ RelocToApply visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
+ int32_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 4);
+ }
+
+ RelocToApply visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
+ int32_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 4);
+ }
+
+ RelocToApply visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
+ int64_t Addend = getELFAddend(R);
+ return RelocToApply(Value + Addend, 8);
+ }
+
+ RelocToApply visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
+ int64_t Res = Value;
+
+ // Overflow check allows for both signed and unsigned interpretation.
+ if (Res < INT32_MIN || Res > UINT32_MAX)
+ HasError = true;
+
+ return RelocToApply(static_cast<uint32_t>(Res), 4);
+ }
+
+ /// I386 COFF
+ RelocToApply visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
+ return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
+ }
+
+ RelocToApply visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
+ return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
+ }
+
+ /// AMD64 COFF
+ RelocToApply visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
+ return RelocToApply(static_cast<uint32_t>(Value), /*Width=*/4);
+ }
+
+ RelocToApply visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
+ return RelocToApply(Value, /*Width=*/8);
+ }
+
+  // X86_64 MachO
+  RelocToApply visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
+    // The Mach-O r_length field stores the log2 of the width, so a stored
+    // value of 2 means 4 bytes and 3 means 8 bytes.
+    uint8_t Length = getLengthMachO64(R);
+    Length = 1 << Length;
+    return RelocToApply(Value, Length);
+  }
+};
+
+}
+}
+#endif
diff --git a/contrib/llvm/include/llvm/Object/StackMapParser.h b/contrib/llvm/include/llvm/Object/StackMapParser.h
new file mode 100644
index 0000000..276eab6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/StackMapParser.h
@@ -0,0 +1,442 @@
+//===-------- StackMapParser.h - StackMap Parsing Support -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_STACKMAPPARSER_H
+#define LLVM_CODEGEN_STACKMAPPARSER_H
+
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Endian.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+template <support::endianness Endianness>
+class StackMapV1Parser {
+public:
+
+ template <typename AccessorT>
+ class AccessorIterator {
+ public:
+
+ AccessorIterator(AccessorT A) : A(A) {}
+ AccessorIterator& operator++() { A = A.next(); return *this; }
+ AccessorIterator operator++(int) {
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ bool operator==(const AccessorIterator &Other) {
+ return A.P == Other.A.P;
+ }
+
+ bool operator!=(const AccessorIterator &Other) { return !(*this == Other); }
+
+ AccessorT& operator*() { return A; }
+ AccessorT* operator->() { return &A; }
+
+ private:
+ AccessorT A;
+ };
+
+ /// Accessor for function records.
+ class FunctionAccessor {
+ friend class StackMapV1Parser;
+ public:
+
+ /// Get the function address.
+ uint64_t getFunctionAddress() const {
+ return read<uint64_t>(P);
+ }
+
+    /// Get the function's stack size. The record stores this as a 64-bit
+    /// field, so return it at full width rather than truncating to 32 bits.
+    uint64_t getStackSize() const {
+      return read<uint64_t>(P + sizeof(uint64_t));
+    }
+
+ private:
+ FunctionAccessor(const uint8_t *P) : P(P) {}
+
+ const static int FunctionAccessorSize = 2 * sizeof(uint64_t);
+
+ FunctionAccessor next() const {
+ return FunctionAccessor(P + FunctionAccessorSize);
+ }
+
+ const uint8_t *P;
+ };
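+
+  // Example usage (an illustrative sketch; assumes P is a StackMapV1Parser
+  // built over the contents of a stackmap section, and that the parser
+  // exposes a functions() iterator range over these records):
+  //
+  //   for (const auto &F : P.functions())
+  //     outs() << "function at " << F.getFunctionAddress()
+  //            << ", stack size " << F.getStackSize() << "\n";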
+
+ /// Accessor for constants.
+ class ConstantAccessor {
+ friend class StackMapV1Parser;
+ public:
+
+ /// Return the value of this constant.
+ uint64_t getValue() const { return read<uint64_t>(P); }
+
+ private:
+
+ ConstantAccessor(const uint8_t *P) : P(P) {}
+
+ const static int ConstantAccessorSize = sizeof(uint64_t);
+
+ ConstantAccessor next() const {
+ return ConstantAccessor(P + ConstantAccessorSize);
+ }
+
+ const uint8_t *P;
+ };
+
+ // Forward-declare RecordAccessor so we can friend it below.
+ class RecordAccessor;
+
+ enum class LocationKind : uint8_t {
+ Register = 1, Direct = 2, Indirect = 3, Constant = 4, ConstantIndex = 5
+ };
+
+
+ /// Accessor for location records.
+ class LocationAccessor {
+ friend class StackMapV1Parser;
+ friend class RecordAccessor;
+ public:
+
+ /// Get the Kind for this location.
+ LocationKind getKind() const {
+ return LocationKind(P[KindOffset]);
+ }
+
+ /// Get the Dwarf register number for this location.
+ uint16_t getDwarfRegNum() const {
+ return read<uint16_t>(P + DwarfRegNumOffset);
+ }
+
+ /// Get the small-constant for this location. (Kind must be Constant).
+ uint32_t getSmallConstant() const {
+ assert(getKind() == LocationKind::Constant && "Not a small constant.");
+ return read<uint32_t>(P + SmallConstantOffset);
+ }
+
+ /// Get the constant-index for this location. (Kind must be ConstantIndex).
+ uint32_t getConstantIndex() const {
+ assert(getKind() == LocationKind::ConstantIndex &&
+ "Not a constant-index.");
+ return read<uint32_t>(P + SmallConstantOffset);
+ }
+
+ /// Get the offset for this location. (Kind must be Direct or Indirect).
+ int32_t getOffset() const {
+ assert((getKind() == LocationKind::Direct ||
+ getKind() == LocationKind::Indirect) &&
+ "Not direct or indirect.");
+ return read<int32_t>(P + SmallConstantOffset);
+ }
+
+ private:
+
+ LocationAccessor(const uint8_t *P) : P(P) {}
+
+ LocationAccessor next() const {
+ return LocationAccessor(P + LocationAccessorSize);
+ }
+
+ static const int KindOffset = 0;
+ static const int DwarfRegNumOffset = KindOffset + sizeof(uint16_t);
+ static const int SmallConstantOffset = DwarfRegNumOffset + sizeof(uint16_t);
+ static const int LocationAccessorSize = sizeof(uint64_t);
+
+ const uint8_t *P;
+ };
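+
+  // Example usage (an illustrative sketch; assumes R is a RecordAccessor with
+  // at least one location):
+  //
+  //   LocationAccessor L = R.getLocation(0);
+  //   switch (L.getKind()) {
+  //   case LocationKind::Register:
+  //     // The value lives in the register numbered L.getDwarfRegNum().
+  //     break;
+  //   case LocationKind::Direct:
+  //     // The value is the address reg + L.getOffset().
+  //     break;
+  //   default:
+  //     break;
+  //   }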
+
+ /// Accessor for stackmap live-out fields.
+ class LiveOutAccessor {
+ friend class StackMapV1Parser;
+ friend class RecordAccessor;
+ public:
+
+ /// Get the Dwarf register number for this live-out.
+ uint16_t getDwarfRegNum() const {
+ return read<uint16_t>(P + DwarfRegNumOffset);
+ }
+
+ /// Get the size in bytes of live [sub]register.
+ unsigned getSizeInBytes() const {
+ return read<uint8_t>(P + SizeOffset);
+ }
+
+ private:
+
+ LiveOutAccessor(const uint8_t *P) : P(P) {}
+
+ LiveOutAccessor next() const {
+ return LiveOutAccessor(P + LiveOutAccessorSize);
+ }
+
+ static const int DwarfRegNumOffset = 0;
+ static const int SizeOffset =
+ DwarfRegNumOffset + sizeof(uint16_t) + sizeof(uint8_t);
+ static const int LiveOutAccessorSize = sizeof(uint32_t);
+
+ const uint8_t *P;
+ };
+
+ /// Accessor for stackmap records.
+ class RecordAccessor {
+ friend class StackMapV1Parser;
+ public:
+
+ typedef AccessorIterator<LocationAccessor> location_iterator;
+ typedef AccessorIterator<LiveOutAccessor> liveout_iterator;
+
+ /// Get the patchpoint/stackmap ID for this record.
+ uint64_t getID() const {
+ return read<uint64_t>(P + PatchpointIDOffset);
+ }
+
+ /// Get the instruction offset (from the start of the containing function)
+ /// for this record.
+ uint32_t getInstructionOffset() const {
+ return read<uint32_t>(P + InstructionOffsetOffset);
+ }
+
+ /// Get the number of locations contained in this record.
+ uint16_t getNumLocations() const {
+ return read<uint16_t>(P + NumLocationsOffset);
+ }
+
+ /// Get the location with the given index.
+ LocationAccessor getLocation(unsigned LocationIndex) const {
+ unsigned LocationOffset =
+ LocationListOffset + LocationIndex * LocationSize;
+ return LocationAccessor(P + LocationOffset);
+ }
+
+ /// Begin iterator for locations.
+ location_iterator location_begin() const {
+ return location_iterator(getLocation(0));
+ }
+
+ /// End iterator for locations.
+ location_iterator location_end() const {
+ return location_iterator(getLocation(getNumLocations()));
+ }
+
+ /// Iterator range for locations.
+ iterator_range<location_iterator> locations() const {
+ return make_range(location_begin(), location_end());
+ }
+
+ /// Get the number of liveouts contained in this record.
+ uint16_t getNumLiveOuts() const {
+ return read<uint16_t>(P + getNumLiveOutsOffset());
+ }
+
+ /// Get the live-out with the given index.
+ LiveOutAccessor getLiveOut(unsigned LiveOutIndex) const {
+ unsigned LiveOutOffset =
+ getNumLiveOutsOffset() + sizeof(uint16_t) + LiveOutIndex * LiveOutSize;
+ return LiveOutAccessor(P + LiveOutOffset);
+ }
+
+ /// Begin iterator for live-outs.
+ liveout_iterator liveouts_begin() const {
+ return liveout_iterator(getLiveOut(0));
+ }
+
+ /// End iterator for live-outs.
+ liveout_iterator liveouts_end() const {
+ return liveout_iterator(getLiveOut(getNumLiveOuts()));
+ }
+
+ /// Iterator range for live-outs.
+ iterator_range<liveout_iterator> liveouts() const {
+ return make_range(liveouts_begin(), liveouts_end());
+ }
+
+ private:
+
+ RecordAccessor(const uint8_t *P) : P(P) {}
+
+ unsigned getNumLiveOutsOffset() const {
+ return LocationListOffset + LocationSize * getNumLocations() +
+ sizeof(uint16_t);
+ }
+
+ unsigned getSizeInBytes() const {
+ unsigned RecordSize =
+ getNumLiveOutsOffset() + sizeof(uint16_t) + getNumLiveOuts() * LiveOutSize;
+ return (RecordSize + 7) & ~0x7;
+ }
+
+ RecordAccessor next() const {
+ return RecordAccessor(P + getSizeInBytes());
+ }
+
+ static const unsigned PatchpointIDOffset = 0;
+ static const unsigned InstructionOffsetOffset =
+ PatchpointIDOffset + sizeof(uint64_t);
+ static const unsigned NumLocationsOffset =
+ InstructionOffsetOffset + sizeof(uint32_t) + sizeof(uint16_t);
+ static const unsigned LocationListOffset =
+ NumLocationsOffset + sizeof(uint16_t);
+ static const unsigned LocationSize = sizeof(uint64_t);
+ static const unsigned LiveOutSize = sizeof(uint32_t);
+
+ const uint8_t *P;
+ };
+
+ /// Construct a parser for a version-1 stackmap. StackMap data will be read
+ /// from the given array.
+ StackMapV1Parser(ArrayRef<uint8_t> StackMapSection)
+ : StackMapSection(StackMapSection) {
+ ConstantsListOffset = FunctionListOffset + getNumFunctions() * FunctionSize;
+
+ assert(StackMapSection[0] == 1 &&
+ "StackMapV1Parser can only parse version 1 stackmaps");
+
+ unsigned CurrentRecordOffset =
+ ConstantsListOffset + getNumConstants() * ConstantSize;
+
+ for (unsigned I = 0, E = getNumRecords(); I != E; ++I) {
+ StackMapRecordOffsets.push_back(CurrentRecordOffset);
+ CurrentRecordOffset +=
+ RecordAccessor(&StackMapSection[CurrentRecordOffset]).getSizeInBytes();
+ }
+ }
+
+ typedef AccessorIterator<FunctionAccessor> function_iterator;
+ typedef AccessorIterator<ConstantAccessor> constant_iterator;
+ typedef AccessorIterator<RecordAccessor> record_iterator;
+
+ /// Get the version number of this stackmap. (Always returns 1).
+ unsigned getVersion() const { return 1; }
+
+ /// Get the number of functions in the stack map.
+ uint32_t getNumFunctions() const {
+ return read<uint32_t>(&StackMapSection[NumFunctionsOffset]);
+ }
+
+ /// Get the number of large constants in the stack map.
+ uint32_t getNumConstants() const {
+ return read<uint32_t>(&StackMapSection[NumConstantsOffset]);
+ }
+
+ /// Get the number of stackmap records in the stackmap.
+ uint32_t getNumRecords() const {
+ return read<uint32_t>(&StackMapSection[NumRecordsOffset]);
+ }
+
+ /// Return a FunctionAccessor for the given function index.
+ FunctionAccessor getFunction(unsigned FunctionIndex) const {
+ return FunctionAccessor(StackMapSection.data() +
+ getFunctionOffset(FunctionIndex));
+ }
+
+ /// Begin iterator for functions.
+ function_iterator functions_begin() const {
+ return function_iterator(getFunction(0));
+ }
+
+ /// End iterator for functions.
+ function_iterator functions_end() const {
+ return function_iterator(
+ FunctionAccessor(StackMapSection.data() +
+ getFunctionOffset(getNumFunctions())));
+ }
+
+ /// Iterator range for functions.
+ iterator_range<function_iterator> functions() const {
+ return make_range(functions_begin(), functions_end());
+ }
+
+ /// Return the large constant at the given index.
+ ConstantAccessor getConstant(unsigned ConstantIndex) const {
+ return ConstantAccessor(StackMapSection.data() +
+ getConstantOffset(ConstantIndex));
+ }
+
+ /// Begin iterator for constants.
+ constant_iterator constants_begin() const {
+ return constant_iterator(getConstant(0));
+ }
+
+ /// End iterator for constants.
+ constant_iterator constants_end() const {
+ return constant_iterator(
+ ConstantAccessor(StackMapSection.data() +
+ getConstantOffset(getNumConstants())));
+ }
+
+ /// Iterator range for constants.
+ iterator_range<constant_iterator> constants() const {
+ return make_range(constants_begin(), constants_end());
+ }
+
+ /// Return a RecordAccessor for the given record index.
+ RecordAccessor getRecord(unsigned RecordIndex) const {
+ std::size_t RecordOffset = StackMapRecordOffsets[RecordIndex];
+ return RecordAccessor(StackMapSection.data() + RecordOffset);
+ }
+
+ /// Begin iterator for records.
+ record_iterator records_begin() const {
+ if (getNumRecords() == 0)
+ return record_iterator(RecordAccessor(nullptr));
+ return record_iterator(getRecord(0));
+ }
+
+ /// End iterator for records.
+ record_iterator records_end() const {
+ // Records need to be handled specially, since we cache their start
+ // addresses: we can't just compute the 1-past-the-end address; we have to
+ // look at the last record and use the 'next' method.
+ if (getNumRecords() == 0)
+ return record_iterator(RecordAccessor(nullptr));
+ return record_iterator(getRecord(getNumRecords() - 1).next());
+ }
+
+ /// Iterator range for records.
+ iterator_range<record_iterator> records() const {
+ return make_range(records_begin(), records_end());
+ }
+
+private:
+
+ template <typename T>
+ static T read(const uint8_t *P) {
+ return support::endian::read<T, Endianness, 1>(P);
+ }
+
+ static const unsigned HeaderOffset = 0;
+ static const unsigned NumFunctionsOffset = HeaderOffset + sizeof(uint32_t);
+ static const unsigned NumConstantsOffset = NumFunctionsOffset + sizeof(uint32_t);
+ static const unsigned NumRecordsOffset = NumConstantsOffset + sizeof(uint32_t);
+ static const unsigned FunctionListOffset = NumRecordsOffset + sizeof(uint32_t);
+
+ static const unsigned FunctionSize = 2 * sizeof(uint64_t);
+ static const unsigned ConstantSize = sizeof(uint64_t);
+
+ std::size_t getFunctionOffset(unsigned FunctionIndex) const {
+ return FunctionListOffset + FunctionIndex * FunctionSize;
+ }
+
+ std::size_t getConstantOffset(unsigned ConstantIndex) const {
+ return ConstantsListOffset + ConstantIndex * ConstantSize;
+ }
+
+ ArrayRef<uint8_t> StackMapSection;
+ unsigned ConstantsListOffset;
+ std::vector<unsigned> StackMapRecordOffsets;
+};
+
+}
+
+#endif
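A minimal usage sketch of the parser above, assuming the raw bytes of a version-1 __llvm_stackmaps section are already in hand (extracting them from an object file is omitted) and that the header is reachable as llvm/Object/StackMapParser.h; the function and variable names are illustrative:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/StackMapParser.h" // include path assumed
#include "llvm/Support/raw_ostream.h"
#include <cstdint>

// Walk every function and record in a little-endian version-1 stackmap.
void dumpStackMap(llvm::ArrayRef<uint8_t> StackMapBytes) {
  typedef llvm::StackMapV1Parser<llvm::support::little> ParserT;
  ParserT Parser(StackMapBytes);

  for (const auto &F : Parser.functions())
    llvm::outs() << "fn @" << F.getFunctionAddress()
                 << " stack=" << F.getStackSize() << "\n";

  for (const auto &R : Parser.records()) {
    llvm::outs() << "record " << R.getID() << ": "
                 << R.getNumLocations() << " locations, "
                 << R.getNumLiveOuts() << " live-outs\n";
    for (const auto &L : R.locations())
      if (L.getKind() == ParserT::LocationKind::Constant)
        llvm::outs() << "  small constant " << L.getSmallConstant() << "\n";
  }
}

Because records are variable-length, the constructor walks the section once and caches each record's offset; that is also why records_end() steps off the last record with next() instead of computing a one-past-the-end address.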
diff --git a/contrib/llvm/include/llvm/Object/SymbolSize.h b/contrib/llvm/include/llvm/Object/SymbolSize.h
new file mode 100644
index 0000000..f2ce70f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/SymbolSize.h
@@ -0,0 +1,23 @@
+//===- SymbolSize.h ---------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_SYMBOLSIZE_H
+#define LLVM_OBJECT_SYMBOLSIZE_H
+
+#include "llvm/Object/ObjectFile.h"
+
+namespace llvm {
+namespace object {
+std::vector<std::pair<SymbolRef, uint64_t>>
+computeSymbolSizes(const ObjectFile &O);
+}
+} // namespace llvm
+
+#endif
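A short sketch of how the single entry point above is typically consumed; printName() comes from BasicSymbolRef (declared in SymbolicFile.h, next in this diff), and error handling is elided:

#include "llvm/Object/ObjectFile.h"
#include "llvm/Object/SymbolSize.h"
#include "llvm/Support/raw_ostream.h"

// Print "<size> <name>" for every symbol computeSymbolSizes() sized.
void printSymbolSizes(const llvm::object::ObjectFile &Obj) {
  for (const auto &SymSize : llvm::object::computeSymbolSizes(Obj)) {
    llvm::outs() << SymSize.second << " ";
    SymSize.first.printName(llvm::outs()); // error_code ignored here
    llvm::outs() << "\n";
  }
}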
diff --git a/contrib/llvm/include/llvm/Object/SymbolicFile.h b/contrib/llvm/include/llvm/Object/SymbolicFile.h
new file mode 100644
index 0000000..0c5b381
--- /dev/null
+++ b/contrib/llvm/include/llvm/Object/SymbolicFile.h
@@ -0,0 +1,207 @@
+//===- SymbolicFile.h - Interface that only provides symbols ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SymbolicFile interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECT_SYMBOLICFILE_H
+#define LLVM_OBJECT_SYMBOLICFILE_H
+
+#include "llvm/Object/Binary.h"
+#include "llvm/Support/Format.h"
+
+namespace llvm {
+namespace object {
+
+union DataRefImpl {
+ // This entire union should probably be a
+ // char[max(8, sizeof(uintptr_t))] and require the impl to cast.
+ struct {
+ uint32_t a, b;
+ } d;
+ uintptr_t p;
+ DataRefImpl() { std::memset(this, 0, sizeof(DataRefImpl)); }
+};
+
+template <typename OStream>
+OStream& operator<<(OStream &OS, const DataRefImpl &D) {
+ OS << "(" << format("0x%08" PRIx64, (uint64_t)D.p) << " ("
+ << format("0x%08x", D.d.a) << ", " << format("0x%08x", D.d.b) << "))";
+ return OS;
+}
+
+inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) {
+ // Check bitwise identical. This is the only legal way to compare a union w/o
+ // knowing which member is in use.
+ return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0;
+}
+
+inline bool operator!=(const DataRefImpl &a, const DataRefImpl &b) {
+ return !operator==(a, b);
+}
+
+inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) {
+ // Check bitwise identical. This is the only legal way to compare a union w/o
+ // knowing which member is in use.
+ return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0;
+}
+
+template <class content_type>
+class content_iterator
+ : public std::iterator<std::forward_iterator_tag, content_type> {
+ content_type Current;
+
+public:
+ content_iterator(content_type symb) : Current(symb) {}
+
+ const content_type *operator->() const { return &Current; }
+
+ const content_type &operator*() const { return Current; }
+
+ bool operator==(const content_iterator &other) const {
+ return Current == other.Current;
+ }
+
+ bool operator!=(const content_iterator &other) const {
+ return !(*this == other);
+ }
+
+ content_iterator &operator++() { // preincrement
+ Current.moveNext();
+ return *this;
+ }
+};
+
+class SymbolicFile;
+
+/// This is a value type class that represents a single symbol in the list of
+/// symbols in the object file.
+class BasicSymbolRef {
+ DataRefImpl SymbolPimpl;
+ const SymbolicFile *OwningObject;
+
+public:
+ // FIXME: should we add a SF_Text?
+ enum Flags : unsigned {
+ SF_None = 0,
+ SF_Undefined = 1U << 0, // Symbol is defined in another object file
+ SF_Global = 1U << 1, // Global symbol
+ SF_Weak = 1U << 2, // Weak symbol
+ SF_Absolute = 1U << 3, // Absolute symbol
+ SF_Common = 1U << 4, // Symbol has common linkage
+ SF_Indirect = 1U << 5, // Symbol is an alias to another symbol
+ SF_Exported = 1U << 6, // Symbol is visible to other DSOs
+ SF_FormatSpecific = 1U << 7, // Specific to the object file format
+ // (e.g. section symbols)
+ SF_Thumb = 1U << 8, // Thumb symbol in a 32-bit ARM binary
+ SF_Hidden = 1U << 9, // Symbol has hidden visibility
+ SF_Const = 1U << 10, // Symbol value is constant
+ };
+
+ BasicSymbolRef() : OwningObject(nullptr) { }
+ BasicSymbolRef(DataRefImpl SymbolP, const SymbolicFile *Owner);
+
+ bool operator==(const BasicSymbolRef &Other) const;
+ bool operator<(const BasicSymbolRef &Other) const;
+
+ void moveNext();
+
+ std::error_code printName(raw_ostream &OS) const;
+
+ /// Get symbol flags (bitwise OR of SymbolRef::Flags)
+ uint32_t getFlags() const;
+
+ DataRefImpl getRawDataRefImpl() const;
+ const SymbolicFile *getObject() const;
+};
+
+typedef content_iterator<BasicSymbolRef> basic_symbol_iterator;
+
+class SymbolicFile : public Binary {
+public:
+ ~SymbolicFile() override;
+ SymbolicFile(unsigned int Type, MemoryBufferRef Source);
+
+ // virtual interface.
+ virtual void moveSymbolNext(DataRefImpl &Symb) const = 0;
+
+ virtual std::error_code printSymbolName(raw_ostream &OS,
+ DataRefImpl Symb) const = 0;
+
+ virtual uint32_t getSymbolFlags(DataRefImpl Symb) const = 0;
+
+ virtual basic_symbol_iterator symbol_begin_impl() const = 0;
+
+ virtual basic_symbol_iterator symbol_end_impl() const = 0;
+
+ // convenience wrappers.
+ basic_symbol_iterator symbol_begin() const {
+ return symbol_begin_impl();
+ }
+ basic_symbol_iterator symbol_end() const {
+ return symbol_end_impl();
+ }
+ typedef iterator_range<basic_symbol_iterator> basic_symbol_iterator_range;
+ basic_symbol_iterator_range symbols() const {
+ return basic_symbol_iterator_range(symbol_begin(), symbol_end());
+ }
+
+ // construction aux.
+ static ErrorOr<std::unique_ptr<SymbolicFile>>
+ createSymbolicFile(MemoryBufferRef Object, sys::fs::file_magic Type,
+ LLVMContext *Context);
+
+ static ErrorOr<std::unique_ptr<SymbolicFile>>
+ createSymbolicFile(MemoryBufferRef Object) {
+ return createSymbolicFile(Object, sys::fs::file_magic::unknown, nullptr);
+ }
+ static ErrorOr<OwningBinary<SymbolicFile>>
+ createSymbolicFile(StringRef ObjectPath);
+
+ static inline bool classof(const Binary *v) {
+ return v->isSymbolic();
+ }
+};
+
+inline BasicSymbolRef::BasicSymbolRef(DataRefImpl SymbolP,
+ const SymbolicFile *Owner)
+ : SymbolPimpl(SymbolP), OwningObject(Owner) {}
+
+inline bool BasicSymbolRef::operator==(const BasicSymbolRef &Other) const {
+ return SymbolPimpl == Other.SymbolPimpl;
+}
+
+inline bool BasicSymbolRef::operator<(const BasicSymbolRef &Other) const {
+ return SymbolPimpl < Other.SymbolPimpl;
+}
+
+inline void BasicSymbolRef::moveNext() {
+ return OwningObject->moveSymbolNext(SymbolPimpl);
+}
+
+inline std::error_code BasicSymbolRef::printName(raw_ostream &OS) const {
+ return OwningObject->printSymbolName(OS, SymbolPimpl);
+}
+
+inline uint32_t BasicSymbolRef::getFlags() const {
+ return OwningObject->getSymbolFlags(SymbolPimpl);
+}
+
+inline DataRefImpl BasicSymbolRef::getRawDataRefImpl() const {
+ return SymbolPimpl;
+}
+
+inline const SymbolicFile *BasicSymbolRef::getObject() const {
+ return OwningObject;
+}
+
+}
+}
+
+#endif
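A minimal sketch of the convenience entry points above: open a path as any symbol-bearing format and list its symbols, skipping format-internal ones. The coarse error handling is illustrative only:

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Support/raw_ostream.h"

int listSymbols(llvm::StringRef Path) {
  using namespace llvm::object;
  llvm::ErrorOr<OwningBinary<SymbolicFile>> FileOrErr =
      SymbolicFile::createSymbolicFile(Path);
  if (std::error_code EC = FileOrErr.getError()) {
    llvm::errs() << Path << ": " << EC.message() << "\n";
    return 1;
  }
  SymbolicFile &File = *FileOrErr->getBinary();
  for (const BasicSymbolRef &Sym : File.symbols()) {
    // Skip symbols that exist only for the format's own bookkeeping.
    if (Sym.getFlags() & BasicSymbolRef::SF_FormatSpecific)
      continue;
    Sym.printName(llvm::outs());
    llvm::outs() << "\n";
  }
  return 0;
}

createSymbolicFile(StringRef) returns an OwningBinary so the backing MemoryBuffer stays alive exactly as long as the SymbolicFile that references it.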
diff --git a/contrib/llvm/include/llvm/Option/Arg.h b/contrib/llvm/include/llvm/Option/Arg.h
new file mode 100644
index 0000000..99d3296
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/Arg.h
@@ -0,0 +1,127 @@
+//===--- Arg.h - Parsed Argument Classes ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines the llvm::Arg class for parsed arguments.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_ARG_H
+#define LLVM_OPTION_ARG_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+
+namespace llvm {
+namespace opt {
+class ArgList;
+
+/// \brief A concrete instance of a particular driver option.
+///
+/// The Arg class encodes just enough information to be able to
+/// derive the argument values efficiently.
+class Arg {
+ Arg(const Arg &) = delete;
+ void operator=(const Arg &) = delete;
+
+private:
+ /// \brief The option this argument is an instance of.
+ const Option Opt;
+
+ /// \brief The argument this argument was derived from (during tool chain
+ /// argument translation), if any.
+ const Arg *BaseArg;
+
+ /// \brief How this instance of the option was spelled.
+ StringRef Spelling;
+
+ /// \brief The index at which this argument appears in the containing
+ /// ArgList.
+ unsigned Index;
+
+ /// \brief Was this argument used to effect compilation?
+ ///
+ /// This is used for generating "argument unused" diagnostics.
+ mutable unsigned Claimed : 1;
+
+ /// \brief Does this argument own its values?
+ mutable unsigned OwnsValues : 1;
+
+ /// \brief The argument values, as C strings.
+ SmallVector<const char *, 2> Values;
+
+public:
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
+ const Arg *BaseArg = nullptr);
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
+ const char *Value0, const Arg *BaseArg = nullptr);
+ Arg(const Option Opt, StringRef Spelling, unsigned Index,
+ const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
+ ~Arg();
+
+ const Option &getOption() const { return Opt; }
+ StringRef getSpelling() const { return Spelling; }
+ unsigned getIndex() const { return Index; }
+
+ /// \brief Return the base argument which generated this arg.
+ ///
+ /// This is either the argument itself or the argument it was
+ /// derived from during tool chain specific argument translation.
+ const Arg &getBaseArg() const {
+ return BaseArg ? *BaseArg : *this;
+ }
+ void setBaseArg(const Arg *BaseArg) { this->BaseArg = BaseArg; }
+
+ bool getOwnsValues() const { return OwnsValues; }
+ void setOwnsValues(bool Value) const { OwnsValues = Value; }
+
+ bool isClaimed() const { return getBaseArg().Claimed; }
+
+ /// \brief Set the Arg claimed bit.
+ void claim() const { getBaseArg().Claimed = true; }
+
+ unsigned getNumValues() const { return Values.size(); }
+ const char *getValue(unsigned N = 0) const {
+ return Values[N];
+ }
+
+ SmallVectorImpl<const char *> &getValues() { return Values; }
+ const SmallVectorImpl<const char *> &getValues() const { return Values; }
+
+ bool containsValue(StringRef Value) const {
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ if (Values[i] == Value)
+ return true;
+ return false;
+ }
+
+ /// \brief Append the argument onto the given array as strings.
+ void render(const ArgList &Args, ArgStringList &Output) const;
+
+ /// \brief Append the argument, rendered as an input, onto the given
+ /// array as strings.
+ ///
+ /// The distinction is that some options only render their values
+ /// when rendered as an input (e.g., Xlinker).
+ void renderAsInput(const ArgList &Args, ArgStringList &Output) const;
+
+ void print(raw_ostream &O) const;
+ void dump() const;
+
+ /// \brief Return a formatted version of the argument and
+ /// its values, for debugging and diagnostics.
+ std::string getAsString(const ArgList &Args) const;
+};
+
+} // end namespace opt
+} // end namespace llvm
+
+#endif
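The Claimed bit above is what drives "argument unused" diagnostics. A small sketch, assuming a tool has already consumed the arguments it understands and now reports the leftovers:

#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/raw_ostream.h"

// Warn about every argument nothing claimed during processing.
void warnUnusedArgs(const llvm::opt::ArgList &Args) {
  for (const llvm::opt::Arg *A : Args)
    if (!A->isClaimed())
      llvm::errs() << "warning: argument unused: "
                   << A->getAsString(Args) << "\n";
}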
diff --git a/contrib/llvm/include/llvm/Option/ArgList.h b/contrib/llvm/include/llvm/Option/ArgList.h
new file mode 100644
index 0000000..89771b5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/ArgList.h
@@ -0,0 +1,465 @@
+//===--- ArgList.h - Argument List Management -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_ARGLIST_H
+#define LLVM_OPTION_ARGLIST_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/Option.h"
+#include <list>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace llvm {
+namespace opt {
+class ArgList;
+class Option;
+
+/// arg_iterator - Iterates through arguments stored inside an ArgList.
+class arg_iterator {
+ /// The current argument.
+ SmallVectorImpl<Arg*>::const_iterator Current;
+
+ /// The argument list we are iterating over.
+ const ArgList &Args;
+
+ /// Optional filters on the arguments which will be matched. Most clients
+ /// should never want to iterate over arguments without filters, so we won't
+ /// bother to factor this into two separate iterator implementations.
+ //
+ // FIXME: Make efficient; the idea is to provide efficient iteration over
+ // all arguments which match a particular id and then just provide an
+ // iterator combinator which takes multiple iterators which can be
+ // efficiently compared and returns them in order.
+ OptSpecifier Id0, Id1, Id2;
+
+ void SkipToNextArg();
+
+public:
+ typedef Arg * const * value_type;
+ typedef Arg * const & reference;
+ typedef Arg * const * pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ arg_iterator(SmallVectorImpl<Arg *>::const_iterator it, const ArgList &Args,
+ OptSpecifier Id0 = 0U, OptSpecifier Id1 = 0U,
+ OptSpecifier Id2 = 0U)
+ : Current(it), Args(Args), Id0(Id0), Id1(Id1), Id2(Id2) {
+ SkipToNextArg();
+ }
+
+ operator const Arg*() { return *Current; }
+ reference operator*() const { return *Current; }
+ pointer operator->() const { return Current; }
+
+ arg_iterator &operator++() {
+ ++Current;
+ SkipToNextArg();
+ return *this;
+ }
+
+ arg_iterator operator++(int) {
+ arg_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(arg_iterator LHS, arg_iterator RHS) {
+ return LHS.Current == RHS.Current;
+ }
+ friend bool operator!=(arg_iterator LHS, arg_iterator RHS) {
+ return !(LHS == RHS);
+ }
+};
+
+/// ArgList - Ordered collection of driver arguments.
+///
+/// The ArgList class manages a list of Arg instances as well as
+/// auxiliary data and convenience methods to allow Tools to quickly
+/// check for the presence of Arg instances for a particular Option
+/// and to iterate over groups of arguments.
+class ArgList {
+public:
+ typedef SmallVector<Arg*, 16> arglist_type;
+ typedef arglist_type::iterator iterator;
+ typedef arglist_type::const_iterator const_iterator;
+ typedef arglist_type::reverse_iterator reverse_iterator;
+ typedef arglist_type::const_reverse_iterator const_reverse_iterator;
+
+private:
+ /// The internal list of arguments.
+ arglist_type Args;
+
+protected:
+ // Make the default special members protected so they won't be used to slice
+ // derived objects, but can still be used by derived objects to implement
+ // their own special members.
+ ArgList() = default;
+ // Explicit move operations to ensure the container is cleared post-move
+ // otherwise it could lead to a double-delete in the case of moving of an
+ // InputArgList which deletes the contents of the container. If we could fix
+ // up the ownership here (delegate storage/ownership to the derived class so
+ // it can be a container of unique_ptr) this would be simpler.
+ ArgList(ArgList &&RHS) : Args(std::move(RHS.Args)) { RHS.Args.clear(); }
+ ArgList &operator=(ArgList &&RHS) {
+ Args = std::move(RHS.Args);
+ RHS.Args.clear();
+ return *this;
+ }
+ // Protect the dtor to ensure this type is never destroyed polymorphically.
+ ~ArgList() = default;
+
+public:
+
+ /// @name Arg Access
+ /// @{
+
+ /// append - Append \p A to the arg list.
+ void append(Arg *A);
+
+ arglist_type &getArgs() { return Args; }
+ const arglist_type &getArgs() const { return Args; }
+
+ unsigned size() const { return Args.size(); }
+
+ /// @}
+ /// @name Arg Iteration
+ /// @{
+
+ iterator begin() { return Args.begin(); }
+ iterator end() { return Args.end(); }
+
+ reverse_iterator rbegin() { return Args.rbegin(); }
+ reverse_iterator rend() { return Args.rend(); }
+
+ const_iterator begin() const { return Args.begin(); }
+ const_iterator end() const { return Args.end(); }
+
+ const_reverse_iterator rbegin() const { return Args.rbegin(); }
+ const_reverse_iterator rend() const { return Args.rend(); }
+
+ arg_iterator filtered_begin(OptSpecifier Id0 = 0U, OptSpecifier Id1 = 0U,
+ OptSpecifier Id2 = 0U) const {
+ return arg_iterator(Args.begin(), *this, Id0, Id1, Id2);
+ }
+ arg_iterator filtered_end() const {
+ return arg_iterator(Args.end(), *this);
+ }
+
+ iterator_range<arg_iterator> filtered(OptSpecifier Id0 = 0U,
+ OptSpecifier Id1 = 0U,
+ OptSpecifier Id2 = 0U) const {
+ return make_range(filtered_begin(Id0, Id1, Id2), filtered_end());
+ }
+
+ /// @}
+ /// @name Arg Removal
+ /// @{
+
+ /// eraseArg - Remove any option matching \p Id.
+ void eraseArg(OptSpecifier Id);
+
+ /// @}
+ /// @name Arg Access
+ /// @{
+
+ /// hasArg - Does the arg list contain any option matching \p Id.
+ ///
+ /// The matched argument is claimed if it exists, unless the NoClaim
+ /// variant is used.
+ bool hasArgNoClaim(OptSpecifier Id) const {
+ return getLastArgNoClaim(Id) != nullptr;
+ }
+ bool hasArg(OptSpecifier Id) const {
+ return getLastArg(Id) != nullptr;
+ }
+ bool hasArg(OptSpecifier Id0, OptSpecifier Id1) const {
+ return getLastArg(Id0, Id1) != nullptr;
+ }
+ bool hasArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const {
+ return getLastArg(Id0, Id1, Id2) != nullptr;
+ }
+
+ /// getLastArg - Return the last argument matching \p Id, or null.
+ ///
+ /// The matched argument is claimed if it exists, unless the NoClaim
+ /// variant is used.
+ Arg *getLastArgNoClaim(OptSpecifier Id) const;
+ Arg *getLastArgNoClaim(OptSpecifier Id0, OptSpecifier Id1) const;
+ Arg *getLastArgNoClaim(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2) const;
+ Arg *getLastArgNoClaim(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3) const;
+ Arg *getLastArg(OptSpecifier Id) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4, OptSpecifier Id5,
+ OptSpecifier Id6, OptSpecifier Id7) const;
+
+ /// getArgString - Return the input argument string at \p Index.
+ virtual const char *getArgString(unsigned Index) const = 0;
+
+ /// getNumInputArgStrings - Return the number of original argument strings,
+ /// which are guaranteed to be the first strings in the argument string
+ /// list.
+ virtual unsigned getNumInputArgStrings() const = 0;
+
+ /// @}
+ /// @name Argument Lookup Utilities
+ /// @{
+
+ /// getLastArgValue - Return the value of the last argument, or a default.
+ StringRef getLastArgValue(OptSpecifier Id,
+ StringRef Default = "") const;
+
+ /// getAllArgValues - Get the values of all instances of the given argument
+ /// as strings.
+ std::vector<std::string> getAllArgValues(OptSpecifier Id) const;
+
+ /// @}
+ /// @name Translation Utilities
+ /// @{
+
+ /// hasFlag - Given an option \p Pos and its negative form \p Neg, return
+ /// true if the option is present, false if the negation is present, and
+ /// \p Default if neither option is given. If both the option and its
+ /// negation are present, the last one wins.
+ bool hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default=true) const;
+
+ /// hasFlag - Given an option \p Pos, an alias \p PosAlias and its negative
+ /// form \p Neg, return true if the option or its alias is present, false if
+ /// the negation is present, and \p Default if none of the options are
+ /// given. If multiple options are present, the last one wins.
+ bool hasFlag(OptSpecifier Pos, OptSpecifier PosAlias, OptSpecifier Neg,
+ bool Default = true) const;
+
+ /// AddLastArg - Render only the last argument matching \p Id0, if present.
+ void AddLastArg(ArgStringList &Output, OptSpecifier Id0) const;
+ void AddLastArg(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1) const;
+
+ /// AddAllArgs - Render all arguments matching any of the given ids.
+ void AddAllArgs(ArgStringList &Output, ArrayRef<OptSpecifier> Ids) const;
+
+ /// AddAllArgs - Render all arguments matching the given ids.
+ void AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+ /// AddAllArgValues - Render the argument values of all arguments
+ /// matching the given ids.
+ void AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+ /// AddAllArgsTranslated - Render all the arguments matching the
+ /// given ids, but forced to separate args and using the provided
+ /// name instead of the first option value.
+ ///
+ /// \param Joined - If true, render the argument as joined with
+ /// the option specifier.
+ void AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
+ const char *Translation,
+ bool Joined = false) const;
+
+ /// ClaimAllArgs - Claim all arguments which match the given
+ /// option id.
+ void ClaimAllArgs(OptSpecifier Id0) const;
+
+ /// ClaimAllArgs - Claim all arguments.
+ ///
+ void ClaimAllArgs() const;
+
+ /// @}
+ /// @name Arg Synthesis
+ /// @{
+
+ /// Construct a constant string pointer whose
+ /// lifetime will match that of the ArgList.
+ virtual const char *MakeArgStringRef(StringRef Str) const = 0;
+ const char *MakeArgString(const Twine &Str) const {
+ SmallString<256> Buf;
+ return MakeArgStringRef(Str.toStringRef(Buf));
+ }
+
+ /// \brief Create an arg string for (\p LHS + \p RHS), reusing the
+ /// string at \p Index if possible.
+ const char *GetOrMakeJoinedArgString(unsigned Index, StringRef LHS,
+ StringRef RHS) const;
+
+ void print(raw_ostream &O) const;
+ void dump() const;
+
+ /// @}
+};
+
+class InputArgList final : public ArgList {
+private:
+ /// List of argument strings used by the contained Args.
+ ///
+ /// This is mutable since we treat the ArgList as being the list
+ /// of Args, and allow routines to add new strings (to have a
+ /// convenient place to store the memory) via MakeIndex.
+ mutable ArgStringList ArgStrings;
+
+ /// Strings for synthesized arguments.
+ ///
+ /// This is mutable since we treat the ArgList as being the list
+ /// of Args, and allow routines to add new strings (to have a
+ /// convenient place to store the memory) via MakeIndex.
+ mutable std::list<std::string> SynthesizedStrings;
+
+ /// The number of original input argument strings.
+ unsigned NumInputArgStrings;
+
+ /// Release allocated arguments.
+ void releaseMemory();
+
+public:
+ InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);
+ InputArgList(InputArgList &&RHS)
+ : ArgList(std::move(RHS)), ArgStrings(std::move(RHS.ArgStrings)),
+ SynthesizedStrings(std::move(RHS.SynthesizedStrings)),
+ NumInputArgStrings(RHS.NumInputArgStrings) {}
+ InputArgList &operator=(InputArgList &&RHS) {
+ releaseMemory();
+ ArgList::operator=(std::move(RHS));
+ ArgStrings = std::move(RHS.ArgStrings);
+ SynthesizedStrings = std::move(RHS.SynthesizedStrings);
+ NumInputArgStrings = RHS.NumInputArgStrings;
+ return *this;
+ }
+ ~InputArgList() { releaseMemory(); }
+
+ const char *getArgString(unsigned Index) const override {
+ return ArgStrings[Index];
+ }
+
+ unsigned getNumInputArgStrings() const override {
+ return NumInputArgStrings;
+ }
+
+ /// @name Arg Synthesis
+ /// @{
+
+public:
+ /// MakeIndex - Get an index for the given string(s).
+ unsigned MakeIndex(StringRef String0) const;
+ unsigned MakeIndex(StringRef String0, StringRef String1) const;
+
+ using ArgList::MakeArgString;
+ const char *MakeArgStringRef(StringRef Str) const override;
+
+ /// @}
+};
+
+/// DerivedArgList - An ordered collection of driver arguments,
+/// whose storage may be in another argument list.
+class DerivedArgList final : public ArgList {
+ const InputArgList &BaseArgs;
+
+ /// The list of arguments we synthesized.
+ mutable SmallVector<std::unique_ptr<Arg>, 16> SynthesizedArgs;
+
+public:
+ /// Construct a new derived arg list from \p BaseArgs.
+ DerivedArgList(const InputArgList &BaseArgs);
+
+ const char *getArgString(unsigned Index) const override {
+ return BaseArgs.getArgString(Index);
+ }
+
+ unsigned getNumInputArgStrings() const override {
+ return BaseArgs.getNumInputArgStrings();
+ }
+
+ const InputArgList &getBaseArgs() const {
+ return BaseArgs;
+ }
+
+ /// @name Arg Synthesis
+ /// @{
+
+ /// AddSynthesizedArg - Add an argument to the list of synthesized arguments
+ /// (to be freed).
+ void AddSynthesizedArg(Arg *A);
+
+ using ArgList::MakeArgString;
+ const char *MakeArgStringRef(StringRef Str) const override;
+
+ /// AddFlagArg - Construct a new FlagArg for the given option \p Id and
+ /// append it to the argument list.
+ void AddFlagArg(const Arg *BaseArg, const Option Opt) {
+ append(MakeFlagArg(BaseArg, Opt));
+ }
+
+ /// AddPositionalArg - Construct a new Positional arg for the given option
+ /// \p Id, with the provided \p Value and append it to the argument
+ /// list.
+ void AddPositionalArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) {
+ append(MakePositionalArg(BaseArg, Opt, Value));
+ }
+
+ /// AddSeparateArg - Construct a new Separate arg for the given option
+ /// \p Id, with the provided \p Value, and append it to the argument
+ /// list.
+ void AddSeparateArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) {
+ append(MakeSeparateArg(BaseArg, Opt, Value));
+ }
+
+ /// AddJoinedArg - Construct a new Joined arg for the given option
+ /// \p Id, with the provided \p Value, and append it to the argument list.
+ void AddJoinedArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) {
+ append(MakeJoinedArg(BaseArg, Opt, Value));
+ }
+
+ /// MakeFlagArg - Construct a new FlagArg for the given option \p Id.
+ Arg *MakeFlagArg(const Arg *BaseArg, const Option Opt) const;
+
+ /// MakePositionalArg - Construct a new Positional arg for the
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakePositionalArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const;
+
+ /// MakeSeparateArg - Construct a new Separate arg for the
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakeSeparateArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const;
+
+ /// MakeJoinedArg - Construct a new Joined arg for the
+ /// given option \p Id, with the provided \p Value.
+ Arg *MakeJoinedArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const;
+
+ /// @}
+};
+
+} // end namespace opt
+} // end namespace llvm
+
+#endif
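A sketch of the typical query pattern against these classes. OPT_verbose, OPT_no_verbose, OPT_output and OPT_input are hypothetical option IDs of the kind a generated Options.inc would define; any real tool supplies its own:

#include "llvm/Option/ArgList.h"
#include "llvm/Support/raw_ostream.h"

enum HypotheticalID : unsigned {
  OPT_INVALID = 0, OPT_verbose, OPT_no_verbose, OPT_output, OPT_input
};

void configure(const llvm::opt::ArgList &Args) {
  // Last of -verbose/-no-verbose wins; false if neither is given.
  bool Verbose = Args.hasFlag(OPT_verbose, OPT_no_verbose, false);

  // Value of the last -o style option, or a fallback.
  llvm::StringRef Output = Args.getLastArgValue(OPT_output, "a.out");

  // Iterate only over the inputs, claiming each as we go.
  for (const llvm::opt::Arg *A : Args.filtered(OPT_input)) {
    A->claim();
    llvm::outs() << "input: " << A->getValue() << "\n";
  }

  if (Verbose)
    llvm::outs() << "output: " << Output << "\n";
}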
diff --git a/contrib/llvm/include/llvm/Option/OptParser.td b/contrib/llvm/include/llvm/Option/OptParser.td
new file mode 100644
index 0000000..dbf240d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/OptParser.td
@@ -0,0 +1,132 @@
+//===--- OptParser.td - Common Option Parsing Interfaces ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interfaces used by the option parsing TableGen
+// backend.
+//
+//===----------------------------------------------------------------------===//
+
+// Define the kinds of options.
+
+class OptionKind<string name, int precedence = 0, bit sentinel = 0> {
+ string Name = name;
+ // The kind precedence, kinds with lower precedence are matched first.
+ int Precedence = precedence;
+ // Indicate a sentinel option.
+ bit Sentinel = sentinel;
+}
+
+// An option group.
+def KIND_GROUP : OptionKind<"Group">;
+// The input option kind.
+def KIND_INPUT : OptionKind<"Input", 1, 1>;
+// The unknown option kind.
+def KIND_UNKNOWN : OptionKind<"Unknown", 2, 1>;
+// A flag with no values.
+def KIND_FLAG : OptionKind<"Flag">;
+// An option which prefixes its (single) value.
+def KIND_JOINED : OptionKind<"Joined", 1>;
+// An option which is followed by its value.
+def KIND_SEPARATE : OptionKind<"Separate">;
+// An option followed by its values, which are separated by commas.
+def KIND_COMMAJOINED : OptionKind<"CommaJoined">;
+// An option which takes multiple (separate) arguments.
+def KIND_MULTIARG : OptionKind<"MultiArg">;
+// An option which is either joined to its (non-empty) value, or followed by its
+// value.
+def KIND_JOINED_OR_SEPARATE : OptionKind<"JoinedOrSeparate">;
+// An option which is both joined to its (first) value, and followed by its
+// (second) value.
+def KIND_JOINED_AND_SEPARATE : OptionKind<"JoinedAndSeparate">;
+// An option which consumes all remaining arguments if there are any.
+def KIND_REMAINING_ARGS : OptionKind<"RemainingArgs">;
+
+// Define the option flags.
+
+class OptionFlag {}
+
+// HelpHidden - The option should not be displayed in --help, even if it has
+// help text. Clients *can* use this in conjunction with the OptTable::PrintHelp
+// arguments to implement hidden help groups.
+def HelpHidden : OptionFlag;
+
+// RenderAsInput - The option should not render the name when rendered as an
+// input (i.e., the option is rendered as values).
+def RenderAsInput : OptionFlag;
+
+// RenderJoined - The option should be rendered joined, even if separate (only
+// sensible on single value separate options).
+def RenderJoined : OptionFlag;
+
+// RenderSeparate - The option should be rendered separately, even if joined
+// (only sensible on joined options).
+def RenderSeparate : OptionFlag;
+
+// Define the option group class.
+
+class OptionGroup<string name> {
+ string EnumName = ?; // Uses the def name if undefined.
+ string Name = name;
+ string HelpText = ?;
+ OptionGroup Group = ?;
+ list<OptionFlag> Flags = [];
+}
+
+// Define the option class.
+
+class Option<list<string> prefixes, string name, OptionKind kind> {
+ string EnumName = ?; // Uses the def name if undefined.
+ list<string> Prefixes = prefixes;
+ string Name = name;
+ OptionKind Kind = kind;
+ // Used by MultiArg option kind.
+ int NumArgs = 0;
+ string HelpText = ?;
+ string MetaVarName = ?;
+ list<OptionFlag> Flags = [];
+ OptionGroup Group = ?;
+ Option Alias = ?;
+ list<string> AliasArgs = [];
+}
+
+// Helpers for defining options.
+
+class Flag<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_FLAG>;
+class Joined<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED>;
+class Separate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_SEPARATE>;
+class CommaJoined<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_COMMAJOINED>;
+class MultiArg<list<string> prefixes, string name, int numargs>
+ : Option<prefixes, name, KIND_MULTIARG> {
+ int NumArgs = numargs;
+}
+class JoinedOrSeparate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED_OR_SEPARATE>;
+class JoinedAndSeparate<list<string> prefixes, string name>
+ : Option<prefixes, name, KIND_JOINED_AND_SEPARATE>;
+
+// Mix-ins for adding optional attributes.
+
+class Alias<Option alias> { Option Alias = alias; }
+class AliasArgs<list<string> aliasargs> { list<string> AliasArgs = aliasargs; }
+class EnumName<string name> { string EnumName = name; }
+class Flags<list<OptionFlag> flags> { list<OptionFlag> Flags = flags; }
+class Group<OptionGroup group> { OptionGroup Group = group; }
+class HelpText<string text> { string HelpText = text; }
+class MetaVarName<string name> { string MetaVarName = name; }
+
+// Predefined options.
+
+// FIXME: Have generator validate that these appear in correct position (and
+// aren't duplicated).
+def INPUT : Option<[], "<input>", KIND_INPUT>;
+def UNKNOWN : Option<[], "<unknown>", KIND_UNKNOWN>;
diff --git a/contrib/llvm/include/llvm/Option/OptSpecifier.h b/contrib/llvm/include/llvm/Option/OptSpecifier.h
new file mode 100644
index 0000000..0b2aaae
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/OptSpecifier.h
@@ -0,0 +1,41 @@
+//===--- OptSpecifier.h - Option Specifiers ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTSPECIFIER_H
+#define LLVM_OPTION_OPTSPECIFIER_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+namespace opt {
+ class Option;
+
+ /// OptSpecifier - Wrapper class for abstracting references to option IDs.
+ class OptSpecifier {
+ unsigned ID;
+
+ private:
+ explicit OptSpecifier(bool) = delete;
+
+ public:
+ OptSpecifier() : ID(0) {}
+ /*implicit*/ OptSpecifier(unsigned ID) : ID(ID) {}
+ /*implicit*/ OptSpecifier(const Option *Opt);
+
+ bool isValid() const { return ID != 0; }
+
+ unsigned getID() const { return ID; }
+
+ bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
+ bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
+ };
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Option/OptTable.h b/contrib/llvm/include/llvm/Option/OptTable.h
new file mode 100644
index 0000000..390e527
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/OptTable.h
@@ -0,0 +1,174 @@
+//===--- OptTable.h - Option Table ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTTABLE_H
+#define LLVM_OPTION_OPTTABLE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Option/OptSpecifier.h"
+
+namespace llvm {
+class raw_ostream;
+namespace opt {
+class Arg;
+class ArgList;
+class InputArgList;
+class Option;
+
+/// \brief Provide access to the Option info table.
+///
+/// The OptTable class provides a layer of indirection which allows Option
+/// instance to be created lazily. In the common case, only a few options will
+/// be needed at runtime; the OptTable class maintains enough information to
+/// parse command lines without instantiating Options, while letting other
+/// parts of the driver still use Option instances where convenient.
+class OptTable {
+public:
+ /// \brief Entry for a single option instance in the option data table.
+ struct Info {
+ /// A null-terminated array of prefix strings to apply to the name while
+ /// matching.
+ const char *const *Prefixes;
+ const char *Name;
+ const char *HelpText;
+ const char *MetaVar;
+ unsigned ID;
+ unsigned char Kind;
+ unsigned char Param;
+ unsigned short Flags;
+ unsigned short GroupID;
+ unsigned short AliasID;
+ const char *AliasArgs;
+ };
+
+private:
+ /// \brief The static option information table.
+ ArrayRef<Info> OptionInfos;
+ bool IgnoreCase;
+
+ unsigned TheInputOptionID;
+ unsigned TheUnknownOptionID;
+
+ /// The index of the first option which can be parsed (i.e., is not a
+ /// special option like 'input' or 'unknown', and is not an option group).
+ unsigned FirstSearchableIndex;
+
+ /// The union of all option prefixes. If an argument does not begin with
+ /// one of these, it is an input.
+ StringSet<> PrefixesUnion;
+ std::string PrefixChars;
+
+private:
+ const Info &getInfo(OptSpecifier Opt) const {
+ unsigned id = Opt.getID();
+ assert(id > 0 && id - 1 < getNumOptions() && "Invalid Option ID.");
+ return OptionInfos[id - 1];
+ }
+
+protected:
+ OptTable(ArrayRef<Info> OptionInfos, bool IgnoreCase = false);
+
+public:
+ ~OptTable();
+
+ /// \brief Return the total number of option classes.
+ unsigned getNumOptions() const { return OptionInfos.size(); }
+
+ /// \brief Get the given Opt's Option instance, lazily creating it
+ /// if necessary.
+ ///
+ /// \return The option, or null for the INVALID option id.
+ const Option getOption(OptSpecifier Opt) const;
+
+ /// \brief Lookup the name of the given option.
+ const char *getOptionName(OptSpecifier id) const {
+ return getInfo(id).Name;
+ }
+
+ /// \brief Get the kind of the given option.
+ unsigned getOptionKind(OptSpecifier id) const {
+ return getInfo(id).Kind;
+ }
+
+ /// \brief Get the group id for the given option.
+ unsigned getOptionGroupID(OptSpecifier id) const {
+ return getInfo(id).GroupID;
+ }
+
+ /// \brief Get the help text to use to describe this option.
+ const char *getOptionHelpText(OptSpecifier id) const {
+ return getInfo(id).HelpText;
+ }
+
+ /// \brief Get the meta-variable name to use when describing
+ /// this option's values in the help text.
+ const char *getOptionMetaVar(OptSpecifier id) const {
+ return getInfo(id).MetaVar;
+ }
+
+ /// \brief Parse a single argument; returning the new argument and
+ /// updating Index.
+ ///
+ /// \param [in,out] Index - The current parsing position in the argument
+ /// string list; on return this will be the index of the next argument
+ /// string to parse.
+ /// \param [in] FlagsToInclude - Only parse options with any of these flags.
+ /// Zero is the default which includes all flags.
+ /// \param [in] FlagsToExclude - Don't parse options with this flag. Zero
+ /// is the default and means exclude nothing.
+ ///
+ /// \return The parsed argument, or 0 if the argument is missing values
+ /// (in which case Index still points at the conceptual next argument string
+ /// to parse).
+ Arg *ParseOneArg(const ArgList &Args, unsigned &Index,
+ unsigned FlagsToInclude = 0,
+ unsigned FlagsToExclude = 0) const;
+
+ /// \brief Parse a list of arguments into an InputArgList.
+ ///
+ /// The resulting InputArgList will reference the strings in [\p ArgBegin,
+ /// \p ArgEnd), and their lifetime should extend past that of the returned
+ /// InputArgList.
+ ///
+ /// The only error that can occur in this routine is if an argument is
+ /// missing values; in this case \p MissingArgCount will be non-zero.
+ ///
+ /// \param MissingArgIndex - On error, the index of the option which could
+ /// not be parsed.
+ /// \param MissingArgCount - On error, the number of missing options.
+ /// \param FlagsToInclude - Only parse options with any of these flags.
+ /// Zero is the default which includes all flags.
+ /// \param FlagsToExclude - Don't parse options with this flag. Zero
+ /// is the default and means exclude nothing.
+ /// \return An InputArgList; on error this will contain all the options
+ /// which could be parsed.
+ InputArgList ParseArgs(ArrayRef<const char *> Args, unsigned &MissingArgIndex,
+ unsigned &MissingArgCount, unsigned FlagsToInclude = 0,
+ unsigned FlagsToExclude = 0) const;
+
+ /// \brief Render the help text for an option table.
+ ///
+ /// \param OS - The stream to write the help text to.
+ /// \param Name - The name to use in the usage line.
+ /// \param Title - The title to use in the usage line.
+ /// \param FlagsToInclude - If non-zero, only include options with any
+ /// of these flags set.
+ /// \param FlagsToExclude - Exclude options with any of these flags set.
+ void PrintHelp(raw_ostream &OS, const char *Name,
+ const char *Title, unsigned FlagsToInclude,
+ unsigned FlagsToExclude) const;
+
+ void PrintHelp(raw_ostream &OS, const char *Name,
+ const char *Title, bool ShowHidden = false) const;
+};
+} // end namespace opt
+} // end namespace llvm
+
+#endif
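A hand-written stand-in (hypothetical, mirroring the shape of what the OptParser TableGen backend would emit) for a two-option table, wired through ParseArgs() and PrintHelp():

#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::opt;

enum ID { OPT_INVALID = 0, OPT_input, OPT_unknown, OPT_help, OPT_o };

static const char *const DashPrefix[] = {"-", nullptr};

// Fields: Prefixes, Name, HelpText, MetaVar, ID, Kind, Param, Flags,
// GroupID, AliasID, AliasArgs (see OptTable::Info above). Searchable
// option names are kept sorted ("help" before "o"), as generated tables
// guarantee.
static const OptTable::Info InfoTable[] = {
  {nullptr, "<input>", nullptr, nullptr, OPT_input,
   Option::InputClass, 0, 0, 0, 0, nullptr},
  {nullptr, "<unknown>", nullptr, nullptr, OPT_unknown,
   Option::UnknownClass, 0, 0, 0, 0, nullptr},
  {DashPrefix, "help", "Print help", nullptr, OPT_help,
   Option::FlagClass, 0, 0, 0, 0, nullptr},
  {DashPrefix, "o", "Output file", "<file>", OPT_o,
   Option::SeparateClass, 0, 0, 0, 0, nullptr},
};

struct DemoOptTable : OptTable {
  DemoOptTable() : OptTable(InfoTable) {}
};

// Callers would pass e.g. makeArrayRef(argv + 1, argc - 1).
int runDemo(ArrayRef<const char *> Argv) {
  DemoOptTable Tbl;
  unsigned MissingIndex, MissingCount;
  InputArgList Args = Tbl.ParseArgs(Argv, MissingIndex, MissingCount);
  if (MissingCount) {
    errs() << "missing value after " << Args.getArgString(MissingIndex)
           << "\n";
    return 1;
  }
  if (Args.hasArg(OPT_help)) {
    Tbl.PrintHelp(outs(), "demo", "Demo tool");
    return 0;
  }
  outs() << "output: " << Args.getLastArgValue(OPT_o, "a.out") << "\n";
  return 0;
}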
diff --git a/contrib/llvm/include/llvm/Option/Option.h b/contrib/llvm/include/llvm/Option/Option.h
new file mode 100644
index 0000000..494987a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Option/Option.h
@@ -0,0 +1,205 @@
+//===--- Option.h - Abstract Driver Options ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OPTION_OPTION_H
+#define LLVM_OPTION_OPTION_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+namespace opt {
+class Arg;
+class ArgList;
+/// ArgStringList - Type used for constructing argv lists for subprocesses.
+typedef SmallVector<const char*, 16> ArgStringList;
+
+/// Base flags for all options. Custom flags may be added after.
+enum DriverFlag {
+ HelpHidden = (1 << 0),
+ RenderAsInput = (1 << 1),
+ RenderJoined = (1 << 2),
+ RenderSeparate = (1 << 3)
+};
+
+/// Option - Abstract representation for a single form of driver
+/// argument.
+///
+/// An Option class represents a form of option that the driver
+/// takes, for example how many arguments the option has and how
+/// they can be provided. Individual option instances store
+/// additional information about what group the option is a member
+/// of (if any), if the option is an alias, and a number of
+/// flags. At runtime the driver parses the command line into
+/// concrete Arg instances, each of which corresponds to a
+/// particular Option instance.
+class Option {
+public:
+ enum OptionClass {
+ GroupClass = 0,
+ InputClass,
+ UnknownClass,
+ FlagClass,
+ JoinedClass,
+ SeparateClass,
+ RemainingArgsClass,
+ CommaJoinedClass,
+ MultiArgClass,
+ JoinedOrSeparateClass,
+ JoinedAndSeparateClass
+ };
+
+ enum RenderStyleKind {
+ RenderCommaJoinedStyle,
+ RenderJoinedStyle,
+ RenderSeparateStyle,
+ RenderValuesStyle
+ };
+
+protected:
+ const OptTable::Info *Info;
+ const OptTable *Owner;
+
+public:
+ Option(const OptTable::Info *Info, const OptTable *Owner);
+
+ bool isValid() const {
+ return Info != nullptr;
+ }
+
+ unsigned getID() const {
+ assert(Info && "Must have a valid info!");
+ return Info->ID;
+ }
+
+ OptionClass getKind() const {
+ assert(Info && "Must have a valid info!");
+ return OptionClass(Info->Kind);
+ }
+
+ /// \brief Get the name of this option without any prefix.
+ StringRef getName() const {
+ assert(Info && "Must have a valid info!");
+ return Info->Name;
+ }
+
+ const Option getGroup() const {
+ assert(Info && "Must have a valid info!");
+ assert(Owner && "Must have a valid owner!");
+ return Owner->getOption(Info->GroupID);
+ }
+
+ const Option getAlias() const {
+ assert(Info && "Must have a valid info!");
+ assert(Owner && "Must have a valid owner!");
+ return Owner->getOption(Info->AliasID);
+ }
+
+ /// \brief Get the alias arguments as a \0 separated list.
+ /// E.g. ["foo", "bar"] would be returned as "foo\0bar\0".
+ const char *getAliasArgs() const {
+ assert(Info && "Must have a valid info!");
+ assert((!Info->AliasArgs || Info->AliasArgs[0] != 0) &&
+ "AliasArgs should be either 0 or non-empty.");
+
+ return Info->AliasArgs;
+ }
+
+ /// \brief Get the default prefix for this option.
+ StringRef getPrefix() const {
+ const char *Prefix = *Info->Prefixes;
+ return Prefix ? Prefix : StringRef();
+ }
+
+ /// \brief Get the name of this option with the default prefix.
+ std::string getPrefixedName() const {
+ std::string Ret = getPrefix();
+ Ret += getName();
+ return Ret;
+ }
+
+ unsigned getNumArgs() const { return Info->Param; }
+
+ bool hasNoOptAsInput() const { return Info->Flags & RenderAsInput;}
+
+ RenderStyleKind getRenderStyle() const {
+ if (Info->Flags & RenderJoined)
+ return RenderJoinedStyle;
+ if (Info->Flags & RenderSeparate)
+ return RenderSeparateStyle;
+ switch (getKind()) {
+ case GroupClass:
+ case InputClass:
+ case UnknownClass:
+ return RenderValuesStyle;
+ case JoinedClass:
+ case JoinedAndSeparateClass:
+ return RenderJoinedStyle;
+ case CommaJoinedClass:
+ return RenderCommaJoinedStyle;
+ case FlagClass:
+ case SeparateClass:
+ case MultiArgClass:
+ case JoinedOrSeparateClass:
+ case RemainingArgsClass:
+ return RenderSeparateStyle;
+ }
+ llvm_unreachable("Unexpected kind!");
+ }
+
+ /// Test if this option has the flag \a Val.
+ bool hasFlag(unsigned Val) const {
+ return Info->Flags & Val;
+ }
+
+ /// getUnaliasedOption - Return the final option this option
+ /// aliases (itself, if the option has no alias).
+ const Option getUnaliasedOption() const {
+ const Option Alias = getAlias();
+ if (Alias.isValid()) return Alias.getUnaliasedOption();
+ return *this;
+ }
+
+ /// getRenderName - Return the name to use when rendering this
+ /// option.
+ StringRef getRenderName() const {
+ return getUnaliasedOption().getName();
+ }
+
+ /// matches - Predicate for whether this option is part of the
+ /// given option (which may be a group).
+ ///
+ /// Note that matches against options which are an alias should never be
+ /// done -- aliases do not participate in matching and so such a query will
+ /// always be false.
+ bool matches(OptSpecifier ID) const;
+
+ /// accept - Potentially accept the current argument, returning a
+ /// new Arg instance, or 0 if the option does not accept this
+ /// argument (or the argument is missing values).
+ ///
+ /// If the option accepts the current argument, accept() sets
+ /// Index to the position where argument parsing should resume
+ /// (even if the argument is missing values).
+ ///
+ /// \param ArgSize The number of bytes taken up by the matched Option prefix
+ /// and name. This is used to determine where joined values
+ /// start.
+ Arg *accept(const ArgList &Args, unsigned &Index, unsigned ArgSize) const;
+
+ void print(raw_ostream &O) const;
+ void dump() const;
+};
+
+} // end namespace opt
+} // end namespace llvm
+
+#endif
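A small sketch of the alias-chasing described above. It assumes a normal prefixed option; getPrefix() dereferences the prefix table, so the prefix-less INPUT/UNKNOWN sentinels must not be passed through it:

#include "llvm/Option/Arg.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/raw_ostream.h"

// Report the canonical spelling for whatever option an Arg matched:
// getUnaliasedOption() follows the alias chain to the final option.
void reportCanonicalSpelling(const llvm::opt::Arg &A) {
  const llvm::opt::Option Opt = A.getOption();
  llvm::outs() << A.getSpelling() << " -> "
               << Opt.getUnaliasedOption().getPrefixedName() << "\n";
}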
diff --git a/contrib/llvm/include/llvm/Pass.h b/contrib/llvm/include/llvm/Pass.h
new file mode 100644
index 0000000..3c4d838
--- /dev/null
+++ b/contrib/llvm/include/llvm/Pass.h
@@ -0,0 +1,380 @@
+//===- llvm/Pass.h - Base class for Passes ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a base class that indicates that a specified class is a
+// transformation pass implementation.
+//
+// Passes are designed this way so that it is possible to run passes in a
+// cache- and organizationally-optimal order without having to specify it at
+// the front end. This allows arbitrary passes to be strung together and
+// executed as efficiently as possible.
+//
+// Passes should extend one of the classes below, depending on the guarantees
+// that they can make about what will be modified as they run. For example,
+// most global optimizations should derive from FunctionPass because they do
+// not add or delete functions; they operate on the internals of a function.
+//
+// Note that this file #includes PassSupport.h and PassAnalysisSupport.h (at the
+// bottom), so the APIs exposed by these files are also automatically available
+// to all users of this file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASS_H
+#define LLVM_PASS_H
+
+#include "llvm/Support/Compiler.h"
+#include <string>
+
+namespace llvm {
+
+class BasicBlock;
+class Function;
+class Module;
+class AnalysisUsage;
+class PassInfo;
+class ImmutablePass;
+class PMStack;
+class AnalysisResolver;
+class PMDataManager;
+class raw_ostream;
+class StringRef;
+
+// AnalysisID - Use the PassInfo to identify a pass...
+typedef const void* AnalysisID;
+
+/// Different types of internal pass managers. External pass managers
+/// (PassManager and FunctionPassManager) are not represented here.
+/// Ordering of pass manager types is important here.
+enum PassManagerType {
+ PMT_Unknown = 0,
+ PMT_ModulePassManager = 1, ///< MPPassManager
+ PMT_CallGraphPassManager, ///< CGPassManager
+ PMT_FunctionPassManager, ///< FPPassManager
+ PMT_LoopPassManager, ///< LPPassManager
+ PMT_RegionPassManager, ///< RGPassManager
+ PMT_BasicBlockPassManager, ///< BBPassManager
+ PMT_Last
+};
+
+// Different types of passes.
+enum PassKind {
+ PT_BasicBlock,
+ PT_Region,
+ PT_Loop,
+ PT_Function,
+ PT_CallGraphSCC,
+ PT_Module,
+ PT_PassManager
+};
+
+//===----------------------------------------------------------------------===//
+/// Pass interface - Implemented by all 'passes'. Subclass this if you are an
+/// interprocedural optimization or you do not fit into any of the more
+/// constrained passes described below.
+///
+class Pass {
+ AnalysisResolver *Resolver; // Used to resolve analysis
+ const void *PassID;
+ PassKind Kind;
+ void operator=(const Pass&) = delete;
+ Pass(const Pass &) = delete;
+
+public:
+ explicit Pass(PassKind K, char &pid)
+ : Resolver(nullptr), PassID(&pid), Kind(K) { }
+ virtual ~Pass();
+
+
+ PassKind getPassKind() const { return Kind; }
+
+  /// getPassName - Return a nice clean name for a pass. This is usually
+  /// implemented in terms of the name that is registered by one of the
+  /// Registration templates, but can be overridden directly.
+ ///
+ virtual const char *getPassName() const;
+
+ /// getPassID - Return the PassID number that corresponds to this pass.
+ AnalysisID getPassID() const {
+ return PassID;
+ }
+
+ /// doInitialization - Virtual method overridden by subclasses to do
+ /// any necessary initialization before any pass is run.
+ ///
+ virtual bool doInitialization(Module &) { return false; }
+
+  /// doFinalization - Virtual method overridden by subclasses to do any
+ /// necessary clean up after all passes have run.
+ ///
+ virtual bool doFinalization(Module &) { return false; }
+
+ /// print - Print out the internal state of the pass. This is called by
+ /// Analyze to print out the contents of an analysis. Otherwise it is not
+ /// necessary to implement this method. Beware that the module pointer MAY be
+  /// null. This automatically forwards to a virtual function that does not
+  /// provide the Module*, so an analysis that does not need it can simply
+  /// ignore it.
+ ///
+ virtual void print(raw_ostream &O, const Module *M) const;
+ void dump() const; // dump - Print to stderr.
+
+ /// createPrinterPass - Get a Pass appropriate to print the IR this
+ /// pass operates on (Module, Function or MachineFunction).
+ virtual Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const = 0;
+
+ /// Each pass is responsible for assigning a pass manager to itself.
+  /// PMS is the stack of available pass managers.
+ virtual void assignPassManager(PMStack &,
+ PassManagerType) {}
+ /// Check if available pass managers are suitable for this pass or not.
+ virtual void preparePassManager(PMStack &);
+
+ /// Return what kind of Pass Manager can manage this pass.
+ virtual PassManagerType getPotentialPassManagerType() const;
+
+ // Access AnalysisResolver
+ void setResolver(AnalysisResolver *AR);
+ AnalysisResolver *getResolver() const { return Resolver; }
+
+  /// getAnalysisUsage - This function should be overridden by passes that need
+  /// analysis information to do their job. If a pass specifies, via this
+  /// function, that it uses a particular analysis result, it can then use the
+  /// getAnalysis<AnalysisType>() function, below.
+ ///
+ virtual void getAnalysisUsage(AnalysisUsage &) const;
+
+ /// releaseMemory() - This member can be implemented by a pass if it wants to
+ /// be able to release its memory when it is no longer needed. The default
+ /// behavior of passes is to hold onto memory for the entire duration of their
+ /// lifetime (which is the entire compile time). For pipelined passes, this
+ /// is not a big deal because that memory gets recycled every time the pass is
+ /// invoked on another program unit. For IP passes, it is more important to
+ /// free memory when it is unused.
+ ///
+ /// Optionally implement this function to release pass memory when it is no
+ /// longer used.
+ ///
+ virtual void releaseMemory();
+
+ /// getAdjustedAnalysisPointer - This method is used when a pass implements
+ /// an analysis interface through multiple inheritance. If needed, it should
+ /// override this to adjust the this pointer as needed for the specified pass
+ /// info.
+ virtual void *getAdjustedAnalysisPointer(AnalysisID ID);
+ virtual ImmutablePass *getAsImmutablePass();
+ virtual PMDataManager *getAsPMDataManager();
+
+  /// verifyAnalysis() - This member can be implemented by an analysis pass to
+  /// check the state of its analysis information.
+ virtual void verifyAnalysis() const;
+
+ // dumpPassStructure - Implement the -debug-passes=PassStructure option
+ virtual void dumpPassStructure(unsigned Offset = 0);
+
+ // lookupPassInfo - Return the pass info object for the specified pass class,
+ // or null if it is not known.
+ static const PassInfo *lookupPassInfo(const void *TI);
+
+ // lookupPassInfo - Return the pass info object for the pass with the given
+ // argument string, or null if it is not known.
+ static const PassInfo *lookupPassInfo(StringRef Arg);
+
+  // createPass - Create an object for the specified pass class,
+ // or null if it is not known.
+ static Pass *createPass(AnalysisID ID);
+
+ /// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
+ /// get analysis information that might be around, for example to update it.
+ /// This is different than getAnalysis in that it can fail (if the analysis
+ /// results haven't been computed), so should only be used if you can handle
+ /// the case when the analysis is not available. This method is often used by
+ /// transformation APIs to update analysis results for a pass automatically as
+ /// the transform is performed.
+ ///
+ template<typename AnalysisType> AnalysisType *
+ getAnalysisIfAvailable() const; // Defined in PassAnalysisSupport.h
+
+ /// mustPreserveAnalysisID - This method serves the same function as
+ /// getAnalysisIfAvailable, but works if you just have an AnalysisID. This
+ /// obviously cannot give you a properly typed instance of the class if you
+ /// don't have the class name available (use getAnalysisIfAvailable if you
+ /// do), but it can tell you if you need to preserve the pass at least.
+ ///
+ bool mustPreserveAnalysisID(char &AID) const;
+
+ /// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+ /// to the analysis information that they claim to use by overriding the
+ /// getAnalysisUsage function.
+ ///
+ template<typename AnalysisType>
+ AnalysisType &getAnalysis() const; // Defined in PassAnalysisSupport.h
+
+ template<typename AnalysisType>
+ AnalysisType &getAnalysis(Function &F); // Defined in PassAnalysisSupport.h
+
+ template<typename AnalysisType>
+ AnalysisType &getAnalysisID(AnalysisID PI) const;
+
+ template<typename AnalysisType>
+ AnalysisType &getAnalysisID(AnalysisID PI, Function &F);
+};
+
+
+//===----------------------------------------------------------------------===//
+/// ModulePass class - This class is used to implement unstructured
+/// interprocedural optimizations and analyses. ModulePasses may do anything
+/// they want to the program.
+///
+class ModulePass : public Pass {
+public:
+ /// createPrinterPass - Get a module printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+  /// runOnModule - Virtual method overridden by subclasses to process the module
+ /// being operated on.
+ virtual bool runOnModule(Module &M) = 0;
+
+ void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+ /// Return what kind of Pass Manager can manage this pass.
+ PassManagerType getPotentialPassManagerType() const override;
+
+ explicit ModulePass(char &pid) : Pass(PT_Module, pid) {}
+ // Force out-of-line virtual method.
+ ~ModulePass() override;
+};
+
+
+//===----------------------------------------------------------------------===//
+/// ImmutablePass class - This class is used to provide information that does
+/// not need to be run. This is useful for things like target information and
+/// "basic" versions of AnalysisGroups.
+///
+class ImmutablePass : public ModulePass {
+public:
+  /// initializePass - This method may be overridden by immutable passes to
+  /// allow them to perform various initialization actions they require. This
+  /// is primarily because an ImmutablePass can "require" another
+  /// ImmutablePass, and if it does, the overriding version of initializePass
+  /// may get access to these passes with getAnalysis<>.
+ ///
+ virtual void initializePass();
+
+ ImmutablePass *getAsImmutablePass() override { return this; }
+
+ /// ImmutablePasses are never run.
+ ///
+ bool runOnModule(Module &) override { return false; }
+
+ explicit ImmutablePass(char &pid)
+ : ModulePass(pid) {}
+
+ // Force out-of-line virtual method.
+ ~ImmutablePass() override;
+};
+
+//===----------------------------------------------------------------------===//
+/// FunctionPass class - This class is used to implement most global
+/// optimizations. Optimizations should subclass this class if they meet the
+/// following constraints:
+///
+/// 1. Optimizations are organized globally, i.e., a function at a time
+/// 2. Optimizing a function does not cause the addition or removal of any
+/// functions in the module
+///
+class FunctionPass : public Pass {
+public:
+ explicit FunctionPass(char &pid) : Pass(PT_Function, pid) {}
+
+ /// createPrinterPass - Get a function printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+  /// runOnFunction - Virtual method overridden by subclasses to do the
+ /// per-function processing of the pass.
+ ///
+ virtual bool runOnFunction(Function &F) = 0;
+
+ void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+ /// Return what kind of Pass Manager can manage this pass.
+ PassManagerType getPotentialPassManagerType() const override;
+
+protected:
+ /// skipOptnoneFunction - This function has Attribute::OptimizeNone
+ /// and most transformation passes should skip it.
+ bool skipOptnoneFunction(const Function &F) const;
+};
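+
+// A minimal sketch of a FunctionPass subclass (illustrative only; the pass
+// name CountInsts and its registration details are assumptions, not part of
+// this header):
+//
+//   namespace {
+//   struct CountInsts : public FunctionPass {
+//     static char ID;
+//     CountInsts() : FunctionPass(ID) {}
+//     bool runOnFunction(Function &F) override {
+//       if (skipOptnoneFunction(F))
+//         return false;
+//       // Inspect F here; return true only if the IR was modified.
+//       return false;
+//     }
+//     void getAnalysisUsage(AnalysisUsage &AU) const override {
+//       AU.setPreservesAll(); // purely an inspection pass
+//     }
+//   };
+//   }
+//   char CountInsts::ID = 0;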
+
+
+
+//===----------------------------------------------------------------------===//
+/// BasicBlockPass class - This class is used to implement most local
+/// optimizations. Optimizations should subclass this class if they
+/// meet the following constraints:
+/// 1. Optimizations are local, operating on either a basic block or
+/// instruction at a time.
+/// 2. Optimizations do not modify the CFG of the contained function, or any
+/// other basic block in the function.
+/// 3. Optimizations conform to all of the constraints of FunctionPasses.
+///
+class BasicBlockPass : public Pass {
+public:
+ explicit BasicBlockPass(char &pid) : Pass(PT_BasicBlock, pid) {}
+
+ /// createPrinterPass - Get a basic block printer pass.
+ Pass *createPrinterPass(raw_ostream &O,
+ const std::string &Banner) const override;
+
+ using llvm::Pass::doInitialization;
+ using llvm::Pass::doFinalization;
+
+ /// doInitialization - Virtual method overridden by BasicBlockPass subclasses
+ /// to do any necessary per-function initialization.
+ ///
+ virtual bool doInitialization(Function &);
+
+  /// runOnBasicBlock - Virtual method overridden by subclasses to do the
+ /// per-basicblock processing of the pass.
+ ///
+ virtual bool runOnBasicBlock(BasicBlock &BB) = 0;
+
+  /// doFinalization - Virtual method overridden by BasicBlockPass subclasses to
+ /// do any post processing needed after all passes have run.
+ ///
+ virtual bool doFinalization(Function &);
+
+ void assignPassManager(PMStack &PMS, PassManagerType T) override;
+
+ /// Return what kind of Pass Manager can manage this pass.
+ PassManagerType getPotentialPassManagerType() const override;
+
+protected:
+ /// skipOptnoneFunction - Containing function has Attribute::OptimizeNone
+ /// and most transformation passes should skip it.
+ bool skipOptnoneFunction(const BasicBlock &BB) const;
+};
+
+/// If the user specifies the -time-passes argument on an LLVM tool command line
+/// then the value of this boolean will be true, otherwise false.
+/// @brief This is the storage for the -time-passes option.
+extern bool TimePassesIsEnabled;
+
+} // End llvm namespace
+
+// Include support files that contain important APIs commonly used by Passes,
+// but that we want to separate out to make it easier to read the header files.
+//
+#include "llvm/PassSupport.h"
+#include "llvm/PassAnalysisSupport.h"
+
+#endif
diff --git a/contrib/llvm/include/llvm/PassAnalysisSupport.h b/contrib/llvm/include/llvm/PassAnalysisSupport.h
new file mode 100644
index 0000000..492a4ef
--- /dev/null
+++ b/contrib/llvm/include/llvm/PassAnalysisSupport.h
@@ -0,0 +1,275 @@
+//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines stuff that is used to define and "use" Analysis Passes.
+// This file is automatically #included by Pass.h, so:
+//
+// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
+//
+// Instead, #include Pass.h
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSANALYSISSUPPORT_H
+#define LLVM_PASSANALYSISSUPPORT_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Pass.h"
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// Represent the analysis usage information of a pass. This tracks analyses
+/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
+/// TRANSITIVE (must be available throughout the lifetime of the pass), and
+/// analyses that the pass PRESERVES (the pass does not invalidate the results
+/// of these analyses). This information is provided by a pass to the Pass
+/// infrastructure through the getAnalysisUsage virtual function.
+///
+class AnalysisUsage {
+public:
+ typedef SmallVectorImpl<AnalysisID> VectorType;
+
+private:
+ /// Sets of analyses required and preserved by a pass
+ // TODO: It's not clear that SmallVector is an appropriate data structure for
+  // this use case. The sizes were picked to minimize wasted space, but are
+ // otherwise fairly meaningless.
+ SmallVector<AnalysisID, 8> Required;
+ SmallVector<AnalysisID, 2> RequiredTransitive;
+ SmallVector<AnalysisID, 2> Preserved;
+ SmallVector<AnalysisID, 0> Used;
+ bool PreservesAll;
+
+public:
+ AnalysisUsage() : PreservesAll(false) {}
+
+ ///@{
+ /// Add the specified ID to the required set of the usage info for a pass.
+ AnalysisUsage &addRequiredID(const void *ID);
+ AnalysisUsage &addRequiredID(char &ID);
+ template<class PassClass>
+ AnalysisUsage &addRequired() {
+ return addRequiredID(PassClass::ID);
+ }
+
+ AnalysisUsage &addRequiredTransitiveID(char &ID);
+ template<class PassClass>
+ AnalysisUsage &addRequiredTransitive() {
+ return addRequiredTransitiveID(PassClass::ID);
+ }
+ ///@}
+
+ ///@{
+ /// Add the specified ID to the set of analyses preserved by this pass.
+ AnalysisUsage &addPreservedID(const void *ID) {
+ Preserved.push_back(ID);
+ return *this;
+ }
+ AnalysisUsage &addPreservedID(char &ID) {
+ Preserved.push_back(&ID);
+ return *this;
+ }
+ /// Add the specified Pass class to the set of analyses preserved by this pass.
+ template<class PassClass>
+ AnalysisUsage &addPreserved() {
+ Preserved.push_back(&PassClass::ID);
+ return *this;
+ }
+ ///@}
+
+ ///@{
+  /// Add the specified ID to the set of analyses used by this pass if they are
+  /// available.
+ AnalysisUsage &addUsedIfAvailableID(const void *ID) {
+ Used.push_back(ID);
+ return *this;
+ }
+ AnalysisUsage &addUsedIfAvailableID(char &ID) {
+ Used.push_back(&ID);
+ return *this;
+ }
+ /// Add the specified Pass class to the set of analyses used by this pass.
+ template<class PassClass>
+ AnalysisUsage &addUsedIfAvailable() {
+ Used.push_back(&PassClass::ID);
+ return *this;
+ }
+ ///@}
+
+ /// Add the Pass with the specified argument string to the set of analyses
+ /// preserved by this pass. If no such Pass exists, do nothing. This can be
+ /// useful when a pass is trivially preserved, but may not be linked in. Be
+ /// careful about spelling!
+ AnalysisUsage &addPreserved(StringRef Arg);
+
+ /// Set by analyses that do not transform their input at all
+ void setPreservesAll() { PreservesAll = true; }
+
+ /// Determine whether a pass said it does not transform its input at all
+ bool getPreservesAll() const { return PreservesAll; }
+
+  /// This function should be called by a pass iff it does not:
+ ///
+ /// 1. Add or remove basic blocks from the function
+ /// 2. Modify terminator instructions in any way.
+ ///
+ /// This function annotates the AnalysisUsage info object to say that analyses
+ /// that only depend on the CFG are preserved by this pass.
+ ///
+ void setPreservesCFG();
+
+ const VectorType &getRequiredSet() const { return Required; }
+ const VectorType &getRequiredTransitiveSet() const {
+ return RequiredTransitive;
+ }
+ const VectorType &getPreservedSet() const { return Preserved; }
+ const VectorType &getUsedSet() const { return Used; }
+};
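+
+// A typical getAnalysisUsage override combines these methods, e.g. (a sketch;
+// MyPass is hypothetical and DominatorTreeWrapperPass stands in for any
+// required analysis):
+//
+//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
+//     AU.addRequired<DominatorTreeWrapperPass>();  // must run before MyPass
+//     AU.addPreserved<DominatorTreeWrapperPass>(); // MyPass keeps it valid
+//     AU.setPreservesCFG();                        // MyPass never alters the CFG
+//   }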
+
+//===----------------------------------------------------------------------===//
+/// AnalysisResolver - Simple interface used by Pass objects to pull all
+/// analysis information out of the pass manager that is responsible for
+/// managing the pass.
+///
+class PMDataManager;
+class AnalysisResolver {
+private:
+ AnalysisResolver() = delete;
+
+public:
+ explicit AnalysisResolver(PMDataManager &P) : PM(P) { }
+
+ inline PMDataManager &getPMDataManager() { return PM; }
+
+ /// Find pass that is implementing PI.
+ Pass *findImplPass(AnalysisID PI) {
+ Pass *ResultPass = nullptr;
+ for (unsigned i = 0; i < AnalysisImpls.size() ; ++i) {
+ if (AnalysisImpls[i].first == PI) {
+ ResultPass = AnalysisImpls[i].second;
+ break;
+ }
+ }
+ return ResultPass;
+ }
+
+ /// Find pass that is implementing PI. Initialize pass for Function F.
+ Pass *findImplPass(Pass *P, AnalysisID PI, Function &F);
+
+ void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
+ if (findImplPass(PI) == P)
+ return;
+ std::pair<AnalysisID, Pass*> pir = std::make_pair(PI,P);
+ AnalysisImpls.push_back(pir);
+ }
+
+  /// Clear the cache used to connect a pass to the analysis (PassInfo).
+ void clearAnalysisImpls() {
+ AnalysisImpls.clear();
+ }
+
+ /// Return analysis result or null if it doesn't exist.
+ Pass *getAnalysisIfAvailable(AnalysisID ID, bool Direction) const;
+
+private:
+  /// This keeps track of which passes implement the interfaces that are
+ /// required by the current pass (to implement getAnalysis()).
+ std::vector<std::pair<AnalysisID, Pass*> > AnalysisImpls;
+
+ /// PassManager that is used to resolve analysis info
+ PMDataManager &PM;
+};
+
+/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
+/// get analysis information that might be around, for example to update it.
+/// This is different than getAnalysis in that it can fail (if the analysis
+/// results haven't been computed), so should only be used if you can handle
+/// the case when the analysis is not available. This method is often used by
+/// transformation APIs to update analysis results for a pass automatically as
+/// the transform is performed.
+///
+template<typename AnalysisType>
+AnalysisType *Pass::getAnalysisIfAvailable() const {
+ assert(Resolver && "Pass not resident in a PassManager object!");
+
+ const void *PI = &AnalysisType::ID;
+
+ Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI, true);
+ if (!ResultPass) return nullptr;
+
+  // Because the AnalysisType may not be a subclass of Pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from Pass, once from AnalysisType).
+ return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
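+
+// Usage sketch: inside a pass's run method, an optional analysis can be
+// consulted without forcing it to be computed (AliasAnalysis stands in for
+// any analysis type here):
+//
+//   if (AliasAnalysis *AA = getAnalysisIfAvailable<AliasAnalysis>()) {
+//     // AA was already computed by the pass manager; update or query it here.
+//   }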
+
+/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+/// to the analysis information that they claim to use by overriding the
+/// getAnalysisUsage function.
+///
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysis() const {
+ assert(Resolver && "Pass has not been inserted into a PassManager object!");
+ return getAnalysisID<AnalysisType>(&AnalysisType::ID);
+}
+
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
+ assert(PI && "getAnalysis for unregistered pass!");
+  assert(Resolver && "Pass has not been inserted into a PassManager object!");
+ // PI *must* appear in AnalysisImpls. Because the number of passes used
+ // should be a small number, we just do a linear search over a (dense)
+ // vector.
+ Pass *ResultPass = Resolver->findImplPass(PI);
+  assert(ResultPass &&
+         "getAnalysis*() called on an analysis that was not "
+         "'required' by pass!");
+
+  // Because the AnalysisType may not be a subclass of Pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from Pass, once from AnalysisType).
+ return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
+
+/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
+/// to the analysis information that they claim to use by overriding the
+/// getAnalysisUsage function.
+///
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysis(Function &F) {
+ assert(Resolver &&"Pass has not been inserted into a PassManager object!");
+
+ return getAnalysisID<AnalysisType>(&AnalysisType::ID, F);
+}
+
+template<typename AnalysisType>
+AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F) {
+ assert(PI && "getAnalysis for unregistered pass!");
+ assert(Resolver && "Pass has not been inserted into a PassManager object!");
+ // PI *must* appear in AnalysisImpls. Because the number of passes used
+ // should be a small number, we just do a linear search over a (dense)
+ // vector.
+ Pass *ResultPass = Resolver->findImplPass(this, PI, F);
+ assert(ResultPass && "Unable to find requested analysis info");
+
+  // Because the AnalysisType may not be a subclass of Pass (for
+  // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
+  // adjust the return pointer (because the class may multiply inherit, once
+  // from Pass, once from AnalysisType).
+ return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/PassInfo.h b/contrib/llvm/include/llvm/PassInfo.h
new file mode 100644
index 0000000..cee4ade
--- /dev/null
+++ b/contrib/llvm/include/llvm/PassInfo.h
@@ -0,0 +1,143 @@
+//===- llvm/PassInfo.h - Pass Info class ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements the PassInfo class.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_PASSINFO_H
+#define LLVM_PASSINFO_H
+
+#include <cassert>
+#include <vector>
+
+namespace llvm {
+
+class Pass;
+class TargetMachine;
+
+//===---------------------------------------------------------------------------
+/// PassInfo class - An instance of this class exists for every pass known by
+/// the system, and can be obtained from a live Pass by calling its
+/// getPassInfo() method. These objects are set up by the RegisterPass<>
+/// template.
+///
+class PassInfo {
+public:
+ typedef Pass* (*NormalCtor_t)();
+ typedef Pass *(*TargetMachineCtor_t)(TargetMachine *);
+
+private:
+ const char *const PassName; // Nice name for Pass
+ const char *const PassArgument; // Command Line argument to run this pass
+ const void *PassID;
+ const bool IsCFGOnlyPass; // Pass only looks at the CFG.
+ const bool IsAnalysis; // True if an analysis pass.
+ const bool IsAnalysisGroup; // True if an analysis group.
+ std::vector<const PassInfo *> ItfImpl; // Interfaces implemented by this pass
+
+ NormalCtor_t NormalCtor;
+ TargetMachineCtor_t TargetMachineCtor;
+
+public:
+ /// PassInfo ctor - Do not call this directly, this should only be invoked
+ /// through RegisterPass.
+ PassInfo(const char *name, const char *arg, const void *pi,
+ NormalCtor_t normal, bool isCFGOnly, bool is_analysis,
+ TargetMachineCtor_t machine = nullptr)
+ : PassName(name), PassArgument(arg), PassID(pi), IsCFGOnlyPass(isCFGOnly),
+ IsAnalysis(is_analysis), IsAnalysisGroup(false), NormalCtor(normal),
+ TargetMachineCtor(machine) {}
+ /// PassInfo ctor - Do not call this directly, this should only be invoked
+ /// through RegisterPass. This version is for use by analysis groups; it
+ /// does not auto-register the pass.
+ PassInfo(const char *name, const void *pi)
+ : PassName(name), PassArgument(""), PassID(pi), IsCFGOnlyPass(false),
+ IsAnalysis(false), IsAnalysisGroup(true), NormalCtor(nullptr),
+ TargetMachineCtor(nullptr) {}
+
+ /// getPassName - Return the friendly name for the pass, never returns null
+ ///
+ const char *getPassName() const { return PassName; }
+
+ /// getPassArgument - Return the command line option that may be passed to
+ /// 'opt' that will cause this pass to be run. This will return null if there
+ /// is no argument.
+ ///
+ const char *getPassArgument() const { return PassArgument; }
+
+  /// getTypeInfo - Return the id object for the pass...
+  /// TODO: Rename
+ const void *getTypeInfo() const { return PassID; }
+
+ /// Return true if this PassID implements the specified ID pointer.
+ bool isPassID(const void *IDPtr) const { return PassID == IDPtr; }
+
+ /// isAnalysisGroup - Return true if this is an analysis group, not a normal
+ /// pass.
+ ///
+ bool isAnalysisGroup() const { return IsAnalysisGroup; }
+ bool isAnalysis() const { return IsAnalysis; }
+
+ /// isCFGOnlyPass - return true if this pass only looks at the CFG for the
+ /// function.
+ bool isCFGOnlyPass() const { return IsCFGOnlyPass; }
+
+ /// getNormalCtor - Return a pointer to a function, that when called, creates
+ /// an instance of the pass and returns it. This pointer may be null if there
+ /// is no default constructor for the pass.
+ ///
+ NormalCtor_t getNormalCtor() const {
+ return NormalCtor;
+ }
+ void setNormalCtor(NormalCtor_t Ctor) {
+ NormalCtor = Ctor;
+ }
+
+ /// getTargetMachineCtor - Return a pointer to a function, that when called
+ /// with a TargetMachine, creates an instance of the pass and returns it.
+ /// This pointer may be null if there is no constructor with a TargetMachine
+ /// for the pass.
+ ///
+ TargetMachineCtor_t getTargetMachineCtor() const { return TargetMachineCtor; }
+ void setTargetMachineCtor(TargetMachineCtor_t Ctor) {
+ TargetMachineCtor = Ctor;
+ }
+
+ /// createPass() - Use this method to create an instance of this pass.
+ Pass *createPass() const {
+ assert((!isAnalysisGroup() || NormalCtor) &&
+ "No default implementation found for analysis group!");
+ assert(NormalCtor &&
+ "Cannot call createPass on PassInfo without default ctor!");
+ return NormalCtor();
+ }
+
+ /// addInterfaceImplemented - This method is called when this pass is
+ /// registered as a member of an analysis group with the RegisterAnalysisGroup
+ /// template.
+ ///
+ void addInterfaceImplemented(const PassInfo *ItfPI) {
+ ItfImpl.push_back(ItfPI);
+ }
+
+ /// getInterfacesImplemented - Return a list of all of the analysis group
+ /// interfaces implemented by this pass.
+ ///
+ const std::vector<const PassInfo*> &getInterfacesImplemented() const {
+ return ItfImpl;
+ }
+
+private:
+ void operator=(const PassInfo &) = delete;
+ PassInfo(const PassInfo &) = delete;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/PassRegistry.h b/contrib/llvm/include/llvm/PassRegistry.h
new file mode 100644
index 0000000..e7fe1f5
--- /dev/null
+++ b/contrib/llvm/include/llvm/PassRegistry.h
@@ -0,0 +1,99 @@
+//===- llvm/PassRegistry.h - Pass Information Registry ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PassRegistry, a class that is used in the initialization
+// and registration of passes. At application startup, passes are registered
+// with the PassRegistry, which is later provided to the PassManager for
+// dependency resolution and similar tasks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSREGISTRY_H
+#define LLVM_PASSREGISTRY_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/PassInfo.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/RWMutex.h"
+#include <vector>
+
+namespace llvm {
+
+class PassInfo;
+struct PassRegistrationListener;
+
+/// PassRegistry - This class manages the registration and initialization of
+/// the pass subsystem at application startup, and assists the PassManager
+/// in resolving pass dependencies.
+/// NOTE: PassRegistry is NOT thread-safe. If you want to use LLVM on multiple
+/// threads simultaneously, you will need to use a separate PassRegistry on
+/// each thread.
+class PassRegistry {
+ mutable sys::SmartRWMutex<true> Lock;
+
+ /// PassInfoMap - Keep track of the PassInfo object for each registered pass.
+ typedef DenseMap<const void *, const PassInfo *> MapType;
+ MapType PassInfoMap;
+
+ typedef StringMap<const PassInfo *> StringMapType;
+ StringMapType PassInfoStringMap;
+
+ std::vector<std::unique_ptr<const PassInfo>> ToFree;
+ std::vector<PassRegistrationListener *> Listeners;
+
+public:
+ PassRegistry() {}
+ ~PassRegistry();
+
+ /// getPassRegistry - Access the global registry object, which is
+ /// automatically initialized at application launch and destroyed by
+ /// llvm_shutdown.
+ static PassRegistry *getPassRegistry();
+
+ /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+ /// type identifier (&MyPass::ID).
+ const PassInfo *getPassInfo(const void *TI) const;
+
+ /// getPassInfo - Look up a pass' corresponding PassInfo, indexed by the pass'
+ /// argument string.
+ const PassInfo *getPassInfo(StringRef Arg) const;
+
+ /// registerPass - Register a pass (by means of its PassInfo) with the
+ /// registry. Required in order to use the pass with a PassManager.
+ void registerPass(const PassInfo &PI, bool ShouldFree = false);
+
+  /// registerAnalysisGroup - Register an analysis group (or a pass implementing
+  /// an analysis group) with the registry. Like registerPass, this is required
+  /// in order for a PassManager to be able to use this group/pass.
+ void registerAnalysisGroup(const void *InterfaceID, const void *PassID,
+ PassInfo &Registeree, bool isDefault,
+ bool ShouldFree = false);
+
+ /// enumerateWith - Enumerate the registered passes, calling the provided
+ /// PassRegistrationListener's passEnumerate() callback on each of them.
+ void enumerateWith(PassRegistrationListener *L);
+
+ /// addRegistrationListener - Register the given PassRegistrationListener
+ /// to receive passRegistered() callbacks whenever a new pass is registered.
+ void addRegistrationListener(PassRegistrationListener *L);
+
+ /// removeRegistrationListener - Unregister a PassRegistrationListener so that
+ /// it no longer receives passRegistered() callbacks.
+ void removeRegistrationListener(PassRegistrationListener *L);
+};
+
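+// Lookup sketch (the argument string "mypass" is illustrative; it must match
+// the string a pass was registered under):
+//
+//   PassRegistry *PR = PassRegistry::getPassRegistry();
+//   if (const PassInfo *PI = PR->getPassInfo("mypass"))
+//     Pass *P = PI->createPass(); // caller owns the returned pass
+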
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(PassRegistry, LLVMPassRegistryRef)
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/PassSupport.h b/contrib/llvm/include/llvm/PassSupport.h
new file mode 100644
index 0000000..7c3d49f
--- /dev/null
+++ b/contrib/llvm/include/llvm/PassSupport.h
@@ -0,0 +1,250 @@
+//===- llvm/PassSupport.h - Pass Support code -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines stuff that is used to define and "use" Passes. This file
+// is automatically #included by Pass.h, so:
+//
+// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
+//
+// Instead, #include Pass.h.
+//
+// This file defines Pass registration code and classes used for it.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSSUPPORT_H
+#define LLVM_PASSSUPPORT_H
+
+#include "Pass.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/PassInfo.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace llvm {
+
+class TargetMachine;
+
+#define CALL_ONCE_INITIALIZATION(function) \
+ static volatile sys::cas_flag initialized = 0; \
+ sys::cas_flag old_val = sys::CompareAndSwap(&initialized, 1, 0); \
+ if (old_val == 0) { \
+ function(Registry); \
+ sys::MemoryFence(); \
+ TsanIgnoreWritesBegin(); \
+ TsanHappensBefore(&initialized); \
+ initialized = 2; \
+ TsanIgnoreWritesEnd(); \
+ } else { \
+ sys::cas_flag tmp = initialized; \
+ sys::MemoryFence(); \
+ while (tmp != 2) { \
+ tmp = initialized; \
+ sys::MemoryFence(); \
+ } \
+ } \
+ TsanHappensAfter(&initialized);
+
+#define INITIALIZE_PASS(passName, arg, name, cfg, analysis) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ return PI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) {
+
+#define INITIALIZE_PASS_DEPENDENCY(depName) \
+ initialize##depName##Pass(Registry);
+#define INITIALIZE_AG_DEPENDENCY(depName) \
+ initialize##depName##AnalysisGroup(Registry);
+
+#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis) \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ return PI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
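+
+// These macros are normally used together in a pass's .cpp file, e.g. (a
+// sketch; MyPass and its strings are placeholders, and DominatorTreeWrapperPass
+// stands in for any dependency):
+//
+//   INITIALIZE_PASS_BEGIN(MyPass, "mypass", "My Pass", false, false)
+//   INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+//   INITIALIZE_PASS_END(MyPass, "mypass", "My Pass", false, false)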
+
+#define INITIALIZE_PASS_WITH_OPTIONS(PassName, Arg, Name, Cfg, Analysis) \
+ INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis) \
+ PassName::registerOptions(); \
+ INITIALIZE_PASS_END(PassName, Arg, Name, Cfg, Analysis)
+
+#define INITIALIZE_PASS_WITH_OPTIONS_BEGIN(PassName, Arg, Name, Cfg, Analysis) \
+ INITIALIZE_PASS_BEGIN(PassName, Arg, Name, Cfg, Analysis) \
+ PassName::registerOptions(); \
+
+template<typename PassName>
+Pass *callDefaultCtor() { return new PassName(); }
+
+template <typename PassName> Pass *callTargetMachineCtor(TargetMachine *TM) {
+ return new PassName(TM);
+}
+
+//===---------------------------------------------------------------------------
+/// RegisterPass<t> template - This template class is used to notify the system
+/// that a Pass is available for use, and registers it into the internal
+/// database maintained by the PassManager. Unless this template is used, opt,
+/// for example, will not be able to see the pass, and attempts to create the
+/// pass will fail. This template is used in the following manner (at global
+/// scope, in your .cpp file):
+///
+/// static RegisterPass<YourPassClassName> tmp("passopt", "My Pass Name");
+///
+/// This statement will cause your pass to be created by calling the default
+/// constructor exposed by the pass. If you have a different constructor that
+/// must be called, create a global constructor function (which takes the
+/// arguments you need and returns a Pass*) and register your pass like this:
+///
+/// static RegisterPass<PassClassName> tmp("passopt", "My Name");
+///
+template<typename passName>
+struct RegisterPass : public PassInfo {
+
+ // Register Pass using default constructor...
+ RegisterPass(const char *PassArg, const char *Name, bool CFGOnly = false,
+ bool is_analysis = false)
+ : PassInfo(Name, PassArg, &passName::ID,
+ PassInfo::NormalCtor_t(callDefaultCtor<passName>),
+ CFGOnly, is_analysis) {
+ PassRegistry::getPassRegistry()->registerPass(*this);
+ }
+};
+
+
+/// RegisterAnalysisGroup - Register a Pass as a member of an analysis _group_.
+/// Analysis groups are used to define an interface (which need not derive from
+/// Pass) that is required by passes to do their job. Analysis Groups differ
+/// from normal analyses because any available implementation of the group
+/// will be used.
+///
+/// If no analysis implementing the interface is available, a default
+/// implementation is created and added. A pass registers itself as the default
+/// implementation by specifying 'true' as the second template argument of this
+/// class.
+///
+/// In addition to registering itself as an analysis group member, a pass must
+/// register itself normally as well. Passes may be members of multiple groups
+/// and may still be "required" specifically by name.
+///
+/// The actual interface may also be registered (by not specifying the second
+/// template argument). The interface should be registered to associate
+/// a nice name with the interface.
+///
+class RegisterAGBase : public PassInfo {
+public:
+ RegisterAGBase(const char *Name,
+ const void *InterfaceID,
+ const void *PassID = nullptr,
+ bool isDefault = false);
+};
+
+template<typename Interface, bool Default = false>
+struct RegisterAnalysisGroup : public RegisterAGBase {
+ explicit RegisterAnalysisGroup(PassInfo &RPB)
+ : RegisterAGBase(RPB.getPassName(),
+ &Interface::ID, RPB.getTypeInfo(),
+ Default) {
+ }
+
+ explicit RegisterAnalysisGroup(const char *Name)
+ : RegisterAGBase(Name, &Interface::ID) {
+ }
+};
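+
+// Registration sketch for an analysis group member (FancyAA and AliasAnalysis
+// are illustrative names): the pass registers normally, then joins the group:
+//
+//   static RegisterPass<FancyAA>
+//       B("somefancyaa", "A more complex alias analysis implementation");
+//   static RegisterAnalysisGroup<AliasAnalysis> C(B);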
+
+#define INITIALIZE_ANALYSIS_GROUP(agName, name, defaultPass) \
+ static void* initialize##agName##AnalysisGroupOnce(PassRegistry &Registry) { \
+ initialize##defaultPass##Pass(Registry); \
+ PassInfo *AI = new PassInfo(name, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, 0, *AI, false, true); \
+ return AI; \
+ } \
+ void llvm::initialize##agName##AnalysisGroup(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##agName##AnalysisGroupOnce) \
+ }
+
+
+#define INITIALIZE_AG_PASS(passName, agName, arg, name, cfg, analysis, def) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ if (!def) initialize##agName##AnalysisGroup(Registry); \
+ PassInfo *PI = new PassInfo(name, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ \
+ PassInfo *AI = new PassInfo(name, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, & passName ::ID, \
+ *AI, def, true); \
+ return AI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+
+#define INITIALIZE_AG_PASS_BEGIN(passName, agName, arg, n, cfg, analysis, def) \
+ static void* initialize##passName##PassOnce(PassRegistry &Registry) { \
+ if (!def) initialize##agName##AnalysisGroup(Registry);
+
+#define INITIALIZE_AG_PASS_END(passName, agName, arg, n, cfg, analysis, def) \
+ PassInfo *PI = new PassInfo(n, arg, & passName ::ID, \
+ PassInfo::NormalCtor_t(callDefaultCtor< passName >), cfg, analysis); \
+ Registry.registerPass(*PI, true); \
+ \
+ PassInfo *AI = new PassInfo(n, & agName :: ID); \
+ Registry.registerAnalysisGroup(& agName ::ID, & passName ::ID, \
+ *AI, def, true); \
+ return AI; \
+ } \
+ void llvm::initialize##passName##Pass(PassRegistry &Registry) { \
+ CALL_ONCE_INITIALIZATION(initialize##passName##PassOnce) \
+ }
+
+//===---------------------------------------------------------------------------
+/// PassRegistrationListener class - This class is meant to be derived from by
+/// clients that are interested in which passes get registered and unregistered
+/// at runtime (whether because the RegisterPass constructors are run as the
+/// program starts up, or because a shared object was just loaded).
+///
+struct PassRegistrationListener {
+
+ PassRegistrationListener() {}
+ virtual ~PassRegistrationListener() {}
+
+ /// Callback functions - These functions are invoked whenever a pass is loaded
+ /// or removed from the current executable.
+ ///
+ virtual void passRegistered(const PassInfo *) {}
+
+ /// enumeratePasses - Iterate over the registered passes, calling the
+ /// passEnumerate callback on each PassInfo object.
+ ///
+ void enumeratePasses();
+
+ /// passEnumerate - Callback function invoked when someone calls
+ /// enumeratePasses on this PassRegistrationListener object.
+ ///
+ virtual void passEnumerate(const PassInfo *) {}
+};
+
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Passes/PassBuilder.h b/contrib/llvm/include/llvm/Passes/PassBuilder.h
new file mode 100644
index 0000000..1e605e3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Passes/PassBuilder.h
@@ -0,0 +1,105 @@
+//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Interfaces for registering analysis passes, producing common pass manager
+/// configurations, and parsing of pass pipelines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PASSES_PASSBUILDER_H
+#define LLVM_PASSES_PASSBUILDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+class TargetMachine;
+
+/// \brief This class provides access to building LLVM's passes.
+///
+/// Its members provide the baseline state available to passes during their
+/// construction. The \c PassRegistry.def file specifies how to construct all
+/// of the built-in passes, and those may reference these members during
+/// construction.
+class PassBuilder {
+ TargetMachine *TM;
+
+public:
+ explicit PassBuilder(TargetMachine *TM = nullptr) : TM(TM) {}
+
+ /// \brief Registers all available module analysis passes.
+ ///
+ /// This is an interface that can be used to populate a \c
+ /// ModuleAnalysisManager with all registered module analyses. Callers can
+ /// still manually register any additional analyses.
+ void registerModuleAnalyses(ModuleAnalysisManager &MAM);
+
+ /// \brief Registers all available CGSCC analysis passes.
+ ///
+ /// This is an interface that can be used to populate a \c CGSCCAnalysisManager
+ /// with all registered CGSCC analyses. Callers can still manually register any
+ /// additional analyses.
+ void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
+
+ /// \brief Registers all available function analysis passes.
+ ///
+ /// This is an interface that can be used to populate a \c
+ /// FunctionAnalysisManager with all registered function analyses. Callers can
+ /// still manually register any additional analyses.
+ void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
+
+ /// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
+ ///
+ /// The format of the textual pass pipeline description looks something like:
+ ///
+ /// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
+ ///
+  /// Pass managers have ()s describing the nesting structure of passes. All
+  /// passes are comma-separated. As a special shortcut, if the very first pass
+  /// is not a module pass (as a module pass manager is), this will
+  /// automatically form the shortest stack of pass managers that allows
+  /// inserting that first pass. So, assuming function passes 'fpassN', CGSCC
+  /// passes 'cgpassN', and loop passes 'lpassN', all of these are valid:
+ ///
+ /// fpass1,fpass2,fpass3
+ /// cgpass1,cgpass2,cgpass3
+ /// lpass1,lpass2,lpass3
+ ///
+ /// And they are equivalent to the following (resp.):
+ ///
+ /// module(function(fpass1,fpass2,fpass3))
+ /// module(cgscc(cgpass1,cgpass2,cgpass3))
+ /// module(function(loop(lpass1,lpass2,lpass3)))
+ ///
+ /// This shortcut is especially useful for debugging and testing small pass
+  /// combinations. Note that these shortcuts don't introduce any other magic.
+  /// If the passes in the sequence aren't all the exact same kind of pass, it
+  /// is an error. You cannot mix different levels implicitly; you must
+  /// explicitly form a pass manager in which to nest passes.
+ bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
+ bool VerifyEachPass = true, bool DebugLogging = false);
+
+private:
+ bool parseModulePassName(ModulePassManager &MPM, StringRef Name);
+ bool parseCGSCCPassName(CGSCCPassManager &CGPM, StringRef Name);
+ bool parseFunctionPassName(FunctionPassManager &FPM, StringRef Name);
+ bool parseFunctionPassPipeline(FunctionPassManager &FPM,
+ StringRef &PipelineText, bool VerifyEachPass,
+ bool DebugLogging);
+ bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM, StringRef &PipelineText,
+ bool VerifyEachPass, bool DebugLogging);
+ bool parseModulePassPipeline(ModulePassManager &MPM, StringRef &PipelineText,
+ bool VerifyEachPass, bool DebugLogging);
+};
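+
+// Usage sketch (a minimal setup; real clients also register function and
+// CGSCC analyses and the cross-manager proxies):
+//
+//   PassBuilder PB;
+//   ModuleAnalysisManager MAM;
+//   PB.registerModuleAnalyses(MAM);
+//   ModulePassManager MPM;
+//   if (!PB.parsePassPipeline(MPM, "function(instcombine,sroa)"))
+//     /* report an error: the pipeline text did not parse */;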
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/CoverageMapping.h b/contrib/llvm/include/llvm/ProfileData/CoverageMapping.h
new file mode 100644
index 0000000..3790e13
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/CoverageMapping.h
@@ -0,0 +1,509 @@
+//=-- CoverageMapping.h - Code coverage mapping support ---------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code coverage mapping data is generated by clang and read by
+// llvm-cov to show code coverage statistics for a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGEMAPPING_H_
+#define LLVM_PROFILEDATA_COVERAGEMAPPING_H_
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/raw_ostream.h"
+#include <system_error>
+#include <tuple>
+
+namespace llvm {
+class IndexedInstrProfReader;
+namespace coverage {
+
+class CoverageMappingReader;
+
+class CoverageMapping;
+struct CounterExpressions;
+
+enum CoverageMappingVersion { CoverageMappingVersion1 };
+
+/// \brief A Counter is an abstract value that describes how to compute the
+/// execution count for a region of code using the collected profile count data.
+struct Counter {
+ enum CounterKind { Zero, CounterValueReference, Expression };
+ static const unsigned EncodingTagBits = 2;
+ static const unsigned EncodingTagMask = 0x3;
+ static const unsigned EncodingCounterTagAndExpansionRegionTagBits =
+ EncodingTagBits + 1;
+
+private:
+ CounterKind Kind;
+ unsigned ID;
+
+ Counter(CounterKind Kind, unsigned ID) : Kind(Kind), ID(ID) {}
+
+public:
+ Counter() : Kind(Zero), ID(0) {}
+
+ CounterKind getKind() const { return Kind; }
+
+ bool isZero() const { return Kind == Zero; }
+
+ bool isExpression() const { return Kind == Expression; }
+
+ unsigned getCounterID() const { return ID; }
+
+ unsigned getExpressionID() const { return ID; }
+
+ friend bool operator==(const Counter &LHS, const Counter &RHS) {
+ return LHS.Kind == RHS.Kind && LHS.ID == RHS.ID;
+ }
+
+ friend bool operator!=(const Counter &LHS, const Counter &RHS) {
+ return !(LHS == RHS);
+ }
+
+ friend bool operator<(const Counter &LHS, const Counter &RHS) {
+ return std::tie(LHS.Kind, LHS.ID) < std::tie(RHS.Kind, RHS.ID);
+ }
+
+ /// \brief Return the counter that represents the number zero.
+ static Counter getZero() { return Counter(); }
+
+ /// \brief Return the counter that corresponds to a specific profile counter.
+ static Counter getCounter(unsigned CounterId) {
+ return Counter(CounterValueReference, CounterId);
+ }
+
+ /// \brief Return the counter that corresponds to a specific
+ /// addition counter expression.
+ static Counter getExpression(unsigned ExpressionId) {
+ return Counter(Expression, ExpressionId);
+ }
+};
+
+/// \brief A Counter expression is a value that represents an arithmetic
+/// operation with two counters.
+struct CounterExpression {
+ enum ExprKind { Subtract, Add };
+ ExprKind Kind;
+ Counter LHS, RHS;
+
+ CounterExpression(ExprKind Kind, Counter LHS, Counter RHS)
+ : Kind(Kind), LHS(LHS), RHS(RHS) {}
+};
+
+/// \brief A Counter expression builder is used to construct the
+/// counter expressions. It avoids unnecessary duplication
+/// and simplifies algebraic expressions.
+class CounterExpressionBuilder {
+ /// \brief A list of all the counter expressions
+ std::vector<CounterExpression> Expressions;
+ /// \brief A lookup table for the index of a given expression.
+ llvm::DenseMap<CounterExpression, unsigned> ExpressionIndices;
+
+ /// \brief Return the counter which corresponds to the given expression.
+ ///
+ /// If the given expression is already stored in the builder, a counter
+ /// that references that expression is returned. Otherwise, the given
+ /// expression is added to the builder's collection of expressions.
+ Counter get(const CounterExpression &E);
+
+ /// \brief Gather the terms of the expression tree for processing.
+ ///
+ /// This collects each addition and subtraction referenced by the counter into
+ /// a sequence that can be sorted and combined to build a simplified counter
+ /// expression.
+ void extractTerms(Counter C, int Sign,
+ SmallVectorImpl<std::pair<unsigned, int>> &Terms);
+
+ /// \brief Simplifies the given expression tree
+ /// by getting rid of algebraically redundant operations.
+ Counter simplify(Counter ExpressionTree);
+
+public:
+ ArrayRef<CounterExpression> getExpressions() const { return Expressions; }
+
+ /// \brief Return a counter that represents the expression
+ /// that adds LHS and RHS.
+ Counter add(Counter LHS, Counter RHS);
+
+ /// \brief Return a counter that represents the expression
+ /// that subtracts RHS from LHS.
+ Counter subtract(Counter LHS, Counter RHS);
+};
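+
+// Sketch: build the expression (#0 + #1) - #2 with the builder:
+//
+//   CounterExpressionBuilder Builder;
+//   Counter Sum = Builder.add(Counter::getCounter(0), Counter::getCounter(1));
+//   Counter Net = Builder.subtract(Sum, Counter::getCounter(2));
+//   // Net is an Expression counter indexing into Builder.getExpressions().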
+
+/// \brief A Counter mapping region associates a source range with
+/// a specific counter.
+struct CounterMappingRegion {
+ enum RegionKind {
+ /// \brief A CodeRegion associates some code with a counter
+ CodeRegion,
+
+ /// \brief An ExpansionRegion represents a file expansion region that
+ /// associates a source range with the expansion of a virtual source file,
+ /// such as for a macro instantiation or #include file.
+ ExpansionRegion,
+
+ /// \brief A SkippedRegion represents a source range with code that
+ /// was skipped by a preprocessor or similar means.
+ SkippedRegion
+ };
+
+ Counter Count;
+ unsigned FileID, ExpandedFileID;
+ unsigned LineStart, ColumnStart, LineEnd, ColumnEnd;
+ RegionKind Kind;
+
+ CounterMappingRegion(Counter Count, unsigned FileID, unsigned ExpandedFileID,
+ unsigned LineStart, unsigned ColumnStart,
+ unsigned LineEnd, unsigned ColumnEnd, RegionKind Kind)
+ : Count(Count), FileID(FileID), ExpandedFileID(ExpandedFileID),
+ LineStart(LineStart), ColumnStart(ColumnStart), LineEnd(LineEnd),
+ ColumnEnd(ColumnEnd), Kind(Kind) {}
+
+ static CounterMappingRegion
+ makeRegion(Counter Count, unsigned FileID, unsigned LineStart,
+ unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
+ return CounterMappingRegion(Count, FileID, 0, LineStart, ColumnStart,
+ LineEnd, ColumnEnd, CodeRegion);
+ }
+
+ static CounterMappingRegion
+ makeExpansion(unsigned FileID, unsigned ExpandedFileID, unsigned LineStart,
+ unsigned ColumnStart, unsigned LineEnd, unsigned ColumnEnd) {
+ return CounterMappingRegion(Counter(), FileID, ExpandedFileID, LineStart,
+ ColumnStart, LineEnd, ColumnEnd,
+ ExpansionRegion);
+ }
+
+ static CounterMappingRegion
+ makeSkipped(unsigned FileID, unsigned LineStart, unsigned ColumnStart,
+ unsigned LineEnd, unsigned ColumnEnd) {
+ return CounterMappingRegion(Counter(), FileID, 0, LineStart, ColumnStart,
+ LineEnd, ColumnEnd, SkippedRegion);
+ }
+
+
+ inline std::pair<unsigned, unsigned> startLoc() const {
+ return std::pair<unsigned, unsigned>(LineStart, ColumnStart);
+ }
+
+ inline std::pair<unsigned, unsigned> endLoc() const {
+ return std::pair<unsigned, unsigned>(LineEnd, ColumnEnd);
+ }
+
+ bool operator<(const CounterMappingRegion &Other) const {
+ if (FileID != Other.FileID)
+ return FileID < Other.FileID;
+ return startLoc() < Other.startLoc();
+ }
+
+ bool contains(const CounterMappingRegion &Other) const {
+ if (FileID != Other.FileID)
+ return false;
+ if (startLoc() > Other.startLoc())
+ return false;
+ if (endLoc() < Other.endLoc())
+ return false;
+ return true;
+ }
+};
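+
+// Sketch: a code region in file 0 spanning 3:1 to 5:2, counted by profile
+// counter #0:
+//
+//   CounterMappingRegion R = CounterMappingRegion::makeRegion(
+//       Counter::getCounter(0), /*FileID=*/0, /*LineStart=*/3,
+//       /*ColumnStart=*/1, /*LineEnd=*/5, /*ColumnEnd=*/2);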
+
+/// \brief Associates a source range with an execution count.
+struct CountedRegion : public CounterMappingRegion {
+ uint64_t ExecutionCount;
+
+ CountedRegion(const CounterMappingRegion &R, uint64_t ExecutionCount)
+ : CounterMappingRegion(R), ExecutionCount(ExecutionCount) {}
+};
+
+/// \brief A Counter mapping context is used to connect the counters,
+/// expressions and the obtained counter values.
+class CounterMappingContext {
+ ArrayRef<CounterExpression> Expressions;
+ ArrayRef<uint64_t> CounterValues;
+
+public:
+ CounterMappingContext(ArrayRef<CounterExpression> Expressions,
+ ArrayRef<uint64_t> CounterValues = None)
+ : Expressions(Expressions), CounterValues(CounterValues) {}
+
+ void setCounts(ArrayRef<uint64_t> Counts) { CounterValues = Counts; }
+
+ void dump(const Counter &C, llvm::raw_ostream &OS) const;
+ void dump(const Counter &C) const { dump(C, dbgs()); }
+
+ /// \brief Return the number of times that a region of code associated with
+ /// this counter was executed.
+ ErrorOr<int64_t> evaluate(const Counter &C) const;
+};
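+
+// Evaluation sketch (the counter values are illustrative; Builder and Net are
+// from the expression-builder sketch above):
+//
+//   uint64_t Counts[] = {10, 2, 3};
+//   CounterMappingContext Ctx(Builder.getExpressions(), Counts);
+//   ErrorOr<int64_t> N = Ctx.evaluate(Net); // (10 + 2) - 3 == 9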
+
+/// \brief Code coverage information for a single function.
+struct FunctionRecord {
+ /// \brief Raw function name.
+ std::string Name;
+ /// \brief Associated files.
+ std::vector<std::string> Filenames;
+ /// \brief Regions in the function along with their counts.
+ std::vector<CountedRegion> CountedRegions;
+ /// \brief The number of times this function was executed.
+ uint64_t ExecutionCount;
+
+ FunctionRecord(StringRef Name, ArrayRef<StringRef> Filenames)
+ : Name(Name), Filenames(Filenames.begin(), Filenames.end()) {}
+
+ void pushRegion(CounterMappingRegion Region, uint64_t Count) {
+ if (CountedRegions.empty())
+ ExecutionCount = Count;
+ CountedRegions.emplace_back(Region, Count);
+ }
+};
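+
+// Sketch: recording the region R from the earlier sketch with an execution
+// count of 10 (the function and file names are illustrative):
+//
+//   StringRef File("main.cpp");
+//   FunctionRecord FR("main", File); // ArrayRef from a single element
+//   FR.pushRegion(R, /*Count=*/10);  // the first region sets ExecutionCount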
+
+/// \brief Iterator over Functions, optionally filtered to a single file.
+class FunctionRecordIterator
+ : public iterator_facade_base<FunctionRecordIterator,
+ std::forward_iterator_tag, FunctionRecord> {
+ ArrayRef<FunctionRecord> Records;
+ ArrayRef<FunctionRecord>::iterator Current;
+ StringRef Filename;
+
+ /// \brief Skip records whose primary file is not \c Filename.
+ void skipOtherFiles();
+
+public:
+ FunctionRecordIterator(ArrayRef<FunctionRecord> Records_,
+ StringRef Filename = "")
+ : Records(Records_), Current(Records.begin()), Filename(Filename) {
+ skipOtherFiles();
+ }
+
+ FunctionRecordIterator() : Current(Records.begin()) {}
+
+ bool operator==(const FunctionRecordIterator &RHS) const {
+ return Current == RHS.Current && Filename == RHS.Filename;
+ }
+
+ const FunctionRecord &operator*() const { return *Current; }
+
+ FunctionRecordIterator &operator++() {
+ assert(Current != Records.end() && "incremented past end");
+ ++Current;
+ skipOtherFiles();
+ return *this;
+ }
+};
+
+/// \brief Coverage information for a macro expansion or #included file.
+///
+/// When covered code has pieces that can be expanded for more detail, such as a
+/// preprocessor macro use and its definition, these are represented as
+/// expansions whose coverage can be looked up independently.
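+///
+/// An illustrative sketch of drilling into an expansion (\c Coverage and
+/// \c Data are assumed to be the CoverageMapping and CoverageData types
+/// declared later in this header):
+/// \code
+///   for (const ExpansionRecord &E : Data.getExpansions())
+///     CoverageData Expanded = Coverage.getCoverageForExpansion(E);
+/// \endcode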
+struct ExpansionRecord {
+ /// \brief The abstract file this expansion covers.
+ unsigned FileID;
+ /// \brief The region that expands to this record.
+ const CountedRegion &Region;
+ /// \brief Coverage for the expansion.
+ const FunctionRecord &Function;
+
+ ExpansionRecord(const CountedRegion &Region,
+ const FunctionRecord &Function)
+ : FileID(Region.ExpandedFileID), Region(Region), Function(Function) {}
+};
+
+/// \brief The execution count information starting at a point in a file.
+///
+/// A sequence of CoverageSegments gives execution counts for a file in a
+/// format that is simple to iterate through for processing.
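+///
+/// For example (an illustrative sketch; \c Data is assumed to be a
+/// CoverageData instance defined later in this header):
+/// \code
+///   for (const CoverageSegment &S : Data)
+///     if (S.HasCount)
+///       OS << S.Line << ":" << S.Col << " -> " << S.Count << "\n";
+/// \endcode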
+struct CoverageSegment {
+ /// \brief The line where this segment begins.
+ unsigned Line;
+ /// \brief The column where this segment begins.
+ unsigned Col;
+ /// \brief The execution count, or zero if no count was recorded.
+ uint64_t Count;
+ /// \brief When false, the segment was uninstrumented or skipped.
+ bool HasCount;
+ /// \brief Whether this enters a new region or returns to a previous count.
+ bool IsRegionEntry;
+
+ CoverageSegment(unsigned Line, unsigned Col, bool IsRegionEntry)
+ : Line(Line), Col(Col), Count(0), HasCount(false),
+ IsRegionEntry(IsRegionEntry) {}
+
+ CoverageSegment(unsigned Line, unsigned Col, uint64_t Count,
+ bool IsRegionEntry)
+ : Line(Line), Col(Col), Count(Count), HasCount(true),
+ IsRegionEntry(IsRegionEntry) {}
+
+ friend bool operator==(const CoverageSegment &L, const CoverageSegment &R) {
+ return std::tie(L.Line, L.Col, L.Count, L.HasCount, L.IsRegionEntry) ==
+ std::tie(R.Line, R.Col, R.Count, R.HasCount, R.IsRegionEntry);
+ }
+
+ void setCount(uint64_t NewCount) {
+ Count = NewCount;
+ HasCount = true;
+ }
+
+ void addCount(uint64_t NewCount) { setCount(Count + NewCount); }
+};
+
+/// \brief Coverage information to be processed or displayed.
+///
+/// This represents the coverage of an entire file, expansion, or function. It
+/// provides a sequence of CoverageSegments to iterate through, as well as the
+/// list of expansions that can be further processed.
+class CoverageData {
+ std::string Filename;
+ std::vector<CoverageSegment> Segments;
+ std::vector<ExpansionRecord> Expansions;
+ friend class CoverageMapping;
+
+public:
+ CoverageData() {}
+
+ CoverageData(StringRef Filename) : Filename(Filename) {}
+
+ CoverageData(CoverageData &&RHS)
+ : Filename(std::move(RHS.Filename)), Segments(std::move(RHS.Segments)),
+ Expansions(std::move(RHS.Expansions)) {}
+
+ /// \brief Get the name of the file this data covers.
+ StringRef getFilename() { return Filename; }
+
+ std::vector<CoverageSegment>::iterator begin() { return Segments.begin(); }
+ std::vector<CoverageSegment>::iterator end() { return Segments.end(); }
+ bool empty() { return Segments.empty(); }
+
+ /// \brief Expansions that can be further processed.
+ std::vector<ExpansionRecord> getExpansions() { return Expansions; }
+};
+
+/// \brief The mapping of profile information to coverage data.
+///
+/// This is the main interface to get coverage information, using a profile to
+/// fill out execution counts.
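+///
+/// Typical use (a sketch; "a.out" and "default.profdata" are placeholder
+/// file names):
+/// \code
+///   auto CoverageOr = CoverageMapping::load("a.out", "default.profdata");
+///   if (std::error_code EC = CoverageOr.getError())
+///     return EC;
+///   CoverageMapping &Coverage = *CoverageOr.get();
+///   for (StringRef Filename : Coverage.getUniqueSourceFiles())
+///     CoverageData Data = Coverage.getCoverageForFile(Filename);
+/// \endcode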
+class CoverageMapping {
+ std::vector<FunctionRecord> Functions;
+ unsigned MismatchedFunctionCount;
+
+ CoverageMapping() : MismatchedFunctionCount(0) {}
+
+public:
+ /// \brief Load the coverage mapping using the given readers.
+ static ErrorOr<std::unique_ptr<CoverageMapping>>
+ load(CoverageMappingReader &CoverageReader,
+ IndexedInstrProfReader &ProfileReader);
+
+ /// \brief Load the coverage mapping from the given files.
+ static ErrorOr<std::unique_ptr<CoverageMapping>>
+ load(StringRef ObjectFilename, StringRef ProfileFilename,
+ StringRef Arch = StringRef());
+
+ /// \brief The number of functions that couldn't have their profiles mapped.
+ ///
+ /// This is a count of functions whose profile is out of date or otherwise
+ /// can't be associated with any coverage information.
+ unsigned getMismatchedCount() { return MismatchedFunctionCount; }
+
+ /// \brief Returns the list of files that are covered.
+ std::vector<StringRef> getUniqueSourceFiles() const;
+
+ /// \brief Get the coverage for a particular file.
+ ///
+ /// The given filename must be the name as recorded in the coverage
+ /// information. That is, only names returned from getUniqueSourceFiles will
+ /// yield a result.
+ CoverageData getCoverageForFile(StringRef Filename);
+
+ /// \brief Gets all of the functions covered by this profile.
+ iterator_range<FunctionRecordIterator> getCoveredFunctions() const {
+ return make_range(FunctionRecordIterator(Functions),
+ FunctionRecordIterator());
+ }
+
+ /// \brief Gets all of the functions in a particular file.
+ iterator_range<FunctionRecordIterator>
+ getCoveredFunctions(StringRef Filename) const {
+ return make_range(FunctionRecordIterator(Functions, Filename),
+ FunctionRecordIterator());
+ }
+
+ /// \brief Get the list of function instantiations in the file.
+ ///
+ /// Functions that are instantiated more than once, such as C++ template
+ /// specializations, have distinct coverage records for each instantiation.
+ std::vector<const FunctionRecord *> getInstantiations(StringRef Filename);
+
+ /// \brief Get the coverage for a particular function.
+ CoverageData getCoverageForFunction(const FunctionRecord &Function);
+
+ /// \brief Get the coverage for an expansion within a coverage set.
+ CoverageData getCoverageForExpansion(const ExpansionRecord &Expansion);
+};
+
+} // end namespace coverage
+
+/// \brief Provide DenseMapInfo for CounterExpression
+template<> struct DenseMapInfo<coverage::CounterExpression> {
+ static inline coverage::CounterExpression getEmptyKey() {
+ using namespace coverage;
+ return CounterExpression(CounterExpression::ExprKind::Subtract,
+ Counter::getCounter(~0U),
+ Counter::getCounter(~0U));
+ }
+
+ static inline coverage::CounterExpression getTombstoneKey() {
+ using namespace coverage;
+ return CounterExpression(CounterExpression::ExprKind::Add,
+ Counter::getCounter(~0U),
+ Counter::getCounter(~0U));
+ }
+
+ static unsigned getHashValue(const coverage::CounterExpression &V) {
+ return static_cast<unsigned>(
+ hash_combine(V.Kind, V.LHS.getKind(), V.LHS.getCounterID(),
+ V.RHS.getKind(), V.RHS.getCounterID()));
+ }
+
+ static bool isEqual(const coverage::CounterExpression &LHS,
+ const coverage::CounterExpression &RHS) {
+ return LHS.Kind == RHS.Kind && LHS.LHS == RHS.LHS && LHS.RHS == RHS.RHS;
+ }
+};
+
+const std::error_category &coveragemap_category();
+
+enum class coveragemap_error {
+ success = 0,
+ eof,
+ no_data_found,
+ unsupported_version,
+ truncated,
+ malformed
+};
+
+inline std::error_code make_error_code(coveragemap_error E) {
+ return std::error_code(static_cast<int>(E), coveragemap_category());
+}
+
+} // end namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::coveragemap_error> : std::true_type {};
+}
+
+#endif // LLVM_PROFILEDATA_COVERAGEMAPPING_H_
diff --git a/contrib/llvm/include/llvm/ProfileData/CoverageMappingReader.h b/contrib/llvm/include/llvm/ProfileData/CoverageMappingReader.h
new file mode 100644
index 0000000..38fb468
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/CoverageMappingReader.h
@@ -0,0 +1,182 @@
+//=-- CoverageMappingReader.h - Code coverage mapping reader ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for reading coverage mapping data for
+// instrumentation based coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGEMAPPINGREADER_H
+#define LLVM_PROFILEDATA_COVERAGEMAPPINGREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ProfileData/CoverageMapping.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <iterator>
+
+namespace llvm {
+namespace coverage {
+
+class CoverageMappingReader;
+
+/// \brief Coverage mapping information for a single function.
+struct CoverageMappingRecord {
+ StringRef FunctionName;
+ uint64_t FunctionHash;
+ ArrayRef<StringRef> Filenames;
+ ArrayRef<CounterExpression> Expressions;
+ ArrayRef<CounterMappingRegion> MappingRegions;
+};
+
+/// \brief A file format agnostic iterator over coverage mapping data.
+class CoverageMappingIterator
+ : public std::iterator<std::input_iterator_tag, CoverageMappingRecord> {
+ CoverageMappingReader *Reader;
+ CoverageMappingRecord Record;
+
+ void increment();
+
+public:
+ CoverageMappingIterator() : Reader(nullptr) {}
+ CoverageMappingIterator(CoverageMappingReader *Reader) : Reader(Reader) {
+ increment();
+ }
+
+ CoverageMappingIterator &operator++() {
+ increment();
+ return *this;
+ }
+ bool operator==(const CoverageMappingIterator &RHS) {
+ return Reader == RHS.Reader;
+ }
+ bool operator!=(const CoverageMappingIterator &RHS) {
+ return Reader != RHS.Reader;
+ }
+ CoverageMappingRecord &operator*() { return Record; }
+ CoverageMappingRecord *operator->() { return &Record; }
+};
+
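+/// \brief Abstract interface over coverage mapping records. A concrete reader
+/// can be consumed with a range-based for loop; an illustrative sketch
+/// (\c Reader is any CoverageMappingReader, \c Records a hypothetical
+/// container):
+/// \code
+///   for (CoverageMappingRecord &Record : Reader)
+///     Records.push_back(Record);
+/// \endcode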
+class CoverageMappingReader {
+public:
+ virtual std::error_code readNextRecord(CoverageMappingRecord &Record) = 0;
+ CoverageMappingIterator begin() { return CoverageMappingIterator(this); }
+ CoverageMappingIterator end() { return CoverageMappingIterator(); }
+ virtual ~CoverageMappingReader() {}
+};
+
+/// \brief Base class for the raw coverage mapping and filenames data readers.
+class RawCoverageReader {
+protected:
+ StringRef Data;
+
+ RawCoverageReader(StringRef Data) : Data(Data) {}
+
+ std::error_code readULEB128(uint64_t &Result);
+ std::error_code readIntMax(uint64_t &Result, uint64_t MaxPlus1);
+ std::error_code readSize(uint64_t &Result);
+ std::error_code readString(StringRef &Result);
+};
+
+/// \brief Reader for the raw coverage filenames.
+class RawCoverageFilenamesReader : public RawCoverageReader {
+ std::vector<StringRef> &Filenames;
+
+ RawCoverageFilenamesReader(const RawCoverageFilenamesReader &) = delete;
+ RawCoverageFilenamesReader &
+ operator=(const RawCoverageFilenamesReader &) = delete;
+
+public:
+ RawCoverageFilenamesReader(StringRef Data, std::vector<StringRef> &Filenames)
+ : RawCoverageReader(Data), Filenames(Filenames) {}
+
+ std::error_code read();
+};
+
+/// \brief Reader for the raw coverage mapping data.
+class RawCoverageMappingReader : public RawCoverageReader {
+ ArrayRef<StringRef> TranslationUnitFilenames;
+ std::vector<StringRef> &Filenames;
+ std::vector<CounterExpression> &Expressions;
+ std::vector<CounterMappingRegion> &MappingRegions;
+
+ RawCoverageMappingReader(const RawCoverageMappingReader &) = delete;
+ RawCoverageMappingReader &
+ operator=(const RawCoverageMappingReader &) = delete;
+
+public:
+ RawCoverageMappingReader(StringRef MappingData,
+ ArrayRef<StringRef> TranslationUnitFilenames,
+ std::vector<StringRef> &Filenames,
+ std::vector<CounterExpression> &Expressions,
+ std::vector<CounterMappingRegion> &MappingRegions)
+ : RawCoverageReader(MappingData),
+ TranslationUnitFilenames(TranslationUnitFilenames),
+ Filenames(Filenames), Expressions(Expressions),
+ MappingRegions(MappingRegions) {}
+
+ std::error_code read();
+
+private:
+ std::error_code decodeCounter(unsigned Value, Counter &C);
+ std::error_code readCounter(Counter &C);
+ std::error_code
+ readMappingRegionsSubArray(std::vector<CounterMappingRegion> &MappingRegions,
+ unsigned InferredFileID, size_t NumFileIDs);
+};
+
+/// \brief Reader for the coverage mapping data that is emitted by the
+/// frontend and stored in an object file.
+class BinaryCoverageReader : public CoverageMappingReader {
+public:
+ struct ProfileMappingRecord {
+ CoverageMappingVersion Version;
+ StringRef FunctionName;
+ uint64_t FunctionHash;
+ StringRef CoverageMapping;
+ size_t FilenamesBegin;
+ size_t FilenamesSize;
+
+ ProfileMappingRecord(CoverageMappingVersion Version, StringRef FunctionName,
+ uint64_t FunctionHash, StringRef CoverageMapping,
+ size_t FilenamesBegin, size_t FilenamesSize)
+ : Version(Version), FunctionName(FunctionName),
+ FunctionHash(FunctionHash), CoverageMapping(CoverageMapping),
+ FilenamesBegin(FilenamesBegin), FilenamesSize(FilenamesSize) {}
+ };
+
+private:
+ std::vector<StringRef> Filenames;
+ std::vector<ProfileMappingRecord> MappingRecords;
+ size_t CurrentRecord;
+ std::vector<StringRef> FunctionsFilenames;
+ std::vector<CounterExpression> Expressions;
+ std::vector<CounterMappingRegion> MappingRegions;
+
+ BinaryCoverageReader(const BinaryCoverageReader &) = delete;
+ BinaryCoverageReader &operator=(const BinaryCoverageReader &) = delete;
+
+ BinaryCoverageReader() : CurrentRecord(0) {}
+
+public:
+ static ErrorOr<std::unique_ptr<BinaryCoverageReader>>
+ create(std::unique_ptr<MemoryBuffer> &ObjectBuffer,
+ StringRef Arch);
+
+ std::error_code readNextRecord(CoverageMappingRecord &Record) override;
+};
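+
+// An illustrative creation sketch (ObjectBuffer is assumed to hold the
+// instrumented binary; an empty Arch string is assumed to accept any
+// architecture):
+//   auto ReaderOr = BinaryCoverageReader::create(ObjectBuffer, "");
+//   if (ReaderOr)
+//     for (CoverageMappingRecord &Record : **ReaderOr)
+//       ++NumRecords; // NumRecords is a hypothetical counter.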
+
+} // end namespace coverage
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/CoverageMappingWriter.h b/contrib/llvm/include/llvm/ProfileData/CoverageMappingWriter.h
new file mode 100644
index 0000000..2e3b037
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/CoverageMappingWriter.h
@@ -0,0 +1,63 @@
+//=-- CoverageMappingWriter.h - Code coverage mapping writer ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing coverage mapping data for
+// instrumentation based coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_COVERAGEMAPPINGWRITER_H
+#define LLVM_PROFILEDATA_COVERAGEMAPPINGWRITER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ProfileData/CoverageMapping.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace coverage {
+
+/// \brief Writer of the filenames section for the instrumentation
+/// based code coverage.
+class CoverageFilenamesSectionWriter {
+ ArrayRef<StringRef> Filenames;
+
+public:
+ CoverageFilenamesSectionWriter(ArrayRef<StringRef> Filenames)
+ : Filenames(Filenames) {}
+
+ /// \brief Write encoded filenames to the given output stream.
+ void write(raw_ostream &OS);
+};
+
+/// \brief Writer for instrumentation based coverage mapping data.
+class CoverageMappingWriter {
+ ArrayRef<unsigned> VirtualFileMapping;
+ ArrayRef<CounterExpression> Expressions;
+ MutableArrayRef<CounterMappingRegion> MappingRegions;
+
+public:
+ CoverageMappingWriter(ArrayRef<unsigned> VirtualFileMapping,
+ ArrayRef<CounterExpression> Expressions,
+ MutableArrayRef<CounterMappingRegion> MappingRegions)
+ : VirtualFileMapping(VirtualFileMapping), Expressions(Expressions),
+ MappingRegions(MappingRegions) {}
+
+ CoverageMappingWriter(ArrayRef<CounterExpression> Expressions,
+ MutableArrayRef<CounterMappingRegion> MappingRegions)
+ : Expressions(Expressions), MappingRegions(MappingRegions) {}
+
+ /// \brief Write encoded coverage mapping data to the given output stream.
+ void write(raw_ostream &OS);
+};
+
+} // end namespace coverage
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProf.h b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
new file mode 100644
index 0000000..4688759
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProf.h
@@ -0,0 +1,590 @@
+//=-- InstrProf.h - Instrumented profiling format support ---------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Instrumentation-based profiling data is generated by instrumented
+// binaries through library functions in compiler-rt, and read by the clang
+// frontend to feed PGO.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROF_H_
+#define LLVM_PROFILEDATA_INSTRPROF_H_
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/ProfileData/InstrProfData.inc"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MD5.h"
+#include <cstdint>
+#include <list>
+#include <system_error>
+#include <vector>
+
+#define INSTR_PROF_INDEX_VERSION 3
+namespace llvm {
+
+class Function;
+class GlobalVariable;
+class Module;
+
+/// Return the name of the data section containing profile counter variables.
+inline StringRef getInstrProfCountersSectionName(bool AddSegment) {
+ return AddSegment ? "__DATA," INSTR_PROF_CNTS_SECT_NAME_STR
+ : INSTR_PROF_CNTS_SECT_NAME_STR;
+}
+
+/// Return the name of the data section containing names of instrumented
+/// functions.
+inline StringRef getInstrProfNameSectionName(bool AddSegment) {
+ return AddSegment ? "__DATA," INSTR_PROF_NAME_SECT_NAME_STR
+ : INSTR_PROF_NAME_SECT_NAME_STR;
+}
+
+/// Return the name of the data section containing per-function control
+/// data.
+inline StringRef getInstrProfDataSectionName(bool AddSegment) {
+ return AddSegment ? "__DATA," INSTR_PROF_DATA_SECT_NAME_STR
+ : INSTR_PROF_DATA_SECT_NAME_STR;
+}
+
+/// Return the name of the profile runtime entry point used to do value
+/// profiling for a given site.
+inline StringRef getInstrProfValueProfFuncName() {
+ return INSTR_PROF_VALUE_PROF_FUNC_STR;
+}
+
+/// Return the name of the section containing function coverage mapping
+/// data.
+inline StringRef getInstrProfCoverageSectionName(bool AddSegment) {
+ return AddSegment ? "__DATA,__llvm_covmap" : "__llvm_covmap";
+}
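+
+// For example (illustrative; the exact strings come from the
+// INSTR_PROF_*_SECT_NAME_STR macros in InstrProfData.inc), the counters
+// helper yields "__llvm_prf_cnts", or the Mach-O form
+// "__DATA,__llvm_prf_cnts" when AddSegment is true.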
+
+/// Return the name prefix of variables containing instrumented function names.
+inline StringRef getInstrProfNameVarPrefix() { return "__profn_"; }
+
+/// Return the name prefix of variables containing per-function control data.
+inline StringRef getInstrProfDataVarPrefix() { return "__profd_"; }
+
+/// Return the name prefix of profile counter variables.
+inline StringRef getInstrProfCountersVarPrefix() { return "__profc_"; }
+
+/// Return the name prefix of the COMDAT group for instrumentation variables
+/// associated with a COMDAT function.
+inline StringRef getInstrProfComdatPrefix() { return "__profv_"; }
+
+/// Return the name of a coverage mapping variable (internal linkage)
+/// for each instrumented source module. Such variables are allocated
+/// in the __llvm_covmap section.
+inline StringRef getCoverageMappingVarName() {
+ return "__llvm_coverage_mapping";
+}
+
+/// Return the name of the function that registers all the per-function
+/// control data at program startup time by calling __llvm_register_function.
+/// This function has internal linkage and is called by the
+/// __llvm_profile_init runtime method. It is not generated for these
+/// platforms: Darwin, Linux, and FreeBSD.
+inline StringRef getInstrProfRegFuncsName() {
+ return "__llvm_profile_register_functions";
+}
+
+/// Return the name of the runtime interface that registers per-function control
+/// data for one instrumented function.
+inline StringRef getInstrProfRegFuncName() {
+ return "__llvm_profile_register_function";
+}
+
+/// Return the name of the runtime initialization method that is generated by
+/// the compiler. The function calls __llvm_profile_register_functions and
+/// __llvm_profile_override_default_filename functions if needed. This function
+/// has internal linkage and is invoked at startup time via init_array.
+inline StringRef getInstrProfInitFuncName() { return "__llvm_profile_init"; }
+
+/// Return the name of the hook variable defined in profile runtime library.
+/// A reference to the variable causes the linker to link in the runtime
+/// initialization module (which defines the hook variable).
+inline StringRef getInstrProfRuntimeHookVarName() {
+ return "__llvm_profile_runtime";
+}
+
+/// Return the name of the compiler generated function that references the
+/// runtime hook variable. The function is a weak global.
+inline StringRef getInstrProfRuntimeHookVarUseFuncName() {
+ return "__llvm_profile_runtime_user";
+}
+
+/// Return the name of the profile runtime interface that overrides the default
+/// profile data file name.
+inline StringRef getInstrProfFileOverriderFuncName() {
+ return "__llvm_profile_override_default_filename";
+}
+
+/// Return the modified name for function \c F suitable to be
+/// used as the key for profile lookup.
+std::string getPGOFuncName(const Function &F,
+ uint64_t Version = INSTR_PROF_INDEX_VERSION);
+
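+// For example (an illustrative note): a function foo with internal linkage
+// defined in bar.c is keyed as "bar.c:foo", while an externally visible foo
+// keeps the plain key "foo".
+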
+/// Return the modified name for a function suitable to be
+/// used as the key for profile lookup. The function's original
+/// name is \c RawFuncName and has linkage of type \c Linkage.
+/// The function is defined in module \c FileName.
+std::string getPGOFuncName(StringRef RawFuncName,
+ GlobalValue::LinkageTypes Linkage,
+ StringRef FileName,
+ uint64_t Version = INSTR_PROF_INDEX_VERSION);
+
+/// Create and return the global variable for function name used in PGO
+/// instrumentation. \c FuncName is the name of the function returned
+/// by \c getPGOFuncName call.
+GlobalVariable *createPGOFuncNameVar(Function &F, StringRef FuncName);
+
+/// Create and return the global variable for function name used in PGO
+/// instrumentation. \c FuncName is the name of the function
+/// returned by \c getPGOFuncName call, \c M is the owning module,
+/// and \c Linkage is the linkage of the instrumented function.
+GlobalVariable *createPGOFuncNameVar(Module &M,
+ GlobalValue::LinkageTypes Linkage,
+ StringRef FuncName);
+
+/// Given a PGO function name, remove the filename prefix and return
+/// the original (static) function name.
+StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName);
+
+const std::error_category &instrprof_category();
+
+enum class instrprof_error {
+ success = 0,
+ eof,
+ unrecognized_format,
+ bad_magic,
+ bad_header,
+ unsupported_version,
+ unsupported_hash_type,
+ too_large,
+ truncated,
+ malformed,
+ unknown_function,
+ hash_mismatch,
+ count_mismatch,
+ counter_overflow,
+ value_site_count_mismatch
+};
+
+inline std::error_code make_error_code(instrprof_error E) {
+ return std::error_code(static_cast<int>(E), instrprof_category());
+}
+
+inline instrprof_error MergeResult(instrprof_error &Accumulator,
+ instrprof_error Result) {
+ // Prefer first error encountered as later errors may be secondary effects of
+ // the initial problem.
+ if (Accumulator == instrprof_error::success &&
+ Result != instrprof_error::success)
+ Accumulator = Result;
+ return Accumulator;
+}
+
+enum InstrProfValueKind : uint32_t {
+#define VALUE_PROF_KIND(Enumerator, Value) Enumerator = Value,
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+
+namespace object {
+class SectionRef;
+}
+
+namespace IndexedInstrProf {
+uint64_t ComputeHash(StringRef K);
+}
+
+/// A symbol table used for function PGO name look-up. The keys (such as
+/// pointers or MD5 hash values) map to the function. A function's PGO name
+/// or the name's MD5 hash is used to retrieve the profile data of the
+/// function. See the \c getPGOFuncName() method for details on how the PGO
+/// name is formed.
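+///
+/// A construction sketch (illustrative, using only interfaces declared in
+/// this class):
+/// \code
+///   InstrProfSymtab Symtab;
+///   Symtab.addFuncName("main");
+///   Symtab.finalizeSymtab();
+///   StringRef N = Symtab.getFuncName(IndexedInstrProf::ComputeHash("main"));
+/// \endcode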
+class InstrProfSymtab {
+public:
+ typedef std::vector<std::pair<uint64_t, uint64_t>> AddrHashMap;
+
+private:
+ StringRef Data;
+ uint64_t Address;
+ // A map from MD5 hash keys to function name strings.
+ std::vector<std::pair<uint64_t, std::string>> HashNameMap;
+ // A map from function runtime address to function name MD5 hash.
+ // This map is only populated and used by raw instr profile reader.
+ AddrHashMap AddrToMD5Map;
+
+public:
+ InstrProfSymtab() : Data(), Address(0), HashNameMap(), AddrToMD5Map() {}
+
+ /// Create InstrProfSymtab from an object file section which
+ /// contains function PGO names that are uncompressed.
+ /// This interface is used by CoverageMappingReader.
+ std::error_code create(object::SectionRef &Section);
+ /// This interface is used by reader of CoverageMapping test
+ /// format.
+ inline std::error_code create(StringRef D, uint64_t BaseAddr);
+  /// Create InstrProfSymtab from a set of names iterable from
+ /// \p IterRange. This interface is used by IndexedProfReader.
+ template <typename NameIterRange> void create(const NameIterRange &IterRange);
+ // If the symtab is created by a series of calls to \c addFuncName, \c
+ // finalizeSymtab needs to be called before looking up function names.
+ // This is required because the underlying map is a vector (for space
+ // efficiency) which needs to be sorted.
+ inline void finalizeSymtab();
+ /// Update the symtab by adding \p FuncName to the table. This interface
+ /// is used by the raw and text profile readers.
+ void addFuncName(StringRef FuncName) {
+ HashNameMap.push_back(std::make_pair(
+ IndexedInstrProf::ComputeHash(FuncName), FuncName.str()));
+ }
+ /// Map a function address to its name's MD5 hash. This interface
+ /// is only used by the raw profiler reader.
+ void mapAddress(uint64_t Addr, uint64_t MD5Val) {
+ AddrToMD5Map.push_back(std::make_pair(Addr, MD5Val));
+ }
+ AddrHashMap &getAddrHashMap() { return AddrToMD5Map; }
+  /// Return the function's PGO name given the function name's symbol
+  /// address in the object file. If an error occurs, return
+  /// an empty string.
+ StringRef getFuncName(uint64_t FuncNameAddress, size_t NameSize);
+  /// Return the function's PGO name given the name's MD5 hash value.
+ /// If not found, return an empty string.
+ inline StringRef getFuncName(uint64_t FuncMD5Hash);
+};
+
+std::error_code InstrProfSymtab::create(StringRef D, uint64_t BaseAddr) {
+ Data = D;
+ Address = BaseAddr;
+ return std::error_code();
+}
+
+template <typename NameIterRange>
+void InstrProfSymtab::create(const NameIterRange &IterRange) {
+ for (auto Name : IterRange)
+ HashNameMap.push_back(
+ std::make_pair(IndexedInstrProf::ComputeHash(Name), Name.str()));
+ finalizeSymtab();
+}
+
+void InstrProfSymtab::finalizeSymtab() {
+ std::sort(HashNameMap.begin(), HashNameMap.end(), less_first());
+ HashNameMap.erase(std::unique(HashNameMap.begin(), HashNameMap.end()),
+ HashNameMap.end());
+ std::sort(AddrToMD5Map.begin(), AddrToMD5Map.end(), less_first());
+ AddrToMD5Map.erase(std::unique(AddrToMD5Map.begin(), AddrToMD5Map.end()),
+ AddrToMD5Map.end());
+}
+
+StringRef InstrProfSymtab::getFuncName(uint64_t FuncMD5Hash) {
+ auto Result =
+ std::lower_bound(HashNameMap.begin(), HashNameMap.end(), FuncMD5Hash,
+ [](const std::pair<uint64_t, std::string> &LHS,
+ uint64_t RHS) { return LHS.first < RHS; });
+ if (Result != HashNameMap.end())
+ return Result->second;
+ return StringRef();
+}
+
+struct InstrProfValueSiteRecord {
+ /// Value profiling data pairs at a given value site.
+ std::list<InstrProfValueData> ValueData;
+
+ InstrProfValueSiteRecord() { ValueData.clear(); }
+ template <class InputIterator>
+ InstrProfValueSiteRecord(InputIterator F, InputIterator L)
+ : ValueData(F, L) {}
+
+ /// Sort ValueData ascending by Value
+ void sortByTargetValues() {
+ ValueData.sort(
+ [](const InstrProfValueData &left, const InstrProfValueData &right) {
+ return left.Value < right.Value;
+ });
+ }
+
+ /// Merge data from another InstrProfValueSiteRecord
+ /// Optionally scale merged counts by \p Weight.
+ instrprof_error mergeValueData(InstrProfValueSiteRecord &Input,
+ uint64_t Weight = 1);
+};
+
+/// Profiling information for a single function.
+struct InstrProfRecord {
+ InstrProfRecord() {}
+ InstrProfRecord(StringRef Name, uint64_t Hash, std::vector<uint64_t> Counts)
+ : Name(Name), Hash(Hash), Counts(std::move(Counts)) {}
+ StringRef Name;
+ uint64_t Hash;
+ std::vector<uint64_t> Counts;
+
+ typedef std::vector<std::pair<uint64_t, uint64_t>> ValueMapType;
+
+ /// Return the number of value profile kinds with non-zero number
+ /// of profile sites.
+ inline uint32_t getNumValueKinds() const;
+ /// Return the number of instrumented sites for ValueKind.
+ inline uint32_t getNumValueSites(uint32_t ValueKind) const;
+ /// Return the total number of ValueData for ValueKind.
+ inline uint32_t getNumValueData(uint32_t ValueKind) const;
+ /// Return the number of value data collected for ValueKind at profiling
+ /// site: Site.
+ inline uint32_t getNumValueDataForSite(uint32_t ValueKind,
+ uint32_t Site) const;
+ /// Return the array of profiled values at \p Site.
+ inline std::unique_ptr<InstrProfValueData[]>
+ getValueForSite(uint32_t ValueKind, uint32_t Site,
+ uint64_t (*ValueMapper)(uint32_t, uint64_t) = 0) const;
+ inline void
+ getValueForSite(InstrProfValueData Dest[], uint32_t ValueKind, uint32_t Site,
+ uint64_t (*ValueMapper)(uint32_t, uint64_t) = 0) const;
+ /// Reserve space for NumValueSites sites.
+ inline void reserveSites(uint32_t ValueKind, uint32_t NumValueSites);
+ /// Add ValueData for ValueKind at value Site.
+ void addValueData(uint32_t ValueKind, uint32_t Site,
+ InstrProfValueData *VData, uint32_t N,
+ ValueMapType *ValueMap);
+
+ /// Merge the counts in \p Other into this one.
+ /// Optionally scale merged counts by \p Weight.
+ instrprof_error merge(InstrProfRecord &Other, uint64_t Weight = 1);
+
+ /// Clear value data entries
+ void clearValueData() {
+ for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
+ getValueSitesForKind(Kind).clear();
+ }
+
+private:
+ std::vector<InstrProfValueSiteRecord> IndirectCallSites;
+ const std::vector<InstrProfValueSiteRecord> &
+ getValueSitesForKind(uint32_t ValueKind) const {
+ switch (ValueKind) {
+ case IPVK_IndirectCallTarget:
+ return IndirectCallSites;
+ default:
+ llvm_unreachable("Unknown value kind!");
+ }
+ return IndirectCallSites;
+ }
+
+ std::vector<InstrProfValueSiteRecord> &
+ getValueSitesForKind(uint32_t ValueKind) {
+ return const_cast<std::vector<InstrProfValueSiteRecord> &>(
+ const_cast<const InstrProfRecord *>(this)
+ ->getValueSitesForKind(ValueKind));
+ }
+
+ // Map indirect call target name hash to name string.
+ uint64_t remapValue(uint64_t Value, uint32_t ValueKind,
+ ValueMapType *HashKeys);
+
+ // Merge Value Profile data from Src record to this record for ValueKind.
+ // Scale merged value counts by \p Weight.
+ instrprof_error mergeValueProfData(uint32_t ValueKind, InstrProfRecord &Src,
+ uint64_t Weight);
+};
+
+uint32_t InstrProfRecord::getNumValueKinds() const {
+ uint32_t NumValueKinds = 0;
+ for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
+ NumValueKinds += !(getValueSitesForKind(Kind).empty());
+ return NumValueKinds;
+}
+
+uint32_t InstrProfRecord::getNumValueData(uint32_t ValueKind) const {
+ uint32_t N = 0;
+ const std::vector<InstrProfValueSiteRecord> &SiteRecords =
+ getValueSitesForKind(ValueKind);
+ for (auto &SR : SiteRecords) {
+ N += SR.ValueData.size();
+ }
+ return N;
+}
+
+uint32_t InstrProfRecord::getNumValueSites(uint32_t ValueKind) const {
+ return getValueSitesForKind(ValueKind).size();
+}
+
+uint32_t InstrProfRecord::getNumValueDataForSite(uint32_t ValueKind,
+ uint32_t Site) const {
+ return getValueSitesForKind(ValueKind)[Site].ValueData.size();
+}
+
+std::unique_ptr<InstrProfValueData[]> InstrProfRecord::getValueForSite(
+ uint32_t ValueKind, uint32_t Site,
+ uint64_t (*ValueMapper)(uint32_t, uint64_t)) const {
+ uint32_t N = getNumValueDataForSite(ValueKind, Site);
+ if (N == 0)
+ return std::unique_ptr<InstrProfValueData[]>(nullptr);
+
+ auto VD = llvm::make_unique<InstrProfValueData[]>(N);
+ getValueForSite(VD.get(), ValueKind, Site, ValueMapper);
+
+ return VD;
+}
+
+void InstrProfRecord::getValueForSite(InstrProfValueData Dest[],
+ uint32_t ValueKind, uint32_t Site,
+ uint64_t (*ValueMapper)(uint32_t,
+ uint64_t)) const {
+ uint32_t I = 0;
+ for (auto V : getValueSitesForKind(ValueKind)[Site].ValueData) {
+ Dest[I].Value = ValueMapper ? ValueMapper(ValueKind, V.Value) : V.Value;
+ Dest[I].Count = V.Count;
+ I++;
+ }
+}
+
+void InstrProfRecord::reserveSites(uint32_t ValueKind, uint32_t NumValueSites) {
+ std::vector<InstrProfValueSiteRecord> &ValueSites =
+ getValueSitesForKind(ValueKind);
+ ValueSites.reserve(NumValueSites);
+}
+
+inline support::endianness getHostEndianness() {
+ return sys::IsLittleEndianHost ? support::little : support::big;
+}
+
+// Include definitions for value profile data
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "llvm/ProfileData/InstrProfData.inc"
+
+/*
+ * Initialize the record for runtime value profile data.
+ * Return 0 if the initialization is successful; otherwise
+ * return 1.
+ */
+int initializeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord,
+ const uint16_t *NumValueSites,
+ ValueProfNode **Nodes);
+
+/* Release memory allocated for the runtime record. */
+void finalizeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord);
+
+/* Return the size of the ValueProfData structure that can be used to store
+ the value profile data collected at runtime. */
+uint32_t getValueProfDataSizeRT(const ValueProfRuntimeRecord *Record);
+
+/* Return a ValueProfData instance that stores the data collected at runtime. */
+ValueProfData *
+serializeValueProfDataFromRT(const ValueProfRuntimeRecord *Record,
+ ValueProfData *Dst);
+
+namespace IndexedInstrProf {
+
+enum class HashT : uint32_t {
+ MD5,
+
+ Last = MD5
+};
+
+static inline uint64_t MD5Hash(StringRef Str) {
+ MD5 Hash;
+ Hash.update(Str);
+ llvm::MD5::MD5Result Result;
+ Hash.final(Result);
+ // Return the least significant 8 bytes. Our MD5 implementation returns the
+ // result in little endian, so we may need to swap bytes.
+ using namespace llvm::support;
+ return endian::read<uint64_t, little, unaligned>(Result);
+}
+
+inline uint64_t ComputeHash(HashT Type, StringRef K) {
+ switch (Type) {
+ case HashT::MD5:
+ return IndexedInstrProf::MD5Hash(K);
+ }
+ llvm_unreachable("Unhandled hash type");
+}
+
+const uint64_t Magic = 0x8169666f72706cff; // "\xfflprofi\x81"
+const uint64_t Version = INSTR_PROF_INDEX_VERSION;
+const HashT HashType = HashT::MD5;
+
+inline uint64_t ComputeHash(StringRef K) { return ComputeHash(HashType, K); }
+
+// This structure defines the file header of the LLVM profile
+// data file in indexed-format.
+struct Header {
+ uint64_t Magic;
+ uint64_t Version;
+ uint64_t MaxFunctionCount;
+ uint64_t HashType;
+ uint64_t HashOffset;
+};
+
+} // end namespace IndexedInstrProf
+
+namespace RawInstrProf {
+
+const uint64_t Version = INSTR_PROF_RAW_VERSION;
+
+template <class IntPtrT> inline uint64_t getMagic();
+template <> inline uint64_t getMagic<uint64_t>() {
+ return INSTR_PROF_RAW_MAGIC_64;
+}
+
+template <> inline uint64_t getMagic<uint32_t>() {
+ return INSTR_PROF_RAW_MAGIC_32;
+}
+
+// Per-function profile data header/control structure.
+// The definition should match the structure defined in
+// compiler-rt/lib/profile/InstrProfiling.h.
+// It should also match the synthesized type in
+// Transforms/Instrumentation/InstrProfiling.cpp:getOrCreateRegionCounters.
+template <class IntPtrT> struct LLVM_ALIGNAS(8) ProfileData {
+ #define INSTR_PROF_DATA(Type, LLVMType, Name, Init) Type Name;
+ #include "llvm/ProfileData/InstrProfData.inc"
+};
+
+// File header structure of the LLVM profile data in raw format.
+// The definition should match the header referenced in
+// compiler-rt/lib/profile/InstrProfilingFile.c and
+// InstrProfilingBuffer.c.
+struct Header {
+#define INSTR_PROF_RAW_HEADER(Type, Name, Init) const Type Name;
+#include "llvm/ProfileData/InstrProfData.inc"
+};
+
+} // end namespace RawInstrProf
+
+namespace coverage {
+
+// Profile coverage map has the following layout:
+// [CoverageMapFileHeader]
+// [ArrayStart]
+// [CovMapFunctionRecord]
+// [CovMapFunctionRecord]
+// ...
+// [ArrayEnd]
+// [Encoded Region Mapping Data]
+LLVM_PACKED_START
+template <class IntPtrT> struct CovMapFunctionRecord {
+ #define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Init) Type Name;
+ #include "llvm/ProfileData/InstrProfData.inc"
+};
+LLVM_PACKED_END
+
+} // end namespace coverage
+
+} // end namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::instrprof_error> : std::true_type {};
+}
+
+#endif // LLVM_PROFILEDATA_INSTRPROF_H_
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc b/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc
new file mode 100644
index 0000000..48dae50
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProfData.inc
@@ -0,0 +1,735 @@
+/*===-- InstrProfData.inc - instr profiling runtime structures -----------=== *\
+|*
+|* The LLVM Compiler Infrastructure
+|*
+|* This file is distributed under the University of Illinois Open Source
+|* License. See LICENSE.TXT for details.
+|*
+\*===----------------------------------------------------------------------===*/
+/*
+ * This is the master file that defines all the data structures, signatures,
+ * and constant literals that are shared across the profiling runtime library,
+ * compiler (instrumentation), and host tools (reader/writer). The entities
+ * defined in this file affect the profile runtime ABI, the raw profile format,
+ * or both.
+ *
+ * The file has two identical copies. The master copy lives in LLVM and
+ * the other one sits in the compiler-rt/lib/profile directory. To make changes
+ * in this file, first modify the master copy and copy it over to compiler-rt.
+ * Testing of any change in this file can start only after the two copies are
+ * synced up.
+ *
+ * The first part of the file includes macros that define types, names, and
+ * initializers for the member fields of the core data structures. The field
+ * declarations for one structure are enabled by defining the field activation
+ * macro associated with that structure. Only one field activation record
+ * can be defined at a time; the remaining definitions are filtered out by
+ * the preprocessor.
+ *
+ * Examples of how the template is used to instantiate structure definition:
+ * 1. To declare a structure:
+ *
+ * struct ProfData {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * Type Name;
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 2. To construct LLVM type arrays for the struct type:
+ *
+ * Type *DataTypes[] = {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * LLVMType,
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 3. To construct a constant array for the initializers:
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * Initializer,
+ * Constant *ConstantVals[] = {
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ *
+ * The second part of the file includes definitions of all other entities
+ * that are related to the runtime ABI and format. When no field activation
+ * macro is defined, this file can be included to introduce the definitions.
+ *
+\*===----------------------------------------------------------------------===*/
+
+/* INSTR_PROF_DATA start. */
+/* Definition of member fields of the per-function control structure. */
+#ifndef INSTR_PROF_DATA
+#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+
+INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
+ ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
+ NamePtr->getType()->getPointerElementType()->getArrayNumElements()))
+INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
+ ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
+INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+ ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+ Inc->getHash()->getZExtValue()))
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), NamePtr, \
+ ConstantExpr::getBitCast(NamePtr, llvm::Type::getInt8PtrTy(Ctx)))
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
+ ConstantExpr::getBitCast(CounterPtr, \
+ llvm::Type::getInt64PtrTy(Ctx)))
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
+ FunctionAddr)
+INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
+ ConstantPointerNull::get(Int8PtrTy))
+INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
+ ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
+#undef INSTR_PROF_DATA
+/* INSTR_PROF_DATA end. */
+
+/* INSTR_PROF_RAW_HEADER start */
+/* Definition of member fields of the raw profile header data structure. */
+#ifndef INSTR_PROF_RAW_HEADER
+#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
+INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
+INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
+INSTR_PROF_RAW_HEADER(uint64_t, ValueDataSize, ValueDataSize)
+INSTR_PROF_RAW_HEADER(uint64_t, ValueDataDelta, (uintptr_t)ValueDataBegin)
+#undef INSTR_PROF_RAW_HEADER
+/* INSTR_PROF_RAW_HEADER end */
+
+/* VALUE_PROF_FUNC_PARAM start */
+/* Definition of parameter types of the runtime API used to do value profiling
+ * for a given value site.
+ */
+#ifndef VALUE_PROF_FUNC_PARAM
+#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
+#define INSTR_PROF_COMMA
+#else
+#define INSTR_PROF_DATA_DEFINED
+#define INSTR_PROF_COMMA ,
+#endif
+VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
+#undef VALUE_PROF_FUNC_PARAM
+#undef INSTR_PROF_COMMA
+/* VALUE_PROF_FUNC_PARAM end */
+
+/* VALUE_PROF_KIND start */
+#ifndef VALUE_PROF_KIND
+#define VALUE_PROF_KIND(Enumerator, Value)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0)
+/* These two kinds must be the last to be
+ * declared. This is to make sure the string
+ * array created with the template can be
+ * indexed with the kind value.
+ */
+VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget)
+VALUE_PROF_KIND(IPVK_Last, IPVK_IndirectCallTarget)
+
+#undef VALUE_PROF_KIND
+/* VALUE_PROF_KIND end */
+
+/* COVMAP_FUNC_RECORD start */
+/* Definition of member fields of the function record structure in coverage
+ * map.
+ */
+#ifndef COVMAP_FUNC_RECORD
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
+ NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
+ llvm::Type::getInt8PtrTy(Ctx)))
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
+ NameValue.size()))
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
+ CoverageMapping.size()))
+COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
+#undef COVMAP_FUNC_RECORD
+/* COVMAP_FUNC_RECORD end. */
+
+
+#ifdef INSTR_PROF_VALUE_PROF_DATA
+#define INSTR_PROF_DATA_DEFINED
+
+/*!
+ * This is the header of the data structure that defines the on-disk
+ * layout of the value profile data of a particular kind for one function.
+ */
+typedef struct ValueProfRecord {
+ /* The kind of the value profile record. */
+ uint32_t Kind;
+ /*
+ * The number of value profile sites. It is guaranteed to be non-zero;
+ * otherwise the record for this kind won't be emitted.
+ */
+ uint32_t NumValueSites;
+ /*
+ * The first element of the array that stores the number of profiled
+ * values for each value site. The size of the array is NumValueSites.
+ * Since NumValueSites is greater than zero, there is at least one
+ * element in the array.
+ */
+ uint8_t SiteCountArray[1];
+
+ /*
+   * The fake declaration is for documentation purposes only. It aligns the
+   * start of the next field to an 8 byte boundary.
+ uint8_t Padding[X];
+ */
+
+ /* The array of value profile data. The size of the array is the sum
+ * of all elements in SiteCountArray[].
+ InstrProfValueData ValueData[];
+ */
+
+#ifdef __cplusplus
+ /*!
+ * \brief Return the number of value sites.
+ */
+ uint32_t getNumValueSites() const { return NumValueSites; }
+ /*!
+ * \brief Read data from this record and save it to Record.
+ */
+ void deserializeTo(InstrProfRecord &Record,
+ InstrProfRecord::ValueMapType *VMap);
+ /*
+ * In-place byte swap:
+ * Do byte swap for this instance. \c Old is the original order before
+   * the swap, and \c New is the new byte order.
+ */
+ void swapBytes(support::endianness Old, support::endianness New);
+#endif
+} ValueProfRecord;
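+
+/*
+ * A worked size example (illustrative; the numbers are hypothetical): with
+ * NumValueSites == 3 and 5 profiled values in total, the header occupies
+ * offsetof(ValueProfRecord, SiteCountArray) + 3 = 8 + 3 = 11 bytes, rounded
+ * up to 16, and the value data adds 5 * sizeof(InstrProfValueData) bytes;
+ * see getValueProfRecordSize() later in this file.
+ */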
+
+/*!
+ * Per-function header/control data structure for value profiling
+ * data in indexed format.
+ */
+typedef struct ValueProfData {
+ /*
+ * Total size in bytes including this field. It must be a multiple
+ * of sizeof(uint64_t).
+ */
+ uint32_t TotalSize;
+ /*
+   * The number of value profile kinds that have value profile data.
+ * In this implementation, a value profile kind is considered to
+ * have profile data if the number of value profile sites for the
+ * kind is not zero. More aggressively, the implementation can
+ * choose to check the actual data value: if none of the value sites
+ * has any profiled values, the kind can be skipped.
+ */
+ uint32_t NumValueKinds;
+
+ /*
+ * Following are a sequence of variable length records. The prefix/header
+ * of each record is defined by ValueProfRecord type. The number of
+ * records is NumValueKinds.
+ * ValueProfRecord Record_1;
+ * ValueProfRecord Record_N;
+ */
+
+#if __cplusplus
+ /*!
+ * Return the total size in bytes of the on-disk value profile data
+ * given the data stored in Record.
+ */
+ static uint32_t getSize(const InstrProfRecord &Record);
+ /*!
+   * Return a pointer to a \c ValueProfData instance ready to be streamed.
+ */
+ static std::unique_ptr<ValueProfData>
+ serializeFrom(const InstrProfRecord &Record);
+ /*!
+ * Check the integrity of the record. Return the error code when
+ * an error is detected, otherwise return instrprof_error::success.
+ */
+ instrprof_error checkIntegrity();
+ /*!
+   * Return a pointer to a \c ValueProfData instance ready to be read.
+ * All data in the instance are properly byte swapped. The input
+ * data is assumed to be in little endian order.
+ */
+ static ErrorOr<std::unique_ptr<ValueProfData>>
+ getValueProfData(const unsigned char *SrcBuffer,
+ const unsigned char *const SrcBufferEnd,
+ support::endianness SrcDataEndianness);
+ /*!
+ * Swap byte order from \c Endianness order to host byte order.
+ */
+ void swapBytesToHost(support::endianness Endianness);
+ /*!
+ * Swap byte order from host byte order to \c Endianness order.
+ */
+ void swapBytesFromHost(support::endianness Endianness);
+ /*!
+   * Return the total size of \c ValueProfData.
+ */
+ uint32_t getSize() const { return TotalSize; }
+ /*!
+   * Read data from this instance and save it to \c Record.
+ */
+ void deserializeTo(InstrProfRecord &Record,
+ InstrProfRecord::ValueMapType *VMap);
+ void operator delete(void *ptr) { ::operator delete(ptr); }
+#endif
+} ValueProfData;
+
+/*
+ * The closure is designed to abstract away two types of value profile data:
+ * - InstrProfRecord which is the primary data structure used to
+ * represent profile data in host tools (reader, writer, and profile-use)
+ * - value profile runtime data structure suitable to be used by C
+ * runtime library.
+ *
+ * Both sources of data need to serialize to disk/memory-buffer in a common
+ * format: ValueProfData. The abstraction allows compiler-rt's raw profiler
+ * writer to share the same format and code with indexed profile writer.
+ *
+ * For documentation of the member methods below, refer to the corresponding
+ * methods in class InstrProfRecord.
+ */
+typedef struct ValueProfRecordClosure {
+ const void *Record;
+ uint32_t (*GetNumValueKinds)(const void *Record);
+ uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
+ uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
+ uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
+
+ /*
+ * After extracting the value profile data from the value profile record,
+ * this method is used to map the in-memory value to on-disk value. If
+ * the method is null, value will be written out untranslated.
+ */
+ uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
+ void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
+ uint32_t S, uint64_t (*Mapper)(uint32_t, uint64_t));
+ ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
+} ValueProfRecordClosure;
+
+/*
+ * A wrapper struct that represents value profile runtime data.
+ * Like InstrProfRecord class which is used by profiling host tools,
+ * ValueProfRuntimeRecord also implements the abstract interfaces defined in
+ * ValueProfRecordClosure so that the runtime data can be serialized using
+ * shared C implementation. In this structure, NumValueSites and Nodes
+ * members are the primary fields while other fields hold the derived
+ * information for fast implementation of closure interfaces.
+ */
+typedef struct ValueProfRuntimeRecord {
+ /* Number of sites for each value profile kind. */
+ const uint16_t *NumValueSites;
+  /* An array of linked-list headers. The size of the array is the
+   * total number of value profile sites: sum(NumValueSites[*]). Each
+ * linked-list stores the values profiled for a value profile site. */
+ ValueProfNode **Nodes;
+
+ /* Total number of value profile kinds which have at least one
+   * value profile site. */
+ uint32_t NumValueKinds;
+ /* An array recording the number of values tracked at each site.
+ * The size of the array is TotalNumValueSites. */
+ uint8_t *SiteCountArray[IPVK_Last + 1];
+ ValueProfNode **NodesKind[IPVK_Last + 1];
+} ValueProfRuntimeRecord;
+
+/* Forward declarations of C interfaces. */
+int initializeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord,
+ const uint16_t *NumValueSites,
+ ValueProfNode **Nodes);
+void finalizeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord);
+uint32_t getValueProfDataSizeRT(const ValueProfRuntimeRecord *Record);
+ValueProfData *
+serializeValueProfDataFromRT(const ValueProfRuntimeRecord *Record,
+ ValueProfData *Dst);
+uint32_t getNumValueKindsRT(const void *R);
+
+#undef INSTR_PROF_VALUE_PROF_DATA
+#endif /* INSTR_PROF_VALUE_PROF_DATA */
+
+
+#ifdef INSTR_PROF_COMMON_API_IMPL
+#define INSTR_PROF_DATA_DEFINED
+#ifdef __cplusplus
+#define INSTR_PROF_INLINE inline
+#else
+#define INSTR_PROF_INLINE
+#endif
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+/*!
+ * \brief Return the \c ValueProfRecord header size including the
+ * padding bytes.
+ */
+INSTR_PROF_INLINE
+uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
+ uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
+ sizeof(uint8_t) * NumValueSites;
+ /* Round the size to multiple of 8 bytes. */
+ Size = (Size + 7) & ~7;
+ return Size;
+}
+
+/*!
+ * \brief Return the total size of the value profile record including the
+ * header and the value data.
+ */
+INSTR_PROF_INLINE
+uint32_t getValueProfRecordSize(uint32_t NumValueSites,
+ uint32_t NumValueData) {
+ return getValueProfRecordHeaderSize(NumValueSites) +
+ sizeof(InstrProfValueData) * NumValueData;
+}
+
+/*!
+ * \brief Return the pointer to the start of value data array.
+ */
+INSTR_PROF_INLINE
+InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
+ return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
+ This->NumValueSites));
+}
+
+/*!
+ * \brief Return the total number of value data for \c This record.
+ */
+INSTR_PROF_INLINE
+uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
+ uint32_t NumValueData = 0;
+ uint32_t I;
+ for (I = 0; I < This->NumValueSites; I++)
+ NumValueData += This->SiteCountArray[I];
+ return NumValueData;
+}
+
+/*!
+ * \brief Advance from \c This record to the next \c ValueProfRecord.
+ */
+INSTR_PROF_INLINE
+ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
+ uint32_t NumValueData = getValueProfRecordNumValueData(This);
+ return (ValueProfRecord *)((char *)This +
+ getValueProfRecordSize(This->NumValueSites,
+ NumValueData));
+}
+
+/*!
+ * \brief Return the first \c ValueProfRecord instance.
+ */
+INSTR_PROF_INLINE
+ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
+ return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
+}
+
+/* Closure based interfaces. */
+
+/*!
+ * Return the total size in bytes of the on-disk value profile data
+ * given the data stored in Record.
+ */
+uint32_t getValueProfDataSize(ValueProfRecordClosure *Closure) {
+ uint32_t Kind;
+ uint32_t TotalSize = sizeof(ValueProfData);
+ const void *Record = Closure->Record;
+ uint32_t NumValueKinds = Closure->GetNumValueKinds(Record);
+ if (NumValueKinds == 0)
+ return TotalSize;
+
+ for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+ uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
+ if (!NumValueSites)
+ continue;
+ TotalSize += getValueProfRecordSize(NumValueSites,
+ Closure->GetNumValueData(Record, Kind));
+ }
+ return TotalSize;
+}
+
+/*!
+ * Extract value profile data of a function for the profile kind \c ValueKind
+ * from the \c Closure and serialize the data into \c This record instance.
+ */
+void serializeValueProfRecordFrom(ValueProfRecord *This,
+ ValueProfRecordClosure *Closure,
+ uint32_t ValueKind, uint32_t NumValueSites) {
+ uint32_t S;
+ const void *Record = Closure->Record;
+ This->Kind = ValueKind;
+ This->NumValueSites = NumValueSites;
+ InstrProfValueData *DstVD = getValueProfRecordValueData(This);
+
+ for (S = 0; S < NumValueSites; S++) {
+ uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
+ This->SiteCountArray[S] = ND;
+ Closure->GetValueForSite(Record, DstVD, ValueKind, S,
+ Closure->RemapValueData);
+ DstVD += ND;
+ }
+}
+
+/*!
+ * Extract value profile data of a function from the \c Closure
+ * and serialize the data into \c DstData if it is not NULL, or into heap
+ * memory allocated by the \c Closure's allocator method.
+ */
+ValueProfData *serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
+ ValueProfData *DstData) {
+ uint32_t Kind;
+ uint32_t TotalSize = getValueProfDataSize(Closure);
+
+ ValueProfData *VPD =
+ DstData ? DstData : Closure->AllocValueProfData(TotalSize);
+
+ VPD->TotalSize = TotalSize;
+ VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
+ ValueProfRecord *VR = getFirstValueProfRecord(VPD);
+ for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+ uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
+ if (!NumValueSites)
+ continue;
+ serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
+ VR = getValueProfRecordNext(VR);
+ }
+ return VPD;
+}
+
+/*
+ * The value profiler runtime library stores the value profile data
+ * for a given function in \c NumValueSites and \c Nodes structures.
+ * \c ValueProfRuntimeRecord class is used to encapsulate the runtime
+ * profile data and provides fast interfaces to retrieve the profile
+ * information. This interface is used to initialize the runtime record
+ * and pre-compute the information needed for efficient implementation
+ * of callbacks required by ValueProfRecordClosure class.
+ */
+int initializeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord,
+ const uint16_t *NumValueSites,
+ ValueProfNode **Nodes) {
+ unsigned I, J, S = 0, NumValueKinds = 0;
+ RuntimeRecord->NumValueSites = NumValueSites;
+ RuntimeRecord->Nodes = Nodes;
+ for (I = 0; I <= IPVK_Last; I++) {
+ uint16_t N = NumValueSites[I];
+ if (!N) {
+ RuntimeRecord->SiteCountArray[I] = 0;
+ continue;
+ }
+ NumValueKinds++;
+ RuntimeRecord->SiteCountArray[I] = (uint8_t *)calloc(N, 1);
+ if (!RuntimeRecord->SiteCountArray[I])
+ return 1;
+ RuntimeRecord->NodesKind[I] = Nodes ? &Nodes[S] : NULL;
+ for (J = 0; J < N; J++) {
+ /* Compute value count for each site. */
+ uint32_t C = 0;
+ ValueProfNode *Site = Nodes ? RuntimeRecord->NodesKind[I][J] : NULL;
+ while (Site) {
+ C++;
+ Site = Site->Next;
+ }
+ if (C > UCHAR_MAX)
+ C = UCHAR_MAX;
+ RuntimeRecord->SiteCountArray[I][J] = C;
+ }
+ S += N;
+ }
+ RuntimeRecord->NumValueKinds = NumValueKinds;
+ return 0;
+}
+
+void finalizeValueProfRuntimeRecord(ValueProfRuntimeRecord *RuntimeRecord) {
+ unsigned I;
+ for (I = 0; I <= IPVK_Last; I++) {
+ if (RuntimeRecord->SiteCountArray[I])
+ free(RuntimeRecord->SiteCountArray[I]);
+ }
+}
+
+/* ValueProfRecordClosure Interface implementation for
+ * ValueProfDataRuntimeRecord. */
+uint32_t getNumValueKindsRT(const void *R) {
+ return ((const ValueProfRuntimeRecord *)R)->NumValueKinds;
+}
+
+uint32_t getNumValueSitesRT(const void *R, uint32_t VK) {
+ return ((const ValueProfRuntimeRecord *)R)->NumValueSites[VK];
+}
+
+uint32_t getNumValueDataForSiteRT(const void *R, uint32_t VK, uint32_t S) {
+ const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
+ return Record->SiteCountArray[VK][S];
+}
+
+uint32_t getNumValueDataRT(const void *R, uint32_t VK) {
+ unsigned I, S = 0;
+ const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
+ if (Record->SiteCountArray[VK] == 0)
+ return 0;
+ for (I = 0; I < Record->NumValueSites[VK]; I++)
+ S += Record->SiteCountArray[VK][I];
+ return S;
+}
+
+void getValueForSiteRT(const void *R, InstrProfValueData *Dst, uint32_t VK,
+ uint32_t S, uint64_t (*Mapper)(uint32_t, uint64_t)) {
+ unsigned I, N = 0;
+ const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
+ N = getNumValueDataForSiteRT(R, VK, S);
+ if (N == 0)
+ return;
+ ValueProfNode *VNode = Record->NodesKind[VK][S];
+ for (I = 0; I < N; I++) {
+ Dst[I] = VNode->VData;
+ VNode = VNode->Next;
+ }
+}
+
+ValueProfData *allocValueProfDataRT(size_t TotalSizeInBytes) {
+ return (ValueProfData *)calloc(TotalSizeInBytes, 1);
+}
+
+static ValueProfRecordClosure RTRecordClosure = {
+ 0, /* Record: bound to a ValueProfRuntimeRecord before each use. */
+ getNumValueKindsRT,
+ getNumValueSitesRT,
+ getNumValueDataRT,
+ getNumValueDataForSiteRT,
+ 0, /* RemapValueData: no remapping needed for runtime values. */
+ getValueForSiteRT,
+ allocValueProfDataRT};
+
+/*
+ * Return the size of ValueProfData structure to store data
+ * recorded in the runtime record.
+ */
+uint32_t getValueProfDataSizeRT(const ValueProfRuntimeRecord *Record) {
+ RTRecordClosure.Record = Record;
+ return getValueProfDataSize(&RTRecordClosure);
+}
+
+/*
+ * Return a ValueProfData instance that stores the data collected
+ * at runtime. If \c DstData is provided by the caller, the value
+ * profile data will be stored in *DstData and DstData is returned;
+ * otherwise the method will allocate space for the value data and
+ * return a pointer to the newly allocated space.
+ */
+ValueProfData *
+serializeValueProfDataFromRT(const ValueProfRuntimeRecord *Record,
+ ValueProfData *DstData) {
+ RTRecordClosure.Record = Record;
+ return serializeValueProfDataFrom(&RTRecordClosure, DstData);
+}
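+
+/*
+ * A minimal usage sketch of the runtime-record interfaces above
+ * (illustrative only, not part of the original header). NumValueSites
+ * and Nodes are assumed to come from the instrumentation runtime; the
+ * example is compiled out with #if 0.
+ */
+#if 0
+static ValueProfData *
+exampleSerializeFunctionVPData(const uint16_t *NumValueSites,
+ ValueProfNode **Nodes) {
+ ValueProfRuntimeRecord RTRecord;
+ ValueProfData *VPD;
+ /* Pre-compute per-site value counts so the closure callbacks are cheap. */
+ if (initializeValueProfRuntimeRecord(&RTRecord, NumValueSites, Nodes))
+ return NULL; /* allocation failure */
+ /* A NULL DstData lets the closure's allocator provide the buffer. */
+ VPD = serializeValueProfDataFromRT(&RTRecord, NULL);
+ finalizeValueProfRuntimeRecord(&RTRecord);
+ return VPD; /* VPD->TotalSize bytes; the caller frees it. */
+}
+#endif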
+
+
+#undef INSTR_PROF_COMMON_API_IMPL
+#endif /* INSTR_PROF_COMMON_API_IMPL */
+
+/*============================================================================*/
+
+
+#ifndef INSTR_PROF_DATA_DEFINED
+
+#ifndef INSTR_PROF_DATA_INC_
+#define INSTR_PROF_DATA_INC_
+
+/* Helper macros. */
+#define INSTR_PROF_SIMPLE_QUOTE(x) #x
+#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
+#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
+#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
+
+/* Magic number to detect file format and endianness.
+ * Use 255 at one end, since no UTF-8 file can use that character. Avoid 0,
+ * so that utilities, like strings, don't grab it as a string. 129 is also
+ * invalid UTF-8, and high enough to be interesting.
+ * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
+ * for 32-bit platforms.
+ */
+#define INSTR_PROF_RAW_MAGIC_64 ((uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+ (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
+ (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
+#define INSTR_PROF_RAW_MAGIC_32 ((uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+ (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
+ (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129)
+
+/* Raw profile format version. */
+#define INSTR_PROF_RAW_VERSION 2
+
+/* Runtime section names and name strings. */
+#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
+#define INSTR_PROF_NAME_SECT_NAME __llvm_prf_names
+#define INSTR_PROF_CNTS_SECT_NAME __llvm_prf_cnts
+
+#define INSTR_PROF_DATA_SECT_NAME_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_DATA_SECT_NAME)
+#define INSTR_PROF_NAME_SECT_NAME_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_NAME_SECT_NAME)
+#define INSTR_PROF_CNTS_SECT_NAME_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_CNTS_SECT_NAME)
+
+/* Macros to define the start/stop section symbols for a given
+ * section on Linux. For instance,
+ * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
+ * expand to __start___llvm_prf_data.
+ */
+#define INSTR_PROF_SECT_START(Sect) \
+ INSTR_PROF_CONCAT(__start_,Sect)
+#define INSTR_PROF_SECT_STOP(Sect) \
+ INSTR_PROF_CONCAT(__stop_,Sect)
+
+/* Value Profiling API linkage name. */
+#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
+#define INSTR_PROF_VALUE_PROF_FUNC_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+
+/* InstrProfile per-function control data alignment. */
+#define INSTR_PROF_DATA_ALIGNMENT 8
+
+/* The data structure used by the value profiler to represent
+ * a tracked value.
+ */
+typedef struct InstrProfValueData {
+ /* Profiled value. */
+ uint64_t Value;
+ /* Number of times the value appears in the training run. */
+ uint64_t Count;
+} InstrProfValueData;
+
+/* This is an internal data structure used by the value profiler. It
+ * is defined here to allow the serialization code in LLVM to be
+ * shared with unit tests.
+ */
+typedef struct ValueProfNode {
+ InstrProfValueData VData;
+ struct ValueProfNode *Next;
+} ValueProfNode;
+
+#endif /* INSTR_PROF_DATA_INC_ */
+
+#else
+#undef INSTR_PROF_DATA_DEFINED
+#endif
+
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h b/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h
new file mode 100644
index 0000000..fed3e69
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProfReader.h
@@ -0,0 +1,390 @@
+//=-- InstrProfReader.h - Instrumented profiling readers ----------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for reading profiling data for instrumentation
+// based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROFREADER_H
+#define LLVM_PROFILEDATA_INSTRPROFREADER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/LineIterator.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/OnDiskHashTable.h"
+#include "llvm/Support/raw_ostream.h"
+#include <iterator>
+
+namespace llvm {
+
+class InstrProfReader;
+
+/// A file format agnostic iterator over profiling data.
+class InstrProfIterator : public std::iterator<std::input_iterator_tag,
+ InstrProfRecord> {
+ InstrProfReader *Reader;
+ InstrProfRecord Record;
+
+ void Increment();
+public:
+ InstrProfIterator() : Reader(nullptr) {}
+ InstrProfIterator(InstrProfReader *Reader) : Reader(Reader) { Increment(); }
+
+ InstrProfIterator &operator++() { Increment(); return *this; }
+ bool operator==(const InstrProfIterator &RHS) { return Reader == RHS.Reader; }
+ bool operator!=(const InstrProfIterator &RHS) { return Reader != RHS.Reader; }
+ InstrProfRecord &operator*() { return Record; }
+ InstrProfRecord *operator->() { return &Record; }
+};
+
+/// Base class and interface for reading profiling data of any known instrprof
+/// format. Provides an iterator over InstrProfRecords.
+class InstrProfReader {
+ std::error_code LastError;
+
+public:
+ InstrProfReader() : LastError(instrprof_error::success), Symtab() {}
+ virtual ~InstrProfReader() {}
+
+ /// Read the header. Required before reading first record.
+ virtual std::error_code readHeader() = 0;
+ /// Read a single record.
+ virtual std::error_code readNextRecord(InstrProfRecord &Record) = 0;
+ /// Iterator over profile data.
+ InstrProfIterator begin() { return InstrProfIterator(this); }
+ InstrProfIterator end() { return InstrProfIterator(); }
+
+ /// Return the PGO symtab. There are three different readers:
+ /// the raw, text, and indexed profile readers. The first two
+ /// are used only by the llvm-profdata tool, while the indexed
+ /// profile reader is also used by the llvm-cov tool and the compiler
+ /// (backend or frontend). Since creating the PGO symtab can incur
+ /// significant runtime and memory overhead (it touches data for the
+ /// whole program), the InstrProfSymtab for the indexed profile reader
+ /// should be created on demand, and it is recommended to be used only
+ /// for dumping with llvm-profdata, not from the compiler.
+ virtual InstrProfSymtab &getSymtab() = 0;
+
+protected:
+ std::unique_ptr<InstrProfSymtab> Symtab;
+ /// Set the current std::error_code and return same.
+ std::error_code error(std::error_code EC) {
+ LastError = EC;
+ return EC;
+ }
+
+ /// Clear the current error code and return a successful one.
+ std::error_code success() { return error(instrprof_error::success); }
+
+public:
+ /// Return true if the reader has finished reading the profile data.
+ bool isEOF() { return LastError == instrprof_error::eof; }
+ /// Return true if the reader encountered an error reading profiling data.
+ bool hasError() { return LastError && !isEOF(); }
+ /// Get the current error code.
+ std::error_code getError() { return LastError; }
+
+ /// Factory method to create an appropriately typed reader for the given
+ /// instrprof file.
+ static ErrorOr<std::unique_ptr<InstrProfReader>> create(std::string Path);
+
+ static ErrorOr<std::unique_ptr<InstrProfReader>>
+ create(std::unique_ptr<MemoryBuffer> Buffer);
+};
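+
+/// A minimal sketch of the intended reader workflow (illustrative only;
+/// the path and the consume() helper are placeholders):
+/// \code
+///   auto ReaderOrErr = InstrProfReader::create("default.profdata");
+///   if (std::error_code EC = ReaderOrErr.getError())
+///     return EC;
+///   std::unique_ptr<InstrProfReader> Reader = std::move(ReaderOrErr.get());
+///   // create() is assumed to have read and validated the file header.
+///   for (const InstrProfRecord &Record : *Reader)
+///     consume(Record);
+///   if (Reader->hasError())
+///     return Reader->getError();
+/// \endcode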
+
+/// Reader for the simple text based instrprof format.
+///
+/// This format is a simple text format that's suitable for test data. Records
+/// are separated by one or more blank lines, and record fields are separated by
+/// new lines.
+///
+/// Each record consists of a function name, a function hash, a number of
+/// counters, and then each counter value, in that order.
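+///
+/// For example, a single record might look like this (illustrative):
+///
+/// \code
+/// main
+/// 1024
+/// 2
+/// 100
+/// 88
+/// \endcode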
+class TextInstrProfReader : public InstrProfReader {
+private:
+ /// The profile data file contents.
+ std::unique_ptr<MemoryBuffer> DataBuffer;
+ /// Iterator over the profile data.
+ line_iterator Line;
+
+ TextInstrProfReader(const TextInstrProfReader &) = delete;
+ TextInstrProfReader &operator=(const TextInstrProfReader &) = delete;
+ std::error_code readValueProfileData(InstrProfRecord &Record);
+
+public:
+ TextInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer_)
+ : DataBuffer(std::move(DataBuffer_)), Line(*DataBuffer, true, '#') {}
+
+ /// Return true if the given buffer is in text instrprof format.
+ static bool hasFormat(const MemoryBuffer &Buffer);
+
+ /// Read the header.
+ std::error_code readHeader() override;
+ /// Read a single record.
+ std::error_code readNextRecord(InstrProfRecord &Record) override;
+
+ InstrProfSymtab &getSymtab() override {
+ assert(Symtab.get());
+ return *Symtab.get();
+ }
+};
+
+/// Reader for the raw instrprof binary format from runtime.
+///
+/// This format is a raw memory dump of the instrumentation-based profiling data
+/// from the runtime. It has no index.
+///
+/// Templated on the unsigned type whose size matches pointers on the platform
+/// that wrote the profile.
+template <class IntPtrT>
+class RawInstrProfReader : public InstrProfReader {
+private:
+ /// The profile data file contents.
+ std::unique_ptr<MemoryBuffer> DataBuffer;
+ bool ShouldSwapBytes;
+ uint64_t CountersDelta;
+ uint64_t NamesDelta;
+ const RawInstrProf::ProfileData<IntPtrT> *Data;
+ const RawInstrProf::ProfileData<IntPtrT> *DataEnd;
+ const uint64_t *CountersStart;
+ const char *NamesStart;
+ const uint8_t *ValueDataStart;
+ const char *ProfileEnd;
+ uint32_t ValueKindLast;
+ uint32_t CurValueDataSize;
+
+ InstrProfRecord::ValueMapType FunctionPtrToNameMap;
+
+ RawInstrProfReader(const RawInstrProfReader &) = delete;
+ RawInstrProfReader &operator=(const RawInstrProfReader &) = delete;
+public:
+ RawInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
+ : DataBuffer(std::move(DataBuffer)) { }
+
+ static bool hasFormat(const MemoryBuffer &DataBuffer);
+ std::error_code readHeader() override;
+ std::error_code readNextRecord(InstrProfRecord &Record) override;
+
+ InstrProfSymtab &getSymtab() override {
+ assert(Symtab.get());
+ return *Symtab.get();
+ }
+
+private:
+ void createSymtab(InstrProfSymtab &Symtab);
+ std::error_code readNextHeader(const char *CurrentPos);
+ std::error_code readHeader(const RawInstrProf::Header &Header);
+ template <class IntT> IntT swap(IntT Int) const {
+ return ShouldSwapBytes ? sys::getSwappedBytes(Int) : Int;
+ }
+ support::endianness getDataEndianness() const {
+ support::endianness HostEndian = getHostEndianness();
+ if (!ShouldSwapBytes)
+ return HostEndian;
+ if (HostEndian == support::little)
+ return support::big;
+ else
+ return support::little;
+ }
+
+ // Return the number of bytes (0..7) needed to pad SizeInBytes up to the
+ // next uint64_t boundary.
+ inline uint8_t getNumPaddingBytes(uint64_t SizeInBytes) {
+ return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
+ }
+ std::error_code readName(InstrProfRecord &Record);
+ std::error_code readFuncHash(InstrProfRecord &Record);
+ std::error_code readRawCounts(InstrProfRecord &Record);
+ std::error_code readValueProfilingData(InstrProfRecord &Record);
+ bool atEnd() const { return Data == DataEnd; }
+ void advanceData() {
+ Data++;
+ ValueDataStart += CurValueDataSize;
+ }
+
+ // CounterPtr and NamePtr are addresses in the process that produced the
+ // raw profile; subtracting the recorded delta rebases them into this
+ // reader's copy of the counter and name sections.
+ const uint64_t *getCounter(IntPtrT CounterPtr) const {
+ ptrdiff_t Offset = (swap(CounterPtr) - CountersDelta) / sizeof(uint64_t);
+ return CountersStart + Offset;
+ }
+ const char *getName(IntPtrT NamePtr) const {
+ ptrdiff_t Offset = (swap(NamePtr) - NamesDelta) / sizeof(char);
+ return NamesStart + Offset;
+ }
+};
+
+typedef RawInstrProfReader<uint32_t> RawInstrProfReader32;
+typedef RawInstrProfReader<uint64_t> RawInstrProfReader64;
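+
+/// A minimal format-dispatch sketch (illustrative only; \c Buffer holds the
+/// raw profile contents and \c Reader is owned by the caller):
+/// \code
+///   std::unique_ptr<InstrProfReader> Reader;
+///   if (RawInstrProfReader64::hasFormat(*Buffer))
+///     Reader.reset(new RawInstrProfReader64(std::move(Buffer)));
+///   else if (RawInstrProfReader32::hasFormat(*Buffer))
+///     Reader.reset(new RawInstrProfReader32(std::move(Buffer)));
+/// \endcode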
+
+namespace IndexedInstrProf {
+enum class HashT : uint32_t;
+}
+
+/// Trait for lookups into the on-disk hash table for the binary instrprof
+/// format.
+class InstrProfLookupTrait {
+ std::vector<InstrProfRecord> DataBuffer;
+ IndexedInstrProf::HashT HashType;
+ unsigned FormatVersion;
+ // Endianness of the input value profile data.
+ // It should be LE by default, but can be changed
+ // for testing purpose.
+ support::endianness ValueProfDataEndianness;
+
+public:
+ InstrProfLookupTrait(IndexedInstrProf::HashT HashType, unsigned FormatVersion)
+ : HashType(HashType), FormatVersion(FormatVersion),
+ ValueProfDataEndianness(support::little) {}
+
+ typedef ArrayRef<InstrProfRecord> data_type;
+
+ typedef StringRef internal_key_type;
+ typedef StringRef external_key_type;
+ typedef uint64_t hash_value_type;
+ typedef uint64_t offset_type;
+
+ static bool EqualKey(StringRef A, StringRef B) { return A == B; }
+ static StringRef GetInternalKey(StringRef K) { return K; }
+ static StringRef GetExternalKey(StringRef K) { return K; }
+
+ hash_value_type ComputeHash(StringRef K);
+
+ static std::pair<offset_type, offset_type>
+ ReadKeyDataLength(const unsigned char *&D) {
+ using namespace support;
+ offset_type KeyLen = endian::readNext<offset_type, little, unaligned>(D);
+ offset_type DataLen = endian::readNext<offset_type, little, unaligned>(D);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ StringRef ReadKey(const unsigned char *D, offset_type N) {
+ return StringRef((const char *)D, N);
+ }
+
+ bool readValueProfilingData(const unsigned char *&D,
+ const unsigned char *const End);
+ data_type ReadData(StringRef K, const unsigned char *D, offset_type N);
+
+ // Used for testing purposes only.
+ void setValueProfDataEndianness(support::endianness Endianness) {
+ ValueProfDataEndianness = Endianness;
+ }
+};
+
+struct InstrProfReaderIndexBase {
+ // Read all the profile records with the same key pointed to by the
+ // current iterator.
+ virtual std::error_code getRecords(ArrayRef<InstrProfRecord> &Data) = 0;
+ // Read all the profile records with the key equal to FuncName.
+ virtual std::error_code getRecords(StringRef FuncName,
+ ArrayRef<InstrProfRecord> &Data) = 0;
+ virtual void advanceToNextKey() = 0;
+ virtual bool atEnd() const = 0;
+ virtual void setValueProfDataEndianness(support::endianness Endianness) = 0;
+ virtual ~InstrProfReaderIndexBase() {}
+ virtual uint64_t getVersion() const = 0;
+ virtual void populateSymtab(InstrProfSymtab &) = 0;
+};
+
+typedef OnDiskIterableChainedHashTable<InstrProfLookupTrait>
+ OnDiskHashTableImplV3;
+
+template <typename HashTableImpl>
+class InstrProfReaderIndex : public InstrProfReaderIndexBase {
+
+private:
+ std::unique_ptr<HashTableImpl> HashTable;
+ typename HashTableImpl::data_iterator RecordIterator;
+ uint64_t FormatVersion;
+
+public:
+ InstrProfReaderIndex(const unsigned char *Buckets,
+ const unsigned char *const Payload,
+ const unsigned char *const Base,
+ IndexedInstrProf::HashT HashType, uint64_t Version);
+
+ std::error_code getRecords(ArrayRef<InstrProfRecord> &Data) override;
+ std::error_code getRecords(StringRef FuncName,
+ ArrayRef<InstrProfRecord> &Data) override;
+ void advanceToNextKey() override { RecordIterator++; }
+ bool atEnd() const override {
+ return RecordIterator == HashTable->data_end();
+ }
+ void setValueProfDataEndianness(support::endianness Endianness) override {
+ HashTable->getInfoObj().setValueProfDataEndianness(Endianness);
+ }
+ ~InstrProfReaderIndex() override {}
+ uint64_t getVersion() const override { return FormatVersion; }
+ void populateSymtab(InstrProfSymtab &Symtab) override {
+ Symtab.create(HashTable->keys());
+ }
+};
+
+/// Reader for the indexed binary instrprof format.
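+///
+/// A minimal lookup sketch (illustrative only; the path and \c FuncHash are
+/// placeholders):
+/// \code
+///   auto ReaderOrErr = IndexedInstrProfReader::create("default.profdata");
+///   if (std::error_code EC = ReaderOrErr.getError())
+///     return EC;
+///   std::vector<uint64_t> Counts;
+///   if (std::error_code EC = ReaderOrErr.get()->getFunctionCounts(
+///           "main", FuncHash, Counts))
+///     return EC;
+/// \endcode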
+class IndexedInstrProfReader : public InstrProfReader {
+private:
+ /// The profile data file contents.
+ std::unique_ptr<MemoryBuffer> DataBuffer;
+ /// The index into the profile data.
+ std::unique_ptr<InstrProfReaderIndexBase> Index;
+ /// The maximal execution count among all functions.
+ uint64_t MaxFunctionCount;
+
+ IndexedInstrProfReader(const IndexedInstrProfReader &) = delete;
+ IndexedInstrProfReader &operator=(const IndexedInstrProfReader &) = delete;
+
+public:
+ uint64_t getVersion() const { return Index->getVersion(); }
+ IndexedInstrProfReader(std::unique_ptr<MemoryBuffer> DataBuffer)
+ : DataBuffer(std::move(DataBuffer)), Index(nullptr) {}
+
+ /// Return true if the given buffer is in an indexed instrprof format.
+ static bool hasFormat(const MemoryBuffer &DataBuffer);
+
+ /// Read the file header.
+ std::error_code readHeader() override;
+ /// Read a single record.
+ std::error_code readNextRecord(InstrProfRecord &Record) override;
+
+ /// Return the pointer to InstrProfRecord associated with FuncName
+ /// and FuncHash
+ ErrorOr<InstrProfRecord> getInstrProfRecord(StringRef FuncName,
+ uint64_t FuncHash);
+
+ /// Fill Counts with the profile data for the given function name.
+ std::error_code getFunctionCounts(StringRef FuncName, uint64_t FuncHash,
+ std::vector<uint64_t> &Counts);
+
+ /// Return the maximum of all known function counts.
+ uint64_t getMaximumFunctionCount() { return MaxFunctionCount; }
+
+ /// Factory method to create an indexed reader.
+ static ErrorOr<std::unique_ptr<IndexedInstrProfReader>>
+ create(std::string Path);
+
+ static ErrorOr<std::unique_ptr<IndexedInstrProfReader>>
+ create(std::unique_ptr<MemoryBuffer> Buffer);
+
+ // Used for testing purposes only.
+ void setValueProfDataEndianness(support::endianness Endianness) {
+ Index->setValueProfDataEndianness(Endianness);
+ }
+
+ // See description in the base class. This interface is designed
+ // to be used by llvm-profdata (for dumping). Avoid using this when
+ // the client is the compiler.
+ InstrProfSymtab &getSymtab() override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/InstrProfWriter.h b/contrib/llvm/include/llvm/ProfileData/InstrProfWriter.h
new file mode 100644
index 0000000..e7f53de
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/InstrProfWriter.h
@@ -0,0 +1,61 @@
+//=-- InstrProfWriter.h - Instrumented profiling writer -----------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing profiling data for instrumentation
+// based PGO and coverage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_INSTRPROFWRITER_H
+#define LLVM_PROFILEDATA_INSTRPROFWRITER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Writer for instrumentation based profile data.
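+///
+/// A minimal usage sketch (illustrative only; the construction of \c Record
+/// and of \c OS, a raw_fd_ostream, is elided):
+/// \code
+///   InstrProfWriter Writer;
+///   if (std::error_code EC = Writer.addRecord(std::move(Record)))
+///     return EC;
+///   Writer.write(OS);
+/// \endcode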
+class InstrProfWriter {
+public:
+ typedef SmallDenseMap<uint64_t, InstrProfRecord, 1> ProfilingData;
+
+private:
+ StringMap<ProfilingData> FunctionData;
+ uint64_t MaxFunctionCount;
+
+public:
+ InstrProfWriter() : MaxFunctionCount(0) {}
+
+ /// Add function counts for the given function. If there are already counts
+ /// for this function and the hash and number of counts match, each counter is
+ /// summed. Optionally scale counts by \p Weight.
+ std::error_code addRecord(InstrProfRecord &&I, uint64_t Weight = 1);
+ /// Write the profile to \c OS
+ void write(raw_fd_ostream &OS);
+ /// Write the profile in text format to \c OS
+ void writeText(raw_fd_ostream &OS);
+ /// Write \c Record in text format to \c OS
+ static void writeRecordInText(const InstrProfRecord &Record,
+ InstrProfSymtab &Symtab, raw_fd_ostream &OS);
+ /// Write the profile, returning the raw data. For testing.
+ std::unique_ptr<MemoryBuffer> writeBuffer();
+
+ // Internal interface for testing purposes only.
+ void setValueProfDataEndianness(support::endianness Endianness);
+
+private:
+ std::pair<uint64_t, uint64_t> writeImpl(raw_ostream &OS);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProf.h b/contrib/llvm/include/llvm/ProfileData/SampleProf.h
new file mode 100644
index 0000000..8df3fe8
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProf.h
@@ -0,0 +1,386 @@
+//=-- SampleProf.h - Sampling profiling format support --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains common definitions used in the reading and writing of
+// sample profile data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_PROFILEDATA_SAMPLEPROF_H_
+#define LLVM_PROFILEDATA_SAMPLEPROF_H_
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <map>
+#include <system_error>
+
+namespace llvm {
+
+const std::error_category &sampleprof_category();
+
+enum class sampleprof_error {
+ success = 0,
+ bad_magic,
+ unsupported_version,
+ too_large,
+ truncated,
+ malformed,
+ unrecognized_format,
+ unsupported_writing_format,
+ truncated_name_table,
+ not_implemented,
+ counter_overflow
+};
+
+inline std::error_code make_error_code(sampleprof_error E) {
+ return std::error_code(static_cast<int>(E), sampleprof_category());
+}
+
+inline sampleprof_error MergeResult(sampleprof_error &Accumulator,
+ sampleprof_error Result) {
+ // Prefer first error encountered as later errors may be secondary effects of
+ // the initial problem.
+ if (Accumulator == sampleprof_error::success &&
+ Result != sampleprof_error::success)
+ Accumulator = Result;
+ return Accumulator;
+}
+
+} // end namespace llvm
+
+namespace std {
+template <>
+struct is_error_code_enum<llvm::sampleprof_error> : std::true_type {};
+}
+
+namespace llvm {
+
+namespace sampleprof {
+
+/// Return the magic number 0x5350524f463432ff ("SPROF42" plus 0xff).
+static inline uint64_t SPMagic() {
+ return uint64_t('S') << (64 - 8) | uint64_t('P') << (64 - 16) |
+ uint64_t('R') << (64 - 24) | uint64_t('O') << (64 - 32) |
+ uint64_t('F') << (64 - 40) | uint64_t('4') << (64 - 48) |
+ uint64_t('2') << (64 - 56) | uint64_t(0xff);
+}
+
+static inline uint64_t SPVersion() { return 102; }
+
+/// Represents the relative location of an instruction.
+///
+/// Instruction locations are specified by the line offset from the
+/// beginning of the function (marked by the line where the function
+/// header is) and the discriminator value within that line.
+///
+/// The discriminator value is useful to distinguish instructions
+/// that are on the same line but belong to different basic blocks
+/// (e.g., the two post-increment instructions in "if (p) x++; else y++;").
+struct LineLocation {
+ LineLocation(uint32_t L, uint32_t D) : LineOffset(L), Discriminator(D) {}
+ void print(raw_ostream &OS) const;
+ void dump() const;
+ bool operator<(const LineLocation &O) const {
+ return LineOffset < O.LineOffset ||
+ (LineOffset == O.LineOffset && Discriminator < O.Discriminator);
+ }
+
+ uint32_t LineOffset;
+ uint32_t Discriminator;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const LineLocation &Loc);
+
+/// Represents the relative location of a callsite.
+///
+/// Callsite locations are specified by the line offset from the
+/// beginning of the function (marked by the line where the function
+/// head is), the discriminator value within that line, and the callee
+/// function name.
+struct CallsiteLocation : public LineLocation {
+ CallsiteLocation(uint32_t L, uint32_t D, StringRef N)
+ : LineLocation(L, D), CalleeName(N) {}
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ StringRef CalleeName;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const CallsiteLocation &Loc);
+
+/// Representation of a single sample record.
+///
+/// A sample record is represented by a positive integer value, which
+/// indicates how frequently the associated line location was executed.
+///
+/// Additionally, if the associated location contains a function call,
+/// the record will hold a list of all the possible called targets. For
+/// direct calls, this will be the exact function being invoked. For
+/// indirect calls (function pointers, virtual table dispatch), this
+/// will be a list of one or more functions.
+class SampleRecord {
+public:
+ typedef StringMap<uint64_t> CallTargetMap;
+
+ SampleRecord() : NumSamples(0), CallTargets() {}
+
+ /// Increment the number of samples for this record by \p S.
+ /// Optionally scale sample count \p S by \p Weight.
+ ///
+ /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
+ /// around unsigned integers.
+ sampleprof_error addSamples(uint64_t S, uint64_t Weight = 1) {
+ bool Overflowed;
+ if (Weight > 1) {
+ S = SaturatingMultiply(S, Weight, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+ }
+ NumSamples = SaturatingAdd(NumSamples, S, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+
+ return sampleprof_error::success;
+ }
+
+ /// Add called function \p F with samples \p S.
+ /// Optionally scale sample count \p S by \p Weight.
+ ///
+ /// Sample counts accumulate using saturating arithmetic, to avoid wrapping
+ /// around unsigned integers.
+ sampleprof_error addCalledTarget(StringRef F, uint64_t S,
+ uint64_t Weight = 1) {
+ uint64_t &TargetSamples = CallTargets[F];
+ bool Overflowed;
+ if (Weight > 1) {
+ S = SaturatingMultiply(S, Weight, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+ }
+ TargetSamples = SaturatingAdd(TargetSamples, S, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+
+ return sampleprof_error::success;
+ }
+
+ /// Return true if this sample record contains function calls.
+ bool hasCalls() const { return CallTargets.size() > 0; }
+
+ uint64_t getSamples() const { return NumSamples; }
+ const CallTargetMap &getCallTargets() const { return CallTargets; }
+
+ /// Merge the samples in \p Other into this record.
+ /// Optionally scale sample counts by \p Weight.
+ sampleprof_error merge(const SampleRecord &Other, uint64_t Weight = 1) {
+ sampleprof_error Result = addSamples(Other.getSamples(), Weight);
+ for (const auto &I : Other.getCallTargets()) {
+ MergeResult(Result, addCalledTarget(I.first(), I.second, Weight));
+ }
+ return Result;
+ }
+
+ void print(raw_ostream &OS, unsigned Indent) const;
+ void dump() const;
+
+private:
+ uint64_t NumSamples;
+ CallTargetMap CallTargets;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const SampleRecord &Sample);
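+
+/// A small worked example of the weighted, saturating accumulation above
+/// (illustrative only):
+/// \code
+///   SampleRecord A, B;
+///   A.addSamples(100);
+///   B.addSamples(50);
+///   B.addCalledTarget("qux", 7);
+///   A.merge(B, /*Weight=*/2);
+///   // A.getSamples() == 200; "qux" now accounts for 14 samples in A.
+/// \endcode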
+
+typedef std::map<LineLocation, SampleRecord> BodySampleMap;
+class FunctionSamples;
+typedef std::map<CallsiteLocation, FunctionSamples> CallsiteSampleMap;
+
+/// Representation of the samples collected for a function.
+///
+/// This data structure contains all the collected samples for the body
+/// of a function. Each sample corresponds to a LineLocation instance
+/// within the body of the function.
+class FunctionSamples {
+public:
+ FunctionSamples() : TotalSamples(0), TotalHeadSamples(0) {}
+ void print(raw_ostream &OS = dbgs(), unsigned Indent = 0) const;
+ void dump() const;
+ sampleprof_error addTotalSamples(uint64_t Num, uint64_t Weight = 1) {
+ bool Overflowed;
+ if (Weight > 1) {
+ Num = SaturatingMultiply(Num, Weight, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+ }
+ TotalSamples = SaturatingAdd(TotalSamples, Num, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+
+ return sampleprof_error::success;
+ }
+ sampleprof_error addHeadSamples(uint64_t Num, uint64_t Weight = 1) {
+ bool Overflowed;
+ if (Weight > 1) {
+ Num = SaturatingMultiply(Num, Weight, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+ }
+ TotalHeadSamples = SaturatingAdd(TotalHeadSamples, Num, &Overflowed);
+ if (Overflowed)
+ return sampleprof_error::counter_overflow;
+
+ return sampleprof_error::success;
+ }
+ sampleprof_error addBodySamples(uint32_t LineOffset, uint32_t Discriminator,
+ uint64_t Num, uint64_t Weight = 1) {
+ return BodySamples[LineLocation(LineOffset, Discriminator)].addSamples(
+ Num, Weight);
+ }
+ sampleprof_error addCalledTargetSamples(uint32_t LineOffset,
+ uint32_t Discriminator,
+ std::string FName, uint64_t Num,
+ uint64_t Weight = 1) {
+ return BodySamples[LineLocation(LineOffset, Discriminator)].addCalledTarget(
+ FName, Num, Weight);
+ }
+
+ /// Return the number of samples collected at the given location.
+ /// Each location is specified by \p LineOffset and \p Discriminator.
+ /// If the location is not found in profile, return error.
+ ErrorOr<uint64_t> findSamplesAt(uint32_t LineOffset,
+ uint32_t Discriminator) const {
+ const auto &ret = BodySamples.find(LineLocation(LineOffset, Discriminator));
+ if (ret == BodySamples.end())
+ return std::error_code();
+ else
+ return ret->second.getSamples();
+ }
+
+ /// Return the function samples at the given callsite location.
+ FunctionSamples &functionSamplesAt(const CallsiteLocation &Loc) {
+ return CallsiteSamples[Loc];
+ }
+
+ /// Return a pointer to function samples at the given callsite location.
+ const FunctionSamples *
+ findFunctionSamplesAt(const CallsiteLocation &Loc) const {
+ auto iter = CallsiteSamples.find(Loc);
+ if (iter == CallsiteSamples.end()) {
+ return nullptr;
+ } else {
+ return &iter->second;
+ }
+ }
+
+ bool empty() const { return TotalSamples == 0; }
+
+ /// Return the total number of samples collected inside the function.
+ uint64_t getTotalSamples() const { return TotalSamples; }
+
+ /// Return the total number of samples collected at the head of the
+ /// function.
+ uint64_t getHeadSamples() const { return TotalHeadSamples; }
+
+ /// Return all the samples collected in the body of the function.
+ const BodySampleMap &getBodySamples() const { return BodySamples; }
+
+ /// Return all the callsite samples collected in the body of the function.
+ const CallsiteSampleMap &getCallsiteSamples() const {
+ return CallsiteSamples;
+ }
+
+ /// Merge the samples in \p Other into this one.
+ /// Optionally scale samples by \p Weight.
+ sampleprof_error merge(const FunctionSamples &Other, uint64_t Weight = 1) {
+ sampleprof_error Result = sampleprof_error::success;
+ MergeResult(Result, addTotalSamples(Other.getTotalSamples(), Weight));
+ MergeResult(Result, addHeadSamples(Other.getHeadSamples(), Weight));
+ for (const auto &I : Other.getBodySamples()) {
+ const LineLocation &Loc = I.first;
+ const SampleRecord &Rec = I.second;
+ MergeResult(Result, BodySamples[Loc].merge(Rec, Weight));
+ }
+ for (const auto &I : Other.getCallsiteSamples()) {
+ const CallsiteLocation &Loc = I.first;
+ const FunctionSamples &Rec = I.second;
+ MergeResult(Result, functionSamplesAt(Loc).merge(Rec, Weight));
+ }
+ return Result;
+ }
+
+private:
+ /// Total number of samples collected inside this function.
+ ///
+ /// Samples are cumulative, they include all the samples collected
+ /// inside this function and all its inlined callees.
+ uint64_t TotalSamples;
+
+ /// Total number of samples collected at the head of the function.
+ /// This is an approximation of the number of calls made to this function
+ /// at runtime.
+ uint64_t TotalHeadSamples;
+
+ /// Map instruction locations to collected samples.
+ ///
+ /// Each entry in this map contains the number of samples
+ /// collected at the corresponding line offset. All line locations
+ /// are an offset from the start of the function.
+ BodySampleMap BodySamples;
+
+ /// Map call sites to collected samples for the called function.
+ ///
+ /// Each entry in this map corresponds to all the samples
+ /// collected for the inlined function call at the given
+ /// location. For example, given:
+ ///
+ /// void foo() {
+ /// 1 bar();
+ /// ...
+ /// 8 baz();
+ /// }
+ ///
+ /// If the bar() and baz() calls were inlined inside foo(), this
+ /// map will contain two entries. One for all the samples collected
+ /// in the call to bar() at line offset 1, the other for all the samples
+ /// collected in the call to baz() at line offset 8.
+ CallsiteSampleMap CallsiteSamples;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const FunctionSamples &FS);
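+
+/// A small usage sketch of the body-sample interfaces (illustrative only):
+/// \code
+///   FunctionSamples FS;
+///   FS.addTotalSamples(200);
+///   FS.addBodySamples(/*LineOffset=*/4, /*Discriminator=*/0, 120);
+///   ErrorOr<uint64_t> S = FS.findSamplesAt(4, 0); // *S == 120
+/// \endcode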
+
+/// Sort a LocationT->SampleT map by LocationT.
+///
+/// It produces a sorted list of <LocationT, SampleT> records by ascending
+/// order of LocationT.
+template <class LocationT, class SampleT> class SampleSorter {
+public:
+ typedef std::pair<const LocationT, SampleT> SamplesWithLoc;
+ typedef SmallVector<const SamplesWithLoc *, 20> SamplesWithLocList;
+
+ SampleSorter(const std::map<LocationT, SampleT> &Samples) {
+ for (const auto &I : Samples)
+ V.push_back(&I);
+ std::stable_sort(V.begin(), V.end(),
+ [](const SamplesWithLoc *A, const SamplesWithLoc *B) {
+ return A->first < B->first;
+ });
+ }
+ const SamplesWithLocList &get() const { return V; }
+
+private:
+ SamplesWithLocList V;
+};
+
+} // end namespace sampleprof
+
+} // end namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROF_H_
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h b/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h
new file mode 100644
index 0000000..6db0fbb
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -0,0 +1,409 @@
+//===- SampleProfReader.h - Read LLVM sample profile data -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions needed for reading sample profiles.
+//
+// NOTE: If you are making changes to this file format, please remember
+// to document them in the Clang documentation at
+// tools/clang/docs/UsersManual.rst.
+//
+// Text format
+// -----------
+//
+// Sample profiles are written as ASCII text. The file is divided into
+// sections, which correspond to each of the functions executed at runtime.
+// Each section has the following format
+//
+// function1:total_samples:total_head_samples
+// offset1[.discriminator]: number_of_samples [fn1:num fn2:num ... ]
+// offset2[.discriminator]: number_of_samples [fn3:num fn4:num ... ]
+// ...
+// offsetN[.discriminator]: number_of_samples [fn5:num fn6:num ... ]
+// offsetA[.discriminator]: fnA:num_of_total_samples
+// offsetA1[.discriminator]: number_of_samples [fn7:num fn8:num ... ]
+// ...
+//
+// This is a nested tree in which the indentation represents the nesting level
+// of the inline stack. There are no blank lines in the file, and the spacing
+// within a single line is fixed. Additional spaces will result in an error
+// while reading the file.
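+//
+// For example (illustrative; the numbers are made up):
+//
+// main:2100:10
+//  1: 534
+//  2: 88 bar:66
+//  3: _Z3fooi:1000
+//   1: 1000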
+//
+// Any line starting with the '#' character is completely ignored.
+//
+// Inlined calls are represented with indentation. The inline stack is a
+// stack of source locations in which the top of the stack represents the
+// leaf function, and the bottom of the stack represents the actual
+// symbol to which the instruction belongs.
+//
+// Function names must be mangled in order for the profile loader to
+// match them in the current translation unit. The two numbers in the
+// function header specify how many total samples were accumulated in the
+// function (first number), and the total number of samples accumulated
+// in the prologue of the function (second number). This head sample
+// count provides an indicator of how frequently the function is invoked.
+//
+// There are two types of lines in the function body.
+//
+// * Sampled line represents the profile information of a source location.
+// * Callsite line represents the profile information of a callsite.
+//
+// Each sampled line may contain several items. Some are optional (marked
+// below):
+//
+// a. Source line offset. This number represents the line number
+// in the function where the sample was collected. The line number is
+// always relative to the line where the symbol of the function is
+// defined. So, if the function has its header at line 280, the offset
+// 13 is at line 293 in the file.
+//
+// Note that this offset should never be a negative number. This could
+// happen in cases like macros. The debug machinery will register the
+// line number at the point of macro expansion. So, if the macro was
+// expanded in a line before the start of the function, the profile
+// converter should emit a 0 as the offset (this means that the optimizers
+// will not be able to associate a meaningful weight to the instructions
+// in the macro).
+//
+// b. [OPTIONAL] Discriminator. This is used if the sampled program
+// was compiled with DWARF discriminator support
+// (http://wiki.dwarfstd.org/index.php?title=Path_Discriminators).
+// DWARF discriminators are unsigned integer values that allow the
+// compiler to distinguish between multiple execution paths on the
+// same source line location.
+//
+// For example, consider the line of code ``if (cond) foo(); else bar();``.
+// If the predicate ``cond`` is true 80% of the time, then the edge
+// into function ``foo`` should be considered to be taken most of the
+// time. But both calls to ``foo`` and ``bar`` are at the same source
+// line, so a sample count at that line is not sufficient. The
+// compiler needs to know which part of that line is taken more
+// frequently.
+//
+// This is what discriminators provide. In this case, the calls to
+// ``foo`` and ``bar`` will be at the same line, but will have
+// different discriminator values. This allows the compiler to correctly
+// set edge weights into ``foo`` and ``bar``.
+//
+// c. Number of samples. This is an integer quantity representing the
+// number of samples collected by the profiler at this source
+// location.
+//
+// d. [OPTIONAL] Potential call targets and samples. If present, this
+// line contains a call instruction. This models both direct and
+// indirect calls. Each called target is listed together with its
+// number of samples. For example,
+//
+// 130: 7 foo:3 bar:2 baz:7
+//
+// The above means that at relative line offset 130 there is a call
+// instruction that calls one of ``foo()``, ``bar()`` and ``baz()``,
+// with ``baz()`` being the relatively more frequently called target.
+//
+// Each callsite line may contain several items. Some are optional.
+//
+// a. Source line offset. This number represents the line number of the
+// callsite that is inlined in the profiled binary.
+//
+// b. [OPTIONAL] Discriminator. Same as the discriminator for sampled line.
+//
+// c. Number of samples. This is an integer quantity representing the
+// total number of samples collected for the inlined instance at this
+// callsite.
+//
+//
+// Binary format
+// -------------
+//
+// This is a more compact encoding. Numbers are encoded as ULEB128 values
+// and all strings are encoded in a name table. The file is organized in
+// the following sections:
+//
+// MAGIC (uint64_t)
+// File identifier computed by function SPMagic() (0x5350524f463432ff)
+//
+// VERSION (uint32_t)
+// File format version number computed by SPVersion()
+//
+// NAME TABLE
+// SIZE (uint32_t)
+// Number of entries in the name table.
+// NAMES
+// A NUL-separated list of SIZE strings.
+//
+// FUNCTION BODY (one for each uninlined function body present in the profile)
+// HEAD_SAMPLES (uint64_t) [only for top-level functions]
+// Total number of samples collected at the head (prologue) of the
+// function.
+// NOTE: This field should only be present for top-level functions
+// (i.e., not inlined into any caller). Inlined function calls
+// have no prologue, so they don't need this.
+// NAME_IDX (uint32_t)
+// Index into the name table indicating the function name.
+// SAMPLES (uint64_t)
+// Total number of samples collected in this function.
+// NRECS (uint32_t)
+// Total number of sampling records in this function's profile.
+// BODY RECORDS
+// A list of NRECS entries. Each entry contains:
+// OFFSET (uint32_t)
+// Line offset from the start of the function.
+// DISCRIMINATOR (uint32_t)
+// Discriminator value (see description of discriminators
+// in the text format documentation above).
+// SAMPLES (uint64_t)
+// Number of samples collected at this location.
+// NUM_CALLS (uint32_t)
+// Number of non-inlined function calls made at this location. In the
+// case of direct calls, this number will always be 1. For indirect
+// calls (virtual functions and function pointers) this will
+// represent all the actual functions called at runtime.
+// CALL_TARGETS
+// A list of NUM_CALLS entries for each called function:
+// NAME_IDX (uint32_t)
+// Index into the name table with the callee name.
+// SAMPLES (uint64_t)
+// Number of samples collected at the call site.
+// NUM_INLINED_FUNCTIONS (uint32_t)
+// Number of callees inlined into this function.
+// INLINED FUNCTION RECORDS
+// A list of NUM_INLINED_FUNCTIONS entries describing each of the inlined
+// callees.
+// OFFSET (uint32_t)
+// Line offset from the start of the function.
+// DISCRIMINATOR (uint32_t)
+// Discriminator value (see description of discriminators
+// in the text format documentation above).
+// FUNCTION BODY
+// A FUNCTION BODY entry describing the inlined function.
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_PROFILEDATA_SAMPLEPROFREADER_H
+#define LLVM_PROFILEDATA_SAMPLEPROFREADER_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/GCOV.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+namespace sampleprof {
+
+/// \brief Sample-based profile reader.
+///
+/// Each profile contains sample counts for all the functions
+/// executed. Inside each function, statements are annotated with the
+/// collected samples on all the instructions associated with that
+/// statement.
+///
+/// For this to produce meaningful data, the program needs to be
+/// compiled with some debug information (at minimum, line numbers:
+/// -gline-tables-only). Otherwise, it will be impossible to match IR
+/// instructions to the line numbers collected by the profiler.
+///
+/// From the profile file, we are interested in collecting the
+/// following information:
+///
+/// * A list of functions included in the profile (mangled names).
+///
+/// * For each function F:
+/// 1. The total number of samples collected in F.
+///
+/// 2. The samples collected at each line in F. To provide some
+/// protection against source code shuffling, line numbers should
+/// be relative to the start of the function.
+///
+/// The reader supports two file formats: text and binary. The text format
+/// is useful for debugging and testing, while the binary format is more
+/// compact and I/O efficient. They can both be used interchangeably.
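+///
+/// A minimal workflow sketch (illustrative only; \c Ctx is an LLVMContext
+/// and \c F an llvm::Function, both owned by the caller):
+/// \code
+///   auto ReaderOrErr = SampleProfileReader::create("profile.txt", Ctx);
+///   if (std::error_code EC = ReaderOrErr.getError())
+///     return EC;
+///   std::unique_ptr<SampleProfileReader> Reader =
+///       std::move(ReaderOrErr.get());
+///   if (std::error_code EC = Reader->read())
+///     return EC;
+///   FunctionSamples *FS = Reader->getSamplesFor(F);
+/// \endcode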
+class SampleProfileReader {
+public:
+ SampleProfileReader(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+ : Profiles(0), Ctx(C), Buffer(std::move(B)) {}
+
+ virtual ~SampleProfileReader() {}
+
+ /// \brief Read and validate the file header.
+ virtual std::error_code readHeader() = 0;
+
+ /// \brief Read sample profiles from the associated file.
+ virtual std::error_code read() = 0;
+
+ /// \brief Print the profile for \p FName on stream \p OS.
+ void dumpFunctionProfile(StringRef FName, raw_ostream &OS = dbgs());
+
+ /// \brief Print all the profiles on stream \p OS.
+ void dump(raw_ostream &OS = dbgs());
+
+ /// \brief Return the samples collected for function \p F.
+ FunctionSamples *getSamplesFor(const Function &F) {
+ return &Profiles[F.getName()];
+ }
+
+ /// \brief Return all the profiles.
+ StringMap<FunctionSamples> &getProfiles() { return Profiles; }
+
+ /// \brief Report a parse error message.
+ void reportError(int64_t LineNumber, Twine Msg) const {
+ Ctx.diagnose(DiagnosticInfoSampleProfile(Buffer->getBufferIdentifier(),
+ LineNumber, Msg));
+ }
+
+ /// \brief Create a sample profile reader appropriate to the file format.
+ static ErrorOr<std::unique_ptr<SampleProfileReader>>
+ create(StringRef Filename, LLVMContext &C);
+
+ /// \brief Create a sample profile reader from the supplied memory buffer.
+ static ErrorOr<std::unique_ptr<SampleProfileReader>>
+ create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C);
+
+protected:
+ /// \brief Map every function to its associated profile.
+ ///
+ /// The profile of every function executed at runtime is collected
+ /// in the structure FunctionSamples. This maps function objects
+ /// to their corresponding profiles.
+ StringMap<FunctionSamples> Profiles;
+
+ /// \brief LLVM context used to emit diagnostics.
+ LLVMContext &Ctx;
+
+ /// \brief Memory buffer holding the profile file.
+ std::unique_ptr<MemoryBuffer> Buffer;
+};
+
+class SampleProfileReaderText : public SampleProfileReader {
+public:
+ SampleProfileReaderText(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+ : SampleProfileReader(std::move(B), C) {}
+
+ /// \brief Read and validate the file header.
+ std::error_code readHeader() override { return sampleprof_error::success; }
+
+ /// \brief Read sample profiles from the associated file.
+ std::error_code read() override;
+
+ /// \brief Return true if \p Buffer is in the format supported by this class.
+ static bool hasFormat(const MemoryBuffer &Buffer);
+};
+
+class SampleProfileReaderBinary : public SampleProfileReader {
+public:
+ SampleProfileReaderBinary(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+ : SampleProfileReader(std::move(B), C), Data(nullptr), End(nullptr) {}
+
+ /// \brief Read and validate the file header.
+ std::error_code readHeader() override;
+
+ /// \brief Read sample profiles from the associated file.
+ std::error_code read() override;
+
+ /// \brief Return true if \p Buffer is in the format supported by this class.
+ static bool hasFormat(const MemoryBuffer &Buffer);
+
+protected:
+ /// \brief Read a numeric value of type T from the profile.
+ ///
+ /// If an error occurs during decoding, a diagnostic message is emitted and
+ /// EC is set.
+ ///
+ /// \returns the read value.
+ template <typename T> ErrorOr<T> readNumber();
+
+ /// \brief Read a string from the profile.
+ ///
+ /// If an error occurs during decoding, a diagnostic message is emitted and
+ /// EC is set.
+ ///
+ /// \returns the read value.
+ ErrorOr<StringRef> readString();
+
+ /// Read a string indirectly via the name table.
+ ErrorOr<StringRef> readStringFromTable();
+
+ /// \brief Return true if we've reached the end of file.
+ bool at_eof() const { return Data >= End; }
+
+ /// Read the contents of the given profile instance.
+ std::error_code readProfile(FunctionSamples &FProfile);
+
+ /// \brief Points to the current location in the buffer.
+ const uint8_t *Data;
+
+ /// \brief Points to the end of the buffer.
+ const uint8_t *End;
+
+ /// Function name table.
+ std::vector<StringRef> NameTable;
+};
+
+typedef SmallVector<FunctionSamples *, 10> InlineCallStack;
+
+// Supported histogram types in GCC. Currently, we only need support for
+// call target histograms.
+enum HistType {
+ HIST_TYPE_INTERVAL,
+ HIST_TYPE_POW2,
+ HIST_TYPE_SINGLE_VALUE,
+ HIST_TYPE_CONST_DELTA,
+ HIST_TYPE_INDIR_CALL,
+ HIST_TYPE_AVERAGE,
+ HIST_TYPE_IOR,
+ HIST_TYPE_INDIR_CALL_TOPN
+};
+
+class SampleProfileReaderGCC : public SampleProfileReader {
+public:
+ SampleProfileReaderGCC(std::unique_ptr<MemoryBuffer> B, LLVMContext &C)
+ : SampleProfileReader(std::move(B), C), GcovBuffer(Buffer.get()) {}
+
+ /// \brief Read and validate the file header.
+ std::error_code readHeader() override;
+
+ /// \brief Read sample profiles from the associated file.
+ std::error_code read() override;
+
+ /// \brief Return true if \p Buffer is in the format supported by this class.
+ static bool hasFormat(const MemoryBuffer &Buffer);
+
+protected:
+ std::error_code readNameTable();
+ std::error_code readOneFunctionProfile(const InlineCallStack &InlineStack,
+ bool Update, uint32_t Offset);
+ std::error_code readFunctionProfiles();
+ std::error_code skipNextWord();
+ template <typename T> ErrorOr<T> readNumber();
+ ErrorOr<StringRef> readString();
+
+ /// \brief Read the section tag and check that it's the same as \p Expected.
+ std::error_code readSectionTag(uint32_t Expected);
+
+ /// GCOV buffer containing the profile.
+ GCOVBuffer GcovBuffer;
+
+ /// Function names in this profile.
+ std::vector<std::string> Names;
+
+ /// GCOV tags used to separate sections in the profile file.
+ static const uint32_t GCOVTagAFDOFileNames = 0xaa000000;
+ static const uint32_t GCOVTagAFDOFunction = 0xac000000;
+};
+
+} // End namespace sampleprof
+
+} // End namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROFREADER_H
diff --git a/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h b/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h
new file mode 100644
index 0000000..029dd2e
--- /dev/null
+++ b/contrib/llvm/include/llvm/ProfileData/SampleProfWriter.h
@@ -0,0 +1,134 @@
+//===- SampleProfWriter.h - Write LLVM sample profile data ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions needed for writing sample profiles.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
+#define LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
+
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ProfileData/SampleProf.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+namespace sampleprof {
+
+enum SampleProfileFormat { SPF_None = 0, SPF_Text, SPF_Binary, SPF_GCC };
+
+/// \brief Sample-based profile writer. Base class.
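+///
+/// A minimal usage sketch (illustrative only; \c Profiles is assumed to be
+/// the StringMap<FunctionSamples> produced by a reader):
+/// \code
+///   auto WriterOrErr =
+///       SampleProfileWriter::create("out.prof", SPF_Binary);
+///   if (std::error_code EC = WriterOrErr.getError())
+///     return EC;
+///   WriterOrErr.get()->write(Profiles);
+/// \endcode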
+class SampleProfileWriter {
+public:
+ virtual ~SampleProfileWriter() {}
+
+ /// Write sample profiles in \p S for function \p FName.
+ ///
+ /// \returns status code of the file update operation.
+ virtual std::error_code write(StringRef FName, const FunctionSamples &S) = 0;
+
+ /// Write all the sample profiles in the given map of samples.
+ ///
+ /// \returns status code of the file update operation.
+ std::error_code write(const StringMap<FunctionSamples> &ProfileMap) {
+ if (std::error_code EC = writeHeader(ProfileMap))
+ return EC;
+
+ for (const auto &I : ProfileMap) {
+ StringRef FName = I.first();
+ const FunctionSamples &Profile = I.second;
+ if (std::error_code EC = write(FName, Profile))
+ return EC;
+ }
+ return sampleprof_error::success;
+ }
+
+ raw_ostream &getOutputStream() { return *OutputStream; }
+
+ /// Profile writer factory.
+ ///
+ /// Create a new file writer based on the value of \p Format.
+ static ErrorOr<std::unique_ptr<SampleProfileWriter>>
+ create(StringRef Filename, SampleProfileFormat Format);
+
+ /// Create a new stream writer based on the value of \p Format.
+ /// For testing.
+ static ErrorOr<std::unique_ptr<SampleProfileWriter>>
+ create(std::unique_ptr<raw_ostream> &OS, SampleProfileFormat Format);
+
+protected:
+ SampleProfileWriter(std::unique_ptr<raw_ostream> &OS)
+ : OutputStream(std::move(OS)) {}
+
+ /// \brief Write a file header for the profile file.
+ virtual std::error_code
+ writeHeader(const StringMap<FunctionSamples> &ProfileMap) = 0;
+
+ /// \brief Output stream to which the profile is emitted.
+ std::unique_ptr<raw_ostream> OutputStream;
+};
+
+/// \brief Sample-based profile writer (text format).
+class SampleProfileWriterText : public SampleProfileWriter {
+public:
+ std::error_code write(StringRef FName, const FunctionSamples &S) override;
+
+protected:
+ SampleProfileWriterText(std::unique_ptr<raw_ostream> &OS)
+ : SampleProfileWriter(OS), Indent(0) {}
+
+ std::error_code
+ writeHeader(const StringMap<FunctionSamples> &ProfileMap) override {
+ return sampleprof_error::success;
+ }
+
+private:
+ /// Indent level to use when writing.
+ ///
+ /// This is used when printing inlined callees.
+ unsigned Indent;
+
+ friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
+ SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
+ SampleProfileFormat Format);
+};
+
+/// \brief Sample-based profile writer (binary format).
+class SampleProfileWriterBinary : public SampleProfileWriter {
+public:
+ std::error_code write(StringRef F, const FunctionSamples &S) override;
+
+protected:
+ SampleProfileWriterBinary(std::unique_ptr<raw_ostream> &OS)
+ : SampleProfileWriter(OS), NameTable() {}
+
+ std::error_code
+ writeHeader(const StringMap<FunctionSamples> &ProfileMap) override;
+ std::error_code writeNameIdx(StringRef FName);
+ std::error_code writeBody(StringRef FName, const FunctionSamples &S);
+
+private:
+ void addName(StringRef FName);
+ void addNames(const FunctionSamples &S);
+
+ MapVector<StringRef, uint32_t> NameTable;
+
+ friend ErrorOr<std::unique_ptr<SampleProfileWriter>>
+ SampleProfileWriter::create(std::unique_ptr<raw_ostream> &OS,
+ SampleProfileFormat Format);
+};
+
+} // End namespace sampleprof
+
+} // End namespace llvm
+
+#endif // LLVM_PROFILEDATA_SAMPLEPROFWRITER_H
diff --git a/contrib/llvm/include/llvm/Support/AIXDataTypesFix.h b/contrib/llvm/include/llvm/Support/AIXDataTypesFix.h
new file mode 100644
index 0000000..a9a9147
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/AIXDataTypesFix.h
@@ -0,0 +1,25 @@
+//===-- llvm/Support/AIXDataTypesFix.h - Fix datatype defs ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file overrides default system-defined types and limits, which cannot
+// be done in DataTypes.h.in because that file is processed by autoheader
+// first, which comments out any #undef statement.
+//
+//===----------------------------------------------------------------------===//
+
+// No include guards desired!
+
+#ifndef SUPPORT_DATATYPES_H
+#error "AIXDataTypesFix.h must only be included via DataTypes.h!"
+#endif
+
+// GCC is strict about defining large constants: they must have the LL modifier.
+// These will be defined properly at the end of DataTypes.h
+#undef INT64_MAX
+#undef INT64_MIN
diff --git a/contrib/llvm/include/llvm/Support/ARMBuildAttributes.h b/contrib/llvm/include/llvm/Support/ARMBuildAttributes.h
new file mode 100644
index 0000000..fc14cb2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ARMBuildAttributes.h
@@ -0,0 +1,229 @@
+//===-- ARMBuildAttributes.h - ARM Build Attributes -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains enumerations and support routines for ARM build attributes
+// as defined in the ARM ABI addenda document (ABI release 2.08).
+//
+// ELF for the ARM Architecture r2.09 - November 30, 2012
+//
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044e/IHI0044E_aaelf.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
+#define LLVM_SUPPORT_ARMBUILDATTRIBUTES_H
+
+namespace llvm {
+class StringRef;
+
+namespace ARMBuildAttrs {
+
+enum SpecialAttr {
+ // This is for the .cpu asm attr. It translates into one or more
+ // AttrType (below) entries in the .ARM.attributes section in the ELF.
+ SEL_CPU
+};
+
+enum AttrType {
+ // Rest correspond to ELF/.ARM.attributes
+ File = 1,
+ CPU_raw_name = 4,
+ CPU_name = 5,
+ CPU_arch = 6,
+ CPU_arch_profile = 7,
+ ARM_ISA_use = 8,
+ THUMB_ISA_use = 9,
+ FP_arch = 10,
+ WMMX_arch = 11,
+ Advanced_SIMD_arch = 12,
+ PCS_config = 13,
+ ABI_PCS_R9_use = 14,
+ ABI_PCS_RW_data = 15,
+ ABI_PCS_RO_data = 16,
+ ABI_PCS_GOT_use = 17,
+ ABI_PCS_wchar_t = 18,
+ ABI_FP_rounding = 19,
+ ABI_FP_denormal = 20,
+ ABI_FP_exceptions = 21,
+ ABI_FP_user_exceptions = 22,
+ ABI_FP_number_model = 23,
+ ABI_align_needed = 24,
+ ABI_align_preserved = 25,
+ ABI_enum_size = 26,
+ ABI_HardFP_use = 27,
+ ABI_VFP_args = 28,
+ ABI_WMMX_args = 29,
+ ABI_optimization_goals = 30,
+ ABI_FP_optimization_goals = 31,
+ compatibility = 32,
+ CPU_unaligned_access = 34,
+ FP_HP_extension = 36,
+ ABI_FP_16bit_format = 38,
+ MPextension_use = 42, // recoded from 70 (ABI r2.08)
+ DIV_use = 44,
+ also_compatible_with = 65,
+ conformance = 67,
+ Virtualization_use = 68,
+
+ /// Legacy Tags
+ Section = 2, // deprecated (ABI r2.09)
+ Symbol = 3, // deprecated (ABI r2.09)
+ ABI_align8_needed = 24, // renamed to ABI_align_needed (ABI r2.09)
+ ABI_align8_preserved = 25, // renamed to ABI_align_preserved (ABI r2.09)
+ nodefaults = 64, // deprecated (ABI r2.09)
+ T2EE_use = 66, // deprecated (ABI r2.09)
+ MPextension_use_old = 70 // recoded to MPextension_use (ABI r2.08)
+};
+
+StringRef AttrTypeAsString(unsigned Attr, bool HasTagPrefix = true);
+StringRef AttrTypeAsString(AttrType Attr, bool HasTagPrefix = true);
+int AttrTypeFromString(StringRef Tag);
+
+// Magic numbers for .ARM.attributes
+enum AttrMagic {
+ Format_Version = 0x41
+};
+
+// Legal Values for CPU_arch, (=6), uleb128
+enum CPUArch {
+ Pre_v4 = 0,
+ v4 = 1, // e.g. SA110
+ v4T = 2, // e.g. ARM7TDMI
+ v5T = 3, // e.g. ARM9TDMI
+ v5TE = 4, // e.g. ARM946E_S
+ v5TEJ = 5, // e.g. ARM926EJ_S
+ v6 = 6, // e.g. ARM1136J_S
+ v6KZ = 7, // e.g. ARM1176JZ_S
+ v6T2 = 8, // e.g. ARM1156T2_S
+ v6K = 9, // e.g. ARM1176JZ_S
+ v7 = 10, // e.g. Cortex A8, Cortex M3
+ v6_M = 11, // e.g. Cortex M1
+ v6S_M = 12, // v6_M with the System extensions
+ v7E_M = 13, // v7_M with DSP extensions
+ v8 = 14, // v8,v8.1a AArch32
+};
+
+enum CPUArchProfile { // (=7), uleb128
+ Not_Applicable = 0, // pre v7, or cross-profile code
+ ApplicationProfile = (0x41), // 'A' (e.g. for Cortex A8)
+ RealTimeProfile = (0x52), // 'R' (e.g. for Cortex R4)
+ MicroControllerProfile = (0x4D), // 'M' (e.g. for Cortex M3)
+ SystemProfile = (0x53) // 'S' Application or real-time profile
+};
+
+// The following values are common to several of the tags above
+enum {
+ Not_Allowed = 0,
+ Allowed = 1,
+
+ // Tag_ARM_ISA_use (=8), uleb128
+
+ // Tag_THUMB_ISA_use, (=9), uleb128
+ AllowThumb32 = 2, // 32-bit Thumb (implies 16-bit instructions)
+
+ // Tag_FP_arch (=10), uleb128 (formerly Tag_VFP_arch = 10)
+ AllowFPv2 = 2, // v2 FP ISA permitted (implies use of the v1 FP ISA)
+ AllowFPv3A = 3, // v3 FP ISA permitted (implies use of the v2 FP ISA)
+ AllowFPv3B = 4, // v3 FP ISA permitted, but only D0-D15, S0-S31
+ AllowFPv4A = 5, // v4 FP ISA permitted (implies use of v3 FP ISA)
+ AllowFPv4B = 6, // v4 FP ISA was permitted, but only D0-D15, S0-S31
+ AllowFPARMv8A = 7, // Use of the ARM v8-A FP ISA was permitted
+ AllowFPARMv8B = 8, // Use of the ARM v8-A FP ISA was permitted, but only
+ // D0-D15, S0-S31
+
+ // Tag_WMMX_arch, (=11), uleb128
+ AllowWMMXv1 = 1, // The user permitted this entity to use WMMX v1
+ AllowWMMXv2 = 2, // The user permitted this entity to use WMMX v2
+
+ // Tag_Advanced_SIMD_arch, (=12), uleb128
+ AllowNeon = 1, // SIMDv1 was permitted
+ AllowNeon2 = 2, // SIMDv2 was permitted (Half-precision FP, MAC operations)
+ AllowNeonARMv8 = 3, // ARM v8-A SIMD was permitted
+ AllowNeonARMv8_1a = 4,// ARM v8.1-A SIMD was permitted (RDMA)
+
+ // Tag_ABI_PCS_R9_use, (=14), uleb128
+ R9IsGPR = 0, // R9 used as v6 (just another callee-saved register)
+ R9IsSB = 1, // R9 used as a global static base register
+ R9IsTLSPointer = 2, // R9 used as a thread local storage pointer
+ R9Reserved = 3, // R9 not used by code associated with attributed entity
+
+ // Tag_ABI_PCS_RW_data, (=15), uleb128
+ AddressRWPCRel = 1, // Address RW static data PC-relative
+ AddressRWSBRel = 2, // Address RW static data SB-relative
+ AddressRWNone = 3, // No RW static data permitted
+
+ // Tag_ABI_PCS_RO_data, (=14), uleb128
+ AddressROPCRel = 1, // Address RO static data PC-relative
+ AddressRONone = 2, // No RO static data permitted
+
+ // Tag_ABI_PCS_GOT_use, (=17), uleb128
+ AddressDirect = 1, // Address imported data directly
+ AddressGOT = 2, // Address imported data indirectly (via GOT)
+
+ // Tag_ABI_PCS_wchar_t, (=18), uleb128
+ WCharProhibited = 0, // wchar_t is not used
+ WCharWidth2Bytes = 2, // sizeof(wchar_t) == 2
+ WCharWidth4Bytes = 4, // sizeof(wchar_t) == 4
+
+ // Tag_ABI_FP_denormal, (=20), uleb128
+ PositiveZero = 0,
+ IEEEDenormals = 1,
+ PreserveFPSign = 2, // sign when flushed-to-zero is preserved
+
+ // Tag_ABI_FP_number_model, (=23), uleb128
+ AllowRTABI = 2, // numbers, infinities, and one quiet NaN (see [RTABI])
+ AllowIEE754 = 3, // code is permitted to use all the IEEE 754-defined FP encodings
+
+ // Tag_ABI_enum_size, (=26), uleb128
+ EnumProhibited = 0, // The user prohibited the use of enums when building
+ // this entity.
+ EnumSmallest = 1, // Enum is smallest container big enough to hold all
+ // values.
+ Enum32Bit = 2, // Enum is at least 32 bits.
+ Enum32BitABI = 3, // Every enumeration visible across an ABI-complying
+ // interface contains a value needing 32 bits to encode
+ // it; other enums can be containerized.
+
+ // Tag_ABI_HardFP_use, (=27), uleb128
+ HardFPImplied = 0, // FP use should be implied by Tag_FP_arch
+ HardFPSinglePrecision = 1, // Single-precision only
+
+ // Tag_ABI_VFP_args, (=28), uleb128
+ BaseAAPCS = 0,
+ HardFPAAPCS = 1,
+
+ // Tag_FP_HP_extension, (=36), uleb128
+ AllowHPFP = 1, // Allow use of Half Precision FP
+
+ // Tag_FP_16bit_format, (=38), uleb128
+ FP16FormatIEEE = 1,
+
+ // Tag_MPextension_use, (=42), uleb128
+ AllowMP = 1, // Allow use of MP extensions
+
+ // Tag_DIV_use, (=44), uleb128
+ // Note: AllowDIVExt must be emitted if and only if the permission to use
+ // hardware divide cannot be conveyed using AllowDIVIfExists or DisallowDIV
+ AllowDIVIfExists = 0, // Allow hardware divide if available in arch, or no
+ // info exists.
+ DisallowDIV = 1, // Hardware divide explicitly disallowed.
+ AllowDIVExt = 2, // Allow hardware divide as optional architecture
+ // extension above the base arch specified by
+ // Tag_CPU_arch and Tag_CPU_arch_profile.
+
+ // Tag_Virtualization_use, (=68), uleb128
+ AllowTZ = 1,
+ AllowVirtualization = 2,
+ AllowTZVirtualization = 3
+};
+
+} // namespace ARMBuildAttrs
+} // namespace llvm
+
+#endif
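A hedged round-trip sketch of the string helpers declared above; it assumes the usual behavior that AttrTypeAsString prepends "Tag_" when HasTagPrefix is true and that AttrTypeFromString returns -1 for unknown names.

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ARMBuildAttributes.h"
#include <cassert>

void roundTripTag() {
  // Expected to yield "Tag_CPU_arch" with the default HasTagPrefix = true.
  llvm::StringRef Name =
      llvm::ARMBuildAttrs::AttrTypeAsString(llvm::ARMBuildAttrs::CPU_arch);
  int Tag = llvm::ARMBuildAttrs::AttrTypeFromString(Name);
  assert(Tag == llvm::ARMBuildAttrs::CPU_arch && "round trip should agree");
  (void)Tag;
}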
diff --git a/contrib/llvm/include/llvm/Support/ARMEHABI.h b/contrib/llvm/include/llvm/Support/ARMEHABI.h
new file mode 100644
index 0000000..9b052df
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ARMEHABI.h
@@ -0,0 +1,134 @@
+//===--- ARMEHABI.h - ARM Exception Handling ABI ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the constants for the ARM unwind opcodes and exception
+// handling table entry kinds.
+//
+// The enumerations and constants in this file reflect the ARM EHABI
+// Specification as published by ARM.
+//
+// Exception Handling ABI for the ARM Architecture r2.09 - November 30, 2012
+//
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMEHABI_H
+#define LLVM_SUPPORT_ARMEHABI_H
+
+namespace llvm {
+namespace ARM {
+namespace EHABI {
+ /// ARM exception handling table entry kinds
+ enum EHTEntryKind {
+ EHT_GENERIC = 0x00,
+ EHT_COMPACT = 0x80
+ };
+
+ enum {
+ /// Special entry for functions that never unwind
+ EXIDX_CANTUNWIND = 0x1
+ };
+
+ /// ARM-defined frame unwinding opcodes
+ enum UnwindOpcodes {
+ // Format: 00xxxxxx
+ // Purpose: vsp = vsp + ((x << 2) + 4)
+ UNWIND_OPCODE_INC_VSP = 0x00,
+
+ // Format: 01xxxxxx
+ // Purpose: vsp = vsp - ((x << 2) + 4)
+ UNWIND_OPCODE_DEC_VSP = 0x40,
+
+ // Format: 10000000 00000000
+ // Purpose: refuse to unwind
+ UNWIND_OPCODE_REFUSE = 0x8000,
+
+ // Format: 1000xxxx xxxxxxxx
+ // Purpose: pop r[15:12], r[11:4]
+ // Constraint: x != 0
+ UNWIND_OPCODE_POP_REG_MASK_R4 = 0x8000,
+
+ // Format: 1001xxxx
+ // Purpose: vsp = r[x]
+ // Constraint: x != 13 && x != 15
+ UNWIND_OPCODE_SET_VSP = 0x90,
+
+ // Format: 10100xxx
+ // Purpose: pop r[(4+x):4]
+ UNWIND_OPCODE_POP_REG_RANGE_R4 = 0xa0,
+
+ // Format: 10101xxx
+ // Purpose: pop r14, r[(4+x):4]
+ UNWIND_OPCODE_POP_REG_RANGE_R4_R14 = 0xa8,
+
+ // Format: 10110000
+ // Purpose: finish
+ UNWIND_OPCODE_FINISH = 0xb0,
+
+ // Format: 10110001 0000xxxx
+ // Purpose: pop r[3:0]
+ // Constraint: x != 0
+ UNWIND_OPCODE_POP_REG_MASK = 0xb100,
+
+ // Format: 10110010 x(uleb128)
+ // Purpose: vsp = vsp + ((x << 2) + 0x204)
+ UNWIND_OPCODE_INC_VSP_ULEB128 = 0xb2,
+
+ // Format: 10110011 xxxxyyyy
+ // Purpose: pop d[(x+y):x]
+ UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX = 0xb300,
+
+ // Format: 10111xxx
+ // Purpose: pop d[(8+x):8]
+ UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDX_D8 = 0xb8,
+
+ // Format: 11000xxx
+ // Purpose: pop wR[(10+x):10]
+ UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE_WR10 = 0xc0,
+
+ // Format: 11000110 xxxxyyyy
+ // Purpose: pop wR[(x+y):x]
+ UNWIND_OPCODE_POP_WIRELESS_MMX_REG_RANGE = 0xc600,
+
+ // Format: 11000111 0000xxxx
+ // Purpose: pop wCGR[3:0]
+ // Constraint: x != 0
+ UNWIND_OPCODE_POP_WIRELESS_MMX_REG_MASK = 0xc700,
+
+ // Format: 11001000 xxxxyyyy
+ // Purpose: pop d[(16+x+y):(16+x)]
+ UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D16 = 0xc800,
+
+ // Format: 11001001 xxxxyyyy
+ // Purpose: pop d[(x+y):x]
+ UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD = 0xc900,
+
+ // Format: 11010xxx
+ // Purpose: pop d[(8+x):8]
+ UNWIND_OPCODE_POP_VFP_REG_RANGE_FSTMFDD_D8 = 0xd0
+ };
+
+ /// ARM-defined Personality Routine Index
+ enum PersonalityRoutineIndex {
+ // To make the exception handling table more compact, ARM defines several
+ // personality routines in EHABI. There are currently 3 different
+ // personality routines in ARM EHABI; the encoding allows for at most 16
+ // pre-defined personality routines.
+ AEABI_UNWIND_CPP_PR0 = 0,
+ AEABI_UNWIND_CPP_PR1 = 1,
+ AEABI_UNWIND_CPP_PR2 = 2,
+
+ NUM_PERSONALITY_INDEX
+ };
+}
+}
+}
+
+#endif
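To make the encodings above concrete, here is an illustrative decoder for just the two single-byte vsp-adjustment opcodes; a real unwinder must dispatch on the whole opcode table, including the multi-byte forms.

#include "llvm/Support/ARMEHABI.h"
#include <cstdint>

// Signed change to vsp encoded by a 00xxxxxx or 01xxxxxx byte; 0 otherwise.
inline int32_t vspAdjustment(uint8_t Op) {
  using namespace llvm::ARM::EHABI;
  if ((Op & 0xc0) == UNWIND_OPCODE_INC_VSP) // vsp = vsp + ((x << 2) + 4)
    return ((Op & 0x3f) << 2) + 4;
  if ((Op & 0xc0) == UNWIND_OPCODE_DEC_VSP) // vsp = vsp - ((x << 2) + 4)
    return -(((Op & 0x3f) << 2) + 4);
  return 0; // some other opcode; not handled in this sketch
}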
diff --git a/contrib/llvm/include/llvm/Support/ARMTargetParser.def b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
new file mode 100644
index 0000000..2f99b07
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ARMTargetParser.def
@@ -0,0 +1,223 @@
+//===- ARMTargetParser.def - ARM target parsing defines ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides defines to build up the ARM target parser's logic.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
+
+#ifndef ARM_FPU
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION)
+#endif
+ARM_FPU("invalid", FK_INVALID, FV_NONE, NS_None, FR_None)
+ARM_FPU("none", FK_NONE, FV_NONE, NS_None, FR_None)
+ARM_FPU("vfp", FK_VFP, FV_VFPV2, NS_None, FR_None)
+ARM_FPU("vfpv2", FK_VFPV2, FV_VFPV2, NS_None, FR_None)
+ARM_FPU("vfpv3", FK_VFPV3, FV_VFPV3, NS_None, FR_None)
+ARM_FPU("vfpv3-fp16", FK_VFPV3_FP16, FV_VFPV3_FP16, NS_None, FR_None)
+ARM_FPU("vfpv3-d16", FK_VFPV3_D16, FV_VFPV3, NS_None, FR_D16)
+ARM_FPU("vfpv3-d16-fp16", FK_VFPV3_D16_FP16, FV_VFPV3_FP16, NS_None, FR_D16)
+ARM_FPU("vfpv3xd", FK_VFPV3XD, FV_VFPV3, NS_None, FR_SP_D16)
+ARM_FPU("vfpv3xd-fp16", FK_VFPV3XD_FP16, FV_VFPV3_FP16, NS_None, FR_SP_D16)
+ARM_FPU("vfpv4", FK_VFPV4, FV_VFPV4, NS_None, FR_None)
+ARM_FPU("vfpv4-d16", FK_VFPV4_D16, FV_VFPV4, NS_None, FR_D16)
+ARM_FPU("fpv4-sp-d16", FK_FPV4_SP_D16, FV_VFPV4, NS_None, FR_SP_D16)
+ARM_FPU("fpv5-d16", FK_FPV5_D16, FV_VFPV5, NS_None, FR_D16)
+ARM_FPU("fpv5-sp-d16", FK_FPV5_SP_D16, FV_VFPV5, NS_None, FR_SP_D16)
+ARM_FPU("fp-armv8", FK_FP_ARMV8, FV_VFPV5, NS_None, FR_None)
+ARM_FPU("neon", FK_NEON, FV_VFPV3, NS_Neon, FR_None)
+ARM_FPU("neon-fp16", FK_NEON_FP16, FV_VFPV3_FP16, NS_Neon, FR_None)
+ARM_FPU("neon-vfpv4", FK_NEON_VFPV4, FV_VFPV4, NS_Neon, FR_None)
+ARM_FPU("neon-fp-armv8", FK_NEON_FP_ARMV8, FV_VFPV5, NS_Neon, FR_None)
+ARM_FPU("crypto-neon-fp-armv8", FK_CRYPTO_NEON_FP_ARMV8, FV_VFPV5, NS_Crypto,
+ FR_None)
+ARM_FPU("softvfp", FK_SOFTVFP, FV_NONE, NS_None, FR_None)
+#undef ARM_FPU
+
+#ifndef ARM_ARCH
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT)
+#endif
+ARM_ARCH("invalid", AK_INVALID, nullptr, nullptr,
+ ARMBuildAttrs::CPUArch::Pre_v4, FK_NONE, AEK_NONE)
+ARM_ARCH("armv2", AK_ARMV2, "2", "v2", ARMBuildAttrs::CPUArch::Pre_v4,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv2a", AK_ARMV2A, "2A", "v2a", ARMBuildAttrs::CPUArch::Pre_v4,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv3", AK_ARMV3, "3", "v3", ARMBuildAttrs::CPUArch::Pre_v4,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv3m", AK_ARMV3M, "3M", "v3m", ARMBuildAttrs::CPUArch::Pre_v4,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv4", AK_ARMV4, "4", "v4", ARMBuildAttrs::CPUArch::v4,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv4t", AK_ARMV4T, "4T", "v4t", ARMBuildAttrs::CPUArch::v4T,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv5t", AK_ARMV5T, "5T", "v5", ARMBuildAttrs::CPUArch::v5T,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv5te", AK_ARMV5TE, "5TE", "v5e", ARMBuildAttrs::CPUArch::v5TE,
+ FK_NONE, AEK_DSP)
+ARM_ARCH("armv5tej", AK_ARMV5TEJ, "5TEJ", "v5e", ARMBuildAttrs::CPUArch::v5TEJ,
+ FK_NONE, AEK_DSP)
+ARM_ARCH("armv6", AK_ARMV6, "6", "v6", ARMBuildAttrs::CPUArch::v6,
+ FK_VFPV2, AEK_DSP)
+ARM_ARCH("armv6k", AK_ARMV6K, "6K", "v6k", ARMBuildAttrs::CPUArch::v6K,
+ FK_VFPV2, AEK_DSP)
+ARM_ARCH("armv6t2", AK_ARMV6T2, "6T2", "v6t2", ARMBuildAttrs::CPUArch::v6T2,
+ FK_NONE, AEK_DSP)
+ARM_ARCH("armv6kz", AK_ARMV6KZ, "6KZ", "v6kz", ARMBuildAttrs::CPUArch::v6KZ,
+ FK_VFPV2, (AEK_SEC | AEK_DSP))
+ARM_ARCH("armv6-m", AK_ARMV6M, "6-M", "v6m", ARMBuildAttrs::CPUArch::v6_M,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv7-a", AK_ARMV7A, "7-A", "v7", ARMBuildAttrs::CPUArch::v7,
+ FK_NEON, AEK_DSP)
+ARM_ARCH("armv7-r", AK_ARMV7R, "7-R", "v7r", ARMBuildAttrs::CPUArch::v7,
+ FK_NONE, (AEK_HWDIV | AEK_DSP))
+ARM_ARCH("armv7-m", AK_ARMV7M, "7-M", "v7m", ARMBuildAttrs::CPUArch::v7,
+ FK_NONE, AEK_HWDIV)
+ARM_ARCH("armv7e-m", AK_ARMV7EM, "7E-M", "v7em", ARMBuildAttrs::CPUArch::v7E_M,
+ FK_NONE, (AEK_HWDIV | AEK_DSP))
+ARM_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8,
+ FK_CRYPTO_NEON_FP_ARMV8, (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM |
+ AEK_HWDIV | AEK_DSP | AEK_CRC))
+ARM_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a", ARMBuildAttrs::CPUArch::v8,
+ FK_CRYPTO_NEON_FP_ARMV8, (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM |
+ AEK_HWDIV | AEK_DSP | AEK_CRC))
+ARM_ARCH("armv8.2-a", AK_ARMV8_2A, "8.2-A", "v8.2a", ARMBuildAttrs::CPUArch::v8,
+ FK_CRYPTO_NEON_FP_ARMV8, (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM |
+ AEK_HWDIV | AEK_DSP | AEK_CRC))
+// Non-standard Arch names.
+ARM_ARCH("iwmmxt", AK_IWMMXT, "iwmmxt", "", ARMBuildAttrs::CPUArch::v5TE,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("iwmmxt2", AK_IWMMXT2, "iwmmxt2", "", ARMBuildAttrs::CPUArch::v5TE,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("xscale", AK_XSCALE, "xscale", "", ARMBuildAttrs::CPUArch::v5TE,
+ FK_NONE, AEK_NONE)
+ARM_ARCH("armv7s", AK_ARMV7S, "7-S", "v7s", ARMBuildAttrs::CPUArch::v7,
+ FK_NEON_VFPV4, AEK_DSP)
+ARM_ARCH("armv7k", AK_ARMV7K, "7-K", "v7k", ARMBuildAttrs::CPUArch::v7,
+ FK_NONE, AEK_DSP)
+#undef ARM_ARCH
+
+#ifndef ARM_ARCH_EXT_NAME
+#define ARM_ARCH_EXT_NAME(NAME, ID, FEATURE, NEGFEATURE)
+#endif
+// FIXME: This would be nicer were it tablegen
+ARM_ARCH_EXT_NAME("invalid", AEK_INVALID, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("none", AEK_NONE, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("crc", AEK_CRC, "+crc", "-crc")
+ARM_ARCH_EXT_NAME("crypto", AEK_CRYPTO, "+crypto","-crypto")
+ARM_ARCH_EXT_NAME("fp", AEK_FP, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("idiv", (AEK_HWDIVARM | AEK_HWDIV), nullptr, nullptr)
+ARM_ARCH_EXT_NAME("mp", AEK_MP, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("simd", AEK_SIMD, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("sec", AEK_SEC, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("virt", AEK_VIRT, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("fp16", AEK_FP16, "+fullfp16", "-fullfp16")
+ARM_ARCH_EXT_NAME("os", AEK_OS, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("iwmmxt", AEK_IWMMXT, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("iwmmxt2", AEK_IWMMXT2, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("maverick", AEK_MAVERICK, nullptr, nullptr)
+ARM_ARCH_EXT_NAME("xscale", AEK_XSCALE, nullptr, nullptr)
+#undef ARM_ARCH_EXT_NAME
+
+#ifndef ARM_HW_DIV_NAME
+#define ARM_HW_DIV_NAME(NAME, ID)
+#endif
+ARM_HW_DIV_NAME("invalid", AEK_INVALID)
+ARM_HW_DIV_NAME("none", AEK_NONE)
+ARM_HW_DIV_NAME("thumb", AEK_HWDIV)
+ARM_HW_DIV_NAME("arm", AEK_HWDIVARM)
+ARM_HW_DIV_NAME("arm,thumb", (AEK_HWDIVARM | AEK_HWDIV))
+#undef ARM_HW_DIV_NAME
+
+#ifndef ARM_CPU_NAME
+#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
+#endif
+ARM_CPU_NAME("arm2", AK_ARMV2, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm3", AK_ARMV2A, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm6", AK_ARMV3, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm7m", AK_ARMV3M, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm8", AK_ARMV4, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm810", AK_ARMV4, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("strongarm", AK_ARMV4, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("strongarm110", AK_ARMV4, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("strongarm1100", AK_ARMV4, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("strongarm1110", AK_ARMV4, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm7tdmi", AK_ARMV4T, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm7tdmi-s", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm710t", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm720t", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm9", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm9tdmi", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm920", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm920t", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm922t", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm9312", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm940t", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("ep9312", AK_ARMV4T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm10tdmi", AK_ARMV5T, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm1020t", AK_ARMV5T, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm9e", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm946e-s", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm966e-s", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm968e-s", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm10e", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm1020e", AK_ARMV5TE, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm1022e", AK_ARMV5TE, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm926ej-s", AK_ARMV5TEJ, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm1136j-s", AK_ARMV6, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm1136jf-s", AK_ARMV6, FK_VFPV2, true, AEK_NONE)
+ARM_CPU_NAME("arm1136jz-s", AK_ARMV6, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm1176j-s", AK_ARMV6K, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm1176jz-s", AK_ARMV6KZ, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("mpcore", AK_ARMV6K, FK_VFPV2, false, AEK_NONE)
+ARM_CPU_NAME("mpcorenovfp", AK_ARMV6K, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("arm1176jzf-s", AK_ARMV6KZ, FK_VFPV2, true, AEK_NONE)
+ARM_CPU_NAME("arm1156t2-s", AK_ARMV6T2, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("arm1156t2f-s", AK_ARMV6T2, FK_VFPV2, false, AEK_NONE)
+ARM_CPU_NAME("cortex-m0", AK_ARMV6M, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("cortex-m0plus", AK_ARMV6M, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("cortex-m1", AK_ARMV6M, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("sc000", AK_ARMV6M, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("cortex-a5", AK_ARMV7A, FK_NEON_VFPV4, false, (AEK_SEC | AEK_MP))
+ARM_CPU_NAME("cortex-a7", AK_ARMV7A, FK_NEON_VFPV4, false,
+ (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM | AEK_HWDIV))
+ARM_CPU_NAME("cortex-a8", AK_ARMV7A, FK_NEON, true, AEK_SEC)
+ARM_CPU_NAME("cortex-a9", AK_ARMV7A, FK_NEON_FP16, false, (AEK_SEC | AEK_MP))
+ARM_CPU_NAME("cortex-a12", AK_ARMV7A, FK_NEON_VFPV4, false,
+ (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM | AEK_HWDIV))
+ARM_CPU_NAME("cortex-a15", AK_ARMV7A, FK_NEON_VFPV4, false,
+ (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM | AEK_HWDIV))
+ARM_CPU_NAME("cortex-a17", AK_ARMV7A, FK_NEON_VFPV4, false,
+ (AEK_SEC | AEK_MP | AEK_VIRT | AEK_HWDIVARM | AEK_HWDIV))
+ARM_CPU_NAME("krait", AK_ARMV7A, FK_NEON_VFPV4, false,
+ (AEK_HWDIVARM | AEK_HWDIV))
+ARM_CPU_NAME("cortex-r4", AK_ARMV7R, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("cortex-r4f", AK_ARMV7R, FK_VFPV3_D16, false, AEK_NONE)
+ARM_CPU_NAME("cortex-r5", AK_ARMV7R, FK_VFPV3_D16, false,
+ (AEK_MP | AEK_HWDIVARM))
+ARM_CPU_NAME("cortex-r7", AK_ARMV7R, FK_VFPV3_D16_FP16, false,
+ (AEK_MP | AEK_HWDIVARM))
+ARM_CPU_NAME("sc300", AK_ARMV7M, FK_NONE, false, AEK_NONE)
+ARM_CPU_NAME("cortex-m3", AK_ARMV7M, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("cortex-m4", AK_ARMV7EM, FK_FPV4_SP_D16, true, AEK_NONE)
+ARM_CPU_NAME("cortex-m7", AK_ARMV7EM, FK_FPV5_D16, false, AEK_NONE)
+ARM_CPU_NAME("cortex-a35", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
+ARM_CPU_NAME("cortex-a53", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true, AEK_CRC)
+ARM_CPU_NAME("cortex-a57", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
+ARM_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
+ARM_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AEK_CRC)
+// Non-standard Arch names.
+ARM_CPU_NAME("iwmmxt", AK_IWMMXT, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("xscale", AK_XSCALE, FK_NONE, true, AEK_NONE)
+ARM_CPU_NAME("swift", AK_ARMV7S, FK_NEON_VFPV4, true,
+ (AEK_HWDIVARM | AEK_HWDIV))
+// Invalid CPU
+ARM_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, AEK_INVALID)
+#undef ARM_CPU_NAME
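For readers unfamiliar with this X-macro pattern: a consumer defines only the macro family it needs and includes the file; the other families expand to nothing via the empty defaults above, and the file #undefs each macro when done. A sketch (CpuEntry and CpuTable are illustrative names, not LLVM's):

struct CpuEntry {
  const char *Name;
  bool IsDefault;
};

static const CpuEntry CpuTable[] = {
#define ARM_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)           \
  {NAME, IS_DEFAULT},
#include "llvm/Support/ARMTargetParser.def"
};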
diff --git a/contrib/llvm/include/llvm/Support/ARMWinEH.h b/contrib/llvm/include/llvm/Support/ARMWinEH.h
new file mode 100644
index 0000000..1463629
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ARMWinEH.h
@@ -0,0 +1,382 @@
+//===-- llvm/Support/WinARMEH.h - Windows on ARM EH Constants ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARMWINEH_H
+#define LLVM_SUPPORT_ARMWINEH_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace ARM {
+namespace WinEH {
+enum class RuntimeFunctionFlag {
+ RFF_Unpacked, /// unpacked entry
+ RFF_Packed, /// packed entry
+ RFF_PackedFragment, /// packed entry representing a fragment
+ RFF_Reserved, /// reserved
+};
+
+enum class ReturnType {
+ RT_POP, /// return via pop {pc} (L flag must be set)
+ RT_B, /// 16-bit branch
+ RT_BW, /// 32-bit branch
+ RT_NoEpilogue, /// no epilogue (fragment)
+};
+
+/// RuntimeFunction - An entry in the table of procedure data (.pdata)
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------------------------------------------------------+
+/// | Function Start RVA |
+/// +-------------------+-+-+-+-----+-+---+---------------------+---+
+/// | Stack Adjust |C|L|R| Reg |H|Ret| Function Length |Flg|
+/// +-------------------+-+-+-+-----+-+---+---------------------+---+
+///
+/// Flag : 2-bit field with the following meanings:
+/// - 00 = packed unwind data not used; remaining bits point to .xdata record
+/// - 01 = packed unwind data
+/// - 10 = packed unwind data, function assumed to have no prologue; useful
+/// for function fragments that are discontiguous with the start of the
+/// function
+/// - 11 = reserved
+/// Function Length : 11-bit field providing the length of the entire function
+/// in bytes, divided by 2; if the function is greater than
+/// 4KB, a full .xdata record must be used instead
+/// Ret : 2-bit field indicating how the function returns
+/// - 00 = return via pop {pc} (the L bit must be set)
+/// - 01 = return via 16-bit branch
+/// - 10 = return via 32-bit branch
+/// - 11 = no epilogue; useful for function fragments that may only contain a
+/// prologue but the epilogue is elsewhere
+/// H : 1-bit flag indicating whether the function "homes" the integer parameter
+/// registers (r0-r3), allocating 16 bytes on the stack
+/// Reg : 3-bit field indicating the index of the last saved non-volatile
+/// register. If the R bit is set to 0, then only integer registers are
+/// saved (r4-rN, where N is 4 + Reg). If the R bit is set to 1, then
+/// only floating-point registers are being saved (d8-dN, where N is
+/// 8 + Reg). The special case of the R bit being set to 1 and Reg equal
+/// to 7 indicates that no registers are saved.
+/// R : 1-bit flag indicating whether the non-volatile registers are integer or
+/// floating-point. 0 indicates integer, 1 indicates floating-point. The
+/// special case of the R-flag being set and Reg being set to 7 indicates
+/// that no non-volatile registers are saved.
+/// L : 1-bit flag indicating whether the function saves/restores the link
+/// register (LR)
+/// C : 1-bit flag indicating whether the function includes extra instructions
+/// to setup a frame chain for fast walking. If this flag is set, r11 is
+/// implicitly added to the list of saved non-volatile integer registers.
+/// Stack Adjust : 10-bit field indicating the number of bytes of stack that are
+/// allocated for this function. Only values between 0x000 and
+/// 0x3f3 can be directly encoded. If the value is 0x3f4 or
+/// greater, then the low 4 bits have special meaning as follows:
+/// - Bit 0-1
+/// indicate the number of words of adjustment (1-4), minus 1
+/// - Bit 2
+/// indicates if the prologue combined adjustment into push
+/// - Bit 3
+/// indicates if the epilogue combined adjustment into pop
+///
+/// RESTRICTIONS:
+/// - IF C is SET:
+/// + L flag must be set since frame chaining requires r11 and lr
+/// + r11 must NOT be included in the set of registers described by Reg
+/// - IF Ret is 0:
+/// + L flag must be set
+
+// NOTE: RuntimeFunction is meant to be a simple class that provides raw access
+// to all fields in the structure. The accessor methods reflect the names of
+// the bitfields that they correspond to. Although some obvious simplifications
+// are possible via merging of methods, doing so would prevent the use of this
+// class to fully inspect the contents of the data structure, which is particularly
+// useful for scenarios such as llvm-readobj to aid in testing.
+
+class RuntimeFunction {
+public:
+ const support::ulittle32_t BeginAddress;
+ const support::ulittle32_t UnwindData;
+
+ RuntimeFunction(const support::ulittle32_t *Data)
+ : BeginAddress(Data[0]), UnwindData(Data[1]) {}
+
+ RuntimeFunction(const support::ulittle32_t BeginAddress,
+ const support::ulittle32_t UnwindData)
+ : BeginAddress(BeginAddress), UnwindData(UnwindData) {}
+
+ RuntimeFunctionFlag Flag() const {
+ return RuntimeFunctionFlag(UnwindData & 0x3);
+ }
+
+ uint32_t ExceptionInformationRVA() const {
+ assert(Flag() == RuntimeFunctionFlag::RFF_Unpacked &&
+ "unpacked form required for this operation");
+ return (UnwindData & ~0x3);
+ }
+
+ uint32_t PackedUnwindData() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return (UnwindData & ~0x3);
+ }
+ uint32_t FunctionLength() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return (((UnwindData & 0x00001ffc) >> 2) << 1);
+ }
+ ReturnType Ret() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ assert(((UnwindData & 0x00006000) || L()) && "L must be set to 1");
+ return ReturnType((UnwindData & 0x00006000) >> 13);
+ }
+ bool H() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return ((UnwindData & 0x00008000) >> 15);
+ }
+ uint8_t Reg() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return ((UnwindData & 0x00070000) >> 16);
+ }
+ bool R() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return ((UnwindData & 0x00080000) >> 19);
+ }
+ bool L() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return ((UnwindData & 0x00100000) >> 20);
+ }
+ bool C() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ assert(((~UnwindData & 0x00200000) || L()) &&
+ "L flag must be set, chaining requires r11 and LR");
+ assert(((~UnwindData & 0x00200000) || (Reg() < 7) || R()) &&
+ "r11 must not be included in Reg; C implies r11");
+ return ((UnwindData & 0x00200000) >> 21);
+ }
+ uint16_t StackAdjust() const {
+ assert((Flag() == RuntimeFunctionFlag::RFF_Packed ||
+ Flag() == RuntimeFunctionFlag::RFF_PackedFragment) &&
+ "packed form required for this operation");
+ return ((UnwindData & 0xffc00000) >> 22);
+ }
+};
+
+/// PrologueFolding - pseudo-flag derived from Stack Adjust indicating that the
+/// prologue has stack adjustment combined into the push
+inline bool PrologueFolding(const RuntimeFunction &RF) {
+ return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x4);
+}
+/// EpilogueFolding - pseudo-flag derived from Stack Adjust indicating that the
+/// epilogue has stack adjustment combined into the pop
+inline bool EpilogueFolding(const RuntimeFunction &RF) {
+ return RF.StackAdjust() >= 0x3f4 && (RF.StackAdjust() & 0x8);
+}
+/// StackAdjustment - calculated stack adjustment in words. The stack
+/// adjustment should be determined via this function to account for the
+/// special encoding used when the value is >= 0x3f4.
+inline uint16_t StackAdjustment(const RuntimeFunction &RF) {
+ uint16_t Adjustment = RF.StackAdjust();
+ if (Adjustment >= 0x3f4)
+ return (Adjustment & 0x3) ? ((Adjustment & 0x3) << 2) - 1 : 0;
+ return Adjustment;
+}
+
+/// SavedRegisterMask - Utility function to calculate the set of saved general
+/// purpose (r0-r15) and VFP (d0-d31) registers.
+std::pair<uint16_t, uint32_t> SavedRegisterMask(const RuntimeFunction &RF);
+
+/// ExceptionDataRecord - An entry in the table of exception data (.xdata)
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +-------+---------+-+-+-+---+-----------------------------------+
+/// | C Wrd | Epi Cnt |F|E|X|Ver| Function Length |
+/// +-------+--------+'-'-'-'---'---+-------------------------------+
+/// | Reserved |Ex. Code Words| (Extended Epilogue Count) |
+/// +-------+--------+--------------+-------------------------------+
+///
+/// Function Length : 18-bit field indicating the total length of the function
+/// in bytes divided by 2. If a function is larger than
+/// 512KB, then multiple pdata and xdata records must be used.
+/// Vers : 2-bit field describing the version of the remaining structure. Only
+/// version 0 is currently defined (values 1-3 are not permitted).
+/// X : 1-bit field indicating the presence of exception data
+/// E : 1-bit field indicating that the single epilogue is packed into the
+/// header
+/// F : 1-bit field indicating that the record describes a function fragment
+/// (implies that no prologue is present, and prologue processing should be
+/// skipped)
+/// Epilogue Count : 5-bit field that differs in meaning based on the E field.
+///
+/// If E is set, then this field specifies the index of the
+/// first unwind code describing the (only) epilogue.
+///
+/// Otherwise, this field indicates the number of exception
+/// scopes. If more than 31 scopes exist, then this field and
+/// the Code Words field must both be set to 0 to indicate that
+/// an extension word is required.
+/// Code Words : 4-bit field that specifies the number of 32-bit words needed to
+/// contain all the unwind codes. If more than 15 words (63 code
+/// bytes) are required, then this field and the Epilogue Count
+/// field must both be set to 0 to indicate that an extension word
+/// is required.
+/// Extended Epilogue Count, Extended Code Words :
+/// Valid only if Epilog Count and Code Words are both
+/// set to 0. Provides an 8-bit extended code word
+/// count and 16 bits for the epilogue count
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +----------------+------+---+---+-------------------------------+
+/// | Ep Start Idx | Cond |Res| Epilogue Start Offset |
+/// +----------------+------+---+-----------------------------------+
+///
+/// If the E bit is unset in the header, the header is followed by a series of
+/// epilogue scopes, which are sorted by their offset.
+///
+/// Epilogue Start Offset: 18-bit field encoding the offset of epilogue relative
+/// to the start of the function in bytes divided by two
+/// Res : 2-bit field reserved for future expansion (must be set to 0)
+/// Condition : 4-bit field providing the condition under which the epilogue is
+/// executed. Unconditional epilogues should set this field to 0xe.
+/// Epilogues must be entirely conditional or unconditional. In
+/// Thumb-2 mode, the epilogue begins with the first instruction
+/// after the IT opcode.
+/// Epilogue Start Index : 8-bit field indicating the byte index of the first
+/// unwind code describing the epilogue
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------+---------------+---------------+---------------+
+/// | Unwind Code 3 | Unwind Code 2 | Unwind Code 1 | Unwind Code 0 |
+/// +---------------+---------------+---------------+---------------+
+///
+/// Following the epilogue scopes, the byte code describing the unwinding
+/// follows. This is padded to align up to word alignment. Bytes are stored in
+/// little endian.
+///
+/// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+/// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+/// +---------------------------------------------------------------+
+/// | Exception Handler RVA (requires X = 1) |
+/// +---------------------------------------------------------------+
+/// | (possibly followed by data required for exception handler) |
+/// +---------------------------------------------------------------+
+///
+/// If the X bit is set in the header, the unwind byte code is followed by the
+/// exception handler information. This consists of one Exception Handler RVA,
+/// which is the address of the exception handler, followed immediately by the
+/// variable-length data associated with the exception handler.
+///
+
+struct EpilogueScope {
+ const support::ulittle32_t ES;
+
+ EpilogueScope(const support::ulittle32_t Data) : ES(Data) {}
+ uint32_t EpilogueStartOffset() const {
+ return (ES & 0x0003ffff);
+ }
+ uint8_t Res() const {
+ return ((ES & 0x000c0000) >> 18);
+ }
+ uint8_t Condition() const {
+ return ((ES & 0x00f00000) >> 20);
+ }
+ uint8_t EpilogueStartIndex() const {
+ return ((ES & 0xff000000) >> 24);
+ }
+};
+
+struct ExceptionDataRecord;
+inline size_t HeaderWords(const ExceptionDataRecord &XR);
+
+struct ExceptionDataRecord {
+ const support::ulittle32_t *Data;
+
+ ExceptionDataRecord(const support::ulittle32_t *Data) : Data(Data) {}
+
+ uint32_t FunctionLength() const {
+ return (Data[0] & 0x0003ffff);
+ }
+
+ uint8_t Vers() const {
+ return (Data[0] & 0x000C0000) >> 18;
+ }
+
+ bool X() const {
+ return ((Data[0] & 0x00100000) >> 20);
+ }
+
+ bool E() const {
+ return ((Data[0] & 0x00200000) >> 21);
+ }
+
+ bool F() const {
+ return ((Data[0] & 0x00400000) >> 22);
+ }
+
+ uint8_t EpilogueCount() const {
+ if (HeaderWords(*this) == 1)
+ return (Data[0] & 0x0f800000) >> 23;
+ return Data[1] & 0x0000ffff;
+ }
+
+ uint8_t CodeWords() const {
+ if (HeaderWords(*this) == 1)
+ return (Data[0] & 0xf0000000) >> 28;
+ return (Data[1] & 0x00ff0000) >> 16;
+ }
+
+ ArrayRef<support::ulittle32_t> EpilogueScopes() const {
+ assert(E() == 0 && "epilogue scopes are only present when the E bit is 0");
+ size_t Offset = HeaderWords(*this);
+ return makeArrayRef(&Data[Offset], EpilogueCount());
+ }
+
+ ArrayRef<uint8_t> UnwindByteCode() const {
+ const size_t Offset = HeaderWords(*this)
+ + (E() ? 0 : EpilogueCount());
+ const uint8_t *ByteCode =
+ reinterpret_cast<const uint8_t *>(&Data[Offset]);
+ return makeArrayRef(ByteCode, CodeWords() * sizeof(uint32_t));
+ }
+
+ uint32_t ExceptionHandlerRVA() const {
+ assert(X() && "Exception Handler RVA is only valid if the X bit is set");
+ return Data[HeaderWords(*this) + EpilogueCount() + CodeWords()];
+ }
+
+ uint32_t ExceptionHandlerParameter() const {
+ assert(X() && "Exception Handler RVA is only valid if the X bit is set");
+ return Data[HeaderWords(*this) + EpilogueCount() + CodeWords() + 1];
+ }
+};
+
+inline size_t HeaderWords(const ExceptionDataRecord &XR) {
+ return (XR.Data[0] & 0xff800000) ? 1 : 2;
+}
+}
+}
+}
+
+#endif
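A hedged sketch of consuming the packed .pdata encoding documented above; dumpPackedEntry is a hypothetical helper, and in practice the two little-endian words come from an object file's .pdata section.

#include "llvm/Support/ARMWinEH.h"
#include <cstdio>

void dumpPackedEntry(const llvm::support::ulittle32_t *Words) {
  using namespace llvm::ARM::WinEH;
  RuntimeFunction RF(Words);
  if (RF.Flag() != RuntimeFunctionFlag::RFF_Packed)
    return; // unpacked entries point at a full .xdata record instead
  // StackAdjustment() resolves the special >= 0x3f4 folded encoding.
  std::printf("start RVA %#x: %u bytes, %u words of stack, saves LR: %d\n",
              (unsigned)RF.BeginAddress, RF.FunctionLength(),
              (unsigned)StackAdjustment(RF), (int)RF.L());
}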
diff --git a/contrib/llvm/include/llvm/Support/AlignOf.h b/contrib/llvm/include/llvm/Support/AlignOf.h
new file mode 100644
index 0000000..5268c8d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/AlignOf.h
@@ -0,0 +1,258 @@
+//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AlignOf function that computes alignments for
+// arbitrary types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ALIGNOF_H
+#define LLVM_SUPPORT_ALIGNOF_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstddef>
+#include <type_traits>
+
+namespace llvm {
+
+namespace detail {
+
+// For everything other than an abstract class we can calculate alignment by
+// building a class with a single character and a member of the given type.
+template <typename T, bool = std::is_abstract<T>::value>
+struct AlignmentCalcImpl {
+ char x;
+#if defined(_MSC_VER)
+// Disables "structure was padded due to __declspec(align())" warnings that are
+// generated by any class using AlignOf<T> with a manually specified alignment.
+// Although the warning is disabled in the LLVM project we need this pragma
+// as AlignOf.h is a published support header that's available for use
+// out-of-tree, and we would like that to compile cleanly at /W4.
+#pragma warning(suppress : 4324)
+#endif
+ T t;
+private:
+ AlignmentCalcImpl() {} // Never instantiate.
+};
+
+// Abstract base class helper, this will have the minimal alignment and size
+// for any abstract class. We don't even define its destructor because this
+// type should never be used in a way that requires it.
+struct AlignmentCalcImplBase {
+ virtual ~AlignmentCalcImplBase() = 0;
+};
+
+// When we have an abstract class type, specialize the alignment computation
+// engine to create another abstract class that derives from both an empty
+// abstract base class and the provided type. This has the same effect as the
+// above except that it handles the fact that we can't actually create a member
+// of type T.
+template <typename T>
+struct AlignmentCalcImpl<T, true> : AlignmentCalcImplBase, T {
+ virtual ~AlignmentCalcImpl() = 0;
+};
+
+} // End detail namespace.
+
+/// AlignOf - A templated class that contains an enum value representing
+/// the alignment of the template argument. For example,
+/// AlignOf<int>::Alignment represents the alignment of type "int". The
+/// alignment calculated is the minimum alignment, and not necessarily
+/// the "desired" alignment returned by GCC's __alignof__ (for example). Note
+/// that because the alignment is an enum value, it can be used as a
+/// compile-time constant (e.g., for template instantiation).
+template <typename T>
+struct AlignOf {
+#ifndef _MSC_VER
+ // Avoid warnings from GCC like:
+ // comparison between 'enum llvm::AlignOf<X>::<anonymous>' and 'enum
+ // llvm::AlignOf<Y>::<anonymous>' [-Wenum-compare]
+ // by using constexpr instead of enum.
+ // (except on MSVC, since it doesn't support constexpr yet).
+ static constexpr unsigned Alignment = static_cast<unsigned int>(
+ sizeof(detail::AlignmentCalcImpl<T>) - sizeof(T));
+#else
+ enum {
+ Alignment = static_cast<unsigned int>(sizeof(detail::AlignmentCalcImpl<T>) -
+ sizeof(T))
+ };
+#endif
+ enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 };
+ enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 };
+
+ enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 1 : 0 };
+ enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 };
+ enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 };
+ enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 };
+};
+
+#ifndef _MSC_VER
+template <typename T> constexpr unsigned AlignOf<T>::Alignment;
+#endif
+
+/// alignOf - A templated function that returns the minimum alignment
+/// of a type. This provides no extra functionality beyond the AlignOf
+/// class besides some cosmetic cleanliness. Example usage:
+/// alignOf<int>() returns the alignment of an int.
+template <typename T>
+inline unsigned alignOf() { return AlignOf<T>::Alignment; }
+
+/// \struct AlignedCharArray
+/// \brief Helper for building an aligned character array type.
+///
+/// This template is used to explicitly build up a collection of aligned
+/// character array types. We have to build these up using a macro and explicit
+/// specialization to cope with old versions of MSVC and GCC where only an
+/// integer literal can be used to specify an alignment constraint. Once built
+/// up here, we can then begin to indirect between these using normal C++
+/// template parameters.
+
+// MSVC requires special handling here.
+#ifndef _MSC_VER
+
+#if __has_feature(cxx_alignas)
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray {
+ alignas(Alignment) char buffer[Size];
+};
+
+#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES)
+/// \brief Create a type with an aligned char buffer.
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray;
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template<std::size_t Size> \
+ struct AlignedCharArray<x, Size> { \
+ __attribute__((aligned(x))) char buffer[Size]; \
+ };
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#else
+# error No supported align as directive.
+#endif
+
+#else // _MSC_VER
+
+/// \brief Create a type with an aligned char buffer.
+template<std::size_t Alignment, std::size_t Size>
+struct AlignedCharArray;
+
+// We provide special variations of this template for the most common
+// alignments because __declspec(align(...)) doesn't actually work when it is
+// a member of a by-value function argument in MSVC, even if the alignment
+// request is something reasonably like 8-byte or 16-byte. Note that we can't
+// even include the declspec with the union that forces the alignment because
+// MSVC warns on the existence of the declspec despite the union member forcing
+// proper alignment.
+
+template<std::size_t Size>
+struct AlignedCharArray<1, Size> {
+ union {
+ char aligned;
+ char buffer[Size];
+ };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<2, Size> {
+ union {
+ short aligned;
+ char buffer[Size];
+ };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<4, Size> {
+ union {
+ int aligned;
+ char buffer[Size];
+ };
+};
+
+template<std::size_t Size>
+struct AlignedCharArray<8, Size> {
+ union {
+ double aligned;
+ char buffer[Size];
+ };
+};
+
+
+// The rest of these are provided with a __declspec(align(...)) and we simply
+// can't pass them by-value as function arguments on MSVC.
+
+#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \
+ template<std::size_t Size> \
+ struct AlignedCharArray<x, Size> { \
+ __declspec(align(x)) char buffer[Size]; \
+ };
+
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64)
+LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128)
+
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT
+
+#endif // _MSC_VER
+
+namespace detail {
+template <typename T1,
+ typename T2 = char, typename T3 = char, typename T4 = char,
+ typename T5 = char, typename T6 = char, typename T7 = char,
+ typename T8 = char, typename T9 = char, typename T10 = char>
+class AlignerImpl {
+ T1 t1; T2 t2; T3 t3; T4 t4; T5 t5; T6 t6; T7 t7; T8 t8; T9 t9; T10 t10;
+
+ AlignerImpl(); // Never defined or instantiated.
+};
+
+template <typename T1,
+ typename T2 = char, typename T3 = char, typename T4 = char,
+ typename T5 = char, typename T6 = char, typename T7 = char,
+ typename T8 = char, typename T9 = char, typename T10 = char>
+union SizerImpl {
+ char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)],
+ arr5[sizeof(T5)], arr6[sizeof(T6)], arr7[sizeof(T7)], arr8[sizeof(T8)],
+ arr9[sizeof(T9)], arr10[sizeof(T10)];
+};
+} // end namespace detail
+
+/// \brief This union template exposes a suitably aligned and sized character
+/// array member which can hold elements of any of up to ten types.
+///
+/// These types may be arrays, structs, or any other types. The goal is to
+/// expose a char array buffer member which can be used as suitable storage for
+/// a placement new of any of these types. Support for more than ten types can
+/// be added at the cost of more boilerplate.
+template <typename T1,
+ typename T2 = char, typename T3 = char, typename T4 = char,
+ typename T5 = char, typename T6 = char, typename T7 = char,
+ typename T8 = char, typename T9 = char, typename T10 = char>
+struct AlignedCharArrayUnion : llvm::AlignedCharArray<
+ AlignOf<detail::AlignerImpl<T1, T2, T3, T4, T5,
+ T6, T7, T8, T9, T10> >::Alignment,
+ sizeof(detail::SizerImpl<T1, T2, T3, T4, T5,
+ T6, T7, T8, T9, T10>)> {
+};
+} // end namespace llvm
+#endif
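The intended use of AlignedCharArrayUnion, sketched with two hypothetical member types: the buffer member provides raw storage that is large enough and sufficiently aligned for any of the listed types, filled via placement new.

#include "llvm/Support/AlignOf.h"
#include <new>

struct Small { int X; };
struct Big { double D[4]; };

void demoAlignedStorage() {
  llvm::AlignedCharArrayUnion<Small, Big> Storage;
  Big *B = new (Storage.buffer) Big(); // placement new into the raw buffer
  B->~Big();                           // construction/destruction is manual
}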
diff --git a/contrib/llvm/include/llvm/Support/Allocator.h b/contrib/llvm/include/llvm/Support/Allocator.h
new file mode 100644
index 0000000..c608736
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Allocator.h
@@ -0,0 +1,434 @@
+//===--- Allocator.h - Simple memory allocation abstraction -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the MallocAllocator and BumpPtrAllocator interfaces. Both
+/// of these conform to an LLVM "Allocator" concept which consists of an
+/// Allocate method accepting a size and alignment, and a Deallocate accepting
+/// a pointer and size. Further, the LLVM "Allocator" concept has overloads of
+/// Allocate and Deallocate for setting size and alignment based on the final
+/// type. These overloads are typically provided by a base class template \c
+/// AllocatorBase.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ALLOCATOR_H
+#define LLVM_SUPPORT_ALLOCATOR_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+
+namespace llvm {
+
+/// \brief CRTP base class providing obvious overloads for the core \c
+/// Allocate() methods of LLVM-style allocators.
+///
+/// This base class both documents the full public interface exposed by all
+/// LLVM-style allocators, and redirects all of the overloads to a single core
+/// set of methods which the derived class must define.
+template <typename DerivedT> class AllocatorBase {
+public:
+ /// \brief Allocate \a Size bytes of \a Alignment aligned memory. This method
+ /// must be implemented by \c DerivedT.
+ void *Allocate(size_t Size, size_t Alignment) {
+#ifdef __clang__
+ static_assert(static_cast<void *(AllocatorBase::*)(size_t, size_t)>(
+ &AllocatorBase::Allocate) !=
+ static_cast<void *(DerivedT::*)(size_t, size_t)>(
+ &DerivedT::Allocate),
+ "Class derives from AllocatorBase without implementing the "
+ "core Allocate(size_t, size_t) overload!");
+#endif
+ return static_cast<DerivedT *>(this)->Allocate(Size, Alignment);
+ }
+
+ /// \brief Deallocate \a Ptr to \a Size bytes of memory allocated by this
+ /// allocator.
+ void Deallocate(const void *Ptr, size_t Size) {
+#ifdef __clang__
+ static_assert(static_cast<void (AllocatorBase::*)(const void *, size_t)>(
+ &AllocatorBase::Deallocate) !=
+ static_cast<void (DerivedT::*)(const void *, size_t)>(
+ &DerivedT::Deallocate),
+ "Class derives from AllocatorBase without implementing the "
+ "core Deallocate(void *) overload!");
+#endif
+ return static_cast<DerivedT *>(this)->Deallocate(Ptr, Size);
+ }
+
+ // The rest of these methods are helpers that redirect to one of the above
+ // core methods.
+
+ /// \brief Allocate space for a sequence of objects without constructing them.
+ template <typename T> T *Allocate(size_t Num = 1) {
+ return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+ }
+
+ /// \brief Deallocate space for a sequence of objects without destroying them.
+ template <typename T>
+ typename std::enable_if<
+ !std::is_same<typename std::remove_cv<T>::type, void>::value, void>::type
+ Deallocate(T *Ptr, size_t Num = 1) {
+ Deallocate(static_cast<const void *>(Ptr), Num * sizeof(T));
+ }
+};
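A minimal sketch of the CRTP contract just described; CountingAllocator is an illustrative name, not an LLVM class. The derived type supplies the two core methods and re-exports the typed helper overloads, exactly as MallocAllocator does below.

#include "llvm/Support/Allocator.h"
#include <cstdlib>

class CountingAllocator : public llvm::AllocatorBase<CountingAllocator> {
  size_t Live = 0; // crude live-allocation count, purely for illustration

public:
  void *Allocate(size_t Size, size_t /*Alignment*/) {
    ++Live;
    return std::malloc(Size);
  }
  using llvm::AllocatorBase<CountingAllocator>::Allocate;

  void Deallocate(const void *Ptr, size_t /*Size*/) {
    --Live;
    std::free(const_cast<void *>(Ptr));
  }
  using llvm::AllocatorBase<CountingAllocator>::Deallocate;
};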
+
+class MallocAllocator : public AllocatorBase<MallocAllocator> {
+public:
+ void Reset() {}
+
+ LLVM_ATTRIBUTE_RETURNS_NONNULL void *Allocate(size_t Size,
+ size_t /*Alignment*/) {
+ return malloc(Size);
+ }
+
+ // Pull in base class overloads.
+ using AllocatorBase<MallocAllocator>::Allocate;
+
+ void Deallocate(const void *Ptr, size_t /*Size*/) {
+ free(const_cast<void *>(Ptr));
+ }
+
+ // Pull in base class overloads.
+ using AllocatorBase<MallocAllocator>::Deallocate;
+
+ void PrintStats() const {}
+};
+
+namespace detail {
+
+// We call out to an external function to actually print the message as the
+// printing code uses Allocator.h in its implementation.
+void printBumpPtrAllocatorStats(unsigned NumSlabs, size_t BytesAllocated,
+ size_t TotalMemory);
+} // End namespace detail.
+
+/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
+///
+/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
+/// where every allocation is found by merely allocating the next N bytes in
+/// the slab, or the next N bytes in the next slab.
+///
+/// Note that this also has a threshold for forcing allocations above a certain
+/// size into their own slab.
+///
+/// The BumpPtrAllocatorImpl template defaults to using a MallocAllocator
+/// object, which wraps malloc, to allocate memory, but it can be changed to
+/// use a custom allocator.
+template <typename AllocatorT = MallocAllocator, size_t SlabSize = 4096,
+ size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl
+ : public AllocatorBase<
+ BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold>> {
+public:
+ static_assert(SizeThreshold <= SlabSize,
+ "The SizeThreshold must be at most the SlabSize to ensure "
+ "that objects larger than a slab go into their own memory "
+ "allocation.");
+
+ BumpPtrAllocatorImpl()
+ : CurPtr(nullptr), End(nullptr), BytesAllocated(0), Allocator() {}
+ template <typename T>
+ BumpPtrAllocatorImpl(T &&Allocator)
+ : CurPtr(nullptr), End(nullptr), BytesAllocated(0),
+ Allocator(std::forward<T &&>(Allocator)) {}
+
+ // Manually implement a move constructor as we must clear the old allocator's
+ // slabs as a matter of correctness.
+ BumpPtrAllocatorImpl(BumpPtrAllocatorImpl &&Old)
+ : CurPtr(Old.CurPtr), End(Old.End), Slabs(std::move(Old.Slabs)),
+ CustomSizedSlabs(std::move(Old.CustomSizedSlabs)),
+ BytesAllocated(Old.BytesAllocated),
+ Allocator(std::move(Old.Allocator)) {
+ Old.CurPtr = Old.End = nullptr;
+ Old.BytesAllocated = 0;
+ Old.Slabs.clear();
+ Old.CustomSizedSlabs.clear();
+ }
+
+ ~BumpPtrAllocatorImpl() {
+ DeallocateSlabs(Slabs.begin(), Slabs.end());
+ DeallocateCustomSizedSlabs();
+ }
+
+ BumpPtrAllocatorImpl &operator=(BumpPtrAllocatorImpl &&RHS) {
+ DeallocateSlabs(Slabs.begin(), Slabs.end());
+ DeallocateCustomSizedSlabs();
+
+ CurPtr = RHS.CurPtr;
+ End = RHS.End;
+ BytesAllocated = RHS.BytesAllocated;
+ Slabs = std::move(RHS.Slabs);
+ CustomSizedSlabs = std::move(RHS.CustomSizedSlabs);
+ Allocator = std::move(RHS.Allocator);
+
+ RHS.CurPtr = RHS.End = nullptr;
+ RHS.BytesAllocated = 0;
+ RHS.Slabs.clear();
+ RHS.CustomSizedSlabs.clear();
+ return *this;
+ }
+
+ /// \brief Deallocate all but the current slab and reset the current pointer
+ /// to the beginning of it, freeing all memory allocated so far.
+ void Reset() {
+ DeallocateCustomSizedSlabs();
+ CustomSizedSlabs.clear();
+
+ if (Slabs.empty())
+ return;
+
+ // Reset the state.
+ BytesAllocated = 0;
+ CurPtr = (char *)Slabs.front();
+ End = CurPtr + SlabSize;
+
+ // Deallocate all but the first slab, and deallocate all custom-sized slabs.
+ DeallocateSlabs(std::next(Slabs.begin()), Slabs.end());
+ Slabs.erase(std::next(Slabs.begin()), Slabs.end());
+ }
+
+ /// \brief Allocate space at the specified alignment.
+ LLVM_ATTRIBUTE_RETURNS_NONNULL LLVM_ATTRIBUTE_RETURNS_NOALIAS void *
+ Allocate(size_t Size, size_t Alignment) {
+    assert(Alignment > 0 && "0-byte alignment is not allowed. Use 1 instead.");
+
+ // Keep track of how many bytes we've allocated.
+ BytesAllocated += Size;
+
+ size_t Adjustment = alignmentAdjustment(CurPtr, Alignment);
+ assert(Adjustment + Size >= Size && "Adjustment + Size must not overflow");
+
+ // Check if we have enough space.
+ if (Adjustment + Size <= size_t(End - CurPtr)) {
+ char *AlignedPtr = CurPtr + Adjustment;
+ CurPtr = AlignedPtr + Size;
+ // Update the allocation point of this memory block in MemorySanitizer.
+ // Without this, MemorySanitizer messages for values originated from here
+ // will point to the allocation of the entire slab.
+ __msan_allocated_memory(AlignedPtr, Size);
+ // Similarly, tell ASan about this space.
+ __asan_unpoison_memory_region(AlignedPtr, Size);
+ return AlignedPtr;
+ }
+
+ // If Size is really big, allocate a separate slab for it.
+ size_t PaddedSize = Size + Alignment - 1;
+ if (PaddedSize > SizeThreshold) {
+ void *NewSlab = Allocator.Allocate(PaddedSize, 0);
+      // We own the new slab and don't want anyone reading anything other than
+ // pieces returned from this method. So poison the whole slab.
+ __asan_poison_memory_region(NewSlab, PaddedSize);
+ CustomSizedSlabs.push_back(std::make_pair(NewSlab, PaddedSize));
+
+ uintptr_t AlignedAddr = alignAddr(NewSlab, Alignment);
+ assert(AlignedAddr + Size <= (uintptr_t)NewSlab + PaddedSize);
+ char *AlignedPtr = (char*)AlignedAddr;
+ __msan_allocated_memory(AlignedPtr, Size);
+ __asan_unpoison_memory_region(AlignedPtr, Size);
+ return AlignedPtr;
+ }
+
+ // Otherwise, start a new slab and try again.
+ StartNewSlab();
+ uintptr_t AlignedAddr = alignAddr(CurPtr, Alignment);
+ assert(AlignedAddr + Size <= (uintptr_t)End &&
+ "Unable to allocate memory!");
+ char *AlignedPtr = (char*)AlignedAddr;
+ CurPtr = AlignedPtr + Size;
+ __msan_allocated_memory(AlignedPtr, Size);
+ __asan_unpoison_memory_region(AlignedPtr, Size);
+ return AlignedPtr;
+ }
+
+ // Pull in base class overloads.
+ using AllocatorBase<BumpPtrAllocatorImpl>::Allocate;
+
+ void Deallocate(const void *Ptr, size_t Size) {
+ __asan_poison_memory_region(Ptr, Size);
+ }
+
+ // Pull in base class overloads.
+ using AllocatorBase<BumpPtrAllocatorImpl>::Deallocate;
+
+ size_t GetNumSlabs() const { return Slabs.size() + CustomSizedSlabs.size(); }
+
+ size_t getTotalMemory() const {
+ size_t TotalMemory = 0;
+ for (auto I = Slabs.begin(), E = Slabs.end(); I != E; ++I)
+ TotalMemory += computeSlabSize(std::distance(Slabs.begin(), I));
+ for (auto &PtrAndSize : CustomSizedSlabs)
+ TotalMemory += PtrAndSize.second;
+ return TotalMemory;
+ }
+
+ void PrintStats() const {
+ detail::printBumpPtrAllocatorStats(Slabs.size(), BytesAllocated,
+ getTotalMemory());
+ }
+
+private:
+ /// \brief The current pointer into the current slab.
+ ///
+ /// This points to the next free byte in the slab.
+ char *CurPtr;
+
+ /// \brief The end of the current slab.
+ char *End;
+
+ /// \brief The slabs allocated so far.
+ SmallVector<void *, 4> Slabs;
+
+ /// \brief Custom-sized slabs allocated for too-large allocation requests.
+ SmallVector<std::pair<void *, size_t>, 0> CustomSizedSlabs;
+
+ /// \brief How many bytes we've allocated.
+ ///
+ /// Used so that we can compute how much space was wasted.
+ size_t BytesAllocated;
+
+ /// \brief The allocator instance we use to get slabs of memory.
+ AllocatorT Allocator;
+
+ static size_t computeSlabSize(unsigned SlabIdx) {
+ // Scale the actual allocated slab size based on the number of slabs
+ // allocated. Every 128 slabs allocated, we double the allocated size to
+ // reduce allocation frequency, but saturate at multiplying the slab size by
+ // 2^30.
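+    // With the default 4096-byte SlabSize, slabs 0-127 are 4 KiB each,
+    // slabs 128-255 are 8 KiB each, and so on.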
+ return SlabSize * ((size_t)1 << std::min<size_t>(30, SlabIdx / 128));
+ }
+
+ /// \brief Allocate a new slab and move the bump pointers over into the new
+ /// slab, modifying CurPtr and End.
+ void StartNewSlab() {
+ size_t AllocatedSlabSize = computeSlabSize(Slabs.size());
+
+ void *NewSlab = Allocator.Allocate(AllocatedSlabSize, 0);
+ // We own the new slab and don't want anyone reading anything other than
+ // pieces returned from this method. So poison the whole slab.
+ __asan_poison_memory_region(NewSlab, AllocatedSlabSize);
+
+ Slabs.push_back(NewSlab);
+ CurPtr = (char *)(NewSlab);
+ End = ((char *)NewSlab) + AllocatedSlabSize;
+ }
+
+ /// \brief Deallocate a sequence of slabs.
+ void DeallocateSlabs(SmallVectorImpl<void *>::iterator I,
+ SmallVectorImpl<void *>::iterator E) {
+ for (; I != E; ++I) {
+ size_t AllocatedSlabSize =
+ computeSlabSize(std::distance(Slabs.begin(), I));
+ Allocator.Deallocate(*I, AllocatedSlabSize);
+ }
+ }
+
+ /// \brief Deallocate all memory for custom sized slabs.
+ void DeallocateCustomSizedSlabs() {
+ for (auto &PtrAndSize : CustomSizedSlabs) {
+ void *Ptr = PtrAndSize.first;
+ size_t Size = PtrAndSize.second;
+ Allocator.Deallocate(Ptr, Size);
+ }
+ }
+
+ template <typename T> friend class SpecificBumpPtrAllocator;
+};
+
+/// \brief The standard BumpPtrAllocator which just uses the default template
+/// parameters.
+typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
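+
+// A minimal usage sketch (MyNode is a hypothetical type):
+//
+//   BumpPtrAllocator Alloc;
+//   MyNode *N = new (Alloc) MyNode();   // uses the operator new overload below
+//   int *Arr = Alloc.Allocate<int>(16); // uninitialized storage for 16 ints
+//   Alloc.Reset();                      // releases all allocations at once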
+
+/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
+/// allocated.
+///
+/// This allows calling the destructor in DestroyAll() and when the allocator is
+/// destroyed.
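+///
+/// A minimal sketch (Node is a hypothetical type with a non-trivial
+/// destructor):
+///
+/// \code
+///   SpecificBumpPtrAllocator<Node> Alloc;
+///   Node *N = new (Alloc.Allocate()) Node();
+///   Alloc.DestroyAll(); // runs ~Node() on every object allocated so far
+/// \endcode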
+template <typename T> class SpecificBumpPtrAllocator {
+ BumpPtrAllocator Allocator;
+
+public:
+ SpecificBumpPtrAllocator() : Allocator() {}
+ SpecificBumpPtrAllocator(SpecificBumpPtrAllocator &&Old)
+ : Allocator(std::move(Old.Allocator)) {}
+ ~SpecificBumpPtrAllocator() { DestroyAll(); }
+
+ SpecificBumpPtrAllocator &operator=(SpecificBumpPtrAllocator &&RHS) {
+ Allocator = std::move(RHS.Allocator);
+ return *this;
+ }
+
+  /// Call the destructor of each allocated object, then deallocate all but the
+  /// current slab and reset the current pointer to the beginning of it, freeing
+  /// all memory allocated so far.
+ void DestroyAll() {
+ auto DestroyElements = [](char *Begin, char *End) {
+ assert(Begin == (char*)alignAddr(Begin, alignOf<T>()));
+ for (char *Ptr = Begin; Ptr + sizeof(T) <= End; Ptr += sizeof(T))
+ reinterpret_cast<T *>(Ptr)->~T();
+ };
+
+ for (auto I = Allocator.Slabs.begin(), E = Allocator.Slabs.end(); I != E;
+ ++I) {
+ size_t AllocatedSlabSize = BumpPtrAllocator::computeSlabSize(
+ std::distance(Allocator.Slabs.begin(), I));
+ char *Begin = (char*)alignAddr(*I, alignOf<T>());
+ char *End = *I == Allocator.Slabs.back() ? Allocator.CurPtr
+ : (char *)*I + AllocatedSlabSize;
+
+ DestroyElements(Begin, End);
+ }
+
+ for (auto &PtrAndSize : Allocator.CustomSizedSlabs) {
+ void *Ptr = PtrAndSize.first;
+ size_t Size = PtrAndSize.second;
+ DestroyElements((char*)alignAddr(Ptr, alignOf<T>()), (char *)Ptr + Size);
+ }
+
+ Allocator.Reset();
+ }
+
+ /// \brief Allocate space for an array of objects without constructing them.
+ T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
+};
+
+} // end namespace llvm
+
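+/// \brief Placement-new support for BumpPtrAllocatorImpl, enabling expressions
+/// of the form \c new(Allocator) T(...). Individual objects cannot be freed;
+/// the matching operator delete below is a no-op that exists only so the
+/// compiler can unwind if a constructor throws.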
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void *operator new(size_t Size,
+ llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize,
+ SizeThreshold> &Allocator) {
+ struct S {
+ char c;
+ union {
+ double D;
+ long double LD;
+ long long L;
+ void *P;
+ } x;
+ };
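+  // Align to the smaller of the next power of two of Size and the strictest
+  // fundamental alignment; offsetof(S, x) measures the latter via the union
+  // of maximally-aligned scalar types above.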
+ return Allocator.Allocate(
+ Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
+}
+
+template <typename AllocatorT, size_t SlabSize, size_t SizeThreshold>
+void operator delete(
+ void *, llvm::BumpPtrAllocatorImpl<AllocatorT, SlabSize, SizeThreshold> &) {
+}
+
+#endif // LLVM_SUPPORT_ALLOCATOR_H
diff --git a/contrib/llvm/include/llvm/Support/ArrayRecycler.h b/contrib/llvm/include/llvm/Support/ArrayRecycler.h
new file mode 100644
index 0000000..36f644a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ArrayRecycler.h
@@ -0,0 +1,143 @@
+//==- llvm/Support/ArrayRecycler.h - Recycling of Arrays ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ArrayRecycler class template which can recycle small
+// arrays allocated from one of the allocators in Allocator.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ARRAYRECYCLER_H
+#define LLVM_SUPPORT_ARRAYRECYCLER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace llvm {
+
+/// Recycle small arrays allocated from a BumpPtrAllocator.
+///
+/// Arrays are allocated in a small number of fixed sizes. For each supported
+/// array size, the ArrayRecycler keeps a free list of available arrays.
+///
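+/// A typical round trip, sketched (Node is a hypothetical element type):
+///
+/// \code
+///   ArrayRecycler<Node> Recycler;
+///   BumpPtrAllocator Alloc;
+///   auto Cap = ArrayRecycler<Node>::Capacity::get(10);
+///   Node *A = Recycler.allocate(Cap, Alloc);
+///   ...
+///   Recycler.deallocate(Cap, A); // recycled onto a free list, not freed
+///   Recycler.clear(Alloc);       // required before the recycler is destroyed
+/// \endcode
+///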
+template<class T, size_t Align = AlignOf<T>::Alignment>
+class ArrayRecycler {
+ // The free list for a given array size is a simple singly linked list.
+ // We can't use iplist or Recycler here since those classes can't be copied.
+ struct FreeList {
+ FreeList *Next;
+ };
+
+ static_assert(Align >= AlignOf<FreeList>::Alignment, "Object underaligned");
+ static_assert(sizeof(T) >= sizeof(FreeList), "Objects are too small");
+
+ // Keep a free list for each array size.
+ SmallVector<FreeList*, 8> Bucket;
+
+ // Remove an entry from the free list in Bucket[Idx] and return it.
+  // Return nullptr if no entries are available.
+ T *pop(unsigned Idx) {
+ if (Idx >= Bucket.size())
+ return nullptr;
+ FreeList *Entry = Bucket[Idx];
+ if (!Entry)
+ return nullptr;
+ Bucket[Idx] = Entry->Next;
+ return reinterpret_cast<T*>(Entry);
+ }
+
+ // Add an entry to the free list at Bucket[Idx].
+ void push(unsigned Idx, T *Ptr) {
+ assert(Ptr && "Cannot recycle NULL pointer");
+ FreeList *Entry = reinterpret_cast<FreeList*>(Ptr);
+ if (Idx >= Bucket.size())
+ Bucket.resize(size_t(Idx) + 1);
+ Entry->Next = Bucket[Idx];
+ Bucket[Idx] = Entry;
+ }
+
+public:
+ /// The size of an allocated array is represented by a Capacity instance.
+ ///
+ /// This class is much smaller than a size_t, and it provides methods to work
+ /// with the set of legal array capacities.
+ class Capacity {
+ uint8_t Index;
+ explicit Capacity(uint8_t idx) : Index(idx) {}
+
+ public:
+ Capacity() : Index(0) {}
+
+ /// Get the capacity of an array that can hold at least N elements.
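+    /// For example, get(10) rounds up to the next power of two, yielding
+    /// Index 4 and getSize() == 16.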
+ static Capacity get(size_t N) {
+ return Capacity(N ? Log2_64_Ceil(N) : 0);
+ }
+
+ /// Get the number of elements in an array with this capacity.
+ size_t getSize() const { return size_t(1u) << Index; }
+
+ /// Get the bucket number for this capacity.
+ unsigned getBucket() const { return Index; }
+
+ /// Get the next larger capacity. Large capacities grow exponentially, so
+ /// this function can be used to reallocate incrementally growing vectors
+ /// in amortized linear time.
+ Capacity getNext() const { return Capacity(Index + 1); }
+ };
+
+ ~ArrayRecycler() {
+ // The client should always call clear() so recycled arrays can be returned
+ // to the allocator.
+ assert(Bucket.empty() && "Non-empty ArrayRecycler deleted!");
+ }
+
+ /// Release all the tracked allocations to the allocator. The recycler must
+ /// be free of any tracked allocations before being deleted.
+ template<class AllocatorType>
+ void clear(AllocatorType &Allocator) {
+ for (; !Bucket.empty(); Bucket.pop_back())
+ while (T *Ptr = pop(Bucket.size() - 1))
+ Allocator.Deallocate(Ptr);
+ }
+
+ /// Special case for BumpPtrAllocator which has an empty Deallocate()
+ /// function.
+ ///
+ /// There is no need to traverse the free lists, pulling all the objects into
+ /// cache.
+ void clear(BumpPtrAllocator&) {
+ Bucket.clear();
+ }
+
+ /// Allocate an array of at least the requested capacity.
+ ///
+ /// Return an existing recycled array, or allocate one from Allocator if
+ /// none are available for recycling.
+ ///
+ template<class AllocatorType>
+ T *allocate(Capacity Cap, AllocatorType &Allocator) {
+ // Try to recycle an existing array.
+ if (T *Ptr = pop(Cap.getBucket()))
+ return Ptr;
+ // Nope, get more memory.
+ return static_cast<T*>(Allocator.Allocate(sizeof(T)*Cap.getSize(), Align));
+ }
+
+ /// Deallocate an array with the specified Capacity.
+ ///
+ /// Cap must be the same capacity that was given to allocate().
+ ///
+ void deallocate(Capacity Cap, T *Ptr) {
+ push(Cap.getBucket(), Ptr);
+ }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Atomic.h b/contrib/llvm/include/llvm/Support/Atomic.h
new file mode 100644
index 0000000..9ec23e8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Atomic.h
@@ -0,0 +1,39 @@
+//===- llvm/Support/Atomic.h - Atomic Operations -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys atomic operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ATOMIC_H
+#define LLVM_SUPPORT_ATOMIC_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+ namespace sys {
+ void MemoryFence();
+
+#ifdef _MSC_VER
+ typedef long cas_flag;
+#else
+ typedef uint32_t cas_flag;
+#endif
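+    /// If *ptr equals old_value, atomically store new_value into *ptr. In
+    /// either case, return the value *ptr held before the operation.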
+ cas_flag CompareAndSwap(volatile cas_flag* ptr,
+ cas_flag new_value,
+ cas_flag old_value);
+ cas_flag AtomicIncrement(volatile cas_flag* ptr);
+ cas_flag AtomicDecrement(volatile cas_flag* ptr);
+ cas_flag AtomicAdd(volatile cas_flag* ptr, cas_flag val);
+ cas_flag AtomicMul(volatile cas_flag* ptr, cas_flag val);
+ cas_flag AtomicDiv(volatile cas_flag* ptr, cas_flag val);
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/BlockFrequency.h b/contrib/llvm/include/llvm/Support/BlockFrequency.h
new file mode 100644
index 0000000..1b45cc5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/BlockFrequency.h
@@ -0,0 +1,78 @@
+//===-------- BlockFrequency.h - Block Frequency Wrapper --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the BlockFrequency class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BLOCKFREQUENCY_H
+#define LLVM_SUPPORT_BLOCKFREQUENCY_H
+
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class raw_ostream;
+
+// This class represents Block Frequency as a 64-bit value.
+class BlockFrequency {
+ uint64_t Frequency;
+
+public:
+ BlockFrequency(uint64_t Freq = 0) : Frequency(Freq) { }
+
+ /// \brief Returns the maximum possible frequency, the saturation value.
+ static uint64_t getMaxFrequency() { return -1ULL; }
+
+  /// \brief Returns the frequency as a fixed-point number scaled by the entry
+ /// frequency.
+ uint64_t getFrequency() const { return Frequency; }
+
+ /// \brief Multiplies with a branch probability. The computation will never
+ /// overflow.
+ BlockFrequency &operator*=(BranchProbability Prob);
+ BlockFrequency operator*(BranchProbability Prob) const;
+
+ /// \brief Divide by a non-zero branch probability using saturating
+ /// arithmetic.
+ BlockFrequency &operator/=(BranchProbability Prob);
+ BlockFrequency operator/(BranchProbability Prob) const;
+
+ /// \brief Adds another block frequency using saturating arithmetic.
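+  ///
+  /// For example, adding 1 to a frequency already at getMaxFrequency() leaves
+  /// it at getMaxFrequency() rather than wrapping around to zero.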
+ BlockFrequency &operator+=(BlockFrequency Freq);
+ BlockFrequency operator+(BlockFrequency Freq) const;
+
+ /// \brief Subtracts another block frequency using saturating arithmetic.
+ BlockFrequency &operator-=(BlockFrequency Freq);
+ BlockFrequency operator-(BlockFrequency Freq) const;
+
+  /// \brief Shift the block frequency right by \c count bits, saturating to 1.
+ BlockFrequency &operator>>=(const unsigned count);
+
+ bool operator<(BlockFrequency RHS) const {
+ return Frequency < RHS.Frequency;
+ }
+
+ bool operator<=(BlockFrequency RHS) const {
+ return Frequency <= RHS.Frequency;
+ }
+
+ bool operator>(BlockFrequency RHS) const {
+ return Frequency > RHS.Frequency;
+ }
+
+ bool operator>=(BlockFrequency RHS) const {
+ return Frequency >= RHS.Frequency;
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/BranchProbability.h b/contrib/llvm/include/llvm/Support/BranchProbability.h
new file mode 100644
index 0000000..26bc888
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/BranchProbability.h
@@ -0,0 +1,219 @@
+//===- BranchProbability.h - Branch Probability Wrapper ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Definition of BranchProbability shared by IR and Machine Instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_BRANCHPROBABILITY_H
+#define LLVM_SUPPORT_BRANCHPROBABILITY_H
+
+#include "llvm/Support/DataTypes.h"
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <numeric>
+
+namespace llvm {
+
+class raw_ostream;
+
+// This class represents Branch Probability as a non-negative fraction that is
+// no greater than 1. It uses a fixed-point-like implementation, in which the
+// denominator is always a constant value (here we use 1<<31 for maximum
+// precision).
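+//
+// For example, a probability of 1/2 is stored as N = 1u << 30, i.e. 0.5
+// scaled by the constant denominator 1u << 31.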
+class BranchProbability {
+ // Numerator
+ uint32_t N;
+
+ // Denominator, which is a constant value.
+ static const uint32_t D = 1u << 31;
+ static const uint32_t UnknownN = UINT32_MAX;
+
+ // Construct a BranchProbability with only numerator assuming the denominator
+ // is 1<<31. For internal use only.
+ explicit BranchProbability(uint32_t n) : N(n) {}
+
+public:
+ BranchProbability() : N(UnknownN) {}
+ BranchProbability(uint32_t Numerator, uint32_t Denominator);
+
+ bool isZero() const { return N == 0; }
+ bool isUnknown() const { return N == UnknownN; }
+
+ static BranchProbability getZero() { return BranchProbability(0); }
+ static BranchProbability getOne() { return BranchProbability(D); }
+ static BranchProbability getUnknown() { return BranchProbability(UnknownN); }
+ // Create a BranchProbability object with the given numerator and 1<<31
+ // as denominator.
+ static BranchProbability getRaw(uint32_t N) { return BranchProbability(N); }
+ // Create a BranchProbability object from 64-bit integers.
+ static BranchProbability getBranchProbability(uint64_t Numerator,
+ uint64_t Denominator);
+
+  // Normalize the given probabilities so that their sum is approximately one.
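+  // For example, normalizing {1/4, 1/4} rescales both entries to 1/2; unknown
+  // probabilities are first assigned an even share of the complement of the
+  // known sum.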
+ template <class ProbabilityIter>
+ static void normalizeProbabilities(ProbabilityIter Begin,
+ ProbabilityIter End);
+
+ uint32_t getNumerator() const { return N; }
+ static uint32_t getDenominator() { return D; }
+
+ // Return (1 - Probability).
+ BranchProbability getCompl() const { return BranchProbability(D - N); }
+
+ raw_ostream &print(raw_ostream &OS) const;
+
+ void dump() const;
+
+ /// \brief Scale a large integer.
+ ///
+ /// Scales \c Num. Guarantees full precision. Returns the floor of the
+ /// result.
+ ///
+ /// \return \c Num times \c this.
+ uint64_t scale(uint64_t Num) const;
+
+ /// \brief Scale a large integer by the inverse.
+ ///
+ /// Scales \c Num by the inverse of \c this. Guarantees full precision.
+ /// Returns the floor of the result.
+ ///
+ /// \return \c Num divided by \c this.
+ uint64_t scaleByInverse(uint64_t Num) const;
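+  //
+  // For example, with a probability of 1/4, scale(100) == 25 and
+  // scaleByInverse(100) == 400.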
+
+ BranchProbability &operator+=(BranchProbability RHS) {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in arithmetics.");
+ // Saturate the result in case of overflow.
+ N = (uint64_t(N) + RHS.N > D) ? D : N + RHS.N;
+ return *this;
+ }
+
+ BranchProbability &operator-=(BranchProbability RHS) {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in arithmetics.");
+ // Saturate the result in case of underflow.
+ N = N < RHS.N ? 0 : N - RHS.N;
+ return *this;
+ }
+
+ BranchProbability &operator*=(BranchProbability RHS) {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in arithmetics.");
+ N = (static_cast<uint64_t>(N) * RHS.N + D / 2) / D;
+ return *this;
+ }
+
+ BranchProbability &operator/=(uint32_t RHS) {
+    assert(N != UnknownN &&
+           "Unknown probability cannot participate in arithmetic.");
+    assert(RHS > 0 && "The divisor cannot be zero.");
+ N /= RHS;
+ return *this;
+ }
+
+ BranchProbability operator+(BranchProbability RHS) const {
+ BranchProbability Prob(*this);
+ return Prob += RHS;
+ }
+
+ BranchProbability operator-(BranchProbability RHS) const {
+ BranchProbability Prob(*this);
+ return Prob -= RHS;
+ }
+
+ BranchProbability operator*(BranchProbability RHS) const {
+ BranchProbability Prob(*this);
+ return Prob *= RHS;
+ }
+
+ BranchProbability operator/(uint32_t RHS) const {
+ BranchProbability Prob(*this);
+ return Prob /= RHS;
+ }
+
+ bool operator==(BranchProbability RHS) const { return N == RHS.N; }
+ bool operator!=(BranchProbability RHS) const { return !(*this == RHS); }
+
+ bool operator<(BranchProbability RHS) const {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in comparisons.");
+ return N < RHS.N;
+ }
+
+ bool operator>(BranchProbability RHS) const {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in comparisons.");
+ return RHS < *this;
+ }
+
+ bool operator<=(BranchProbability RHS) const {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in comparisons.");
+ return !(RHS < *this);
+ }
+
+ bool operator>=(BranchProbability RHS) const {
+ assert(N != UnknownN && RHS.N != UnknownN &&
+ "Unknown probability cannot participate in comparisons.");
+ return !(*this < RHS);
+ }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, BranchProbability Prob) {
+ return Prob.print(OS);
+}
+
+template <class ProbabilityIter>
+void BranchProbability::normalizeProbabilities(ProbabilityIter Begin,
+ ProbabilityIter End) {
+ if (Begin == End)
+ return;
+
+ unsigned UnknownProbCount = 0;
+ uint64_t Sum = std::accumulate(Begin, End, uint64_t(0),
+ [&](uint64_t S, const BranchProbability &BP) {
+ if (!BP.isUnknown())
+ return S + BP.N;
+ UnknownProbCount++;
+ return S;
+ });
+
+ if (UnknownProbCount > 0) {
+ BranchProbability ProbForUnknown = BranchProbability::getZero();
+ // If the sum of all known probabilities is less than one, evenly distribute
+ // the complement of sum to unknown probabilities. Otherwise, set unknown
+ // probabilities to zeros and continue to normalize known probabilities.
+ if (Sum < BranchProbability::getDenominator())
+ ProbForUnknown = BranchProbability::getRaw(
+ (BranchProbability::getDenominator() - Sum) / UnknownProbCount);
+
+ std::replace_if(Begin, End,
+ [](const BranchProbability &BP) { return BP.isUnknown(); },
+ ProbForUnknown);
+
+ if (Sum <= BranchProbability::getDenominator())
+ return;
+ }
+
+ if (Sum == 0) {
+ BranchProbability BP(1, std::distance(Begin, End));
+ std::fill(Begin, End, BP);
+ return;
+ }
+
+ for (auto I = Begin; I != End; ++I)
+ I->N = (I->N * uint64_t(D) + Sum / 2) / Sum;
+}
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CBindingWrapping.h b/contrib/llvm/include/llvm/Support/CBindingWrapping.h
new file mode 100644
index 0000000..d4633aa
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/CBindingWrapping.h
@@ -0,0 +1,47 @@
+//===- llvm/Support/CBindingWrapping.h - C Interface Wrapping ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the wrapping macros for the C interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CBINDINGWRAPPING_H
+#define LLVM_SUPPORT_CBINDINGWRAPPING_H
+
+#include "llvm/Support/Casting.h"
+#include "llvm-c/Types.h"
+
+#define DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \
+ inline ty *unwrap(ref P) { \
+ return reinterpret_cast<ty*>(P); \
+ } \
+ \
+ inline ref wrap(const ty *P) { \
+ return reinterpret_cast<ref>(const_cast<ty*>(P)); \
+ }
+
+#define DEFINE_ISA_CONVERSION_FUNCTIONS(ty, ref) \
+ DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \
+ \
+ template<typename T> \
+ inline T *unwrap(ref P) { \
+ return cast<T>(unwrap(P)); \
+ }
+
+#define DEFINE_STDCXX_CONVERSION_FUNCTIONS(ty, ref) \
+ DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ty, ref) \
+ \
+ template<typename T> \
+ inline T *unwrap(ref P) { \
+ T *Q = (T*)unwrap(P); \
+ assert(Q && "Invalid cast!"); \
+ return Q; \
+ }
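+
+// For example, a C API would typically pair a C++ type with its opaque C
+// reference type like so (sketch):
+//
+//   DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef)
+//
+// after which wrap(M) converts a Module* into an LLVMModuleRef and unwrap(R)
+// converts it back.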
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/COFF.h b/contrib/llvm/include/llvm/Support/COFF.h
new file mode 100644
index 0000000..0162175
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/COFF.h
@@ -0,0 +1,679 @@
+//===-- llvm/Support/COFF.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains definitions used in Windows COFF files.
+//
+// Structures and enums defined within this file were created using
+// information from Microsoft's publicly available PE/COFF format document:
+//
+// Microsoft Portable Executable and Common Object File Format Specification
+// Revision 8.1 - February 15, 2008
+//
+// As of 5/2/2010, hosted by Microsoft at:
+// http://www.microsoft.com/whdc/system/platform/firmware/pecoff.mspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COFF_H
+#define LLVM_SUPPORT_COFF_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstring>
+
+namespace llvm {
+namespace COFF {
+
+ // The maximum number of sections that a COFF object can have (inclusive).
+ const int32_t MaxNumberOfSections16 = 65279;
+
+  // The PE signature bytes that follow the DOS stub header.
+ static const char PEMagic[] = { 'P', 'E', '\0', '\0' };
+
+ static const char BigObjMagic[] = {
+ '\xc7', '\xa1', '\xba', '\xd1', '\xee', '\xba', '\xa9', '\x4b',
+ '\xaf', '\x20', '\xfa', '\xf6', '\x6a', '\xa4', '\xdc', '\xb8',
+ };
+
+ // Sizes in bytes of various things in the COFF format.
+ enum {
+ Header16Size = 20,
+ Header32Size = 56,
+ NameSize = 8,
+ Symbol16Size = 18,
+ Symbol32Size = 20,
+ SectionSize = 40,
+ RelocationSize = 10
+ };
+
+ struct header {
+ uint16_t Machine;
+ int32_t NumberOfSections;
+ uint32_t TimeDateStamp;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ uint16_t SizeOfOptionalHeader;
+ uint16_t Characteristics;
+ };
+
+ struct BigObjHeader {
+ enum : uint16_t { MinBigObjectVersion = 2 };
+
+ uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
+ uint16_t Sig2; ///< Must be 0xFFFF.
+ uint16_t Version;
+ uint16_t Machine;
+ uint32_t TimeDateStamp;
+ uint8_t UUID[16];
+ uint32_t unused1;
+ uint32_t unused2;
+ uint32_t unused3;
+ uint32_t unused4;
+ uint32_t NumberOfSections;
+ uint32_t PointerToSymbolTable;
+ uint32_t NumberOfSymbols;
+ };
+
+ enum MachineTypes {
+ MT_Invalid = 0xffff,
+
+ IMAGE_FILE_MACHINE_UNKNOWN = 0x0,
+ IMAGE_FILE_MACHINE_AM33 = 0x13,
+ IMAGE_FILE_MACHINE_AMD64 = 0x8664,
+ IMAGE_FILE_MACHINE_ARM = 0x1C0,
+ IMAGE_FILE_MACHINE_ARMNT = 0x1C4,
+ IMAGE_FILE_MACHINE_ARM64 = 0xAA64,
+ IMAGE_FILE_MACHINE_EBC = 0xEBC,
+ IMAGE_FILE_MACHINE_I386 = 0x14C,
+ IMAGE_FILE_MACHINE_IA64 = 0x200,
+ IMAGE_FILE_MACHINE_M32R = 0x9041,
+ IMAGE_FILE_MACHINE_MIPS16 = 0x266,
+ IMAGE_FILE_MACHINE_MIPSFPU = 0x366,
+ IMAGE_FILE_MACHINE_MIPSFPU16 = 0x466,
+ IMAGE_FILE_MACHINE_POWERPC = 0x1F0,
+ IMAGE_FILE_MACHINE_POWERPCFP = 0x1F1,
+ IMAGE_FILE_MACHINE_R4000 = 0x166,
+ IMAGE_FILE_MACHINE_SH3 = 0x1A2,
+ IMAGE_FILE_MACHINE_SH3DSP = 0x1A3,
+ IMAGE_FILE_MACHINE_SH4 = 0x1A6,
+ IMAGE_FILE_MACHINE_SH5 = 0x1A8,
+ IMAGE_FILE_MACHINE_THUMB = 0x1C2,
+ IMAGE_FILE_MACHINE_WCEMIPSV2 = 0x169
+ };
+
+ enum Characteristics {
+ C_Invalid = 0,
+
+ /// The file does not contain base relocations and must be loaded at its
+ /// preferred base. If this cannot be done, the loader will error.
+ IMAGE_FILE_RELOCS_STRIPPED = 0x0001,
+ /// The file is valid and can be run.
+ IMAGE_FILE_EXECUTABLE_IMAGE = 0x0002,
+ /// COFF line numbers have been stripped. This is deprecated and should be
+ /// 0.
+ IMAGE_FILE_LINE_NUMS_STRIPPED = 0x0004,
+ /// COFF symbol table entries for local symbols have been removed. This is
+ /// deprecated and should be 0.
+ IMAGE_FILE_LOCAL_SYMS_STRIPPED = 0x0008,
+ /// Aggressively trim working set. This is deprecated and must be 0.
+ IMAGE_FILE_AGGRESSIVE_WS_TRIM = 0x0010,
+ /// Image can handle > 2GiB addresses.
+ IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x0020,
+ /// Little endian: the LSB precedes the MSB in memory. This is deprecated
+ /// and should be 0.
+ IMAGE_FILE_BYTES_REVERSED_LO = 0x0080,
+    /// Machine is based on a 32-bit word architecture.
+ IMAGE_FILE_32BIT_MACHINE = 0x0100,
+ /// Debugging info has been removed.
+ IMAGE_FILE_DEBUG_STRIPPED = 0x0200,
+ /// If the image is on removable media, fully load it and copy it to swap.
+ IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 0x0400,
+ /// If the image is on network media, fully load it and copy it to swap.
+ IMAGE_FILE_NET_RUN_FROM_SWAP = 0x0800,
+ /// The image file is a system file, not a user program.
+ IMAGE_FILE_SYSTEM = 0x1000,
+ /// The image file is a DLL.
+ IMAGE_FILE_DLL = 0x2000,
+ /// This file should only be run on a uniprocessor machine.
+ IMAGE_FILE_UP_SYSTEM_ONLY = 0x4000,
+ /// Big endian: the MSB precedes the LSB in memory. This is deprecated
+ /// and should be 0.
+ IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
+ };
+
+ struct symbol {
+ char Name[NameSize];
+ uint32_t Value;
+ int32_t SectionNumber;
+ uint16_t Type;
+ uint8_t StorageClass;
+ uint8_t NumberOfAuxSymbols;
+ };
+
+ enum SymbolSectionNumber : int32_t {
+ IMAGE_SYM_DEBUG = -2,
+ IMAGE_SYM_ABSOLUTE = -1,
+ IMAGE_SYM_UNDEFINED = 0
+ };
+
+  /// The storage class tells where and what the symbol represents.
+ enum SymbolStorageClass {
+ SSC_Invalid = 0xff,
+
+ IMAGE_SYM_CLASS_END_OF_FUNCTION = -1, ///< Physical end of function
+ IMAGE_SYM_CLASS_NULL = 0, ///< No symbol
+ IMAGE_SYM_CLASS_AUTOMATIC = 1, ///< Stack variable
+ IMAGE_SYM_CLASS_EXTERNAL = 2, ///< External symbol
+ IMAGE_SYM_CLASS_STATIC = 3, ///< Static
+ IMAGE_SYM_CLASS_REGISTER = 4, ///< Register variable
+ IMAGE_SYM_CLASS_EXTERNAL_DEF = 5, ///< External definition
+ IMAGE_SYM_CLASS_LABEL = 6, ///< Label
+ IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7, ///< Undefined label
+ IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8, ///< Member of structure
+ IMAGE_SYM_CLASS_ARGUMENT = 9, ///< Function argument
+ IMAGE_SYM_CLASS_STRUCT_TAG = 10, ///< Structure tag
+ IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11, ///< Member of union
+ IMAGE_SYM_CLASS_UNION_TAG = 12, ///< Union tag
+ IMAGE_SYM_CLASS_TYPE_DEFINITION = 13, ///< Type definition
+ IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14, ///< Undefined static
+ IMAGE_SYM_CLASS_ENUM_TAG = 15, ///< Enumeration tag
+ IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16, ///< Member of enumeration
+ IMAGE_SYM_CLASS_REGISTER_PARAM = 17, ///< Register parameter
+ IMAGE_SYM_CLASS_BIT_FIELD = 18, ///< Bit field
+ /// ".bb" or ".eb" - beginning or end of block
+ IMAGE_SYM_CLASS_BLOCK = 100,
+ /// ".bf" or ".ef" - beginning or end of function
+ IMAGE_SYM_CLASS_FUNCTION = 101,
+ IMAGE_SYM_CLASS_END_OF_STRUCT = 102, ///< End of structure
+ IMAGE_SYM_CLASS_FILE = 103, ///< File name
+ /// Line number, reformatted as symbol
+ IMAGE_SYM_CLASS_SECTION = 104,
+ IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105, ///< Duplicate tag
+ /// External symbol in dmert public lib
+ IMAGE_SYM_CLASS_CLR_TOKEN = 107
+ };
+
+ enum SymbolBaseType {
+ IMAGE_SYM_TYPE_NULL = 0, ///< No type information or unknown base type.
+ IMAGE_SYM_TYPE_VOID = 1, ///< Used with void pointers and functions.
+ IMAGE_SYM_TYPE_CHAR = 2, ///< A character (signed byte).
+ IMAGE_SYM_TYPE_SHORT = 3, ///< A 2-byte signed integer.
+ IMAGE_SYM_TYPE_INT = 4, ///< A natural integer type on the target.
+ IMAGE_SYM_TYPE_LONG = 5, ///< A 4-byte signed integer.
+ IMAGE_SYM_TYPE_FLOAT = 6, ///< A 4-byte floating-point number.
+ IMAGE_SYM_TYPE_DOUBLE = 7, ///< An 8-byte floating-point number.
+ IMAGE_SYM_TYPE_STRUCT = 8, ///< A structure.
+    IMAGE_SYM_TYPE_UNION = 9,  ///< A union.
+ IMAGE_SYM_TYPE_ENUM = 10, ///< An enumerated type.
+ IMAGE_SYM_TYPE_MOE = 11, ///< A member of enumeration (a specific value).
+ IMAGE_SYM_TYPE_BYTE = 12, ///< A byte; unsigned 1-byte integer.
+ IMAGE_SYM_TYPE_WORD = 13, ///< A word; unsigned 2-byte integer.
+ IMAGE_SYM_TYPE_UINT = 14, ///< An unsigned integer of natural size.
+ IMAGE_SYM_TYPE_DWORD = 15 ///< An unsigned 4-byte integer.
+ };
+
+ enum SymbolComplexType {
+ IMAGE_SYM_DTYPE_NULL = 0, ///< No complex type; simple scalar variable.
+ IMAGE_SYM_DTYPE_POINTER = 1, ///< A pointer to base type.
+ IMAGE_SYM_DTYPE_FUNCTION = 2, ///< A function that returns a base type.
+ IMAGE_SYM_DTYPE_ARRAY = 3, ///< An array of base type.
+
+ /// Type is formed as (base + (derived << SCT_COMPLEX_TYPE_SHIFT))
+ SCT_COMPLEX_TYPE_SHIFT = 4
+ };
+
+ enum AuxSymbolType {
+ IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF = 1
+ };
+
+ struct section {
+ char Name[NameSize];
+ uint32_t VirtualSize;
+ uint32_t VirtualAddress;
+ uint32_t SizeOfRawData;
+ uint32_t PointerToRawData;
+ uint32_t PointerToRelocations;
+ uint32_t PointerToLineNumbers;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLineNumbers;
+ uint32_t Characteristics;
+ };
+
+ enum SectionCharacteristics : uint32_t {
+ SC_Invalid = 0xffffffff,
+
+ IMAGE_SCN_TYPE_NOLOAD = 0x00000002,
+ IMAGE_SCN_TYPE_NO_PAD = 0x00000008,
+ IMAGE_SCN_CNT_CODE = 0x00000020,
+ IMAGE_SCN_CNT_INITIALIZED_DATA = 0x00000040,
+ IMAGE_SCN_CNT_UNINITIALIZED_DATA = 0x00000080,
+ IMAGE_SCN_LNK_OTHER = 0x00000100,
+ IMAGE_SCN_LNK_INFO = 0x00000200,
+ IMAGE_SCN_LNK_REMOVE = 0x00000800,
+ IMAGE_SCN_LNK_COMDAT = 0x00001000,
+ IMAGE_SCN_GPREL = 0x00008000,
+ IMAGE_SCN_MEM_PURGEABLE = 0x00020000,
+ IMAGE_SCN_MEM_16BIT = 0x00020000,
+ IMAGE_SCN_MEM_LOCKED = 0x00040000,
+ IMAGE_SCN_MEM_PRELOAD = 0x00080000,
+ IMAGE_SCN_ALIGN_1BYTES = 0x00100000,
+ IMAGE_SCN_ALIGN_2BYTES = 0x00200000,
+ IMAGE_SCN_ALIGN_4BYTES = 0x00300000,
+ IMAGE_SCN_ALIGN_8BYTES = 0x00400000,
+ IMAGE_SCN_ALIGN_16BYTES = 0x00500000,
+ IMAGE_SCN_ALIGN_32BYTES = 0x00600000,
+ IMAGE_SCN_ALIGN_64BYTES = 0x00700000,
+ IMAGE_SCN_ALIGN_128BYTES = 0x00800000,
+ IMAGE_SCN_ALIGN_256BYTES = 0x00900000,
+ IMAGE_SCN_ALIGN_512BYTES = 0x00A00000,
+ IMAGE_SCN_ALIGN_1024BYTES = 0x00B00000,
+ IMAGE_SCN_ALIGN_2048BYTES = 0x00C00000,
+ IMAGE_SCN_ALIGN_4096BYTES = 0x00D00000,
+ IMAGE_SCN_ALIGN_8192BYTES = 0x00E00000,
+ IMAGE_SCN_LNK_NRELOC_OVFL = 0x01000000,
+ IMAGE_SCN_MEM_DISCARDABLE = 0x02000000,
+ IMAGE_SCN_MEM_NOT_CACHED = 0x04000000,
+ IMAGE_SCN_MEM_NOT_PAGED = 0x08000000,
+ IMAGE_SCN_MEM_SHARED = 0x10000000,
+ IMAGE_SCN_MEM_EXECUTE = 0x20000000,
+ IMAGE_SCN_MEM_READ = 0x40000000,
+ IMAGE_SCN_MEM_WRITE = 0x80000000
+ };
+
+ struct relocation {
+ uint32_t VirtualAddress;
+ uint32_t SymbolTableIndex;
+ uint16_t Type;
+ };
+
+ enum RelocationTypeI386 {
+ IMAGE_REL_I386_ABSOLUTE = 0x0000,
+ IMAGE_REL_I386_DIR16 = 0x0001,
+ IMAGE_REL_I386_REL16 = 0x0002,
+ IMAGE_REL_I386_DIR32 = 0x0006,
+ IMAGE_REL_I386_DIR32NB = 0x0007,
+ IMAGE_REL_I386_SEG12 = 0x0009,
+ IMAGE_REL_I386_SECTION = 0x000A,
+ IMAGE_REL_I386_SECREL = 0x000B,
+ IMAGE_REL_I386_TOKEN = 0x000C,
+ IMAGE_REL_I386_SECREL7 = 0x000D,
+ IMAGE_REL_I386_REL32 = 0x0014
+ };
+
+ enum RelocationTypeAMD64 {
+ IMAGE_REL_AMD64_ABSOLUTE = 0x0000,
+ IMAGE_REL_AMD64_ADDR64 = 0x0001,
+ IMAGE_REL_AMD64_ADDR32 = 0x0002,
+ IMAGE_REL_AMD64_ADDR32NB = 0x0003,
+ IMAGE_REL_AMD64_REL32 = 0x0004,
+ IMAGE_REL_AMD64_REL32_1 = 0x0005,
+ IMAGE_REL_AMD64_REL32_2 = 0x0006,
+ IMAGE_REL_AMD64_REL32_3 = 0x0007,
+ IMAGE_REL_AMD64_REL32_4 = 0x0008,
+ IMAGE_REL_AMD64_REL32_5 = 0x0009,
+ IMAGE_REL_AMD64_SECTION = 0x000A,
+ IMAGE_REL_AMD64_SECREL = 0x000B,
+ IMAGE_REL_AMD64_SECREL7 = 0x000C,
+ IMAGE_REL_AMD64_TOKEN = 0x000D,
+ IMAGE_REL_AMD64_SREL32 = 0x000E,
+ IMAGE_REL_AMD64_PAIR = 0x000F,
+ IMAGE_REL_AMD64_SSPAN32 = 0x0010
+ };
+
+ enum RelocationTypesARM {
+ IMAGE_REL_ARM_ABSOLUTE = 0x0000,
+ IMAGE_REL_ARM_ADDR32 = 0x0001,
+ IMAGE_REL_ARM_ADDR32NB = 0x0002,
+ IMAGE_REL_ARM_BRANCH24 = 0x0003,
+ IMAGE_REL_ARM_BRANCH11 = 0x0004,
+ IMAGE_REL_ARM_TOKEN = 0x0005,
+ IMAGE_REL_ARM_BLX24 = 0x0008,
+ IMAGE_REL_ARM_BLX11 = 0x0009,
+ IMAGE_REL_ARM_SECTION = 0x000E,
+ IMAGE_REL_ARM_SECREL = 0x000F,
+ IMAGE_REL_ARM_MOV32A = 0x0010,
+ IMAGE_REL_ARM_MOV32T = 0x0011,
+ IMAGE_REL_ARM_BRANCH20T = 0x0012,
+ IMAGE_REL_ARM_BRANCH24T = 0x0014,
+ IMAGE_REL_ARM_BLX23T = 0x0015
+ };
+
+ enum COMDATType {
+ IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
+ IMAGE_COMDAT_SELECT_ANY,
+ IMAGE_COMDAT_SELECT_SAME_SIZE,
+ IMAGE_COMDAT_SELECT_EXACT_MATCH,
+ IMAGE_COMDAT_SELECT_ASSOCIATIVE,
+ IMAGE_COMDAT_SELECT_LARGEST,
+ IMAGE_COMDAT_SELECT_NEWEST
+ };
+
+ // Auxiliary Symbol Formats
+ struct AuxiliaryFunctionDefinition {
+ uint32_t TagIndex;
+ uint32_t TotalSize;
+ uint32_t PointerToLinenumber;
+ uint32_t PointerToNextFunction;
+ char unused[2];
+ };
+
+ struct AuxiliarybfAndefSymbol {
+ uint8_t unused1[4];
+ uint16_t Linenumber;
+ uint8_t unused2[6];
+ uint32_t PointerToNextFunction;
+ uint8_t unused3[2];
+ };
+
+ struct AuxiliaryWeakExternal {
+ uint32_t TagIndex;
+ uint32_t Characteristics;
+ uint8_t unused[10];
+ };
+
+ /// These are not documented in the spec, but are located in WinNT.h.
+ enum WeakExternalCharacteristics {
+ IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1,
+ IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2,
+ IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
+ };
+
+ struct AuxiliarySectionDefinition {
+ uint32_t Length;
+ uint16_t NumberOfRelocations;
+ uint16_t NumberOfLinenumbers;
+ uint32_t CheckSum;
+ uint32_t Number;
+ uint8_t Selection;
+ char unused;
+ };
+
+ struct AuxiliaryCLRToken {
+ uint8_t AuxType;
+ uint8_t unused1;
+ uint32_t SymbolTableIndex;
+ char unused2[12];
+ };
+
+ union Auxiliary {
+ AuxiliaryFunctionDefinition FunctionDefinition;
+ AuxiliarybfAndefSymbol bfAndefSymbol;
+ AuxiliaryWeakExternal WeakExternal;
+ AuxiliarySectionDefinition SectionDefinition;
+ };
+
+ /// @brief The Import Directory Table.
+ ///
+ /// There is a single array of these and one entry per imported DLL.
+ struct ImportDirectoryTableEntry {
+ uint32_t ImportLookupTableRVA;
+ uint32_t TimeDateStamp;
+ uint32_t ForwarderChain;
+ uint32_t NameRVA;
+ uint32_t ImportAddressTableRVA;
+ };
+
+ /// @brief The PE32 Import Lookup Table.
+ ///
+ /// There is an array of these for each imported DLL. It represents either
+  /// the ordinal to import from the target DLL, or a name to look up and import
+ /// from the target DLL.
+ ///
+ /// This also happens to be the same format used by the Import Address Table
+ /// when it is initially written out to the image.
+ struct ImportLookupTableEntry32 {
+ uint32_t data;
+
+ /// @brief Is this entry specified by ordinal, or name?
+ bool isOrdinal() const { return data & 0x80000000; }
+
+ /// @brief Get the ordinal value of this entry. isOrdinal must be true.
+ uint16_t getOrdinal() const {
+ assert(isOrdinal() && "ILT entry is not an ordinal!");
+ return data & 0xFFFF;
+ }
+
+ /// @brief Set the ordinal value and set isOrdinal to true.
+ void setOrdinal(uint16_t o) {
+ data = o;
+ data |= 0x80000000;
+ }
+
+ /// @brief Get the Hint/Name entry RVA. isOrdinal must be false.
+ uint32_t getHintNameRVA() const {
+ assert(!isOrdinal() && "ILT entry is not a Hint/Name RVA!");
+ return data;
+ }
+
+ /// @brief Set the Hint/Name entry RVA and set isOrdinal to false.
+ void setHintNameRVA(uint32_t rva) { data = rva; }
+ };
+
+ /// @brief The DOS compatible header at the front of all PEs.
+ struct DOSHeader {
+ uint16_t Magic;
+ uint16_t UsedBytesInTheLastPage;
+ uint16_t FileSizeInPages;
+ uint16_t NumberOfRelocationItems;
+ uint16_t HeaderSizeInParagraphs;
+ uint16_t MinimumExtraParagraphs;
+ uint16_t MaximumExtraParagraphs;
+ uint16_t InitialRelativeSS;
+ uint16_t InitialSP;
+ uint16_t Checksum;
+ uint16_t InitialIP;
+ uint16_t InitialRelativeCS;
+ uint16_t AddressOfRelocationTable;
+ uint16_t OverlayNumber;
+ uint16_t Reserved[4];
+ uint16_t OEMid;
+ uint16_t OEMinfo;
+ uint16_t Reserved2[10];
+ uint32_t AddressOfNewExeHeader;
+ };
+
+ struct PE32Header {
+ enum {
+ PE32 = 0x10b,
+ PE32_PLUS = 0x20b
+ };
+
+ uint16_t Magic;
+ uint8_t MajorLinkerVersion;
+ uint8_t MinorLinkerVersion;
+ uint32_t SizeOfCode;
+ uint32_t SizeOfInitializedData;
+ uint32_t SizeOfUninitializedData;
+ uint32_t AddressOfEntryPoint; // RVA
+ uint32_t BaseOfCode; // RVA
+ uint32_t BaseOfData; // RVA
+ uint32_t ImageBase;
+ uint32_t SectionAlignment;
+ uint32_t FileAlignment;
+ uint16_t MajorOperatingSystemVersion;
+ uint16_t MinorOperatingSystemVersion;
+ uint16_t MajorImageVersion;
+ uint16_t MinorImageVersion;
+ uint16_t MajorSubsystemVersion;
+ uint16_t MinorSubsystemVersion;
+ uint32_t Win32VersionValue;
+ uint32_t SizeOfImage;
+ uint32_t SizeOfHeaders;
+ uint32_t CheckSum;
+ uint16_t Subsystem;
+ // FIXME: This should be DllCharacteristics to match the COFF spec.
+ uint16_t DLLCharacteristics;
+ uint32_t SizeOfStackReserve;
+ uint32_t SizeOfStackCommit;
+ uint32_t SizeOfHeapReserve;
+ uint32_t SizeOfHeapCommit;
+ uint32_t LoaderFlags;
+ // FIXME: This should be NumberOfRvaAndSizes to match the COFF spec.
+ uint32_t NumberOfRvaAndSize;
+ };
+
+ struct DataDirectory {
+ uint32_t RelativeVirtualAddress;
+ uint32_t Size;
+ };
+
+ enum DataDirectoryIndex {
+ EXPORT_TABLE = 0,
+ IMPORT_TABLE,
+ RESOURCE_TABLE,
+ EXCEPTION_TABLE,
+ CERTIFICATE_TABLE,
+ BASE_RELOCATION_TABLE,
+ DEBUG,
+ ARCHITECTURE,
+ GLOBAL_PTR,
+ TLS_TABLE,
+ LOAD_CONFIG_TABLE,
+ BOUND_IMPORT,
+ IAT,
+ DELAY_IMPORT_DESCRIPTOR,
+ CLR_RUNTIME_HEADER,
+
+ NUM_DATA_DIRECTORIES
+ };
+
+ enum WindowsSubsystem {
+ IMAGE_SUBSYSTEM_UNKNOWN = 0, ///< An unknown subsystem.
+ IMAGE_SUBSYSTEM_NATIVE = 1, ///< Device drivers and native Windows processes
+ IMAGE_SUBSYSTEM_WINDOWS_GUI = 2, ///< The Windows GUI subsystem.
+ IMAGE_SUBSYSTEM_WINDOWS_CUI = 3, ///< The Windows character subsystem.
+    IMAGE_SUBSYSTEM_OS2_CUI = 5, ///< The OS/2 character subsystem.
+ IMAGE_SUBSYSTEM_POSIX_CUI = 7, ///< The POSIX character subsystem.
+ IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 8, ///< Native Windows 9x driver.
+ IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 9, ///< Windows CE.
+ IMAGE_SUBSYSTEM_EFI_APPLICATION = 10, ///< An EFI application.
+ IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER = 11, ///< An EFI driver with boot
+ /// services.
+ IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER = 12, ///< An EFI driver with run-time
+ /// services.
+ IMAGE_SUBSYSTEM_EFI_ROM = 13, ///< An EFI ROM image.
+ IMAGE_SUBSYSTEM_XBOX = 14, ///< XBOX.
+ IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION = 16 ///< A BCD application.
+ };
+
+ enum DLLCharacteristics {
+ /// ASLR with 64 bit address space.
+ IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020,
+ /// DLL can be relocated at load time.
+ IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040,
+ /// Code integrity checks are enforced.
+ IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY = 0x0080,
+    /// Image is NX compatible.
+ IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100,
+ /// Isolation aware, but do not isolate the image.
+ IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION = 0x0200,
+ /// Does not use structured exception handling (SEH). No SEH handler may be
+ /// called in this image.
+ IMAGE_DLL_CHARACTERISTICS_NO_SEH = 0x0400,
+ /// Do not bind the image.
+ IMAGE_DLL_CHARACTERISTICS_NO_BIND = 0x0800,
+    /// Image should execute in an AppContainer.
+    IMAGE_DLL_CHARACTERISTICS_APPCONTAINER = 0x1000,
+    /// A WDM driver.
+    IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER = 0x2000,
+    /// Image supports Control Flow Guard.
+ IMAGE_DLL_CHARACTERISTICS_GUARD_CF = 0x4000,
+ /// Terminal Server aware.
+ IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE = 0x8000
+ };
+
+ enum DebugType {
+ IMAGE_DEBUG_TYPE_UNKNOWN = 0,
+ IMAGE_DEBUG_TYPE_COFF = 1,
+ IMAGE_DEBUG_TYPE_CODEVIEW = 2,
+ IMAGE_DEBUG_TYPE_FPO = 3,
+ IMAGE_DEBUG_TYPE_MISC = 4,
+ IMAGE_DEBUG_TYPE_EXCEPTION = 5,
+ IMAGE_DEBUG_TYPE_FIXUP = 6,
+ IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7,
+ IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8,
+ IMAGE_DEBUG_TYPE_BORLAND = 9,
+ IMAGE_DEBUG_TYPE_CLSID = 11
+ };
+
+ enum BaseRelocationType {
+ IMAGE_REL_BASED_ABSOLUTE = 0,
+ IMAGE_REL_BASED_HIGH = 1,
+ IMAGE_REL_BASED_LOW = 2,
+ IMAGE_REL_BASED_HIGHLOW = 3,
+ IMAGE_REL_BASED_HIGHADJ = 4,
+ IMAGE_REL_BASED_MIPS_JMPADDR = 5,
+ IMAGE_REL_BASED_ARM_MOV32A = 5,
+ IMAGE_REL_BASED_ARM_MOV32T = 7,
+ IMAGE_REL_BASED_MIPS_JMPADDR16 = 9,
+ IMAGE_REL_BASED_DIR64 = 10
+ };
+
+ enum ImportType {
+ IMPORT_CODE = 0,
+ IMPORT_DATA = 1,
+ IMPORT_CONST = 2
+ };
+
+ enum ImportNameType {
+ /// Import is by ordinal. This indicates that the value in the Ordinal/Hint
+ /// field of the import header is the import's ordinal. If this constant is
+ /// not specified, then the Ordinal/Hint field should always be interpreted
+ /// as the import's hint.
+ IMPORT_ORDINAL = 0,
+ /// The import name is identical to the public symbol name
+ IMPORT_NAME = 1,
+ /// The import name is the public symbol name, but skipping the leading ?,
+ /// @, or optionally _.
+ IMPORT_NAME_NOPREFIX = 2,
+ /// The import name is the public symbol name, but skipping the leading ?,
+ /// @, or optionally _, and truncating at the first @.
+ IMPORT_NAME_UNDECORATE = 3
+ };
+
+ struct ImportHeader {
+ uint16_t Sig1; ///< Must be IMAGE_FILE_MACHINE_UNKNOWN (0).
+ uint16_t Sig2; ///< Must be 0xFFFF.
+ uint16_t Version;
+ uint16_t Machine;
+ uint32_t TimeDateStamp;
+ uint32_t SizeOfData;
+ uint16_t OrdinalHint;
+ uint16_t TypeInfo;
+
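+    // TypeInfo packs the ImportType in bits 0-1 and the ImportNameType in
+    // bits 2-4.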
+ ImportType getType() const {
+ return static_cast<ImportType>(TypeInfo & 0x3);
+ }
+
+    ImportNameType getNameType() const {
+      return static_cast<ImportNameType>((TypeInfo & 0x1C) >> 2);
+    }
+ };
+
+ enum CodeViewIdentifiers {
+ DEBUG_LINE_TABLES_HAVE_COLUMN_RECORDS = 0x1,
+ DEBUG_SECTION_MAGIC = 0x4,
+ DEBUG_SYMBOL_SUBSECTION = 0xF1,
+ DEBUG_LINE_TABLE_SUBSECTION = 0xF2,
+ DEBUG_STRING_TABLE_SUBSECTION = 0xF3,
+ DEBUG_INDEX_SUBSECTION = 0xF4,
+
+ // Symbol subsections are split into records of different types.
+ DEBUG_SYMBOL_TYPE_PROC_START = 0x1147,
+ DEBUG_SYMBOL_TYPE_PROC_END = 0x114F
+ };
+
+ inline bool isReservedSectionNumber(int32_t SectionNumber) {
+ return SectionNumber <= 0;
+ }
+
+} // End namespace COFF.
+} // End namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/COM.h b/contrib/llvm/include/llvm/Support/COM.h
new file mode 100644
index 0000000..a2d5a7a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/COM.h
@@ -0,0 +1,36 @@
+//===- llvm/Support/COM.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Provides a library for accessing COM functionality of the Host OS.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COM_H
+#define LLVM_SUPPORT_COM_H
+
+namespace llvm {
+namespace sys {
+
+enum class COMThreadingMode { SingleThreaded, MultiThreaded };
+
+class InitializeCOMRAII {
+public:
+ explicit InitializeCOMRAII(COMThreadingMode Threading,
+ bool SpeedOverMemory = false);
+ ~InitializeCOMRAII();
+
+private:
+ InitializeCOMRAII(const InitializeCOMRAII &) = delete;
+ void operator=(const InitializeCOMRAII &) = delete;
+};
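+
+// Typical usage, sketched: construct an instance on the stack so COM stays
+// initialized for the enclosing scope.
+//
+//   sys::InitializeCOMRAII COM(sys::COMThreadingMode::MultiThreaded);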
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Capacity.h b/contrib/llvm/include/llvm/Support/Capacity.h
new file mode 100644
index 0000000..7460f98
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Capacity.h
@@ -0,0 +1,32 @@
+//===--- Capacity.h - Generic computation of ADT memory use -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the capacity function that computes the amount of
+// memory used by an ADT.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CAPACITY_H
+#define LLVM_SUPPORT_CAPACITY_H
+
+#include <cstddef>
+
+namespace llvm {
+
+template <typename T>
+static inline size_t capacity_in_bytes(const T &x) {
+ // This default definition of capacity should work for things like std::vector
+ // and friends. More specialized versions will work for others.
+ return x.capacity() * sizeof(typename T::value_type);
+}
+
+} // end namespace llvm
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/Support/Casting.h b/contrib/llvm/include/llvm/Support/Casting.h
new file mode 100644
index 0000000..6ba5efa
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Casting.h
@@ -0,0 +1,326 @@
+//===-- llvm/Support/Casting.h - Allow flexible, checked, casts -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the isa<X>(), cast<X>(), dyn_cast<X>(), cast_or_null<X>(),
+// and dyn_cast_or_null<X>() templates.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CASTING_H
+#define LLVM_SUPPORT_CASTING_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <cassert>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// isa<x> Support Templates
+//===----------------------------------------------------------------------===//
+
+// Define a template that can be specialized by smart pointers to reflect the
+// fact that they are automatically dereferenced, and are not involved with the
+// template selection process... the default implementation is a noop.
+//
+template<typename From> struct simplify_type {
+ typedef From SimpleType; // The real type this represents...
+
+ // An accessor to get the real value...
+ static SimpleType &getSimplifiedValue(From &Val) { return Val; }
+};
+
+template<typename From> struct simplify_type<const From> {
+ typedef typename simplify_type<From>::SimpleType NonConstSimpleType;
+ typedef typename add_const_past_pointer<NonConstSimpleType>::type
+ SimpleType;
+ typedef typename add_lvalue_reference_if_not_pointer<SimpleType>::type
+ RetType;
+ static RetType getSimplifiedValue(const From& Val) {
+ return simplify_type<From>::getSimplifiedValue(const_cast<From&>(Val));
+ }
+};
+
+// The core of the implementation of isa<X> is here; To and From should be
+// the names of classes. This template can be specialized to customize the
+// implementation of isa<> without rewriting it from scratch.
+template <typename To, typename From, typename Enabler = void>
+struct isa_impl {
+ static inline bool doit(const From &Val) {
+ return To::classof(&Val);
+ }
+};
+
+/// \brief Always allow upcasts, and perform no dynamic check for them.
+template <typename To, typename From>
+struct isa_impl<
+ To, From, typename std::enable_if<std::is_base_of<To, From>::value>::type> {
+ static inline bool doit(const From &) { return true; }
+};
+
+template <typename To, typename From> struct isa_impl_cl {
+ static inline bool doit(const From &Val) {
+ return isa_impl<To, From>::doit(Val);
+ }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From> {
+ static inline bool doit(const From &Val) {
+ return isa_impl<To, From>::doit(Val);
+ }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, From*> {
+ static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
+ return isa_impl<To, From>::doit(*Val);
+ }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, From*const> {
+ static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
+ return isa_impl<To, From>::doit(*Val);
+ }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From*> {
+ static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
+ return isa_impl<To, From>::doit(*Val);
+ }
+};
+
+template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
+ static inline bool doit(const From *Val) {
+ assert(Val && "isa<> used on a null pointer");
+ return isa_impl<To, From>::doit(*Val);
+ }
+};
+
+template<typename To, typename From, typename SimpleFrom>
+struct isa_impl_wrap {
+ // When From != SimplifiedType, we can simplify the type some more by using
+ // the simplify_type template.
+ static bool doit(const From &Val) {
+ return isa_impl_wrap<To, SimpleFrom,
+ typename simplify_type<SimpleFrom>::SimpleType>::doit(
+ simplify_type<const From>::getSimplifiedValue(Val));
+ }
+};
+
+template<typename To, typename FromTy>
+struct isa_impl_wrap<To, FromTy, FromTy> {
+ // When From == SimpleType, we are as simple as we are going to get.
+ static bool doit(const FromTy &Val) {
+ return isa_impl_cl<To,FromTy>::doit(Val);
+ }
+};
+
+// isa<X> - Return true if the parameter to the template is an instance of the
+// template type argument. Used like this:
+//
+// if (isa<Type>(myVal)) { ... }
+//
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline bool isa(const Y &Val) {
+ return isa_impl_wrap<X, const Y,
+ typename simplify_type<const Y>::SimpleType>::doit(Val);
+}
+
+//===----------------------------------------------------------------------===//
+// cast<x> Support Templates
+//===----------------------------------------------------------------------===//
+
+template<class To, class From> struct cast_retty;
+
+
+// Calculate what type the 'cast' function should return, based on a requested
+// type of To and a source type of From.
+template<class To, class From> struct cast_retty_impl {
+ typedef To& ret_type; // Normal case, return Ty&
+};
+template<class To, class From> struct cast_retty_impl<To, const From> {
+ typedef const To &ret_type; // Normal case, return Ty&
+};
+
+template<class To, class From> struct cast_retty_impl<To, From*> {
+ typedef To* ret_type; // Pointer arg case, return Ty*
+};
+
+template<class To, class From> struct cast_retty_impl<To, const From*> {
+ typedef const To* ret_type; // Constant pointer arg case, return const Ty*
+};
+
+template<class To, class From> struct cast_retty_impl<To, const From*const> {
+ typedef const To* ret_type; // Constant pointer arg case, return const Ty*
+};
+
+
+template<class To, class From, class SimpleFrom>
+struct cast_retty_wrap {
+ // When the simplified type and the from type are not the same, use the type
+ // simplifier to reduce the type, then reuse cast_retty_impl to get the
+ // resultant type.
+ typedef typename cast_retty<To, SimpleFrom>::ret_type ret_type;
+};
+
+template<class To, class FromTy>
+struct cast_retty_wrap<To, FromTy, FromTy> {
+ // When the simplified type is equal to the from type, use it directly.
+ typedef typename cast_retty_impl<To,FromTy>::ret_type ret_type;
+};
+
+template<class To, class From>
+struct cast_retty {
+ typedef typename cast_retty_wrap<To, From,
+ typename simplify_type<From>::SimpleType>::ret_type ret_type;
+};
+
+// Ensure the non-simple values are converted using the simplify_type template
+// that may be specialized by smart pointers...
+//
+template<class To, class From, class SimpleFrom> struct cast_convert_val {
+ // This is not a simple type, use the template to simplify it...
+ static typename cast_retty<To, From>::ret_type doit(From &Val) {
+ return cast_convert_val<To, SimpleFrom,
+ typename simplify_type<SimpleFrom>::SimpleType>::doit(
+ simplify_type<From>::getSimplifiedValue(Val));
+ }
+};
+
+template<class To, class FromTy> struct cast_convert_val<To,FromTy,FromTy> {
+ // This _is_ a simple type, just cast it.
+ static typename cast_retty<To, FromTy>::ret_type doit(const FromTy &Val) {
+ typename cast_retty<To, FromTy>::ret_type Res2
+ = (typename cast_retty<To, FromTy>::ret_type)const_cast<FromTy&>(Val);
+ return Res2;
+ }
+};
+
+template <class X> struct is_simple_type {
+ static const bool value =
+ std::is_same<X, typename simplify_type<X>::SimpleType>::value;
+};
+
+// cast<X> - Return the argument parameter cast to the specified type. This
+// casting operator asserts that the type is correct, so it does not return null
+// on failure. It does not allow a null argument (use cast_or_null for that).
+// It is typically used like this:
+//
+// cast<Instruction>(myVal)->getParent()
+//
+template <class X, class Y>
+inline typename std::enable_if<!is_simple_type<Y>::value,
+ typename cast_retty<X, const Y>::ret_type>::type
+cast(const Y &Val) {
+ assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+ return cast_convert_val<
+ X, const Y, typename simplify_type<const Y>::SimpleType>::doit(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, Y>::ret_type cast(Y &Val) {
+ assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+ return cast_convert_val<X, Y,
+ typename simplify_type<Y>::SimpleType>::doit(Val);
+}
+
+template <class X, class Y>
+inline typename cast_retty<X, Y *>::ret_type cast(Y *Val) {
+ assert(isa<X>(Val) && "cast<Ty>() argument of incompatible type!");
+ return cast_convert_val<X, Y*,
+ typename simplify_type<Y*>::SimpleType>::doit(Val);
+}
+
+// cast_or_null<X> - Functionally identical to cast, except that a null value is
+// accepted.
+//
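+// A usage sketch (illustrative only, not part of the original header; 'myPtr'
+// is a hypothetical pointer that may legitimately be null):
+//
+//   if (Instruction *I = cast_or_null<Instruction>(myPtr))
+//     ...; // null in -> null out; a non-null input must be an Instruction
+//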
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
+ !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
+cast_or_null(const Y &Val) {
+ if (!Val)
+ return nullptr;
+ assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+ return cast<X>(Val);
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
+ !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
+cast_or_null(Y &Val) {
+ if (!Val)
+ return nullptr;
+ assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+ return cast<X>(Val);
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
+cast_or_null(Y *Val) {
+ if (!Val) return nullptr;
+ assert(isa<X>(Val) && "cast_or_null<Ty>() argument of incompatible type!");
+ return cast<X>(Val);
+}
+
+// dyn_cast<X> - Return the argument parameter cast to the specified type. This
+// casting operator returns null if the argument is of the wrong type, so it can
+// be used to test for a type as well as cast if successful. This should be
+// used in the context of an if statement like this:
+//
+// if (const Instruction *I = dyn_cast<Instruction>(myVal)) { ... }
+//
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
+ !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
+dyn_cast(const Y &Val) {
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y>::ret_type
+dyn_cast(Y &Val) {
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
+dyn_cast(Y *Val) {
+ return isa<X>(Val) ? cast<X>(Val) : nullptr;
+}
+
+// dyn_cast_or_null<X> - Functionally identical to dyn_cast, except that a null
+// value is accepted.
+//
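+// A usage sketch (illustrative only; 'maybeNull' is a hypothetical pointer
+// that may be null or may point to the wrong type):
+//
+//   if (Instruction *I = dyn_cast_or_null<Instruction>(maybeNull)) { ... }
+//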
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
+ !is_simple_type<Y>::value, typename cast_retty<X, const Y>::ret_type>::type
+dyn_cast_or_null(const Y &Val) {
+ return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename std::enable_if<
+ !is_simple_type<Y>::value, typename cast_retty<X, Y>::ret_type>::type
+dyn_cast_or_null(Y &Val) {
+ return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+template <class X, class Y>
+LLVM_ATTRIBUTE_UNUSED_RESULT inline typename cast_retty<X, Y *>::ret_type
+dyn_cast_or_null(Y *Val) {
+ return (Val && isa<X>(Val)) ? cast<X>(Val) : nullptr;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CodeGen.h b/contrib/llvm/include/llvm/Support/CodeGen.h
new file mode 100644
index 0000000..243f2dd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/CodeGen.h
@@ -0,0 +1,95 @@
+//===-- llvm/Support/CodeGen.h - CodeGen Concepts ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some types which capture code generation concepts. For
+// example, the relocation model.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CODEGEN_H
+#define LLVM_SUPPORT_CODEGEN_H
+
+#include "llvm-c/TargetMachine.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+
+ // Relocation model types.
+ namespace Reloc {
+ enum Model { Default, Static, PIC_, DynamicNoPIC };
+ }
+
+ // Code model types.
+ namespace CodeModel {
+ enum Model { Default, JITDefault, Small, Kernel, Medium, Large };
+ }
+
+ namespace PICLevel {
+ enum Level { Default=0, Small=1, Large=2 };
+ }
+
+ // TLS models.
+ namespace TLSModel {
+ enum Model {
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec
+ };
+ }
+
+ // Code generation optimization level.
+ namespace CodeGenOpt {
+ enum Level {
+ None, // -O0
+ Less, // -O1
+ Default, // -O2, -Os
+ Aggressive // -O3
+ };
+ }
+
+ // Create wrappers for C Binding types (see CBindingWrapping.h).
+ inline CodeModel::Model unwrap(LLVMCodeModel Model) {
+ switch (Model) {
+ case LLVMCodeModelDefault:
+ return CodeModel::Default;
+ case LLVMCodeModelJITDefault:
+ return CodeModel::JITDefault;
+ case LLVMCodeModelSmall:
+ return CodeModel::Small;
+ case LLVMCodeModelKernel:
+ return CodeModel::Kernel;
+ case LLVMCodeModelMedium:
+ return CodeModel::Medium;
+ case LLVMCodeModelLarge:
+ return CodeModel::Large;
+ }
+ return CodeModel::Default;
+ }
+
+ inline LLVMCodeModel wrap(CodeModel::Model Model) {
+ switch (Model) {
+ case CodeModel::Default:
+ return LLVMCodeModelDefault;
+ case CodeModel::JITDefault:
+ return LLVMCodeModelJITDefault;
+ case CodeModel::Small:
+ return LLVMCodeModelSmall;
+ case CodeModel::Kernel:
+ return LLVMCodeModelKernel;
+ case CodeModel::Medium:
+ return LLVMCodeModelMedium;
+ case CodeModel::Large:
+ return LLVMCodeModelLarge;
+ }
+ llvm_unreachable("Bad CodeModel!");
+ }
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CommandLine.h b/contrib/llvm/include/llvm/Support/CommandLine.h
new file mode 100644
index 0000000..943d2df
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/CommandLine.h
@@ -0,0 +1,1755 @@
+//===- llvm/Support/CommandLine.h - Command line handler --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements a command line argument processor that is useful when
+// creating a tool. It provides a simple, minimalistic interface that is easily
+// extensible and supports nonlocal (library) command line options.
+//
+// Note that rather than trying to figure out what this code does, you should
+// read the library documentation located in docs/CommandLine.html or look at
+// the many example usages in tools/*/*.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMMANDLINE_H
+#define LLVM_SUPPORT_COMMANDLINE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <climits>
+#include <cstdarg>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class StringSaver;
+
+/// cl Namespace - This namespace contains all of the command line option
+/// processing machinery. It is intentionally a short name to make qualified
+/// usage concise.
+namespace cl {
+
+//===----------------------------------------------------------------------===//
+// ParseCommandLineOptions - Command line option processing entry point.
+//
+void ParseCommandLineOptions(int argc, const char *const *argv,
+ const char *Overview = nullptr);
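+//
+// A typical call from a tool's main() (illustrative sketch, not part of the
+// original header):
+//
+//   int main(int argc, char **argv) {
+//     cl::ParseCommandLineOptions(argc, argv, "my tool overview\n");
+//     ...
+//   }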
+
+//===----------------------------------------------------------------------===//
+// ParseEnvironmentOptions - Environment variable option processing alternate
+// entry point.
+//
+void ParseEnvironmentOptions(const char *progName, const char *envvar,
+ const char *Overview = nullptr);
+
+///===---------------------------------------------------------------------===//
+/// SetVersionPrinter - Override the default (LLVM specific) version printer
+/// used to print out the version when --version is given
+/// on the command line. This allows other systems using the
+/// CommandLine utilities to print their own version string.
+void SetVersionPrinter(void (*func)());
+
+///===---------------------------------------------------------------------===//
+/// AddExtraVersionPrinter - Add an extra printer to use in addition to the
+/// default one. This can be called multiple times,
+/// and each time it adds a new function to the list
+/// which will be called after the basic LLVM version
+/// printing is complete. Each can then add additional
+/// information specific to the tool.
+void AddExtraVersionPrinter(void (*func)());
+
+// PrintOptionValues - Print option values.
+// With -print-options print the difference between option values and defaults.
+// With -print-all-options print all option values.
+// (Currently not perfect, but best-effort.)
+void PrintOptionValues();
+
+// Forward declaration - AddLiteralOption needs to be up here to make gcc happy.
+class Option;
+
+/// \brief Adds a new option for parsing and provides the option it refers to.
+///
+/// \param O pointer to the option
+/// \param Name the string name for the option to handle during parsing
+///
+/// Literal options are used by some parsers to register special option values.
+/// This is how the PassNameParser registers pass names for opt.
+void AddLiteralOption(Option &O, const char *Name);
+
+//===----------------------------------------------------------------------===//
+// Flags permitted to be passed to command line arguments
+//
+
+enum NumOccurrencesFlag { // Flags for the number of occurrences allowed
+ Optional = 0x00, // Zero or One occurrence
+ ZeroOrMore = 0x01, // Zero or more occurrences allowed
+ Required = 0x02, // One occurrence required
+ OneOrMore = 0x03, // One or more occurrences required
+
+ // ConsumeAfter - Indicates that this option is fed anything that follows the
+ // last positional argument required by the application (it is an error to
+ // use a ConsumeAfter option when the application has no positional
+ // arguments).
+ // Thus, for example, all arguments to LLI are processed until a filename is
+ // found. Once a filename is found, all of the succeeding arguments are
+ // passed, unprocessed, to the ConsumeAfter option.
+ //
+ ConsumeAfter = 0x04
+};
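+
+// A sketch of a ConsumeAfter list (illustrative; modeled on interpreter-style
+// tools such as lli):
+//
+//   static cl::list<std::string>
+//   Argv(cl::ConsumeAfter, cl::desc("<program arguments>..."));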
+
+enum ValueExpected { // Is a value required for the option?
+ // zero reserved for the unspecified value
+ ValueOptional = 0x01, // The value can appear... or not
+ ValueRequired = 0x02, // The value is required to appear!
+ ValueDisallowed = 0x03 // A value may not be specified (for flags)
+};
+
+enum OptionHidden { // Control whether -help shows this option
+ NotHidden = 0x00, // Option included in -help & -help-hidden
+ Hidden = 0x01, // -help doesn't, but -help-hidden does
+ ReallyHidden = 0x02 // Neither -help nor -help-hidden show this arg
+};
+
+// Formatting flags - This controls special features that the option might have
+// that cause it to be parsed differently...
+//
+// Prefix - This option allows arguments that are otherwise unrecognized to be
+// matched by options that are a prefix of the actual value. This is useful for
+// cases like a linker, where options are typically of the form '-lfoo' or
+// '-L../../include' where -l or -L are the actual flags. When prefix is
+// enabled, and used, the value for the flag comes from the suffix of the
+// argument.
+//
+// Grouping - With this option enabled, multiple letter options are allowed to
+// bunch together with only a single hyphen for the whole group. This allows
+// emulation of the behavior that ls uses for example: ls -la === ls -l -a
+//
+
+enum FormattingFlags {
+ NormalFormatting = 0x00, // Nothing special
+ Positional = 0x01, // Is a positional argument, no '-' required
+ Prefix = 0x02, // Can this option directly prefix its value?
+ Grouping = 0x03 // Can this option group with other options?
+};
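+
+// For example, a linker-style prefix option could be declared like this
+// (illustrative sketch; the option name is hypothetical):
+//
+//   static cl::list<std::string>
+//   Libraries("l", cl::Prefix, cl::desc("Library to link"));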
+
+enum MiscFlags { // Miscellaneous flags to adjust argument
+ CommaSeparated = 0x01, // Should this cl::list split between commas?
+ PositionalEatsArgs = 0x02, // Should this positional cl::list eat -args?
+ Sink = 0x04 // Should this cl::list eat all unknown options?
+};
+
+//===----------------------------------------------------------------------===//
+// Option Category class
+//
+class OptionCategory {
+private:
+ const char *const Name;
+ const char *const Description;
+ void registerCategory();
+
+public:
+ OptionCategory(const char *const Name,
+ const char *const Description = nullptr)
+ : Name(Name), Description(Description) {
+ registerCategory();
+ }
+ const char *getName() const { return Name; }
+ const char *getDescription() const { return Description; }
+};
+
+// The general Option Category (used as default category).
+extern OptionCategory GeneralCategory;
+
+//===----------------------------------------------------------------------===//
+// Option Base class
+//
+class alias;
+class Option {
+ friend class alias;
+
+ // handleOccurrence - Overridden by subclasses to handle the value passed into
+ // an argument. Should return true if there was an error processing the
+ // argument and the program should exit.
+ //
+ virtual bool handleOccurrence(unsigned pos, StringRef ArgName,
+ StringRef Arg) = 0;
+
+ virtual enum ValueExpected getValueExpectedFlagDefault() const {
+ return ValueOptional;
+ }
+
+ // Out of line virtual function to provide home for the class.
+ virtual void anchor();
+
+ int NumOccurrences; // The number of times specified
+ // Occurrences, HiddenFlag, and Formatting are all enum types, but they are
+ // stored as unsigned bitfields to avoid problems with signed enums in
+ // bitfields.
+ unsigned Occurrences : 3; // enum NumOccurrencesFlag
+ // not using the enum type for 'Value' because zero is an implementation
+ // detail representing the non-value
+ unsigned Value : 2;
+ unsigned HiddenFlag : 2; // enum OptionHidden
+ unsigned Formatting : 2; // enum FormattingFlags
+ unsigned Misc : 3;
+ unsigned Position; // Position of last occurrence of the option
+ unsigned AdditionalVals; // Greater than 0 for multi-valued option.
+
+public:
+ StringRef ArgStr; // The argument string itself (ex: "help", "o")
+ StringRef HelpStr; // The descriptive text message for -help
+ StringRef ValueStr; // String describing what the value of this option is
+ OptionCategory *Category; // The Category this option belongs to
+ bool FullyInitialized; // Has addArgument been called?
+
+ inline enum NumOccurrencesFlag getNumOccurrencesFlag() const {
+ return (enum NumOccurrencesFlag)Occurrences;
+ }
+ inline enum ValueExpected getValueExpectedFlag() const {
+ return Value ? ((enum ValueExpected)Value) : getValueExpectedFlagDefault();
+ }
+ inline enum OptionHidden getOptionHiddenFlag() const {
+ return (enum OptionHidden)HiddenFlag;
+ }
+ inline enum FormattingFlags getFormattingFlag() const {
+ return (enum FormattingFlags)Formatting;
+ }
+ inline unsigned getMiscFlags() const { return Misc; }
+ inline unsigned getPosition() const { return Position; }
+ inline unsigned getNumAdditionalVals() const { return AdditionalVals; }
+
+ // hasArgStr - Return true if the argstr != ""
+ bool hasArgStr() const { return !ArgStr.empty(); }
+
+ //===------------------------------------------------------------------===//
+ // Accessor functions set by OptionModifiers
+ //
+ void setArgStr(StringRef S);
+ void setDescription(StringRef S) { HelpStr = S; }
+ void setValueStr(StringRef S) { ValueStr = S; }
+ void setNumOccurrencesFlag(enum NumOccurrencesFlag Val) { Occurrences = Val; }
+ void setValueExpectedFlag(enum ValueExpected Val) { Value = Val; }
+ void setHiddenFlag(enum OptionHidden Val) { HiddenFlag = Val; }
+ void setFormattingFlag(enum FormattingFlags V) { Formatting = V; }
+ void setMiscFlag(enum MiscFlags M) { Misc |= M; }
+ void setPosition(unsigned pos) { Position = pos; }
+ void setCategory(OptionCategory &C) { Category = &C; }
+
+protected:
+ explicit Option(enum NumOccurrencesFlag OccurrencesFlag,
+ enum OptionHidden Hidden)
+ : NumOccurrences(0), Occurrences(OccurrencesFlag), Value(0),
+ HiddenFlag(Hidden), Formatting(NormalFormatting), Misc(0), Position(0),
+ AdditionalVals(0), ArgStr(""), HelpStr(""), ValueStr(""),
+ Category(&GeneralCategory), FullyInitialized(false) {}
+
+ inline void setNumAdditionalVals(unsigned n) { AdditionalVals = n; }
+
+public:
+ // addArgument - Register this argument with the commandline system.
+ //
+ void addArgument();
+
+ /// Unregisters this option from the CommandLine system.
+ ///
+ /// This option must have been the last option registered.
+ /// For testing purposes only.
+ void removeArgument();
+
+ // Return the width of the option tag for printing...
+ virtual size_t getOptionWidth() const = 0;
+
+ // printOptionInfo - Print out information about this option. The
+ // to-be-maintained width is specified.
+ //
+ virtual void printOptionInfo(size_t GlobalWidth) const = 0;
+
+ virtual void printOptionValue(size_t GlobalWidth, bool Force) const = 0;
+
+ virtual void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}
+
+ // addOccurrence - Wrapper around handleOccurrence that enforces Flags.
+ //
+ virtual bool addOccurrence(unsigned pos, StringRef ArgName, StringRef Value,
+ bool MultiArg = false);
+
+ // Prints option name followed by message. Always returns true.
+ bool error(const Twine &Message, StringRef ArgName = StringRef());
+
+public:
+ inline int getNumOccurrences() const { return NumOccurrences; }
+ virtual ~Option() {}
+};
+
+//===----------------------------------------------------------------------===//
+// Command line option modifiers that can be used to modify the behavior of
+// command line option parsers...
+//
+
+// desc - Modifier to set the description shown in the -help output...
+struct desc {
+ const char *Desc;
+ desc(const char *Str) : Desc(Str) {}
+ void apply(Option &O) const { O.setDescription(Desc); }
+};
+
+// value_desc - Modifier to set the value description shown in the -help
+// output...
+struct value_desc {
+ const char *Desc;
+ value_desc(const char *Str) : Desc(Str) {}
+ void apply(Option &O) const { O.setValueStr(Desc); }
+};
+
+// init - Specify a default (initial) value for the command line argument, if
+// the default constructor for the argument type does not give you what you
+// want. This is only valid on "opt" arguments, not on "list" arguments.
+//
+template <class Ty> struct initializer {
+ const Ty &Init;
+ initializer(const Ty &Val) : Init(Val) {}
+
+ template <class Opt> void apply(Opt &O) const { O.setInitialValue(Init); }
+};
+
+template <class Ty> initializer<Ty> init(const Ty &Val) {
+ return initializer<Ty>(Val);
+}
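+
+// e.g. (illustrative sketch; 'Threshold' is a hypothetical option):
+//
+//   static cl::opt<unsigned>
+//   Threshold("threshold", cl::desc("Cutoff"), cl::init(10));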
+
+// location - Allow the user to specify which external variable they want to
+// store the results of the command line argument processing into, if they don't
+// want to store it in the option itself.
+//
+template <class Ty> struct LocationClass {
+ Ty &Loc;
+ LocationClass(Ty &L) : Loc(L) {}
+
+ template <class Opt> void apply(Opt &O) const { O.setLocation(O, Loc); }
+};
+
+template <class Ty> LocationClass<Ty> location(Ty &L) {
+ return LocationClass<Ty>(L);
+}
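+
+// e.g. (illustrative sketch; note the 'true' external-storage template
+// argument on cl::opt):
+//
+//   static bool DebugFlag; // hypothetical external variable
+//   static cl::opt<bool, true>
+//   Debug("debug", cl::desc("Enable debug output"), cl::location(DebugFlag));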
+
+// cat - Specify the Option category for the command line argument to belong
+// to.
+struct cat {
+ OptionCategory &Category;
+ cat(OptionCategory &c) : Category(c) {}
+
+ template <class Opt> void apply(Opt &O) const { O.setCategory(Category); }
+};
+
+//===----------------------------------------------------------------------===//
+// OptionValue class
+
+// Support value comparison outside the template.
+struct GenericOptionValue {
+ virtual bool compare(const GenericOptionValue &V) const = 0;
+
+protected:
+ ~GenericOptionValue() = default;
+ GenericOptionValue() = default;
+ GenericOptionValue(const GenericOptionValue&) = default;
+ GenericOptionValue &operator=(const GenericOptionValue &) = default;
+
+private:
+ virtual void anchor();
+};
+
+template <class DataType> struct OptionValue;
+
+// The default value safely does nothing. Option value printing is only
+// best-effort.
+template <class DataType, bool isClass>
+struct OptionValueBase : public GenericOptionValue {
+ // Temporary storage for argument passing.
+ typedef OptionValue<DataType> WrapperType;
+
+ bool hasValue() const { return false; }
+
+ const DataType &getValue() const { llvm_unreachable("no default value"); }
+
+ // Some options may take their value from a different data type.
+ template <class DT> void setValue(const DT & /*V*/) {}
+
+ bool compare(const DataType & /*V*/) const { return false; }
+
+ bool compare(const GenericOptionValue & /*V*/) const override {
+ return false;
+ }
+
+protected:
+ ~OptionValueBase() = default;
+};
+
+// Simple copy of the option value.
+template <class DataType> class OptionValueCopy : public GenericOptionValue {
+ DataType Value;
+ bool Valid;
+
+protected:
+ ~OptionValueCopy() = default;
+ OptionValueCopy(const OptionValueCopy&) = default;
+ OptionValueCopy &operator=(const OptionValueCopy&) = default;
+
+public:
+ OptionValueCopy() : Valid(false) {}
+
+ bool hasValue() const { return Valid; }
+
+ const DataType &getValue() const {
+ assert(Valid && "invalid option value");
+ return Value;
+ }
+
+ void setValue(const DataType &V) {
+ Valid = true;
+ Value = V;
+ }
+
+ bool compare(const DataType &V) const { return Valid && (Value != V); }
+
+ bool compare(const GenericOptionValue &V) const override {
+ const OptionValueCopy<DataType> &VC =
+ static_cast<const OptionValueCopy<DataType> &>(V);
+ if (!VC.hasValue())
+ return false;
+ return compare(VC.getValue());
+ }
+};
+
+// Non-class option values.
+template <class DataType>
+struct OptionValueBase<DataType, false> : OptionValueCopy<DataType> {
+ typedef DataType WrapperType;
+
+protected:
+ ~OptionValueBase() = default;
+ OptionValueBase() = default;
+ OptionValueBase(const OptionValueBase&) = default;
+ OptionValueBase &operator=(const OptionValueBase&) = default;
+};
+
+// Top-level option class.
+template <class DataType>
+struct OptionValue final
+ : OptionValueBase<DataType, std::is_class<DataType>::value> {
+ OptionValue() = default;
+
+ OptionValue(const DataType &V) { this->setValue(V); }
+ // Some options may take their value from a different data type.
+ template <class DT> OptionValue<DataType> &operator=(const DT &V) {
+ this->setValue(V);
+ return *this;
+ }
+};
+
+// Other safe-to-copy-by-value common option types.
+enum boolOrDefault { BOU_UNSET, BOU_TRUE, BOU_FALSE };
+template <>
+struct OptionValue<cl::boolOrDefault> final
+ : OptionValueCopy<cl::boolOrDefault> {
+ typedef cl::boolOrDefault WrapperType;
+
+ OptionValue() {}
+
+ OptionValue(const cl::boolOrDefault &V) { this->setValue(V); }
+ OptionValue<cl::boolOrDefault> &operator=(const cl::boolOrDefault &V) {
+ setValue(V);
+ return *this;
+ }
+
+private:
+ void anchor() override;
+};
+
+template <>
+struct OptionValue<std::string> final : OptionValueCopy<std::string> {
+ typedef StringRef WrapperType;
+
+ OptionValue() {}
+
+ OptionValue(const std::string &V) { this->setValue(V); }
+ OptionValue<std::string> &operator=(const std::string &V) {
+ setValue(V);
+ return *this;
+ }
+
+private:
+ void anchor() override;
+};
+
+//===----------------------------------------------------------------------===//
+// Enum valued command line option
+//
+#define clEnumVal(ENUMVAL, DESC) #ENUMVAL, int(ENUMVAL), DESC
+#define clEnumValN(ENUMVAL, FLAGNAME, DESC) FLAGNAME, int(ENUMVAL), DESC
+#define clEnumValEnd (reinterpret_cast<void *>(0))
+
+// values - For custom data types, allow specifying a group of values together
+// as the values that go into the mapping that the option handler uses. Note
+// that the values list must always be terminated with a 0 (clEnumValEnd) to
+// indicate that it has ended.
+//
+template <class DataType> class ValuesClass {
+ // Use a vector instead of a map, because the lists should be short,
+ // the overhead is less, and most importantly, it keeps them in the order
+ // inserted so we can print our option out nicely.
+ SmallVector<std::pair<const char *, std::pair<int, const char *>>, 4> Values;
+ void processValues(va_list Vals);
+
+public:
+ ValuesClass(const char *EnumName, DataType Val, const char *Desc,
+ va_list ValueArgs) {
+ // Insert the first value, which is required.
+ Values.push_back(std::make_pair(EnumName, std::make_pair(Val, Desc)));
+
+ // Process the varargs portion of the values...
+ while (const char *enumName = va_arg(ValueArgs, const char *)) {
+ DataType EnumVal = static_cast<DataType>(va_arg(ValueArgs, int));
+ const char *EnumDesc = va_arg(ValueArgs, const char *);
+ Values.push_back(std::make_pair(enumName, // Add value to value map
+ std::make_pair(EnumVal, EnumDesc)));
+ }
+ }
+
+ template <class Opt> void apply(Opt &O) const {
+ for (size_t i = 0, e = Values.size(); i != e; ++i)
+ O.getParser().addLiteralOption(Values[i].first, Values[i].second.first,
+ Values[i].second.second);
+ }
+};
+
+template <class DataType>
+ValuesClass<DataType> LLVM_END_WITH_NULL
+values(const char *Arg, DataType Val, const char *Desc, ...) {
+ va_list ValueArgs;
+ va_start(ValueArgs, Desc);
+ ValuesClass<DataType> Vals(Arg, Val, Desc, ValueArgs);
+ va_end(ValueArgs);
+ return Vals;
+}
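+
+// A usage sketch (illustrative; 'OptLevel' and its enumerators are
+// hypothetical):
+//
+//   cl::opt<OptLevel> OLevel(cl::desc("Optimization level:"),
+//       cl::values(clEnumVal(g, "No optimizations"),
+//                  clEnumValN(O1, "O1", "Enable trivial optimizations"),
+//                  clEnumValEnd));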
+
+//===----------------------------------------------------------------------===//
+// parser class - Parameterizable parser for different data types. By default,
+// known data types (string, int, bool) have specialized parsers that do what
+// you would expect. The default parser, used for data types that are not
+// built-in, uses a mapping table to map specific options to values, which is
+// used, among other things, to handle enum types.
+
+//--------------------------------------------------
+// generic_parser_base - This class holds all the non-generic code that we do
+// not need replicated for every instance of the generic parser. This also
+// allows us to put stuff into CommandLine.cpp
+//
+class generic_parser_base {
+protected:
+ class GenericOptionInfo {
+ public:
+ GenericOptionInfo(const char *name, const char *helpStr)
+ : Name(name), HelpStr(helpStr) {}
+ const char *Name;
+ const char *HelpStr;
+ };
+
+public:
+ generic_parser_base(Option &O) : Owner(O) {}
+
+ virtual ~generic_parser_base() {} // Base class should have virtual-dtor
+
+ // getNumOptions - Virtual function implemented by generic subclass to
+ // indicate how many entries are in Values.
+ //
+ virtual unsigned getNumOptions() const = 0;
+
+ // getOption - Return option name N.
+ virtual const char *getOption(unsigned N) const = 0;
+
+ // getDescription - Return description N
+ virtual const char *getDescription(unsigned N) const = 0;
+
+ // Return the width of the option tag for printing...
+ virtual size_t getOptionWidth(const Option &O) const;
+
+ virtual const GenericOptionValue &getOptionValue(unsigned N) const = 0;
+
+ // printOptionInfo - Print out information about this option. The
+ // to-be-maintained width is specified.
+ //
+ virtual void printOptionInfo(const Option &O, size_t GlobalWidth) const;
+
+ void printGenericOptionDiff(const Option &O, const GenericOptionValue &V,
+ const GenericOptionValue &Default,
+ size_t GlobalWidth) const;
+
+ // printOptionDiff - print the value of an option and its default.
+ //
+ // Template definition ensures that the option and default have the same
+ // DataType (via the same AnyOptionValue).
+ template <class AnyOptionValue>
+ void printOptionDiff(const Option &O, const AnyOptionValue &V,
+ const AnyOptionValue &Default,
+ size_t GlobalWidth) const {
+ printGenericOptionDiff(O, V, Default, GlobalWidth);
+ }
+
+ void initialize() {}
+
+ void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) {
+ // If there has been no argstr specified, that means that we need to add an
+ // argument for every possible option. This ensures that our options are
+ // vectored to us.
+ if (!Owner.hasArgStr())
+ for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
+ OptionNames.push_back(getOption(i));
+ }
+
+ enum ValueExpected getValueExpectedFlagDefault() const {
+ // If there is an ArgStr specified, then we are of the form:
+ //
+ // -opt=O2 or -opt O2 or -optO2
+ //
+ // In which case, the value is required. Otherwise if an arg str has not
+ // been specified, we are of the form:
+ //
+ // -O2 or O2 or -la (where -l and -a are separate options)
+ //
+ // If this is the case, we cannot allow a value.
+ //
+ if (Owner.hasArgStr())
+ return ValueRequired;
+ else
+ return ValueDisallowed;
+ }
+
+ // findOption - Return the option number corresponding to the specified
+ // argument string. If the option is not found, getNumOptions() is returned.
+ //
+ unsigned findOption(const char *Name);
+
+protected:
+ Option &Owner;
+};
+
+// Default parser implementation - This implementation depends on having a
+// mapping of recognized options to values of some sort. In addition to this,
+// each entry in the mapping also tracks a help message that is printed with the
+// command line option for -help. Because this is a simple mapping parser, the
+// data type can be any type that does not already have a specialized parser.
+//
+template <class DataType> class parser : public generic_parser_base {
+protected:
+ class OptionInfo : public GenericOptionInfo {
+ public:
+ OptionInfo(const char *name, DataType v, const char *helpStr)
+ : GenericOptionInfo(name, helpStr), V(v) {}
+ OptionValue<DataType> V;
+ };
+ SmallVector<OptionInfo, 8> Values;
+
+public:
+ parser(Option &O) : generic_parser_base(O) {}
+ typedef DataType parser_data_type;
+
+ // Implement virtual functions needed by generic_parser_base
+ unsigned getNumOptions() const override { return unsigned(Values.size()); }
+ const char *getOption(unsigned N) const override { return Values[N].Name; }
+ const char *getDescription(unsigned N) const override {
+ return Values[N].HelpStr;
+ }
+
+ // getOptionValue - Return the value of option name N.
+ const GenericOptionValue &getOptionValue(unsigned N) const override {
+ return Values[N].V;
+ }
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, DataType &V) {
+ StringRef ArgVal;
+ if (Owner.hasArgStr())
+ ArgVal = Arg;
+ else
+ ArgVal = ArgName;
+
+ for (size_t i = 0, e = Values.size(); i != e; ++i)
+ if (Values[i].Name == ArgVal) {
+ V = Values[i].V.getValue();
+ return false;
+ }
+
+ return O.error("Cannot find option named '" + ArgVal + "'!");
+ }
+
+ /// addLiteralOption - Add an entry to the mapping table.
+ ///
+ template <class DT>
+ void addLiteralOption(const char *Name, const DT &V, const char *HelpStr) {
+ assert(findOption(Name) == Values.size() && "Option already exists!");
+ OptionInfo X(Name, static_cast<DataType>(V), HelpStr);
+ Values.push_back(X);
+ AddLiteralOption(Owner, Name);
+ }
+
+ /// removeLiteralOption - Remove the specified option.
+ ///
+ void removeLiteralOption(const char *Name) {
+ unsigned N = findOption(Name);
+ assert(N != Values.size() && "Option not found!");
+ Values.erase(Values.begin() + N);
+ }
+};
+
+//--------------------------------------------------
+// basic_parser - Super class of parsers to provide boilerplate code
+//
+class basic_parser_impl { // non-template implementation of basic_parser<t>
+public:
+ basic_parser_impl(Option &) {}
+
+ enum ValueExpected getValueExpectedFlagDefault() const {
+ return ValueRequired;
+ }
+
+ void getExtraOptionNames(SmallVectorImpl<StringRef> &) {}
+
+ void initialize() {}
+
+ // Return the width of the option tag for printing...
+ size_t getOptionWidth(const Option &O) const;
+
+ // printOptionInfo - Print out information about this option. The
+ // to-be-maintained width is specified.
+ //
+ void printOptionInfo(const Option &O, size_t GlobalWidth) const;
+
+ // printOptionNoValue - Print a placeholder for options that don't yet support
+ // printOptionDiff().
+ void printOptionNoValue(const Option &O, size_t GlobalWidth) const;
+
+ // getValueName - Overload in subclass to provide a better default value.
+ virtual const char *getValueName() const { return "value"; }
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ virtual void anchor();
+
+protected:
+ ~basic_parser_impl() = default;
+ // A helper for basic_parser::printOptionDiff.
+ void printOptionName(const Option &O, size_t GlobalWidth) const;
+};
+
+// basic_parser - The real basic parser is just a template wrapper that provides
+// a typedef for the provided data type.
+//
+template <class DataType> class basic_parser : public basic_parser_impl {
+public:
+ basic_parser(Option &O) : basic_parser_impl(O) {}
+ typedef DataType parser_data_type;
+ typedef OptionValue<DataType> OptVal;
+
+protected:
+ // Workaround Clang PR22793
+ ~basic_parser() {}
+};
+
+//--------------------------------------------------
+// parser<bool>
+//
+template <> class parser<bool> final : public basic_parser<bool> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, bool &Val);
+
+ void initialize() {}
+
+ enum ValueExpected getValueExpectedFlagDefault() const {
+ return ValueOptional;
+ }
+
+ // getValueName - Do not print =<value> at all.
+ const char *getValueName() const override { return nullptr; }
+
+ void printOptionDiff(const Option &O, bool V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<bool>;
+
+//--------------------------------------------------
+// parser<boolOrDefault>
+template <>
+class parser<boolOrDefault> final : public basic_parser<boolOrDefault> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, boolOrDefault &Val);
+
+ enum ValueExpected getValueExpectedFlagDefault() const {
+ return ValueOptional;
+ }
+
+ // getValueName - Do not print =<value> at all.
+ const char *getValueName() const override { return nullptr; }
+
+ void printOptionDiff(const Option &O, boolOrDefault V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<boolOrDefault>;
+
+//--------------------------------------------------
+// parser<int>
+//
+template <> class parser<int> final : public basic_parser<int> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, int &Val);
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "int"; }
+
+ void printOptionDiff(const Option &O, int V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<int>;
+
+//--------------------------------------------------
+// parser<unsigned>
+//
+template <> class parser<unsigned> final : public basic_parser<unsigned> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, unsigned &Val);
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "uint"; }
+
+ void printOptionDiff(const Option &O, unsigned V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<unsigned>;
+
+//--------------------------------------------------
+// parser<unsigned long long>
+//
+template <>
+class parser<unsigned long long> final
+ : public basic_parser<unsigned long long> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg,
+ unsigned long long &Val);
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "uint"; }
+
+ void printOptionDiff(const Option &O, unsigned long long V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<unsigned long long>;
+
+//--------------------------------------------------
+// parser<double>
+//
+template <> class parser<double> final : public basic_parser<double> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, double &Val);
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "number"; }
+
+ void printOptionDiff(const Option &O, double V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<double>;
+
+//--------------------------------------------------
+// parser<float>
+//
+template <> class parser<float> final : public basic_parser<float> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &O, StringRef ArgName, StringRef Arg, float &Val);
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "number"; }
+
+ void printOptionDiff(const Option &O, float V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<float>;
+
+//--------------------------------------------------
+// parser<std::string>
+//
+template <> class parser<std::string> final : public basic_parser<std::string> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &, StringRef, StringRef Arg, std::string &Value) {
+ Value = Arg.str();
+ return false;
+ }
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "string"; }
+
+ void printOptionDiff(const Option &O, StringRef V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<std::string>;
+
+//--------------------------------------------------
+// parser<char>
+//
+template <> class parser<char> final : public basic_parser<char> {
+public:
+ parser(Option &O) : basic_parser(O) {}
+
+ // parse - Return true on error.
+ bool parse(Option &, StringRef, StringRef Arg, char &Value) {
+ Value = Arg[0];
+ return false;
+ }
+
+ // getValueName - Overload in subclass to provide a better default value.
+ const char *getValueName() const override { return "char"; }
+
+ void printOptionDiff(const Option &O, char V, OptVal Default,
+ size_t GlobalWidth) const;
+
+ // An out-of-line virtual method to provide a 'home' for this class.
+ void anchor() override;
+};
+
+extern template class basic_parser<char>;
+
+//--------------------------------------------------
+// PrintOptionDiff
+//
+// This collection of wrappers is the intermediary between class opt and class
+// parser to handle all the template nastiness.
+
+// This overloaded function is selected by the generic parser.
+template <class ParserClass, class DT>
+void printOptionDiff(const Option &O, const generic_parser_base &P, const DT &V,
+ const OptionValue<DT> &Default, size_t GlobalWidth) {
+ OptionValue<DT> OV = V;
+ P.printOptionDiff(O, OV, Default, GlobalWidth);
+}
+
+// This is instantiated for basic parsers when the parsed value has a different
+// type than the option value. e.g. HelpPrinter.
+template <class ParserDT, class ValDT> struct OptionDiffPrinter {
+ void print(const Option &O, const parser<ParserDT> &P, const ValDT & /*V*/,
+ const OptionValue<ValDT> & /*Default*/, size_t GlobalWidth) {
+ P.printOptionNoValue(O, GlobalWidth);
+ }
+};
+
+// This is instantiated for basic parsers when the parsed value has the same
+// type as the option value.
+template <class DT> struct OptionDiffPrinter<DT, DT> {
+ void print(const Option &O, const parser<DT> &P, const DT &V,
+ const OptionValue<DT> &Default, size_t GlobalWidth) {
+ P.printOptionDiff(O, V, Default, GlobalWidth);
+ }
+};
+
+// This overloaded function is selected by the basic parser, which may parse a
+// different type than the option type.
+template <class ParserClass, class ValDT>
+void printOptionDiff(
+ const Option &O,
+ const basic_parser<typename ParserClass::parser_data_type> &P,
+ const ValDT &V, const OptionValue<ValDT> &Default, size_t GlobalWidth) {
+
+ OptionDiffPrinter<typename ParserClass::parser_data_type, ValDT> printer;
+ printer.print(O, static_cast<const ParserClass &>(P), V, Default,
+ GlobalWidth);
+}
+
+//===----------------------------------------------------------------------===//
+// applicator class - This class is used because we must use partial
+// specialization to handle literal string arguments specially (const char* does
+// not correctly respond to the apply method). Because the syntax to use this
+// is a pain, we have the 'apply' method below to handle the nastiness...
+//
+template <class Mod> struct applicator {
+ template <class Opt> static void opt(const Mod &M, Opt &O) { M.apply(O); }
+};
+
+// Handle const char* as a special case...
+template <unsigned n> struct applicator<char[n]> {
+ template <class Opt> static void opt(const char *Str, Opt &O) {
+ O.setArgStr(Str);
+ }
+};
+template <unsigned n> struct applicator<const char[n]> {
+ template <class Opt> static void opt(const char *Str, Opt &O) {
+ O.setArgStr(Str);
+ }
+};
+template <> struct applicator<const char *> {
+ template <class Opt> static void opt(const char *Str, Opt &O) {
+ O.setArgStr(Str);
+ }
+};
+
+template <> struct applicator<NumOccurrencesFlag> {
+ static void opt(NumOccurrencesFlag N, Option &O) {
+ O.setNumOccurrencesFlag(N);
+ }
+};
+template <> struct applicator<ValueExpected> {
+ static void opt(ValueExpected VE, Option &O) { O.setValueExpectedFlag(VE); }
+};
+template <> struct applicator<OptionHidden> {
+ static void opt(OptionHidden OH, Option &O) { O.setHiddenFlag(OH); }
+};
+template <> struct applicator<FormattingFlags> {
+ static void opt(FormattingFlags FF, Option &O) { O.setFormattingFlag(FF); }
+};
+template <> struct applicator<MiscFlags> {
+ static void opt(MiscFlags MF, Option &O) { O.setMiscFlag(MF); }
+};
+
+// apply method - Apply modifiers to an option in a type safe way.
+template <class Opt, class Mod, class... Mods>
+void apply(Opt *O, const Mod &M, const Mods &... Ms) {
+ applicator<Mod>::opt(M, *O);
+ apply(O, Ms...);
+}
+
+template <class Opt, class Mod> void apply(Opt *O, const Mod &M) {
+ applicator<Mod>::opt(M, *O);
+}
+
+//===----------------------------------------------------------------------===//
+// opt_storage class
+
+// Default storage class definition: external storage. This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, bool ExternalStorage, bool isClass>
+class opt_storage {
+ DataType *Location; // Where to store the object...
+ OptionValue<DataType> Default;
+
+ void check_location() const {
+ assert(Location && "cl::location(...) not specified for a command "
+ "line option with external storage, "
+ "or cl::init specified before cl::location()!!");
+ }
+
+public:
+ opt_storage() : Location(nullptr) {}
+
+ bool setLocation(Option &O, DataType &L) {
+ if (Location)
+ return O.error("cl::location(x) specified more than once!");
+ Location = &L;
+ Default = L;
+ return false;
+ }
+
+ template <class T> void setValue(const T &V, bool initial = false) {
+ check_location();
+ *Location = V;
+ if (initial)
+ Default = V;
+ }
+
+ DataType &getValue() {
+ check_location();
+ return *Location;
+ }
+ const DataType &getValue() const {
+ check_location();
+ return *Location;
+ }
+
+ operator DataType() const { return this->getValue(); }
+
+ const OptionValue<DataType> &getDefault() const { return Default; }
+};
+
+// Define how to hold a class type object, such as a string. Since we can
+// inherit from a class, we do so. This makes us exactly compatible with the
+// object in all cases where it is used.
+//
+template <class DataType>
+class opt_storage<DataType, false, true> : public DataType {
+public:
+ OptionValue<DataType> Default;
+
+ template <class T> void setValue(const T &V, bool initial = false) {
+ DataType::operator=(V);
+ if (initial)
+ Default = V;
+ }
+
+ DataType &getValue() { return *this; }
+ const DataType &getValue() const { return *this; }
+
+ const OptionValue<DataType> &getDefault() const { return Default; }
+};
+
+// Define a partial specialization to handle things we cannot inherit from. In
+// this case, we store an instance through containment, and overload operators
+// to get at the value.
+//
+template <class DataType> class opt_storage<DataType, false, false> {
+public:
+ DataType Value;
+ OptionValue<DataType> Default;
+
+ // Make sure we initialize the value with the default constructor for the
+ // type.
+ opt_storage() : Value(DataType()), Default(DataType()) {}
+
+ template <class T> void setValue(const T &V, bool initial = false) {
+ Value = V;
+ if (initial)
+ Default = V;
+ }
+ DataType &getValue() { return Value; }
+ DataType getValue() const { return Value; }
+
+ const OptionValue<DataType> &getDefault() const { return Default; }
+
+ operator DataType() const { return getValue(); }
+
+ // If the datatype is a pointer, support -> on it.
+ DataType operator->() const { return Value; }
+};
+
+//===----------------------------------------------------------------------===//
+// opt - A scalar command line option.
+//
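+// Typical usage (illustrative sketch):
+//
+//   static cl::opt<std::string>
+//   OutputFilename("o", cl::desc("Output filename"),
+//                  cl::value_desc("filename"));
+//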
+template <class DataType, bool ExternalStorage = false,
+ class ParserClass = parser<DataType>>
+class opt : public Option,
+ public opt_storage<DataType, ExternalStorage,
+ std::is_class<DataType>::value> {
+ ParserClass Parser;
+
+ bool handleOccurrence(unsigned pos, StringRef ArgName,
+ StringRef Arg) override {
+ typename ParserClass::parser_data_type Val =
+ typename ParserClass::parser_data_type();
+ if (Parser.parse(*this, ArgName, Arg, Val))
+ return true; // Parse error!
+ this->setValue(Val);
+ this->setPosition(pos);
+ return false;
+ }
+
+ enum ValueExpected getValueExpectedFlagDefault() const override {
+ return Parser.getValueExpectedFlagDefault();
+ }
+ void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+ return Parser.getExtraOptionNames(OptionNames);
+ }
+
+ // Forward printing stuff to the parser...
+ size_t getOptionWidth() const override {
+ return Parser.getOptionWidth(*this);
+ }
+ void printOptionInfo(size_t GlobalWidth) const override {
+ Parser.printOptionInfo(*this, GlobalWidth);
+ }
+
+ void printOptionValue(size_t GlobalWidth, bool Force) const override {
+ if (Force || this->getDefault().compare(this->getValue())) {
+ cl::printOptionDiff<ParserClass>(*this, Parser, this->getValue(),
+ this->getDefault(), GlobalWidth);
+ }
+ }
+
+ void done() {
+ addArgument();
+ Parser.initialize();
+ }
+
+ // Command line options should not be copyable
+ opt(const opt &) = delete;
+ opt &operator=(const opt &) = delete;
+
+public:
+ // setInitialValue - Used by the cl::init modifier...
+ void setInitialValue(const DataType &V) { this->setValue(V, true); }
+
+ ParserClass &getParser() { return Parser; }
+
+ template <class T> DataType &operator=(const T &Val) {
+ this->setValue(Val);
+ return this->getValue();
+ }
+
+ template <class... Mods>
+ explicit opt(const Mods &... Ms)
+ : Option(Optional, NotHidden), Parser(*this) {
+ apply(this, Ms...);
+ done();
+ }
+};
+
+extern template class opt<unsigned>;
+extern template class opt<int>;
+extern template class opt<std::string>;
+extern template class opt<char>;
+extern template class opt<bool>;
+
+//===----------------------------------------------------------------------===//
+// list_storage class
+
+// Default storage class definition: external storage. This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, class StorageClass> class list_storage {
+ StorageClass *Location; // Where to store the object...
+
+public:
+ list_storage() : Location(nullptr) {}
+
+ bool setLocation(Option &O, StorageClass &L) {
+ if (Location)
+ return O.error("cl::location(x) specified more than once!");
+ Location = &L;
+ return false;
+ }
+
+ template <class T> void addValue(const T &V) {
+ assert(Location && "cl::location(...) not specified for a command "
+ "line option with external storage!");
+ Location->push_back(V);
+ }
+};
+
+// Define how to hold a class type object, such as a string.
+// Originally this code inherited from std::vector. In transitioning to a new
+// API for command line options we should change this. The new implementation
+// of this list_storage specialization implements the minimum subset of the
+// std::vector API required for all the current clients.
+//
+// FIXME: Reduce this API to a more narrow subset of std::vector
+//
+template <class DataType> class list_storage<DataType, bool> {
+ std::vector<DataType> Storage;
+
+public:
+ typedef typename std::vector<DataType>::iterator iterator;
+
+ iterator begin() { return Storage.begin(); }
+ iterator end() { return Storage.end(); }
+
+ typedef typename std::vector<DataType>::const_iterator const_iterator;
+ const_iterator begin() const { return Storage.begin(); }
+ const_iterator end() const { return Storage.end(); }
+
+ typedef typename std::vector<DataType>::size_type size_type;
+ size_type size() const { return Storage.size(); }
+
+ bool empty() const { return Storage.empty(); }
+
+ void push_back(const DataType &value) { Storage.push_back(value); }
+ void push_back(DataType &&value) { Storage.push_back(std::move(value)); }
+
+ typedef typename std::vector<DataType>::reference reference;
+ typedef typename std::vector<DataType>::const_reference const_reference;
+ reference operator[](size_type pos) { return Storage[pos]; }
+ const_reference operator[](size_type pos) const { return Storage[pos]; }
+
+ iterator erase(const_iterator pos) { return Storage.erase(pos); }
+ iterator erase(const_iterator first, const_iterator last) {
+ return Storage.erase(first, last);
+ }
+
+ iterator erase(iterator pos) { return Storage.erase(pos); }
+ iterator erase(iterator first, iterator last) {
+ return Storage.erase(first, last);
+ }
+
+ iterator insert(const_iterator pos, const DataType &value) {
+ return Storage.insert(pos, value);
+ }
+ iterator insert(const_iterator pos, DataType &&value) {
+ return Storage.insert(pos, std::move(value));
+ }
+
+ iterator insert(iterator pos, const DataType &value) {
+ return Storage.insert(pos, value);
+ }
+ iterator insert(iterator pos, DataType &&value) {
+ return Storage.insert(pos, std::move(value));
+ }
+
+ reference front() { return Storage.front(); }
+ const_reference front() const { return Storage.front(); }
+
+ operator std::vector<DataType>&() { return Storage; }
+ operator ArrayRef<DataType>() { return Storage; }
+ std::vector<DataType> *operator&() { return &Storage; }
+ const std::vector<DataType> *operator&() const { return &Storage; }
+
+ template <class T> void addValue(const T &V) { Storage.push_back(V); }
+};
+
+//===----------------------------------------------------------------------===//
+// list - A list of command line options.
+//
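+// Typical usage (illustrative sketch):
+//
+//   static cl::list<std::string>
+//   InputFilenames(cl::Positional, cl::desc("<input files>"), cl::OneOrMore);
+//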
+template <class DataType, class StorageClass = bool,
+ class ParserClass = parser<DataType>>
+class list : public Option, public list_storage<DataType, StorageClass> {
+ std::vector<unsigned> Positions;
+ ParserClass Parser;
+
+ enum ValueExpected getValueExpectedFlagDefault() const override {
+ return Parser.getValueExpectedFlagDefault();
+ }
+ void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+ return Parser.getExtraOptionNames(OptionNames);
+ }
+
+ bool handleOccurrence(unsigned pos, StringRef ArgName,
+ StringRef Arg) override {
+ typename ParserClass::parser_data_type Val =
+ typename ParserClass::parser_data_type();
+ if (Parser.parse(*this, ArgName, Arg, Val))
+ return true; // Parse Error!
+ list_storage<DataType, StorageClass>::addValue(Val);
+ setPosition(pos);
+ Positions.push_back(pos);
+ return false;
+ }
+
+ // Forward printing stuff to the parser...
+ size_t getOptionWidth() const override {
+ return Parser.getOptionWidth(*this);
+ }
+ void printOptionInfo(size_t GlobalWidth) const override {
+ Parser.printOptionInfo(*this, GlobalWidth);
+ }
+
+ // Unimplemented: list options don't currently store their default value.
+ void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+ }
+
+ void done() {
+ addArgument();
+ Parser.initialize();
+ }
+
+ // Command line options should not be copyable
+ list(const list &) = delete;
+ list &operator=(const list &) = delete;
+
+public:
+ ParserClass &getParser() { return Parser; }
+
+ unsigned getPosition(unsigned optnum) const {
+ assert(optnum < this->size() && "Invalid option index");
+ return Positions[optnum];
+ }
+
+ void setNumAdditionalVals(unsigned n) { Option::setNumAdditionalVals(n); }
+
+ template <class... Mods>
+ explicit list(const Mods &... Ms)
+ : Option(ZeroOrMore, NotHidden), Parser(*this) {
+ apply(this, Ms...);
+ done();
+ }
+};
+
+// multi_val - Modifier to set the number of additional values.
+struct multi_val {
+ unsigned AdditionalVals;
+ explicit multi_val(unsigned N) : AdditionalVals(N) {}
+
+ template <typename D, typename S, typename P>
+ void apply(list<D, S, P> &L) const {
+ L.setNumAdditionalVals(AdditionalVals);
+ }
+};
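+
+// e.g. (illustrative sketch): each occurrence of -point consumes three values:
+//
+//   static cl::list<int>
+//   Point("point", cl::multi_val(3), cl::desc("3-D point coordinates"));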
+
+//===----------------------------------------------------------------------===//
+// bits_storage class
+
+// Default storage class definition: external storage. This implementation
+// assumes the user will specify a variable to store the data into with the
+// cl::location(x) modifier.
+//
+template <class DataType, class StorageClass> class bits_storage {
+ unsigned *Location; // Where to store the bits...
+
+ template <class T> static unsigned Bit(const T &V) {
+ unsigned BitPos = static_cast<unsigned>(V);
+ assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
+ "enum exceeds width of bit vector!");
+ return 1 << BitPos;
+ }
+
+public:
+ bits_storage() : Location(nullptr) {}
+
+ bool setLocation(Option &O, unsigned &L) {
+ if (Location)
+ return O.error("cl::location(x) specified more than once!");
+ Location = &L;
+ return false;
+ }
+
+ template <class T> void addValue(const T &V) {
+ assert(Location != 0 && "cl::location(...) not specified for a command "
+ "line option with external storage!");
+ *Location |= Bit(V);
+ }
+
+ unsigned getBits() { return *Location; }
+
+ template <class T> bool isSet(const T &V) {
+ return (*Location & Bit(V)) != 0;
+ }
+};
+
+// Define how to hold the bits when no external storage is requested: this
+// specialization stores the bit vector directly inside the option object.
+//
+template <class DataType> class bits_storage<DataType, bool> {
+ unsigned Bits = 0; // Where to store the bits...
+
+ template <class T> static unsigned Bit(const T &V) {
+ unsigned BitPos = (unsigned)V;
+ assert(BitPos < sizeof(unsigned) * CHAR_BIT &&
+ "enum exceeds width of bit vector!");
+ return 1 << BitPos;
+ }
+
+public:
+ template <class T> void addValue(const T &V) { Bits |= Bit(V); }
+
+ unsigned getBits() { return Bits; }
+
+ template <class T> bool isSet(const T &V) { return (Bits & Bit(V)) != 0; }
+};
+
+//===----------------------------------------------------------------------===//
+// bits - A bit vector of command line options.
+//
+template <class DataType, class Storage = bool,
+ class ParserClass = parser<DataType>>
+class bits : public Option, public bits_storage<DataType, Storage> {
+ std::vector<unsigned> Positions;
+ ParserClass Parser;
+
+ enum ValueExpected getValueExpectedFlagDefault() const override {
+ return Parser.getValueExpectedFlagDefault();
+ }
+ void getExtraOptionNames(SmallVectorImpl<StringRef> &OptionNames) override {
+ return Parser.getExtraOptionNames(OptionNames);
+ }
+
+ bool handleOccurrence(unsigned pos, StringRef ArgName,
+ StringRef Arg) override {
+ typename ParserClass::parser_data_type Val =
+ typename ParserClass::parser_data_type();
+ if (Parser.parse(*this, ArgName, Arg, Val))
+ return true; // Parse Error!
+ this->addValue(Val);
+ setPosition(pos);
+ Positions.push_back(pos);
+ return false;
+ }
+
+ // Forward printing stuff to the parser...
+ size_t getOptionWidth() const override {
+ return Parser.getOptionWidth(*this);
+ }
+ void printOptionInfo(size_t GlobalWidth) const override {
+ Parser.printOptionInfo(*this, GlobalWidth);
+ }
+
+ // Unimplemented: bits options don't currently store their default values.
+ void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+ }
+
+ void done() {
+ addArgument();
+ Parser.initialize();
+ }
+
+ // Command line options should not be copyable
+ bits(const bits &) = delete;
+ bits &operator=(const bits &) = delete;
+
+public:
+ ParserClass &getParser() { return Parser; }
+
+ unsigned getPosition(unsigned optnum) const {
+ assert(optnum < this->size() && "Invalid option index");
+ return Positions[optnum];
+ }
+
+ template <class... Mods>
+ explicit bits(const Mods &... Ms)
+ : Option(ZeroOrMore, NotHidden), Parser(*this) {
+ apply(this, Ms...);
+ done();
+ }
+};
+
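+// A usage sketch (the enum and descriptions are illustrative; depending on
+// the LLVM version, cl::values may also require a trailing clEnumValEnd):
+//
+//   enum Opts { dce, licm, gvn };
+//   static cl::bits<Opts> OptBits(cl::desc("Optimizations available:"),
+//       cl::values(clEnumVal(dce , "Dead code elimination"),
+//                  clEnumVal(licm, "Loop invariant code motion"),
+//                  clEnumVal(gvn , "Global value numbering")));
+//
+//   if (OptBits.isSet(licm)) { /* -licm was given on the command line */ }
+//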
+//===----------------------------------------------------------------------===//
+// Aliased command line option (alias this name to a preexisting name)
+//
+
+class alias : public Option {
+ Option *AliasFor;
+ bool handleOccurrence(unsigned pos, StringRef /*ArgName*/,
+ StringRef Arg) override {
+ return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg);
+ }
+ bool addOccurrence(unsigned pos, StringRef /*ArgName*/, StringRef Value,
+ bool MultiArg = false) override {
+ return AliasFor->addOccurrence(pos, AliasFor->ArgStr, Value, MultiArg);
+ }
+ // Handle printing stuff...
+ size_t getOptionWidth() const override;
+ void printOptionInfo(size_t GlobalWidth) const override;
+
+ // Aliases do not need to print their values.
+ void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const override {
+ }
+
+ ValueExpected getValueExpectedFlagDefault() const override {
+ return AliasFor->getValueExpectedFlag();
+ }
+
+ void done() {
+ if (!hasArgStr())
+ error("cl::alias must have argument name specified!");
+ if (!AliasFor)
+ error("cl::alias must have an cl::aliasopt(option) specified!");
+ addArgument();
+ }
+
+ // Command line options should not be copyable
+ alias(const alias &) = delete;
+ alias &operator=(const alias &) = delete;
+
+public:
+ void setAliasFor(Option &O) {
+ if (AliasFor)
+ error("cl::alias must only have one cl::aliasopt(...) specified!");
+ AliasFor = &O;
+ }
+
+ template <class... Mods>
+ explicit alias(const Mods &... Ms)
+ : Option(Optional, Hidden), AliasFor(nullptr) {
+ apply(this, Ms...);
+ done();
+ }
+};
+
+// aliasopt - Modifier to set the option an alias aliases.
+struct aliasopt {
+ Option &Opt;
+ explicit aliasopt(Option &O) : Opt(O) {}
+ void apply(alias &A) const { A.setAliasFor(Opt); }
+};
+
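+// A usage sketch (names are illustrative): alias "-q" to an existing option.
+//
+//   static cl::opt<bool> Quiet("quiet", cl::desc("Suppress output"));
+//   static cl::alias QuietA("q", cl::desc("Alias for -quiet"),
+//                           cl::aliasopt(Quiet));
+//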
+// extrahelp - provide additional help at the end of the normal help
+// output. All occurrences of cl::extrahelp will be accumulated and
+// printed to stderr at the end of the regular help, just before
+// exit is called.
+struct extrahelp {
+ const char *morehelp;
+ explicit extrahelp(const char *help);
+};
+
+void PrintVersionMessage();
+
+/// This function just prints the help message, exactly the same way as if the
+/// -help or -help-hidden option had been given on the command line.
+///
+/// NOTE: THIS FUNCTION TERMINATES THE PROGRAM!
+///
+/// \param Hidden if true will print hidden options
+/// \param Categorized if true print options in categories
+void PrintHelpMessage(bool Hidden = false, bool Categorized = false);
+
+//===----------------------------------------------------------------------===//
+// Public interface for accessing registered options.
+//
+
+/// \brief Use this to get a StringMap of all registered named options
+/// (e.g. -help).
+///
+/// \return A reference to the StringMap used by the cl APIs to parse options.
+///
+/// Access to unnamed arguments (i.e. positional) is not provided because
+/// it is expected that the client already has access to these.
+///
+/// Typical usage:
+/// \code
+/// int main(int argc, char *argv[]) {
+///   StringMap<llvm::cl::Option*> &opts = llvm::cl::getRegisteredOptions();
+///   assert(opts.count("help") == 1);
+///   opts["help"]->setDescription("Show alphabetical help information");
+///   // More code
+///   llvm::cl::ParseCommandLineOptions(argc, argv);
+///   // More code
+/// }
+/// \endcode
+///
+/// This interface is useful for modifying options in libraries that are out of
+/// the control of the client. The options should be modified before calling
+/// llvm::cl::ParseCommandLineOptions().
+///
+/// Hopefully this API can be deprecated soon. Any situation where options need
+/// to be modified by tools or libraries should be handled by sane APIs rather
+/// than just handing around a global list.
+StringMap<Option *> &getRegisteredOptions();
+
+//===----------------------------------------------------------------------===//
+// Standalone command line processing utilities.
+//
+
+/// \brief Tokenizes a command line that can contain escapes and quotes.
+///
+/// The quoting rules match those used by GCC and other tools that use
+/// libiberty's buildargv() or expandargv() utilities, and do not match bash.
+/// They differ from buildargv() on treatment of backslashes that do not escape
+/// a special character to make it possible to accept most Windows file paths.
+///
+/// \param [in] Source The string to be split on whitespace with quotes.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
+/// lines and end of the response file to be marked with a nullptr string.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+void TokenizeGNUCommandLine(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
+
+/// \brief Tokenizes a Windows command line which may contain quotes and escaped
+/// quotes.
+///
+/// See MSDN docs for CommandLineToArgvW for information on the quoting rules.
+/// http://msdn.microsoft.com/en-us/library/windows/desktop/17w5ykft(v=vs.85).aspx
+///
+/// \param [in] Source The string to be split on whitespace with quotes.
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] MarkEOLs true if tokenizing a response file and you want end of
+/// lines and end of the response file to be marked with a nullptr string.
+/// \param [out] NewArgv All parsed strings are appended to NewArgv.
+void TokenizeWindowsCommandLine(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs = false);
+
+/// \brief String tokenization function type. Should be compatible with either
+/// Windows or Unix command line tokenizers.
+typedef void (*TokenizerCallback)(StringRef Source, StringSaver &Saver,
+ SmallVectorImpl<const char *> &NewArgv,
+ bool MarkEOLs);
+
+/// \brief Expand response files on a command line recursively using the given
+/// StringSaver and tokenization strategy. Argv should contain the command line
+/// before expansion and will be modified in place. If requested, Argv will
+/// also be populated with nullptrs indicating where each response file line
+/// ends, which is useful for the "/link" argument that needs to consume all
+/// remaining arguments only until the next end of line, when in a response
+/// file.
+///
+/// \param [in] Saver Delegates back to the caller for saving parsed strings.
+/// \param [in] Tokenizer Tokenization strategy. Typically Unix or Windows.
+/// \param [in,out] Argv Command line into which to expand response files.
+/// \param [in] MarkEOLs Mark end of lines and the end of the response file
+/// with nullptrs in the Argv vector.
+/// \return true if all @files were expanded successfully or there were none.
+bool ExpandResponseFiles(StringSaver &Saver, TokenizerCallback Tokenizer,
+ SmallVectorImpl<const char *> &Argv,
+ bool MarkEOLs = false);
+
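+// A sketch of typical use (assuming the BumpPtrAllocator-backed StringSaver
+// declared in llvm/Support/StringSaver.h; argc/argv come from main):
+//
+//   llvm::BumpPtrAllocator Alloc;
+//   llvm::StringSaver Saver(Alloc);
+//   SmallVector<const char *, 256> Args(argv, argv + argc);
+//   cl::ExpandResponseFiles(Saver, cl::TokenizeGNUCommandLine, Args);
+//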
+/// \brief Mark all options not part of this category as cl::ReallyHidden.
+///
+/// \param Category the category of options to keep displaying
+///
+/// Some tools (like clang-format) like to be able to hide all options that are
+/// not specific to the tool. This function allows a tool to specify a single
+/// option category to display in the -help output.
+void HideUnrelatedOptions(cl::OptionCategory &Category);
+
+/// \brief Mark all options not part of the categories as cl::ReallyHidden.
+///
+/// \param Categories the categories of options to keep displaying.
+///
+/// Some tools (like clang-format) like to be able to hide all options that are
+/// not specific to the tool. This function allows a tool to specify a single
+/// option category to display in the -help output.
+void HideUnrelatedOptions(ArrayRef<const cl::OptionCategory *> Categories);
+
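+// A sketch of typical use in a tool's main() (category name illustrative):
+//
+//   static cl::OptionCategory MyToolCategory("mytool options");
+//   ...
+//   cl::HideUnrelatedOptions(MyToolCategory);
+//   cl::ParseCommandLineOptions(argc, argv);
+//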
+} // End namespace cl
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Compiler.h b/contrib/llvm/include/llvm/Support/Compiler.h
new file mode 100644
index 0000000..b3416bb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Compiler.h
@@ -0,0 +1,448 @@
+//===-- llvm/Support/Compiler.h - Compiler abstraction support --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several macros, based on the current compiler. This allows
+// use of compiler-specific features in a way that remains portable.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMPILER_H
+#define LLVM_SUPPORT_COMPILER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#ifndef __has_feature
+# define __has_feature(x) 0
+#endif
+
+#ifndef __has_extension
+# define __has_extension(x) 0
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/// \macro LLVM_GNUC_PREREQ
+/// \brief Extend the default __GNUC_PREREQ even if glibc's features.h isn't
+/// available.
+#ifndef LLVM_GNUC_PREREQ
+# if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+# define LLVM_GNUC_PREREQ(maj, min, patch) \
+ ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) + __GNUC_PATCHLEVEL__ >= \
+ ((maj) << 20) + ((min) << 10) + (patch))
+# elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+# define LLVM_GNUC_PREREQ(maj, min, patch) \
+ ((__GNUC__ << 20) + (__GNUC_MINOR__ << 10) >= ((maj) << 20) + ((min) << 10))
+# else
+# define LLVM_GNUC_PREREQ(maj, min, patch) 0
+# endif
+#endif
+
+/// \macro LLVM_MSC_PREREQ
+/// \brief Is the compiler MSVC of at least the specified version?
+/// The common version values to check for are:
+/// * 1800: Microsoft Visual Studio 2013 / 12.0
+/// * 1900: Microsoft Visual Studio 2015 / 14.0
+#ifdef _MSC_VER
+#define LLVM_MSC_PREREQ(version) (_MSC_VER >= (version))
+
+// We require at least MSVC 2013.
+#if !LLVM_MSC_PREREQ(1800)
+#error LLVM requires at least MSVC 2013.
+#endif
+
+#else
+#define LLVM_MSC_PREREQ(version) 0
+#endif
+
+#if !defined(_MSC_VER) || defined(__clang__) || LLVM_MSC_PREREQ(1900)
+#define LLVM_NOEXCEPT noexcept
+#else
+#define LLVM_NOEXCEPT throw()
+#endif
+
+/// \brief Does the compiler support ref-qualifiers for *this?
+///
+/// Sadly, this is separate from just rvalue reference support because GCC
+/// and MSVC implemented this later than everything else.
+#if __has_feature(cxx_rvalue_references) || LLVM_GNUC_PREREQ(4, 8, 1)
+#define LLVM_HAS_RVALUE_REFERENCE_THIS 1
+#else
+#define LLVM_HAS_RVALUE_REFERENCE_THIS 0
+#endif
+
+/// Expands to '&' if ref-qualifiers for *this are supported.
+///
+/// This can be used to provide lvalue/rvalue overrides of member functions.
+/// The rvalue override should be guarded by LLVM_HAS_RVALUE_REFERENCE_THIS
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+#define LLVM_LVALUE_FUNCTION &
+#else
+#define LLVM_LVALUE_FUNCTION
+#endif
+
+#if __has_feature(cxx_constexpr) || defined(__GXX_EXPERIMENTAL_CXX0X__)
+# define LLVM_CONSTEXPR constexpr
+#else
+# define LLVM_CONSTEXPR
+#endif
+
+/// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked
+/// into a shared library, then the class should be private to the library and
+/// not accessible from outside it. Can also be used to mark variables and
+/// functions, making them private to any shared library they are linked into.
+/// On PE/COFF targets, library visibility is the default, so this isn't needed.
+#if (__has_attribute(visibility) || LLVM_GNUC_PREREQ(4, 0, 0)) && \
+ !defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32)
+#define LLVM_LIBRARY_VISIBILITY __attribute__ ((visibility("hidden")))
+#else
+#define LLVM_LIBRARY_VISIBILITY
+#endif
+
+#if __has_attribute(sentinel) || LLVM_GNUC_PREREQ(3, 0, 0)
+#define LLVM_END_WITH_NULL __attribute__((sentinel))
+#else
+#define LLVM_END_WITH_NULL
+#endif
+
+#if __has_attribute(used) || LLVM_GNUC_PREREQ(3, 1, 0)
+#define LLVM_ATTRIBUTE_USED __attribute__((__used__))
+#else
+#define LLVM_ATTRIBUTE_USED
+#endif
+
+#if __has_attribute(warn_unused_result) || LLVM_GNUC_PREREQ(3, 4, 0)
+#define LLVM_ATTRIBUTE_UNUSED_RESULT __attribute__((__warn_unused_result__))
+#else
+#define LLVM_ATTRIBUTE_UNUSED_RESULT
+#endif
+
+// Some compilers warn about unused functions. When a function is sometimes
+// used or not depending on build settings (e.g. a function only called from
+// within "assert"), this attribute can be used to suppress such warnings.
+//
+// However, it shouldn't be used for unused *variables*, as those have a much
+// more portable solution:
+// (void)unused_var_name;
+// Prefer cast-to-void wherever it is sufficient.
+#if __has_attribute(unused) || LLVM_GNUC_PREREQ(3, 1, 0)
+#define LLVM_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define LLVM_ATTRIBUTE_UNUSED
+#endif
+
+// FIXME: Provide this for PE/COFF targets.
+#if (__has_attribute(weak) || LLVM_GNUC_PREREQ(4, 0, 0)) && \
+ (!defined(__MINGW32__) && !defined(__CYGWIN__) && !defined(LLVM_ON_WIN32))
+#define LLVM_ATTRIBUTE_WEAK __attribute__((__weak__))
+#else
+#define LLVM_ATTRIBUTE_WEAK
+#endif
+
+// Prior to clang 3.2, clang did not accept any spelling of
+// __has_attribute(const), so assume it is supported.
+#if defined(__clang__) || defined(__GNUC__)
+// aka 'CONST' but following LLVM Conventions.
+#define LLVM_READNONE __attribute__((__const__))
+#else
+#define LLVM_READNONE
+#endif
+
+#if __has_attribute(pure) || defined(__GNUC__)
+// aka 'PURE' but following LLVM Conventions.
+#define LLVM_READONLY __attribute__((__pure__))
+#else
+#define LLVM_READONLY
+#endif
+
+#if __has_builtin(__builtin_expect) || LLVM_GNUC_PREREQ(4, 0, 0)
+#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
+#else
+#define LLVM_LIKELY(EXPR) (EXPR)
+#define LLVM_UNLIKELY(EXPR) (EXPR)
+#endif
+
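+// A usage sketch (illustrative): annotate a rarely-taken error path so the
+// compiler lays out the hot path first.
+//
+//   if (LLVM_UNLIKELY(!Buffer))
+//     reportAllocationFailure(); // hypothetical error handler
+//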
+/// LLVM_ATTRIBUTE_NOINLINE - On compilers where we have a directive to do so,
+/// mark a method "not for inlining".
+#if __has_attribute(noinline) || LLVM_GNUC_PREREQ(3, 4, 0)
+#define LLVM_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+#define LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+/// LLVM_ATTRIBUTE_ALWAYS_INLINE - On compilers where we have a directive to do
+/// so, mark a method "always inline" because it is performance sensitive.
+/// GCC 3.4 supported this but was buggy in various cases and produced
+/// "unimplemented" errors, so we only use it in GCC 4.0 and later.
+#if __has_attribute(always_inline) || LLVM_GNUC_PREREQ(4, 0, 0)
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE __forceinline
+#else
+#define LLVM_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#ifdef __GNUC__
+#define LLVM_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define LLVM_ATTRIBUTE_NORETURN
+#endif
+
+#if __has_attribute(returns_nonnull) || LLVM_GNUC_PREREQ(4, 9, 0)
+#define LLVM_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#else
+#define LLVM_ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+/// \macro LLVM_ATTRIBUTE_RETURNS_NOALIAS Used to mark a function as returning a
+/// pointer that does not alias any other valid pointer.
+#ifdef __GNUC__
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __attribute__((__malloc__))
+#elif defined(_MSC_VER)
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS __declspec(restrict)
+#else
+#define LLVM_ATTRIBUTE_RETURNS_NOALIAS
+#endif
+
+/// LLVM_EXTENSION - Support compilers where we have a keyword to suppress
+/// pedantic diagnostics.
+#ifdef __GNUC__
+#define LLVM_EXTENSION __extension__
+#else
+#define LLVM_EXTENSION
+#endif
+
+// LLVM_ATTRIBUTE_DEPRECATED(decl, "message")
+#if __has_feature(attribute_deprecated_with_message)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated(message)))
+#elif defined(__GNUC__)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ __declspec(deprecated(message)) decl
+#else
+# define LLVM_ATTRIBUTE_DEPRECATED(decl, message) \
+ decl
+#endif
+
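+// A usage sketch (illustrative): the macro wraps the whole declaration.
+//
+//   LLVM_ATTRIBUTE_DEPRECATED(void oldAPI(), "use newAPI() instead");
+//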
+/// LLVM_BUILTIN_UNREACHABLE - On compilers which support it, expands
+/// to an expression which states that it is undefined behavior for the
+/// compiler to reach this point. Otherwise is not defined.
+#if __has_builtin(__builtin_unreachable) || LLVM_GNUC_PREREQ(4, 5, 0)
+# define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define LLVM_BUILTIN_UNREACHABLE __assume(false)
+#endif
+
+/// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression
+/// which causes the program to exit abnormally.
+#if __has_builtin(__builtin_trap) || LLVM_GNUC_PREREQ(4, 3, 0)
+# define LLVM_BUILTIN_TRAP __builtin_trap()
+#elif defined(_MSC_VER)
+// The __debugbreak intrinsic is supported by MSVC, does not require forward
+// declarations involving platform-specific typedefs (unlike RaiseException),
+// results in a call to vectored exception handlers, and encodes to a short
+// instruction that still causes the trapping behavior we want.
+# define LLVM_BUILTIN_TRAP __debugbreak()
+#else
+# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0
+#endif
+
+/// \macro LLVM_ASSUME_ALIGNED
+/// \brief Returns a pointer with an assumed alignment.
+#if __has_builtin(__builtin_assume_aligned) || LLVM_GNUC_PREREQ(4, 7, 0)
+# define LLVM_ASSUME_ALIGNED(p, a) __builtin_assume_aligned(p, a)
+#elif defined(LLVM_BUILTIN_UNREACHABLE)
+// Fallback for compilers that lack __builtin_assume_aligned.
+# define LLVM_ASSUME_ALIGNED(p, a) \
+ (((uintptr_t(p) % (a)) == 0) ? (p) : (LLVM_BUILTIN_UNREACHABLE, (p)))
+#else
+# define LLVM_ASSUME_ALIGNED(p, a) (p)
+#endif
+
+/// \macro LLVM_ALIGNAS
+/// \brief Used to specify a minimum alignment for a structure or variable. The
+/// alignment must be a constant integer. Use LLVM_PTR_SIZE to compute
+/// alignments in terms of the size of a pointer.
+///
+/// Note that __declspec(align) has special quirks, it's not legal to pass a
+/// structure with __declspec(align) as a formal parameter.
+#ifdef _MSC_VER
+# define LLVM_ALIGNAS(x) __declspec(align(x))
+#elif __GNUC__ && !__has_feature(cxx_alignas) && !LLVM_GNUC_PREREQ(4, 8, 0)
+# define LLVM_ALIGNAS(x) __attribute__((aligned(x)))
+#else
+# define LLVM_ALIGNAS(x) alignas(x)
+#endif
+
+/// \macro LLVM_PACKED
+/// \brief Used to specify a packed structure.
+/// LLVM_PACKED(
+/// struct A {
+/// int i;
+/// int j;
+/// int k;
+/// long long l;
+/// });
+///
+/// LLVM_PACKED_START
+/// struct B {
+/// int i;
+/// int j;
+/// int k;
+/// long long l;
+/// };
+/// LLVM_PACKED_END
+#ifdef _MSC_VER
+# define LLVM_PACKED(d) __pragma(pack(push, 1)) d __pragma(pack(pop))
+# define LLVM_PACKED_START __pragma(pack(push, 1))
+# define LLVM_PACKED_END __pragma(pack(pop))
+#else
+# define LLVM_PACKED(d) d __attribute__((packed))
+# define LLVM_PACKED_START _Pragma("pack(push, 1)")
+# define LLVM_PACKED_END _Pragma("pack(pop)")
+#endif
+
+/// \macro LLVM_PTR_SIZE
+/// \brief A constant integer equivalent to the value of sizeof(void*).
+/// Generally used in combination with LLVM_ALIGNAS or when doing computation in
+/// the preprocessor.
+#ifdef __SIZEOF_POINTER__
+# define LLVM_PTR_SIZE __SIZEOF_POINTER__
+#elif defined(_WIN64)
+# define LLVM_PTR_SIZE 8
+#elif defined(_WIN32)
+# define LLVM_PTR_SIZE 4
+#elif defined(_MSC_VER)
+# error "could not determine LLVM_PTR_SIZE as a constant int for MSVC"
+#else
+# define LLVM_PTR_SIZE sizeof(void *)
+#endif
+
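+// A usage sketch (illustrative): request pointer-sized alignment for a type,
+// as the LLVM_ALIGNAS documentation above suggests.
+//
+//   struct LLVM_ALIGNAS(LLVM_PTR_SIZE) PointerAligned {
+//     char Payload[LLVM_PTR_SIZE];
+//   };
+//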
+/// \macro LLVM_FUNCTION_NAME
+/// \brief Expands to __func__ on compilers which support it. Otherwise,
+/// expands to a compiler-dependent replacement.
+#if defined(_MSC_VER)
+# define LLVM_FUNCTION_NAME __FUNCTION__
+#else
+# define LLVM_FUNCTION_NAME __func__
+#endif
+
+/// \macro LLVM_MEMORY_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with MemorySanitizer instrumentation.
+#if __has_feature(memory_sanitizer)
+# define LLVM_MEMORY_SANITIZER_BUILD 1
+# include <sanitizer/msan_interface.h>
+#else
+# define LLVM_MEMORY_SANITIZER_BUILD 0
+# define __msan_allocated_memory(p, size)
+# define __msan_unpoison(p, size)
+#endif
+
+/// \macro LLVM_ADDRESS_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with AddressSanitizer instrumentation.
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# define LLVM_ADDRESS_SANITIZER_BUILD 1
+# include <sanitizer/asan_interface.h>
+#else
+# define LLVM_ADDRESS_SANITIZER_BUILD 0
+# define __asan_poison_memory_region(p, size)
+# define __asan_unpoison_memory_region(p, size)
+#endif
+
+/// \macro LLVM_THREAD_SANITIZER_BUILD
+/// \brief Whether LLVM itself is built with ThreadSanitizer instrumentation.
+#if __has_feature(thread_sanitizer) || defined(__SANITIZE_THREAD__)
+# define LLVM_THREAD_SANITIZER_BUILD 1
+#else
+# define LLVM_THREAD_SANITIZER_BUILD 0
+#endif
+
+#if LLVM_THREAD_SANITIZER_BUILD
+// Thread Sanitizer is a tool that finds races in code.
+// See http://code.google.com/p/data-race-test/wiki/DynamicAnnotations .
+// tsan detects these exact functions by name.
+extern "C" {
+void AnnotateHappensAfter(const char *file, int line, const volatile void *cv);
+void AnnotateHappensBefore(const char *file, int line, const volatile void *cv);
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+}
+
+// This marker is used to define a happens-before arc. The race detector will
+// infer an arc from the begin to the end when they share the same pointer
+// argument.
+# define TsanHappensBefore(cv) AnnotateHappensBefore(__FILE__, __LINE__, cv)
+
+// This marker defines the destination of a happens-before arc.
+# define TsanHappensAfter(cv) AnnotateHappensAfter(__FILE__, __LINE__, cv)
+
+// Ignore any races on writes between here and the next TsanIgnoreWritesEnd.
+# define TsanIgnoreWritesBegin() AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
+
+// Resume checking for racy writes.
+# define TsanIgnoreWritesEnd() AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
+#else
+# define TsanHappensBefore(cv)
+# define TsanHappensAfter(cv)
+# define TsanIgnoreWritesBegin()
+# define TsanIgnoreWritesEnd()
+#endif
+
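+// A sketch of how these annotations are typically paired (illustrative;
+// 'Ready' is assumed to be a std::atomic<bool> shared between two threads):
+//
+//   // Publisher:
+//   Data = compute();
+//   TsanHappensBefore(&Ready);
+//   Ready.store(true, std::memory_order_release);
+//
+//   // Consumer:
+//   if (Ready.load(std::memory_order_acquire)) {
+//     TsanHappensAfter(&Ready);
+//     use(Data);
+//   }
+//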
+/// \brief Mark debug helper function definitions like dump() that should not be
+/// stripped from debug builds.
+// FIXME: Move this to a private config.h as it's not usable in public headers.
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
+#else
+#define LLVM_DUMP_METHOD LLVM_ATTRIBUTE_NOINLINE
+#endif
+
+/// \macro LLVM_THREAD_LOCAL
+/// \brief A thread-local storage specifier which can be used with globals,
+/// extern globals, and static globals.
+///
+/// This is essentially an extremely restricted analog to C++11's thread_local
+/// support, and uses that when available. However, it falls back on
+/// platform-specific or vendor-provided extensions when necessary. These
+/// extensions don't support many of the C++11 thread_local's features. You
+/// should only use this for PODs that you can statically initialize to
+/// some constant value. In almost all circumstances this is most appropriate
+/// for use with a pointer, integer, or small aggregation of pointers and
+/// integers.
+#if LLVM_ENABLE_THREADS
+#if __has_feature(cxx_thread_local)
+#define LLVM_THREAD_LOCAL thread_local
+#elif defined(_MSC_VER)
+// MSVC supports this with a __declspec.
+#define LLVM_THREAD_LOCAL __declspec(thread)
+#else
+// Clang, GCC, and other compatible compilers used __thread prior to C++11 and
+// we only need the restricted functionality that provides.
+#define LLVM_THREAD_LOCAL __thread
+#endif
+#else // !LLVM_ENABLE_THREADS
+// If threading is disabled entirely, this compiles to nothing and you get
+// a normal global variable.
+#define LLVM_THREAD_LOCAL
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Compression.h b/contrib/llvm/include/llvm/Support/Compression.h
new file mode 100644
index 0000000..28274d6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Compression.h
@@ -0,0 +1,57 @@
+//===-- llvm/Support/Compression.h ---Compression----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains basic functions for compression/uncompression.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_COMPRESSION_H
+#define LLVM_SUPPORT_COMPRESSION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+template <typename T> class SmallVectorImpl;
+class StringRef;
+
+namespace zlib {
+
+enum CompressionLevel {
+ NoCompression,
+ DefaultCompression,
+ BestSpeedCompression,
+ BestSizeCompression
+};
+
+enum Status {
+ StatusOK,
+ StatusUnsupported, // zlib is unavailable
+ StatusOutOfMemory, // there was not enough memory
+ StatusBufferTooShort, // there was not enough room in the output buffer
+ StatusInvalidArg, // invalid input parameter
+ StatusInvalidData // data was corrupted or incomplete
+};
+
+bool isAvailable();
+
+Status compress(StringRef InputBuffer, SmallVectorImpl<char> &CompressedBuffer,
+ CompressionLevel Level = DefaultCompression);
+
+Status uncompress(StringRef InputBuffer,
+ SmallVectorImpl<char> &UncompressedBuffer,
+ size_t UncompressedSize);
+
+uint32_t crc32(StringRef Buffer);
+
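+// A usage sketch (illustrative; Input is a StringRef over the source bytes;
+// check isAvailable() because LLVM may be built without zlib):
+//
+//   SmallVector<char, 0> Compressed;
+//   if (zlib::isAvailable() &&
+//       zlib::compress(Input, Compressed, zlib::BestSizeCompression) ==
+//           zlib::StatusOK) {
+//     // Compressed now holds the deflated bytes of Input.
+//   }
+//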
+} // End of namespace zlib
+
+} // End of namespace llvm
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/Support/ConvertUTF.h b/contrib/llvm/include/llvm/Support/ConvertUTF.h
new file mode 100644
index 0000000..38952ec
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ConvertUTF.h
@@ -0,0 +1,268 @@
+/*===--- ConvertUTF.h - Universal Character Names conversions ---------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *==------------------------------------------------------------------------==*/
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ *
+ * Disclaimer
+ *
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ *
+ * Limitations on Rights to Redistribute This Code
+ *
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+ Conversions between UTF32, UTF-16, and UTF-8. Header file.
+
+ Several functions are included here, forming a complete set of
+ conversions between the three formats. UTF-7 is not included
+ here, but is handled in a separate source file.
+
+ Each of these routines takes pointers to input buffers and output
+ buffers. The input buffers are const.
+
+ Each routine converts the text between *sourceStart and sourceEnd,
+ putting the result into the buffer between *targetStart and
+ targetEnd. Note: the end pointers are *after* the last item: e.g.
+ *(sourceEnd - 1) is the last item.
+
+ The return result indicates whether the conversion was successful,
+ and if not, whether the problem was in the source or target buffers.
+ (Only the first encountered problem is indicated.)
+
+ After the conversion, *sourceStart and *targetStart are both
+ updated to point to the end of last text successfully converted in
+ the respective buffers.
+
+ Input parameters:
+ sourceStart - pointer to a pointer to the source buffer.
+ The contents of this are modified on return so that
+ it points at the next thing to be converted.
+ targetStart - similarly, pointer to pointer to the target buffer.
+ sourceEnd, targetEnd - respectively pointers to the ends of the
+ two buffers, for overflow checking only.
+
+ These conversion functions take a ConversionFlags argument. When this
+ flag is set to strict, both irregular sequences and isolated surrogates
+ will cause an error. When the flag is set to lenient, both irregular
+ sequences and isolated surrogates are converted.
+
+ Whether the flag is strict or lenient, all illegal sequences will cause
+ an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+ or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+ must check for illegal sequences.
+
+ When the flag is set to lenient, characters over 0x10FFFF are converted
+ to the replacement character; otherwise (when the flag is set to strict)
+ they constitute an error.
+
+ Output parameters:
+ The value "sourceIllegal" is returned from some routines if the input
+ sequence is malformed. When "sourceIllegal" is returned, the source
+ value will point to the illegal value that caused the problem. E.g.,
+ in UTF-8 when a sequence is malformed, it points to the start of the
+ malformed sequence.
+
+ Author: Mark E. Davis, 1994.
+ Rev History: Rick McGowan, fixes & updates May 2001.
+ Fixes & updates, Sept 2001.
+
+------------------------------------------------------------------------ */
+
+#ifndef LLVM_SUPPORT_CONVERTUTF_H
+#define LLVM_SUPPORT_CONVERTUTF_H
+
+/* ---------------------------------------------------------------------
+ The following 4 definitions are compiler-specific.
+ The C standard does not guarantee that wchar_t has at least
+ 16 bits, so wchar_t is no less portable than unsigned short!
+ All should be unsigned values to avoid sign extension during
+ bit mask & shift operations.
+------------------------------------------------------------------------ */
+
+typedef unsigned int UTF32; /* at least 32 bits */
+typedef unsigned short UTF16; /* at least 16 bits */
+typedef unsigned char UTF8; /* typically 8 bits */
+typedef unsigned char Boolean; /* 0 or 1 */
+
+/* Some fundamental constants */
+#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
+#define UNI_MAX_BMP (UTF32)0x0000FFFF
+#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
+#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+
+#define UNI_MAX_UTF8_BYTES_PER_CODE_POINT 4
+
+#define UNI_UTF16_BYTE_ORDER_MARK_NATIVE 0xFEFF
+#define UNI_UTF16_BYTE_ORDER_MARK_SWAPPED 0xFFFE
+
+typedef enum {
+ conversionOK, /* conversion successful */
+ sourceExhausted, /* partial character in source, but hit end */
+ targetExhausted, /* insuff. room in target for conversion */
+ sourceIllegal /* source sequence is illegal/malformed */
+} ConversionResult;
+
+typedef enum {
+ strictConversion = 0,
+ lenientConversion
+} ConversionFlags;
+
+/* This is for C++ and does no harm in C */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ConversionResult ConvertUTF8toUTF16 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
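+/* A sketch of the pointer-advancing calling convention described above
+ * (buffer names and sizes are illustrative):
+ *
+ *   const UTF8 *Src = Bytes, *SrcEnd = Bytes + NumBytes;
+ *   UTF16 Out[256], *Dst = Out, *DstEnd = Out + 256;
+ *   ConversionResult R =
+ *       ConvertUTF8toUTF16(&Src, SrcEnd, &Dst, DstEnd, strictConversion);
+ *   // On return, Src and Dst point just past the last units converted.
+ */
+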
+/**
+ * Convert a partial UTF8 sequence to UTF32. If the sequence ends in an
+ * incomplete code unit sequence, returns \c sourceExhausted.
+ */
+ConversionResult ConvertUTF8toUTF32Partial(
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+/**
+ * Convert a partial UTF8 sequence to UTF32. If the sequence ends in an
+ * incomplete code unit sequence, returns \c sourceIllegal.
+ */
+ConversionResult ConvertUTF8toUTF32(
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF8 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF8 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF32 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF16 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+
+Boolean isLegalUTF8String(const UTF8 **source, const UTF8 *sourceEnd);
+
+unsigned getNumBytesForUTF8(UTF8 firstByte);
+
+#ifdef __cplusplus
+}
+
+/*************************************************************************/
+/* Below are LLVM-specific wrappers of the functions above. */
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+/**
+ * Convert a UTF8 StringRef to UTF8, UTF16, or UTF32 depending on
+ * WideCharWidth. The converted data is written to ResultPtr, which needs to
+ * point to at least WideCharWidth * (Source.size() + 1) bytes. On success,
+ * ResultPtr will point one after the end of the copied string. On failure,
+ * ResultPtr will not be changed, and ErrorPtr will be set to the location of
+ * the first character which could not be converted.
+ * \return true on success.
+ */
+bool ConvertUTF8toWide(unsigned WideCharWidth, llvm::StringRef Source,
+ char *&ResultPtr, const UTF8 *&ErrorPtr);
+
+/**
+ * Convert a Unicode code point to a UTF8 sequence.
+ *
+ * \param Source a Unicode code point.
+ * \param [in,out] ResultPtr pointer to the output buffer, needs to be at least
+ * \c UNI_MAX_UTF8_BYTES_PER_CODE_POINT bytes. On success \c ResultPtr is
+ * updated one past end of the converted sequence.
+ *
+ * \returns true on success.
+ */
+bool ConvertCodePointToUTF8(unsigned Source, char *&ResultPtr);
+
+/**
+ * Convert the first UTF8 sequence in the given source buffer to a UTF32
+ * code point.
+ *
+ * \param [in,out] source A pointer to the source buffer. If the conversion
+ * succeeds, this pointer will be updated to point to the byte just past the
+ * end of the converted sequence.
+ * \param sourceEnd A pointer just past the end of the source buffer.
+ * \param [out] target The converted code
+ * \param flags Whether the conversion is strict or lenient.
+ *
+ * \returns conversionOK on success
+ *
+ * \sa ConvertUTF8toUTF32
+ */
+static inline ConversionResult convertUTF8Sequence(const UTF8 **source,
+ const UTF8 *sourceEnd,
+ UTF32 *target,
+ ConversionFlags flags) {
+ if (*source == sourceEnd)
+ return sourceExhausted;
+ unsigned size = getNumBytesForUTF8(**source);
+ if ((ptrdiff_t)size > sourceEnd - *source)
+ return sourceExhausted;
+ return ConvertUTF8toUTF32(source, *source + size, &target, target + 1, flags);
+}
+
+/**
+ * Returns true if a blob of text starts with a UTF-16 big or little endian byte
+ * order mark.
+ */
+bool hasUTF16ByteOrderMark(ArrayRef<char> SrcBytes);
+
+/**
+ * Converts a stream of raw bytes assumed to be UTF16 into a UTF8 std::string.
+ *
+ * \param [in] SrcBytes A buffer of what is assumed to be UTF-16 encoded text.
+ * \param [out] Out Converted UTF-8 is stored here on success.
+ * \returns true on success
+ */
+bool convertUTF16ToUTF8String(ArrayRef<char> SrcBytes, std::string &Out);
+
+/**
+ * Converts a UTF-8 string into a UTF-16 string with native endianness.
+ *
+ * \returns true on success
+ */
+bool convertUTF8ToUTF16String(StringRef SrcUTF8,
+ SmallVectorImpl<UTF16> &DstUTF16);
+
+} /* end namespace llvm */
+
+#endif
+
+/* --------------------------------------------------------------------- */
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h b/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h
new file mode 100644
index 0000000..1a1c743
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/CrashRecoveryContext.h
@@ -0,0 +1,203 @@
+//===--- CrashRecoveryContext.h - Crash Recovery ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+#define LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
+
+#include "llvm/ADT/STLExtras.h"
+#include <string>
+
+namespace llvm {
+class CrashRecoveryContextCleanup;
+
+/// \brief Crash recovery helper object.
+///
+/// This class implements support for running operations in a safe context so
+/// that crashes (memory errors, stack overflow, assertion violations) can be
+/// detected and control restored to the crashing thread. Crash detection is
+/// purely "best effort", the exact set of failures which can be recovered from
+/// is platform dependent.
+///
+/// Clients make use of this code by first calling
+/// CrashRecoveryContext::Enable(), and then executing unsafe operations via a
+/// CrashRecoveryContext object. For example:
+///
+/// void actual_work(void *);
+///
+/// void foo() {
+/// CrashRecoveryContext CRC;
+///
+/// if (!CRC.RunSafely(actual_work, 0)) {
+/// ... a crash was detected, report error to user ...
+/// }
+///
+/// ... no crash was detected ...
+/// }
+class CrashRecoveryContext {
+ void *Impl;
+ CrashRecoveryContextCleanup *head;
+
+public:
+ CrashRecoveryContext() : Impl(nullptr), head(nullptr) {}
+ ~CrashRecoveryContext();
+
+ void registerCleanup(CrashRecoveryContextCleanup *cleanup);
+ void unregisterCleanup(CrashRecoveryContextCleanup *cleanup);
+
+ /// \brief Enable crash recovery.
+ static void Enable();
+
+ /// \brief Disable crash recovery.
+ static void Disable();
+
+ /// \brief Return the active context, if the code is currently executing in a
+ /// thread which is in a protected context.
+ static CrashRecoveryContext *GetCurrent();
+
+ /// \brief Return true if the current thread is recovering from a
+ /// crash.
+ static bool isRecoveringFromCrash();
+
+ /// \brief Execute the provided callback function (with the given arguments) in
+ /// a protected context.
+ ///
+ /// \return True if the function completed successfully, and false if the
+ /// function crashed (or HandleCrash was called explicitly). Clients should
+ /// make as few assumptions as possible about the program state when
+ /// RunSafely has returned false. Clients can use getBacktrace() to retrieve
+ /// the backtrace of the crash on failures.
+ bool RunSafely(function_ref<void()> Fn);
+ bool RunSafely(void (*Fn)(void*), void *UserData) {
+ return RunSafely([&]() { Fn(UserData); });
+ }
+
+ /// \brief Execute the provided callback function (with the given arguments) in
+ /// a protected context which is run in another thread (optionally with a
+ /// requested stack size).
+ ///
+ /// See RunSafely() and llvm_execute_on_thread().
+ ///
+ /// On Darwin, if PRIO_DARWIN_BG is set on the calling thread, it will be
+ /// propagated to the new thread as well.
+ bool RunSafelyOnThread(function_ref<void()>, unsigned RequestedStackSize = 0);
+ bool RunSafelyOnThread(void (*Fn)(void*), void *UserData,
+ unsigned RequestedStackSize = 0) {
+ return RunSafelyOnThread([&]() { Fn(UserData); }, RequestedStackSize);
+ }
+
+ /// \brief Explicitly trigger a crash recovery in the current process, and
+ /// return failure from RunSafely(). This function does not return.
+ void HandleCrash();
+
+ /// \brief Return a string containing the backtrace where the crash was
+ /// detected; or empty if the backtrace wasn't recovered.
+ ///
+ /// This function is only valid when a crash has been detected (i.e.,
+ /// RunSafely() has returned false).
+ const std::string &getBacktrace() const;
+};
+
+class CrashRecoveryContextCleanup {
+protected:
+ CrashRecoveryContext *context;
+ CrashRecoveryContextCleanup(CrashRecoveryContext *context)
+ : context(context), cleanupFired(false) {}
+
+public:
+ bool cleanupFired;
+
+ virtual ~CrashRecoveryContextCleanup();
+ virtual void recoverResources() = 0;
+
+ CrashRecoveryContext *getContext() const {
+ return context;
+ }
+
+private:
+ friend class CrashRecoveryContext;
+ CrashRecoveryContextCleanup *prev, *next;
+};
+
+template<typename DERIVED, typename T>
+class CrashRecoveryContextCleanupBase : public CrashRecoveryContextCleanup {
+protected:
+ T *resource;
+ CrashRecoveryContextCleanupBase(CrashRecoveryContext *context, T *resource)
+ : CrashRecoveryContextCleanup(context), resource(resource) {}
+
+public:
+ static DERIVED *create(T *x) {
+ if (x) {
+ if (CrashRecoveryContext *context = CrashRecoveryContext::GetCurrent())
+ return new DERIVED(context, x);
+ }
+ return nullptr;
+ }
+};
+
+template <typename T>
+class CrashRecoveryContextDestructorCleanup : public
+ CrashRecoveryContextCleanupBase<CrashRecoveryContextDestructorCleanup<T>, T> {
+public:
+ CrashRecoveryContextDestructorCleanup(CrashRecoveryContext *context,
+ T *resource)
+ : CrashRecoveryContextCleanupBase<
+ CrashRecoveryContextDestructorCleanup<T>, T>(context, resource) {}
+
+ virtual void recoverResources() {
+ this->resource->~T();
+ }
+};
+
+template <typename T>
+class CrashRecoveryContextDeleteCleanup : public
+ CrashRecoveryContextCleanupBase<CrashRecoveryContextDeleteCleanup<T>, T> {
+public:
+ CrashRecoveryContextDeleteCleanup(CrashRecoveryContext *context, T *resource)
+ : CrashRecoveryContextCleanupBase<
+ CrashRecoveryContextDeleteCleanup<T>, T>(context, resource) {}
+
+ void recoverResources() override { delete this->resource; }
+};
+
+template <typename T>
+class CrashRecoveryContextReleaseRefCleanup : public
+ CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>, T>
+{
+public:
+ CrashRecoveryContextReleaseRefCleanup(CrashRecoveryContext *context,
+ T *resource)
+ : CrashRecoveryContextCleanupBase<CrashRecoveryContextReleaseRefCleanup<T>,
+ T>(context, resource) {}
+
+ void recoverResources() override { this->resource->Release(); }
+};
+
+template <typename T, typename Cleanup = CrashRecoveryContextDeleteCleanup<T> >
+class CrashRecoveryContextCleanupRegistrar {
+ CrashRecoveryContextCleanup *cleanup;
+
+public:
+ CrashRecoveryContextCleanupRegistrar(T *x)
+ : cleanup(Cleanup::create(x)) {
+ if (cleanup)
+ cleanup->getContext()->registerCleanup(cleanup);
+ }
+
+ ~CrashRecoveryContextCleanupRegistrar() { unregister(); }
+
+ void unregister() {
+ if (cleanup && !cleanup->cleanupFired)
+ cleanup->getContext()->unregisterCleanup(cleanup);
+ cleanup = nullptr;
+ }
+};
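+
+// A usage sketch (illustrative; MyType is a hypothetical client type): ensure
+// a heap object is destroyed even if the protected code crashes.
+//
+//   MyType *Obj = new MyType();
+//   CrashRecoveryContextCleanupRegistrar<MyType> Cleanup(Obj);
+//   // ... run work that might crash under RunSafely() ...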
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_CRASHRECOVERYCONTEXT_H
diff --git a/contrib/llvm/include/llvm/Support/DOTGraphTraits.h b/contrib/llvm/include/llvm/Support/DOTGraphTraits.h
new file mode 100644
index 0000000..4381b5b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DOTGraphTraits.h
@@ -0,0 +1,167 @@
+//===-- llvm/Support/DotGraphTraits.h - Customize .dot output ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a template class that can be used to customize dot output
+// graphs generated by the GraphWriter.h file. The default implementation
+// produces a simple, but not very polished, graph. By specializing this
+// template, lots of customization opportunities are possible.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DOTGRAPHTRAITS_H
+#define LLVM_SUPPORT_DOTGRAPHTRAITS_H
+
+#include <string>
+
+namespace llvm {
+
+/// DefaultDOTGraphTraits - This class provides the default implementations of
+/// all of the DOTGraphTraits methods. If a specialization does not need to
+/// override all methods here it should inherit so that it can get the default
+/// implementations.
+///
+struct DefaultDOTGraphTraits {
+private:
+ bool IsSimple;
+
+protected:
+ bool isSimple() {
+ return IsSimple;
+ }
+
+public:
+ explicit DefaultDOTGraphTraits(bool simple=false) : IsSimple (simple) {}
+
+ /// getGraphName - Return the label for the graph as a whole. Printed at the
+ /// top of the graph.
+ ///
+ template<typename GraphType>
+ static std::string getGraphName(const GraphType &) { return ""; }
+
+ /// getGraphProperties - Return any custom properties that should be included
+ /// in the top level graph structure for dot.
+ ///
+ template<typename GraphType>
+ static std::string getGraphProperties(const GraphType &) {
+ return "";
+ }
+
+ /// renderGraphFromBottomUp - If this function returns true, the graph is
+ /// emitted bottom-up instead of top-down. Note that this requires
+ /// graphviz 2.0 or later to work.
+ static bool renderGraphFromBottomUp() {
+ return false;
+ }
+
+ /// isNodeHidden - If the function returns true, the given node is not
+ /// displayed in the graph.
+ static bool isNodeHidden(const void *) {
+ return false;
+ }
+
+ /// getNodeLabel - Given a node and a pointer to the top level graph, return
+ /// the label to print in the node.
+ template<typename GraphType>
+ std::string getNodeLabel(const void *, const GraphType &) {
+ return "";
+ }
+
+ // getNodeIdentifierLabel - Returns a string representing the
+ // address or other unique identifier of the node. (Only used if
+ // non-empty.)
+ template <typename GraphType>
+ static std::string getNodeIdentifierLabel(const void *, const GraphType &) {
+ return "";
+ }
+
+ template<typename GraphType>
+ static std::string getNodeDescription(const void *, const GraphType &) {
+ return "";
+ }
+
+ /// If you want to specify custom node attributes, this is the place to do so
+ ///
+ template<typename GraphType>
+ static std::string getNodeAttributes(const void *,
+ const GraphType &) {
+ return "";
+ }
+
+ /// If you want to override the dot attributes printed for a particular edge,
+ /// override this method.
+ template<typename EdgeIter, typename GraphType>
+ static std::string getEdgeAttributes(const void *, EdgeIter,
+ const GraphType &) {
+ return "";
+ }
+
+ /// getEdgeSourceLabel - If you want to label the edge source itself,
+ /// implement this method.
+ template<typename EdgeIter>
+ static std::string getEdgeSourceLabel(const void *, EdgeIter) {
+ return "";
+ }
+
+ /// edgeTargetsEdgeSource - This method returns true if this outgoing edge
+ /// should actually target another edge source, not a node. If this method is
+ /// implemented, getEdgeTarget should be implemented.
+ template<typename EdgeIter>
+ static bool edgeTargetsEdgeSource(const void *, EdgeIter) {
+ return false;
+ }
+
+ /// getEdgeTarget - If edgeTargetsEdgeSource returns true, this method is
+ /// called to determine which outgoing edge of Node is the target of this
+ /// edge.
+ template<typename EdgeIter>
+ static EdgeIter getEdgeTarget(const void *, EdgeIter I) {
+ return I;
+ }
+
+ /// hasEdgeDestLabels - If this function returns true, the graph is able
+ /// to provide labels for edge destinations.
+ static bool hasEdgeDestLabels() {
+ return false;
+ }
+
+ /// numEdgeDestLabels - If hasEdgeDestLabels, this function returns the
+ /// number of incoming edge labels the given node has.
+ static unsigned numEdgeDestLabels(const void *) {
+ return 0;
+ }
+
+ /// getEdgeDestLabel - If hasEdgeDestLabels, this function returns the
+ /// incoming edge label with the given index in the given node.
+ static std::string getEdgeDestLabel(const void *, unsigned) {
+ return "";
+ }
+
+ /// addCustomGraphFeatures - If a graph is made up of more than just
+ /// straightforward nodes and edges, this is the place to put all of the
+ /// custom stuff necessary. The GraphWriter object, instantiated with your
+ /// GraphType is passed in as an argument. You may call arbitrary methods on
+ /// it to add things to the output graph.
+ ///
+ template<typename GraphType, typename GraphWriter>
+ static void addCustomGraphFeatures(const GraphType &, GraphWriter &) {}
+};
+
+
+/// DOTGraphTraits - Template class that can be specialized to customize how
+/// graphs are converted to 'dot' graphs. When specializing, you may inherit
+/// from DefaultDOTGraphTraits if you don't need to override everything.
+///
+template <typename Ty>
+struct DOTGraphTraits : public DefaultDOTGraphTraits {
+ DOTGraphTraits (bool simple=false) : DefaultDOTGraphTraits (simple) {}
+};
+
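+// A sketch of a specialization (the graph type 'MyGraph' is hypothetical):
+//
+//   template <>
+//   struct DOTGraphTraits<MyGraph *> : public DefaultDOTGraphTraits {
+//     DOTGraphTraits(bool simple = false) : DefaultDOTGraphTraits(simple) {}
+//     static std::string getGraphName(MyGraph *) { return "My graph"; }
+//   };
+//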
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/DataExtractor.h b/contrib/llvm/include/llvm/Support/DataExtractor.h
new file mode 100644
index 0000000..3ffa9bc
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DataExtractor.h
@@ -0,0 +1,365 @@
+//===-- DataExtractor.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DATAEXTRACTOR_H
+#define LLVM_SUPPORT_DATAEXTRACTOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class DataExtractor {
+ StringRef Data;
+ uint8_t IsLittleEndian;
+ uint8_t AddressSize;
+public:
+ /// Construct with a buffer that is owned by the caller.
+ ///
+ /// This constructor allows us to use data that is owned by the
+ /// caller. The data must stay around as long as this object is
+ /// valid.
+ DataExtractor(StringRef Data, bool IsLittleEndian, uint8_t AddressSize)
+ : Data(Data), IsLittleEndian(IsLittleEndian), AddressSize(AddressSize) {}
+
+ /// \brief Get the data pointed to by this extractor.
+ StringRef getData() const { return Data; }
+ /// \brief Get the endianness for this extractor.
+ bool isLittleEndian() const { return IsLittleEndian; }
+ /// \brief Get the address size for this extractor.
+ uint8_t getAddressSize() const { return AddressSize; }
+ /// \brief Set the address size for this extractor.
+ void setAddressSize(uint8_t Size) { AddressSize = Size; }
+
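+ // A usage sketch (illustrative; Bytes is a StringRef over the input):
+ //
+ //   DataExtractor DE(Bytes, /*IsLittleEndian=*/true, /*AddressSize=*/8);
+ //   uint32_t Off = 0;
+ //   uint16_t Tag = DE.getU16(&Off);
+ //   uint64_t Addr = DE.getAddress(&Off);
+ //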
+ /// Extract a C string from \a *offset_ptr.
+ ///
+ /// Returns a pointer to a C String from the data at the offset
+ /// pointed to by \a offset_ptr. A variable-length, NULL-terminated C
+ /// string will be extracted and the \a offset_ptr will be
+ /// updated with the offset of the byte that follows the NULL
+ /// terminator byte.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// A pointer to the C string value in the data. If the offset
+ /// pointed to by \a offset_ptr is out of bounds, or if the
+ /// offset plus the length of the C string is out of bounds,
+ /// NULL will be returned.
+ const char *getCStr(uint32_t *offset_ptr) const;
+
+ /// Extract an unsigned integer of size \a byte_size from \a
+ /// *offset_ptr.
+ ///
+ /// Extract a single unsigned integer value and update the offset
+ /// pointed to by \a offset_ptr. The size of the extracted integer
+ /// is specified by the \a byte_size argument. \a byte_size should
+ /// have a value greater than or equal to one and less than or equal
+ /// to eight since the return value is 64 bits wide. Any
+ /// \a byte_size values less than 1 or greater than 8 will result in
+ /// nothing being extracted, and zero being returned.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[in] byte_size
+ /// The size in bytes of the integer to extract.
+ ///
+ /// @return
+ /// The unsigned integer value that was extracted, or zero on
+ /// failure.
+ uint64_t getUnsigned(uint32_t *offset_ptr, uint32_t byte_size) const;
+
+ /// Extract a signed integer of size \a byte_size from \a *offset_ptr.
+ ///
+ /// Extract a single signed integer value (sign extending if required)
+ /// and update the offset pointed to by \a offset_ptr. The size of
+ /// the extracted integer is specified by the \a byte_size argument.
+ /// \a byte_size should have a value greater than or equal to one
+ /// and less than or equal to eight since the return value is 64
+ /// bits wide. Any \a byte_size values less than 1 or greater than
+ /// 8 will result in nothing being extracted, and zero being returned.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[in] size
+ /// The size in bytes of the integer to extract.
+ ///
+ /// @return
+ /// The sign extended signed integer value that was extracted,
+ /// or zero on failure.
+ int64_t getSigned(uint32_t *offset_ptr, uint32_t size) const;
+
+ //------------------------------------------------------------------
+ /// Extract a pointer from \a *offset_ptr.
+ ///
+ /// Extract a single pointer from the data and update the offset
+ /// pointed to by \a offset_ptr. The size of the extracted pointer
+ /// is \a getAddressSize(), so the address size has to be
+ /// set correctly prior to extracting any pointer values.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+  ///     The extracted pointer value as a 64-bit integer.
+ uint64_t getAddress(uint32_t *offset_ptr) const {
+ return getUnsigned(offset_ptr, AddressSize);
+ }
+
+ /// Extract a uint8_t value from \a *offset_ptr.
+ ///
+ /// Extract a single uint8_t from the binary data at the offset
+ /// pointed to by \a offset_ptr, and advance the offset on success.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted uint8_t value.
+ uint8_t getU8(uint32_t *offset_ptr) const;
+
+ /// Extract \a count uint8_t values from \a *offset_ptr.
+ ///
+ /// Extract \a count uint8_t values from the binary data at the
+ /// offset pointed to by \a offset_ptr, and advance the offset on
+ /// success. The extracted values are copied into \a dst.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[out] dst
+ /// A buffer to copy \a count uint8_t values into. \a dst must
+ /// be large enough to hold all requested data.
+ ///
+ /// @param[in] count
+ /// The number of uint8_t values to extract.
+ ///
+ /// @return
+ /// \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+ uint8_t *getU8(uint32_t *offset_ptr, uint8_t *dst, uint32_t count) const;
+
+ /// Extract a uint16_t value from \a *offset_ptr.
+ ///
+ /// Extract a single uint16_t from the binary data at the offset
+ /// pointed to by \a offset_ptr, and update the offset on success.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted uint16_t value.
+ uint16_t getU16(uint32_t *offset_ptr) const;
+
+ /// Extract \a count uint16_t values from \a *offset_ptr.
+ ///
+ /// Extract \a count uint16_t values from the binary data at the
+ /// offset pointed to by \a offset_ptr, and advance the offset on
+ /// success. The extracted values are copied into \a dst.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[out] dst
+ /// A buffer to copy \a count uint16_t values into. \a dst must
+ /// be large enough to hold all requested data.
+ ///
+ /// @param[in] count
+ /// The number of uint16_t values to extract.
+ ///
+ /// @return
+ /// \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+ uint16_t *getU16(uint32_t *offset_ptr, uint16_t *dst, uint32_t count) const;
+
+ /// Extract a uint32_t value from \a *offset_ptr.
+ ///
+ /// Extract a single uint32_t from the binary data at the offset
+ /// pointed to by \a offset_ptr, and update the offset on success.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted uint32_t value.
+ uint32_t getU32(uint32_t *offset_ptr) const;
+
+ /// Extract \a count uint32_t values from \a *offset_ptr.
+ ///
+ /// Extract \a count uint32_t values from the binary data at the
+ /// offset pointed to by \a offset_ptr, and advance the offset on
+ /// success. The extracted values are copied into \a dst.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[out] dst
+ /// A buffer to copy \a count uint32_t values into. \a dst must
+ /// be large enough to hold all requested data.
+ ///
+ /// @param[in] count
+ /// The number of uint32_t values to extract.
+ ///
+ /// @return
+ /// \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+ uint32_t *getU32(uint32_t *offset_ptr, uint32_t *dst, uint32_t count) const;
+
+ /// Extract a uint64_t value from \a *offset_ptr.
+ ///
+ /// Extract a single uint64_t from the binary data at the offset
+ /// pointed to by \a offset_ptr, and update the offset on success.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted uint64_t value.
+ uint64_t getU64(uint32_t *offset_ptr) const;
+
+ /// Extract \a count uint64_t values from \a *offset_ptr.
+ ///
+ /// Extract \a count uint64_t values from the binary data at the
+ /// offset pointed to by \a offset_ptr, and advance the offset on
+ /// success. The extracted values are copied into \a dst.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @param[out] dst
+ /// A buffer to copy \a count uint64_t values into. \a dst must
+ /// be large enough to hold all requested data.
+ ///
+ /// @param[in] count
+ /// The number of uint64_t values to extract.
+ ///
+ /// @return
+ /// \a dst if all values were properly extracted and copied,
+  ///     NULL otherwise.
+ uint64_t *getU64(uint32_t *offset_ptr, uint64_t *dst, uint32_t count) const;
+
+ /// Extract a signed LEB128 value from \a *offset_ptr.
+ ///
+  /// Extracts a signed LEB128 number from this object's data
+ /// starting at the offset pointed to by \a offset_ptr. The offset
+ /// pointed to by \a offset_ptr will be updated with the offset of
+ /// the byte following the last extracted byte.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted signed integer value.
+ int64_t getSLEB128(uint32_t *offset_ptr) const;
+
+  /// Extract an unsigned LEB128 value from \a *offset_ptr.
+ ///
+ /// Extracts an unsigned LEB128 number from this object's data
+ /// starting at the offset pointed to by \a offset_ptr. The offset
+ /// pointed to by \a offset_ptr will be updated with the offset of
+ /// the byte following the last extracted byte.
+ ///
+ /// @param[in,out] offset_ptr
+ /// A pointer to an offset within the data that will be advanced
+ /// by the appropriate number of bytes if the value is extracted
+ /// correctly. If the offset is out of bounds or there are not
+ /// enough bytes to extract this value, the offset will be left
+ /// unmodified.
+ ///
+ /// @return
+ /// The extracted unsigned integer value.
+ uint64_t getULEB128(uint32_t *offset_ptr) const;
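+
+  // A sketch of LEB128 extraction (DE and Offset are hypothetical names):
+  //
+  //   uint32_t Offset = 0;
+  //   uint64_t Len = DE.getULEB128(&Offset); // Bytes 0x90 0x01 decode to 144.
+  //   int64_t Disp = DE.getSLEB128(&Offset); // Byte 0x7f decodes to -1.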
+
+ /// Test the validity of \a offset.
+ ///
+ /// @return
+ /// \b true if \a offset is a valid offset into the data in this
+ /// object, \b false otherwise.
+ bool isValidOffset(uint32_t offset) const { return Data.size() > offset; }
+
+ /// Test the availability of \a length bytes of data from \a offset.
+ ///
+ /// @return
+ /// \b true if \a offset is a valid offset and there are \a
+ /// length bytes available at that offset, \b false otherwise.
+ bool isValidOffsetForDataOfSize(uint32_t offset, uint32_t length) const {
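+    // The first comparison guards against unsigned wraparound in
+    // offset + length; the second checks that the last byte is in range.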
+ return offset + length >= offset && isValidOffset(offset + length - 1);
+ }
+
+ /// Test the availability of enough bytes of data for a pointer from
+ /// \a offset. The size of a pointer is \a getAddressSize().
+ ///
+ /// @return
+ /// \b true if \a offset is a valid offset and there are enough
+ /// bytes for a pointer available at that offset, \b false
+ /// otherwise.
+ bool isValidOffsetForAddress(uint32_t offset) const {
+ return isValidOffsetForDataOfSize(offset, AddressSize);
+ }
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/DataStream.h b/contrib/llvm/include/llvm/Support/DataStream.h
new file mode 100644
index 0000000..a544316
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DataStream.h
@@ -0,0 +1,38 @@
+//===---- llvm/Support/DataStream.h - Lazy bitcode streaming ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines DataStreamer, which fetches bytes of data from
+// a stream source. It provides support for streaming (lazy reading) of
+// data, e.g. bitcode.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DATASTREAM_H
+#define LLVM_SUPPORT_DATASTREAM_H
+
+#include <memory>
+#include <string>
+
+namespace llvm {
+
+class DataStreamer {
+public:
+  /// Fetch up to len bytes from the stream and write them to the buffer
+  /// pointed to by buf. Returns the number of bytes actually written,
+  /// which may be fewer than len once the stream is exhausted.
+ virtual size_t GetBytes(unsigned char *buf, size_t len) = 0;
+
+ virtual ~DataStreamer();
+};
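+
+// A minimal implementation sketch (illustrative only; BufferStreamer is a
+// hypothetical name, and a real version would also include <algorithm> and
+// <cstring>):
+//
+//   class BufferStreamer : public DataStreamer {
+//     const unsigned char *Ptr, *End;
+//   public:
+//     BufferStreamer(const unsigned char *P, size_t N) : Ptr(P), End(P + N) {}
+//     size_t GetBytes(unsigned char *buf, size_t len) override {
+//       size_t N = std::min<size_t>(len, End - Ptr);
+//       std::memcpy(buf, Ptr, N); // Copy what is available, possibly < len.
+//       Ptr += N;
+//       return N;
+//     }
+//   };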
+
+std::unique_ptr<DataStreamer> getDataFileStreamer(const std::string &Filename,
+ std::string *Err);
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_DATASTREAM_H
diff --git a/contrib/llvm/include/llvm/Support/DataTypes.h.in b/contrib/llvm/include/llvm/Support/DataTypes.h.in
new file mode 100644
index 0000000..b8b2ba5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DataTypes.h.in
@@ -0,0 +1,123 @@
+/*===-- include/Support/DataTypes.h - Define fixed size types -----*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file contains definitions to figure out the size of _HOST_ data types.*|
+|* This file is important because different host OS's define different macros,*|
+|* which makes portability tough. This file exports the following *|
+|* definitions: *|
+|* *|
+|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
+|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
+|* *|
+|* No library is required when using these definitions. *|
+|* *|
+|*===----------------------------------------------------------------------===*/
+
+/* Please leave this file C-compatible. */
+
+/* Please keep this file in sync with DataTypes.h.cmake */
+
+#ifndef SUPPORT_DATATYPES_H
+#define SUPPORT_DATATYPES_H
+
+#undef HAVE_INTTYPES_H
+#undef HAVE_STDINT_H
+#undef HAVE_UINT64_T
+#undef HAVE_U_INT64_T
+
+#ifdef __cplusplus
+#include <cmath>
+#else
+#include <math.h>
+#endif
+
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#else
+#error "Compiler must provide an implementation of stdint.h"
+#endif
+
+#ifndef _MSC_VER
+
+/* Note that this header's correct operation depends on __STDC_LIMIT_MACROS
+ being defined. We would define it here, but in order to prevent Bad Things
+ happening when system headers or C++ STL headers include stdint.h before we
+ define it here, we define it on the g++ command line (in Makefile.rules). */
+#if !defined(__STDC_LIMIT_MACROS)
+# error "Must #define __STDC_LIMIT_MACROS before #including Support/DataTypes.h"
+#endif
+
+#if !defined(__STDC_CONSTANT_MACROS)
+# error "Must #define __STDC_CONSTANT_MACROS before " \
+ "#including Support/DataTypes.h"
+#endif
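+
+/* A sketch of what a translation unit including this header directly must
+   arrange (normally the build system supplies these definitions):
+
+     #define __STDC_LIMIT_MACROS
+     #define __STDC_CONSTANT_MACROS
+     #include "llvm/Support/DataTypes.h"
+*/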
+
+/* Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. */
+#include <sys/types.h>
+
+#ifdef _AIX
+#include "llvm/Support/AIXDataTypesFix.h"
+#endif
+
+/* Handle incorrect definition of uint64_t as u_int64_t */
+#ifndef HAVE_UINT64_T
+#ifdef HAVE_U_INT64_T
+typedef u_int64_t uint64_t;
+#else
+# error "Don't have a definition for uint64_t on this platform"
+#endif
+#endif
+
+#else /* _MSC_VER */
+#include <stdlib.h>
+#include <stddef.h>
+#include <sys/types.h>
+#ifdef __cplusplus
+#include <cmath>
+#else
+#include <math.h>
+#endif
+
+#if defined(_WIN64)
+typedef signed __int64 ssize_t;
+#else
+typedef signed int ssize_t;
+#endif /* _WIN64 */
+
+#ifndef HAVE_INTTYPES_H
+#define PRId64 "I64d"
+#define PRIi64 "I64i"
+#define PRIo64 "I64o"
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#define PRIX64 "I64X"
+#endif /* HAVE_INTTYPES_H */
+
+#endif /* _MSC_VER */
+
+/* Set defaults for constants which we cannot find. */
+#if !defined(INT64_MAX)
+# define INT64_MAX 9223372036854775807LL
+#endif
+#if !defined(INT64_MIN)
+# define INT64_MIN ((-INT64_MAX)-1)
+#endif
+#if !defined(UINT64_MAX)
+# define UINT64_MAX 0xffffffffffffffffULL
+#endif
+
+#ifndef HUGE_VALF
+#define HUGE_VALF (float)HUGE_VAL
+#endif
+
+#endif /* SUPPORT_DATATYPES_H */
diff --git a/contrib/llvm/include/llvm/Support/Debug.h b/contrib/llvm/include/llvm/Support/Debug.h
new file mode 100644
index 0000000..6e21347
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Debug.h
@@ -0,0 +1,96 @@
+//===- llvm/Support/Debug.h - Easy way to add debug output ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a handy way of adding debugging information to your
+// code, without it being enabled all of the time, and without having to add
+// command line options to enable it.
+//
+// In particular, just wrap your code with the DEBUG() macro, and it will be
+// enabled automatically if you specify '-debug' on the command-line.
+// DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" to
+// specify that your debug code belongs to class "foo". Be careful to do this
+// only after including Debug.h and not around any #include of headers.
+// Headers should define and undef the macro around the code that needs to
+// use the DEBUG() macro. Then, on the command line, you can specify
+// '-debug-only=foo' to enable JUST the debug information for the foo class.
+//
+// When compiling without assertions, the -debug-* options and all code in
+// DEBUG() statements disappear, so they do not affect the runtime of the code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DEBUG_H
+#define LLVM_SUPPORT_DEBUG_H
+
+namespace llvm {
+class raw_ostream;
+
+#ifndef NDEBUG
+/// DebugFlag - This boolean is set to true if the '-debug' command line option
+/// is specified. This should probably not be referenced directly, instead, use
+/// the DEBUG macro below.
+///
+extern bool DebugFlag;
+
+/// isCurrentDebugType - Return true if the specified string is the debug type
+/// specified on the command line, or if none was specified on the command line
+/// with the -debug-only=X option.
+///
+bool isCurrentDebugType(const char *Type);
+
+/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
+/// option were specified. Note that DebugFlag also needs to be set to true for
+/// debug output to be produced.
+///
+void setCurrentDebugType(const char *Type);
+
+/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
+/// information. If the '-debug' option is specified on the command line, and
+/// if this is a debug build, then the code specified as the option to the macro
+/// will be executed. Otherwise it will not be. Example:
+///
+/// DEBUG_WITH_TYPE("bitset", dbgs() << "Bitset contains: " << Bitset << "\n");
+///
+/// This will emit the debug information if -debug is present, and -debug-only
+/// is not specified, or is specified as "bitset".
+#define DEBUG_WITH_TYPE(TYPE, X) \
+ do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; } \
+ } while (0)
+
+#else
+#define isCurrentDebugType(X) (false)
+#define setCurrentDebugType(X)
+#define DEBUG_WITH_TYPE(TYPE, X) do { } while (0)
+#endif
+
+/// EnableDebugBuffering - This defaults to false. If true, the debug
+/// stream will install signal handlers to dump any buffered debug
+/// output. It allows clients to selectively allow the debug stream
+/// to install signal handlers if they are certain there will be no
+/// conflict.
+///
+extern bool EnableDebugBuffering;
+
+/// dbgs() - This returns a reference to a raw_ostream for debugging
+/// messages. If debugging is disabled it returns errs(). Use it
+/// like: dbgs() << "foo" << "bar";
+raw_ostream &dbgs();
+
+// DEBUG macro - This macro should be used by passes to emit debug information.
+// If the '-debug' option is specified on the command line, and if this is a
+// debug build, then the code specified as the option to the macro will be
+// executed. Otherwise it will not be. Example:
+//
+// DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
+//
+#define DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
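+
+// A usage sketch ("mypass" is a hypothetical debug type):
+//
+//   #define DEBUG_TYPE "mypass"
+//   DEBUG(dbgs() << "processing: " << Name << "\n");
+//   #undef DEBUG_TYPE
+//
+// In an asserts build, running with '-debug' (or '-debug-only=mypass')
+// prints the message; without assertions the statement compiles away.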
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.def b/contrib/llvm/include/llvm/Support/Dwarf.def
new file mode 100644
index 0000000..b15070b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Dwarf.def
@@ -0,0 +1,352 @@
+//===- llvm/Support/Dwarf.def - Dwarf definitions ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Macros for running through Dwarf enumerators.
+//
+//===----------------------------------------------------------------------===//
+
+// TODO: Add other DW-based macros.
+#if !(defined HANDLE_DW_TAG || defined HANDLE_DW_OP || \
+ defined HANDLE_DW_LANG || defined HANDLE_DW_ATE || \
+ defined HANDLE_DW_VIRTUALITY)
+#error "Missing macro definition of HANDLE_DW*"
+#endif
+
+#ifndef HANDLE_DW_TAG
+#define HANDLE_DW_TAG(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_OP
+#define HANDLE_DW_OP(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_LANG
+#define HANDLE_DW_LANG(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_ATE
+#define HANDLE_DW_ATE(ID, NAME)
+#endif
+
+#ifndef HANDLE_DW_VIRTUALITY
+#define HANDLE_DW_VIRTUALITY(ID, NAME)
+#endif
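+
+// A typical client defines one handler and includes this file; the empty
+// defaults above expand the rest, and every handler macro is #undef'd at the
+// end of this file. Sketch (inside a switch over a tag value):
+//
+//   #define HANDLE_DW_TAG(ID, NAME) case ID: return "DW_TAG_" #NAME;
+//   #include "llvm/Support/Dwarf.def"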
+
+HANDLE_DW_TAG(0x0001, array_type)
+HANDLE_DW_TAG(0x0002, class_type)
+HANDLE_DW_TAG(0x0003, entry_point)
+HANDLE_DW_TAG(0x0004, enumeration_type)
+HANDLE_DW_TAG(0x0005, formal_parameter)
+HANDLE_DW_TAG(0x0008, imported_declaration)
+HANDLE_DW_TAG(0x000a, label)
+HANDLE_DW_TAG(0x000b, lexical_block)
+HANDLE_DW_TAG(0x000d, member)
+HANDLE_DW_TAG(0x000f, pointer_type)
+HANDLE_DW_TAG(0x0010, reference_type)
+HANDLE_DW_TAG(0x0011, compile_unit)
+HANDLE_DW_TAG(0x0012, string_type)
+HANDLE_DW_TAG(0x0013, structure_type)
+HANDLE_DW_TAG(0x0015, subroutine_type)
+HANDLE_DW_TAG(0x0016, typedef)
+HANDLE_DW_TAG(0x0017, union_type)
+HANDLE_DW_TAG(0x0018, unspecified_parameters)
+HANDLE_DW_TAG(0x0019, variant)
+HANDLE_DW_TAG(0x001a, common_block)
+HANDLE_DW_TAG(0x001b, common_inclusion)
+HANDLE_DW_TAG(0x001c, inheritance)
+HANDLE_DW_TAG(0x001d, inlined_subroutine)
+HANDLE_DW_TAG(0x001e, module)
+HANDLE_DW_TAG(0x001f, ptr_to_member_type)
+HANDLE_DW_TAG(0x0020, set_type)
+HANDLE_DW_TAG(0x0021, subrange_type)
+HANDLE_DW_TAG(0x0022, with_stmt)
+HANDLE_DW_TAG(0x0023, access_declaration)
+HANDLE_DW_TAG(0x0024, base_type)
+HANDLE_DW_TAG(0x0025, catch_block)
+HANDLE_DW_TAG(0x0026, const_type)
+HANDLE_DW_TAG(0x0027, constant)
+HANDLE_DW_TAG(0x0028, enumerator)
+HANDLE_DW_TAG(0x0029, file_type)
+HANDLE_DW_TAG(0x002a, friend)
+HANDLE_DW_TAG(0x002b, namelist)
+HANDLE_DW_TAG(0x002c, namelist_item)
+HANDLE_DW_TAG(0x002d, packed_type)
+HANDLE_DW_TAG(0x002e, subprogram)
+HANDLE_DW_TAG(0x002f, template_type_parameter)
+HANDLE_DW_TAG(0x0030, template_value_parameter)
+HANDLE_DW_TAG(0x0031, thrown_type)
+HANDLE_DW_TAG(0x0032, try_block)
+HANDLE_DW_TAG(0x0033, variant_part)
+HANDLE_DW_TAG(0x0034, variable)
+HANDLE_DW_TAG(0x0035, volatile_type)
+HANDLE_DW_TAG(0x0036, dwarf_procedure)
+HANDLE_DW_TAG(0x0037, restrict_type)
+HANDLE_DW_TAG(0x0038, interface_type)
+HANDLE_DW_TAG(0x0039, namespace)
+HANDLE_DW_TAG(0x003a, imported_module)
+HANDLE_DW_TAG(0x003b, unspecified_type)
+HANDLE_DW_TAG(0x003c, partial_unit)
+HANDLE_DW_TAG(0x003d, imported_unit)
+HANDLE_DW_TAG(0x003f, condition)
+HANDLE_DW_TAG(0x0040, shared_type)
+HANDLE_DW_TAG(0x0041, type_unit)
+HANDLE_DW_TAG(0x0042, rvalue_reference_type)
+HANDLE_DW_TAG(0x0043, template_alias)
+
+// New in DWARF v5.
+HANDLE_DW_TAG(0x0044, coarray_type)
+HANDLE_DW_TAG(0x0045, generic_subrange)
+HANDLE_DW_TAG(0x0046, dynamic_type)
+
+// User-defined tags.
+HANDLE_DW_TAG(0x4081, MIPS_loop)
+HANDLE_DW_TAG(0x4101, format_label)
+HANDLE_DW_TAG(0x4102, function_template)
+HANDLE_DW_TAG(0x4103, class_template)
+HANDLE_DW_TAG(0x4106, GNU_template_template_param)
+HANDLE_DW_TAG(0x4107, GNU_template_parameter_pack)
+HANDLE_DW_TAG(0x4108, GNU_formal_parameter_pack)
+HANDLE_DW_TAG(0x4200, APPLE_property)
+HANDLE_DW_TAG(0xb000, BORLAND_property)
+HANDLE_DW_TAG(0xb001, BORLAND_Delphi_string)
+HANDLE_DW_TAG(0xb002, BORLAND_Delphi_dynamic_array)
+HANDLE_DW_TAG(0xb003, BORLAND_Delphi_set)
+HANDLE_DW_TAG(0xb004, BORLAND_Delphi_variant)
+
+HANDLE_DW_OP(0x03, addr)
+HANDLE_DW_OP(0x06, deref)
+HANDLE_DW_OP(0x08, const1u)
+HANDLE_DW_OP(0x09, const1s)
+HANDLE_DW_OP(0x0a, const2u)
+HANDLE_DW_OP(0x0b, const2s)
+HANDLE_DW_OP(0x0c, const4u)
+HANDLE_DW_OP(0x0d, const4s)
+HANDLE_DW_OP(0x0e, const8u)
+HANDLE_DW_OP(0x0f, const8s)
+HANDLE_DW_OP(0x10, constu)
+HANDLE_DW_OP(0x11, consts)
+HANDLE_DW_OP(0x12, dup)
+HANDLE_DW_OP(0x13, drop)
+HANDLE_DW_OP(0x14, over)
+HANDLE_DW_OP(0x15, pick)
+HANDLE_DW_OP(0x16, swap)
+HANDLE_DW_OP(0x17, rot)
+HANDLE_DW_OP(0x18, xderef)
+HANDLE_DW_OP(0x19, abs)
+HANDLE_DW_OP(0x1a, and)
+HANDLE_DW_OP(0x1b, div)
+HANDLE_DW_OP(0x1c, minus)
+HANDLE_DW_OP(0x1d, mod)
+HANDLE_DW_OP(0x1e, mul)
+HANDLE_DW_OP(0x1f, neg)
+HANDLE_DW_OP(0x20, not)
+HANDLE_DW_OP(0x21, or)
+HANDLE_DW_OP(0x22, plus)
+HANDLE_DW_OP(0x23, plus_uconst)
+HANDLE_DW_OP(0x24, shl)
+HANDLE_DW_OP(0x25, shr)
+HANDLE_DW_OP(0x26, shra)
+HANDLE_DW_OP(0x27, xor)
+HANDLE_DW_OP(0x2f, skip)
+HANDLE_DW_OP(0x28, bra)
+HANDLE_DW_OP(0x29, eq)
+HANDLE_DW_OP(0x2a, ge)
+HANDLE_DW_OP(0x2b, gt)
+HANDLE_DW_OP(0x2c, le)
+HANDLE_DW_OP(0x2d, lt)
+HANDLE_DW_OP(0x2e, ne)
+HANDLE_DW_OP(0x30, lit0)
+HANDLE_DW_OP(0x31, lit1)
+HANDLE_DW_OP(0x32, lit2)
+HANDLE_DW_OP(0x33, lit3)
+HANDLE_DW_OP(0x34, lit4)
+HANDLE_DW_OP(0x35, lit5)
+HANDLE_DW_OP(0x36, lit6)
+HANDLE_DW_OP(0x37, lit7)
+HANDLE_DW_OP(0x38, lit8)
+HANDLE_DW_OP(0x39, lit9)
+HANDLE_DW_OP(0x3a, lit10)
+HANDLE_DW_OP(0x3b, lit11)
+HANDLE_DW_OP(0x3c, lit12)
+HANDLE_DW_OP(0x3d, lit13)
+HANDLE_DW_OP(0x3e, lit14)
+HANDLE_DW_OP(0x3f, lit15)
+HANDLE_DW_OP(0x40, lit16)
+HANDLE_DW_OP(0x41, lit17)
+HANDLE_DW_OP(0x42, lit18)
+HANDLE_DW_OP(0x43, lit19)
+HANDLE_DW_OP(0x44, lit20)
+HANDLE_DW_OP(0x45, lit21)
+HANDLE_DW_OP(0x46, lit22)
+HANDLE_DW_OP(0x47, lit23)
+HANDLE_DW_OP(0x48, lit24)
+HANDLE_DW_OP(0x49, lit25)
+HANDLE_DW_OP(0x4a, lit26)
+HANDLE_DW_OP(0x4b, lit27)
+HANDLE_DW_OP(0x4c, lit28)
+HANDLE_DW_OP(0x4d, lit29)
+HANDLE_DW_OP(0x4e, lit30)
+HANDLE_DW_OP(0x4f, lit31)
+HANDLE_DW_OP(0x50, reg0)
+HANDLE_DW_OP(0x51, reg1)
+HANDLE_DW_OP(0x52, reg2)
+HANDLE_DW_OP(0x53, reg3)
+HANDLE_DW_OP(0x54, reg4)
+HANDLE_DW_OP(0x55, reg5)
+HANDLE_DW_OP(0x56, reg6)
+HANDLE_DW_OP(0x57, reg7)
+HANDLE_DW_OP(0x58, reg8)
+HANDLE_DW_OP(0x59, reg9)
+HANDLE_DW_OP(0x5a, reg10)
+HANDLE_DW_OP(0x5b, reg11)
+HANDLE_DW_OP(0x5c, reg12)
+HANDLE_DW_OP(0x5d, reg13)
+HANDLE_DW_OP(0x5e, reg14)
+HANDLE_DW_OP(0x5f, reg15)
+HANDLE_DW_OP(0x60, reg16)
+HANDLE_DW_OP(0x61, reg17)
+HANDLE_DW_OP(0x62, reg18)
+HANDLE_DW_OP(0x63, reg19)
+HANDLE_DW_OP(0x64, reg20)
+HANDLE_DW_OP(0x65, reg21)
+HANDLE_DW_OP(0x66, reg22)
+HANDLE_DW_OP(0x67, reg23)
+HANDLE_DW_OP(0x68, reg24)
+HANDLE_DW_OP(0x69, reg25)
+HANDLE_DW_OP(0x6a, reg26)
+HANDLE_DW_OP(0x6b, reg27)
+HANDLE_DW_OP(0x6c, reg28)
+HANDLE_DW_OP(0x6d, reg29)
+HANDLE_DW_OP(0x6e, reg30)
+HANDLE_DW_OP(0x6f, reg31)
+HANDLE_DW_OP(0x70, breg0)
+HANDLE_DW_OP(0x71, breg1)
+HANDLE_DW_OP(0x72, breg2)
+HANDLE_DW_OP(0x73, breg3)
+HANDLE_DW_OP(0x74, breg4)
+HANDLE_DW_OP(0x75, breg5)
+HANDLE_DW_OP(0x76, breg6)
+HANDLE_DW_OP(0x77, breg7)
+HANDLE_DW_OP(0x78, breg8)
+HANDLE_DW_OP(0x79, breg9)
+HANDLE_DW_OP(0x7a, breg10)
+HANDLE_DW_OP(0x7b, breg11)
+HANDLE_DW_OP(0x7c, breg12)
+HANDLE_DW_OP(0x7d, breg13)
+HANDLE_DW_OP(0x7e, breg14)
+HANDLE_DW_OP(0x7f, breg15)
+HANDLE_DW_OP(0x80, breg16)
+HANDLE_DW_OP(0x81, breg17)
+HANDLE_DW_OP(0x82, breg18)
+HANDLE_DW_OP(0x83, breg19)
+HANDLE_DW_OP(0x84, breg20)
+HANDLE_DW_OP(0x85, breg21)
+HANDLE_DW_OP(0x86, breg22)
+HANDLE_DW_OP(0x87, breg23)
+HANDLE_DW_OP(0x88, breg24)
+HANDLE_DW_OP(0x89, breg25)
+HANDLE_DW_OP(0x8a, breg26)
+HANDLE_DW_OP(0x8b, breg27)
+HANDLE_DW_OP(0x8c, breg28)
+HANDLE_DW_OP(0x8d, breg29)
+HANDLE_DW_OP(0x8e, breg30)
+HANDLE_DW_OP(0x8f, breg31)
+HANDLE_DW_OP(0x90, regx)
+HANDLE_DW_OP(0x91, fbreg)
+HANDLE_DW_OP(0x92, bregx)
+HANDLE_DW_OP(0x93, piece)
+HANDLE_DW_OP(0x94, deref_size)
+HANDLE_DW_OP(0x95, xderef_size)
+HANDLE_DW_OP(0x96, nop)
+HANDLE_DW_OP(0x97, push_object_address)
+HANDLE_DW_OP(0x98, call2)
+HANDLE_DW_OP(0x99, call4)
+HANDLE_DW_OP(0x9a, call_ref)
+HANDLE_DW_OP(0x9b, form_tls_address)
+HANDLE_DW_OP(0x9c, call_frame_cfa)
+HANDLE_DW_OP(0x9d, bit_piece)
+HANDLE_DW_OP(0x9e, implicit_value)
+HANDLE_DW_OP(0x9f, stack_value)
+
+// Extensions for GNU-style thread-local storage.
+HANDLE_DW_OP(0xe0, GNU_push_tls_address)
+
+// Extensions for Fission proposal.
+HANDLE_DW_OP(0xfb, GNU_addr_index)
+HANDLE_DW_OP(0xfc, GNU_const_index)
+
+// DWARF languages.
+HANDLE_DW_LANG(0x0001, C89)
+HANDLE_DW_LANG(0x0002, C)
+HANDLE_DW_LANG(0x0003, Ada83)
+HANDLE_DW_LANG(0x0004, C_plus_plus)
+HANDLE_DW_LANG(0x0005, Cobol74)
+HANDLE_DW_LANG(0x0006, Cobol85)
+HANDLE_DW_LANG(0x0007, Fortran77)
+HANDLE_DW_LANG(0x0008, Fortran90)
+HANDLE_DW_LANG(0x0009, Pascal83)
+HANDLE_DW_LANG(0x000a, Modula2)
+HANDLE_DW_LANG(0x000b, Java)
+HANDLE_DW_LANG(0x000c, C99)
+HANDLE_DW_LANG(0x000d, Ada95)
+HANDLE_DW_LANG(0x000e, Fortran95)
+HANDLE_DW_LANG(0x000f, PLI)
+HANDLE_DW_LANG(0x0010, ObjC)
+HANDLE_DW_LANG(0x0011, ObjC_plus_plus)
+HANDLE_DW_LANG(0x0012, UPC)
+HANDLE_DW_LANG(0x0013, D)
+
+// New in DWARF 5:
+HANDLE_DW_LANG(0x0014, Python)
+HANDLE_DW_LANG(0x0015, OpenCL)
+HANDLE_DW_LANG(0x0016, Go)
+HANDLE_DW_LANG(0x0017, Modula3)
+HANDLE_DW_LANG(0x0018, Haskell)
+HANDLE_DW_LANG(0x0019, C_plus_plus_03)
+HANDLE_DW_LANG(0x001a, C_plus_plus_11)
+HANDLE_DW_LANG(0x001b, OCaml)
+HANDLE_DW_LANG(0x001c, Rust)
+HANDLE_DW_LANG(0x001d, C11)
+HANDLE_DW_LANG(0x001e, Swift)
+HANDLE_DW_LANG(0x001f, Julia)
+HANDLE_DW_LANG(0x0020, Dylan)
+HANDLE_DW_LANG(0x0021, C_plus_plus_14)
+HANDLE_DW_LANG(0x0022, Fortran03)
+HANDLE_DW_LANG(0x0023, Fortran08)
+HANDLE_DW_LANG(0x8001, Mips_Assembler)
+HANDLE_DW_LANG(0xb000, BORLAND_Delphi)
+
+// DWARF attribute type encodings.
+HANDLE_DW_ATE(0x01, address)
+HANDLE_DW_ATE(0x02, boolean)
+HANDLE_DW_ATE(0x03, complex_float)
+HANDLE_DW_ATE(0x04, float)
+HANDLE_DW_ATE(0x05, signed)
+HANDLE_DW_ATE(0x06, signed_char)
+HANDLE_DW_ATE(0x07, unsigned)
+HANDLE_DW_ATE(0x08, unsigned_char)
+HANDLE_DW_ATE(0x09, imaginary_float)
+HANDLE_DW_ATE(0x0a, packed_decimal)
+HANDLE_DW_ATE(0x0b, numeric_string)
+HANDLE_DW_ATE(0x0c, edited)
+HANDLE_DW_ATE(0x0d, signed_fixed)
+HANDLE_DW_ATE(0x0e, unsigned_fixed)
+HANDLE_DW_ATE(0x0f, decimal_float)
+HANDLE_DW_ATE(0x10, UTF)
+
+// DWARF virtuality codes.
+HANDLE_DW_VIRTUALITY(0x00, none)
+HANDLE_DW_VIRTUALITY(0x01, virtual)
+HANDLE_DW_VIRTUALITY(0x02, pure_virtual)
+
+#undef HANDLE_DW_TAG
+#undef HANDLE_DW_OP
+#undef HANDLE_DW_LANG
+#undef HANDLE_DW_ATE
+#undef HANDLE_DW_VIRTUALITY
diff --git a/contrib/llvm/include/llvm/Support/Dwarf.h b/contrib/llvm/include/llvm/Support/Dwarf.h
new file mode 100644
index 0000000..cea61bd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Dwarf.h
@@ -0,0 +1,700 @@
+//===-- llvm/Support/Dwarf.h ---Dwarf Constants------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// \brief This file contains constants used for implementing Dwarf
+// debug support.
+//
+// For details on the DWARF specification see the latest DWARF Debugging
+// Information Format standard document on http://www.dwarfstd.org. This
+// file often includes support for non-released standard features.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DWARF_H
+#define LLVM_SUPPORT_DWARF_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+namespace dwarf {
+
+//===----------------------------------------------------------------------===//
+// Dwarf constants as gleaned from the DWARF Debugging Information Format V.4
+// reference manual http://www.dwarfstd.org/.
+//
+
+// Do not mix the following two enumeration sets. DW_TAG_invalid changes the
+// enumeration base type.
+
+enum LLVMConstants : uint32_t {
+ // LLVM mock tags (see also llvm/Support/Dwarf.def).
+ DW_TAG_invalid = ~0U, // Tag for invalid results.
+ DW_VIRTUALITY_invalid = ~0U, // Virtuality for invalid results.
+ DW_MACINFO_invalid = ~0U, // Macinfo type for invalid results.
+
+ // Other constants.
+ DWARF_VERSION = 4, // Default dwarf version we output.
+ DW_PUBTYPES_VERSION = 2, // Section version number for .debug_pubtypes.
+ DW_PUBNAMES_VERSION = 2, // Section version number for .debug_pubnames.
+ DW_ARANGES_VERSION = 2 // Section version number for .debug_aranges.
+};
+
+// Special ID values that distinguish a CIE from an FDE in DWARF CFI.
+// Not inside an enum because a 64-bit value is needed.
+const uint32_t DW_CIE_ID = UINT32_MAX;
+const uint64_t DW64_CIE_ID = UINT64_MAX;
+
+enum Tag : uint16_t {
+#define HANDLE_DW_TAG(ID, NAME) DW_TAG_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_TAG_lo_user = 0x4080,
+ DW_TAG_hi_user = 0xffff,
+ DW_TAG_user_base = 0x1000 // Recommended base for user tags.
+};
+
+inline bool isType(Tag T) {
+ switch (T) {
+ case DW_TAG_array_type:
+ case DW_TAG_class_type:
+ case DW_TAG_interface_type:
+ case DW_TAG_enumeration_type:
+ case DW_TAG_pointer_type:
+ case DW_TAG_reference_type:
+ case DW_TAG_rvalue_reference_type:
+ case DW_TAG_string_type:
+ case DW_TAG_structure_type:
+ case DW_TAG_subroutine_type:
+ case DW_TAG_union_type:
+ case DW_TAG_ptr_to_member_type:
+ case DW_TAG_set_type:
+ case DW_TAG_subrange_type:
+ case DW_TAG_base_type:
+ case DW_TAG_const_type:
+ case DW_TAG_file_type:
+ case DW_TAG_packed_type:
+ case DW_TAG_volatile_type:
+ case DW_TAG_typedef:
+ return true;
+ default:
+ return false;
+ }
+}
+
+enum Attribute : uint16_t {
+ // Attributes
+ DW_AT_sibling = 0x01,
+ DW_AT_location = 0x02,
+ DW_AT_name = 0x03,
+ DW_AT_ordering = 0x09,
+ DW_AT_byte_size = 0x0b,
+ DW_AT_bit_offset = 0x0c,
+ DW_AT_bit_size = 0x0d,
+ DW_AT_stmt_list = 0x10,
+ DW_AT_low_pc = 0x11,
+ DW_AT_high_pc = 0x12,
+ DW_AT_language = 0x13,
+ DW_AT_discr = 0x15,
+ DW_AT_discr_value = 0x16,
+ DW_AT_visibility = 0x17,
+ DW_AT_import = 0x18,
+ DW_AT_string_length = 0x19,
+ DW_AT_common_reference = 0x1a,
+ DW_AT_comp_dir = 0x1b,
+ DW_AT_const_value = 0x1c,
+ DW_AT_containing_type = 0x1d,
+ DW_AT_default_value = 0x1e,
+ DW_AT_inline = 0x20,
+ DW_AT_is_optional = 0x21,
+ DW_AT_lower_bound = 0x22,
+ DW_AT_producer = 0x25,
+ DW_AT_prototyped = 0x27,
+ DW_AT_return_addr = 0x2a,
+ DW_AT_start_scope = 0x2c,
+ DW_AT_bit_stride = 0x2e,
+ DW_AT_upper_bound = 0x2f,
+ DW_AT_abstract_origin = 0x31,
+ DW_AT_accessibility = 0x32,
+ DW_AT_address_class = 0x33,
+ DW_AT_artificial = 0x34,
+ DW_AT_base_types = 0x35,
+ DW_AT_calling_convention = 0x36,
+ DW_AT_count = 0x37,
+ DW_AT_data_member_location = 0x38,
+ DW_AT_decl_column = 0x39,
+ DW_AT_decl_file = 0x3a,
+ DW_AT_decl_line = 0x3b,
+ DW_AT_declaration = 0x3c,
+ DW_AT_discr_list = 0x3d,
+ DW_AT_encoding = 0x3e,
+ DW_AT_external = 0x3f,
+ DW_AT_frame_base = 0x40,
+ DW_AT_friend = 0x41,
+ DW_AT_identifier_case = 0x42,
+ DW_AT_macro_info = 0x43,
+ DW_AT_namelist_item = 0x44,
+ DW_AT_priority = 0x45,
+ DW_AT_segment = 0x46,
+ DW_AT_specification = 0x47,
+ DW_AT_static_link = 0x48,
+ DW_AT_type = 0x49,
+ DW_AT_use_location = 0x4a,
+ DW_AT_variable_parameter = 0x4b,
+ DW_AT_virtuality = 0x4c,
+ DW_AT_vtable_elem_location = 0x4d,
+ DW_AT_allocated = 0x4e,
+ DW_AT_associated = 0x4f,
+ DW_AT_data_location = 0x50,
+ DW_AT_byte_stride = 0x51,
+ DW_AT_entry_pc = 0x52,
+ DW_AT_use_UTF8 = 0x53,
+ DW_AT_extension = 0x54,
+ DW_AT_ranges = 0x55,
+ DW_AT_trampoline = 0x56,
+ DW_AT_call_column = 0x57,
+ DW_AT_call_file = 0x58,
+ DW_AT_call_line = 0x59,
+ DW_AT_description = 0x5a,
+ DW_AT_binary_scale = 0x5b,
+ DW_AT_decimal_scale = 0x5c,
+ DW_AT_small = 0x5d,
+ DW_AT_decimal_sign = 0x5e,
+ DW_AT_digit_count = 0x5f,
+ DW_AT_picture_string = 0x60,
+ DW_AT_mutable = 0x61,
+ DW_AT_threads_scaled = 0x62,
+ DW_AT_explicit = 0x63,
+ DW_AT_object_pointer = 0x64,
+ DW_AT_endianity = 0x65,
+ DW_AT_elemental = 0x66,
+ DW_AT_pure = 0x67,
+ DW_AT_recursive = 0x68,
+ DW_AT_signature = 0x69,
+ DW_AT_main_subprogram = 0x6a,
+ DW_AT_data_bit_offset = 0x6b,
+ DW_AT_const_expr = 0x6c,
+ DW_AT_enum_class = 0x6d,
+ DW_AT_linkage_name = 0x6e,
+
+ // New in DWARF 5:
+ DW_AT_string_length_bit_size = 0x6f,
+ DW_AT_string_length_byte_size = 0x70,
+ DW_AT_rank = 0x71,
+ DW_AT_str_offsets_base = 0x72,
+ DW_AT_addr_base = 0x73,
+ DW_AT_ranges_base = 0x74,
+ DW_AT_dwo_id = 0x75,
+ DW_AT_dwo_name = 0x76,
+ DW_AT_reference = 0x77,
+ DW_AT_rvalue_reference = 0x78,
+ DW_AT_macros = 0x79,
+
+ DW_AT_lo_user = 0x2000,
+ DW_AT_hi_user = 0x3fff,
+
+ DW_AT_MIPS_loop_begin = 0x2002,
+ DW_AT_MIPS_tail_loop_begin = 0x2003,
+ DW_AT_MIPS_epilog_begin = 0x2004,
+ DW_AT_MIPS_loop_unroll_factor = 0x2005,
+ DW_AT_MIPS_software_pipeline_depth = 0x2006,
+ DW_AT_MIPS_linkage_name = 0x2007,
+ DW_AT_MIPS_stride = 0x2008,
+ DW_AT_MIPS_abstract_name = 0x2009,
+ DW_AT_MIPS_clone_origin = 0x200a,
+ DW_AT_MIPS_has_inlines = 0x200b,
+ DW_AT_MIPS_stride_byte = 0x200c,
+ DW_AT_MIPS_stride_elem = 0x200d,
+ DW_AT_MIPS_ptr_dopetype = 0x200e,
+ DW_AT_MIPS_allocatable_dopetype = 0x200f,
+ DW_AT_MIPS_assumed_shape_dopetype = 0x2010,
+
+ // This one appears to have only been implemented by Open64 for
+  // Fortran and may conflict with other extensions.
+ DW_AT_MIPS_assumed_size = 0x2011,
+
+ // GNU extensions
+ DW_AT_sf_names = 0x2101,
+ DW_AT_src_info = 0x2102,
+ DW_AT_mac_info = 0x2103,
+ DW_AT_src_coords = 0x2104,
+ DW_AT_body_begin = 0x2105,
+ DW_AT_body_end = 0x2106,
+ DW_AT_GNU_vector = 0x2107,
+ DW_AT_GNU_template_name = 0x2110,
+
+ DW_AT_GNU_odr_signature = 0x210f,
+ DW_AT_GNU_macros = 0x2119,
+
+ // Extensions for Fission proposal.
+ DW_AT_GNU_dwo_name = 0x2130,
+ DW_AT_GNU_dwo_id = 0x2131,
+ DW_AT_GNU_ranges_base = 0x2132,
+ DW_AT_GNU_addr_base = 0x2133,
+ DW_AT_GNU_pubnames = 0x2134,
+ DW_AT_GNU_pubtypes = 0x2135,
+ DW_AT_GNU_discriminator = 0x2136,
+
+ // Borland extensions.
+ DW_AT_BORLAND_property_read = 0x3b11,
+ DW_AT_BORLAND_property_write = 0x3b12,
+ DW_AT_BORLAND_property_implements = 0x3b13,
+ DW_AT_BORLAND_property_index = 0x3b14,
+ DW_AT_BORLAND_property_default = 0x3b15,
+ DW_AT_BORLAND_Delphi_unit = 0x3b20,
+ DW_AT_BORLAND_Delphi_class = 0x3b21,
+ DW_AT_BORLAND_Delphi_record = 0x3b22,
+ DW_AT_BORLAND_Delphi_metaclass = 0x3b23,
+ DW_AT_BORLAND_Delphi_constructor = 0x3b24,
+ DW_AT_BORLAND_Delphi_destructor = 0x3b25,
+ DW_AT_BORLAND_Delphi_anonymous_method = 0x3b26,
+ DW_AT_BORLAND_Delphi_interface = 0x3b27,
+ DW_AT_BORLAND_Delphi_ABI = 0x3b28,
+ DW_AT_BORLAND_Delphi_return = 0x3b29,
+ DW_AT_BORLAND_Delphi_frameptr = 0x3b30,
+ DW_AT_BORLAND_closure = 0x3b31,
+
+ // LLVM project extensions.
+ DW_AT_LLVM_include_path = 0x3e00,
+ DW_AT_LLVM_config_macros = 0x3e01,
+ DW_AT_LLVM_isysroot = 0x3e02,
+
+ // Apple extensions.
+ DW_AT_APPLE_optimized = 0x3fe1,
+ DW_AT_APPLE_flags = 0x3fe2,
+ DW_AT_APPLE_isa = 0x3fe3,
+ DW_AT_APPLE_block = 0x3fe4,
+ DW_AT_APPLE_major_runtime_vers = 0x3fe5,
+ DW_AT_APPLE_runtime_class = 0x3fe6,
+ DW_AT_APPLE_omit_frame_ptr = 0x3fe7,
+ DW_AT_APPLE_property_name = 0x3fe8,
+ DW_AT_APPLE_property_getter = 0x3fe9,
+ DW_AT_APPLE_property_setter = 0x3fea,
+ DW_AT_APPLE_property_attribute = 0x3feb,
+ DW_AT_APPLE_objc_complete_type = 0x3fec,
+ DW_AT_APPLE_property = 0x3fed
+};
+
+enum Form : uint16_t {
+ // Attribute form encodings
+ DW_FORM_addr = 0x01,
+ DW_FORM_block2 = 0x03,
+ DW_FORM_block4 = 0x04,
+ DW_FORM_data2 = 0x05,
+ DW_FORM_data4 = 0x06,
+ DW_FORM_data8 = 0x07,
+ DW_FORM_string = 0x08,
+ DW_FORM_block = 0x09,
+ DW_FORM_block1 = 0x0a,
+ DW_FORM_data1 = 0x0b,
+ DW_FORM_flag = 0x0c,
+ DW_FORM_sdata = 0x0d,
+ DW_FORM_strp = 0x0e,
+ DW_FORM_udata = 0x0f,
+ DW_FORM_ref_addr = 0x10,
+ DW_FORM_ref1 = 0x11,
+ DW_FORM_ref2 = 0x12,
+ DW_FORM_ref4 = 0x13,
+ DW_FORM_ref8 = 0x14,
+ DW_FORM_ref_udata = 0x15,
+ DW_FORM_indirect = 0x16,
+ DW_FORM_sec_offset = 0x17,
+ DW_FORM_exprloc = 0x18,
+ DW_FORM_flag_present = 0x19,
+ DW_FORM_ref_sig8 = 0x20,
+
+ // Extensions for Fission proposal
+ DW_FORM_GNU_addr_index = 0x1f01,
+ DW_FORM_GNU_str_index = 0x1f02,
+
+ // Alternate debug sections proposal (output of "dwz" tool).
+ DW_FORM_GNU_ref_alt = 0x1f20,
+ DW_FORM_GNU_strp_alt = 0x1f21
+};
+
+enum LocationAtom {
+#define HANDLE_DW_OP(ID, NAME) DW_OP_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_OP_lo_user = 0xe0,
+ DW_OP_hi_user = 0xff
+};
+
+enum TypeKind {
+#define HANDLE_DW_ATE(ID, NAME) DW_ATE_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_ATE_lo_user = 0x80,
+ DW_ATE_hi_user = 0xff
+};
+
+enum DecimalSignEncoding {
+ // Decimal sign attribute values
+ DW_DS_unsigned = 0x01,
+ DW_DS_leading_overpunch = 0x02,
+ DW_DS_trailing_overpunch = 0x03,
+ DW_DS_leading_separate = 0x04,
+ DW_DS_trailing_separate = 0x05
+};
+
+enum EndianityEncoding {
+ // Endianity attribute values
+ DW_END_default = 0x00,
+ DW_END_big = 0x01,
+ DW_END_little = 0x02,
+ DW_END_lo_user = 0x40,
+ DW_END_hi_user = 0xff
+};
+
+enum AccessAttribute {
+ // Accessibility codes
+ DW_ACCESS_public = 0x01,
+ DW_ACCESS_protected = 0x02,
+ DW_ACCESS_private = 0x03
+};
+
+enum VisibilityAttribute {
+ // Visibility codes
+ DW_VIS_local = 0x01,
+ DW_VIS_exported = 0x02,
+ DW_VIS_qualified = 0x03
+};
+
+enum VirtualityAttribute {
+#define HANDLE_DW_VIRTUALITY(ID, NAME) DW_VIRTUALITY_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_VIRTUALITY_max = 0x02
+};
+
+enum SourceLanguage {
+#define HANDLE_DW_LANG(ID, NAME) DW_LANG_##NAME = ID,
+#include "llvm/Support/Dwarf.def"
+ DW_LANG_lo_user = 0x8000,
+ DW_LANG_hi_user = 0xffff
+};
+
+enum CaseSensitivity {
+ // Identifier case codes
+ DW_ID_case_sensitive = 0x00,
+ DW_ID_up_case = 0x01,
+ DW_ID_down_case = 0x02,
+ DW_ID_case_insensitive = 0x03
+};
+
+enum CallingConvention {
+ // Calling convention codes
+ DW_CC_normal = 0x01,
+ DW_CC_program = 0x02,
+ DW_CC_nocall = 0x03,
+ DW_CC_lo_user = 0x40,
+ DW_CC_GNU_borland_fastcall_i386 = 0x41,
+ DW_CC_BORLAND_safecall = 0xb0,
+ DW_CC_BORLAND_stdcall = 0xb1,
+ DW_CC_BORLAND_pascal = 0xb2,
+ DW_CC_BORLAND_msfastcall = 0xb3,
+ DW_CC_BORLAND_msreturn = 0xb4,
+ DW_CC_BORLAND_thiscall = 0xb5,
+ DW_CC_BORLAND_fastcall = 0xb6,
+ DW_CC_hi_user = 0xff
+};
+
+enum InlineAttribute {
+ // Inline codes
+ DW_INL_not_inlined = 0x00,
+ DW_INL_inlined = 0x01,
+ DW_INL_declared_not_inlined = 0x02,
+ DW_INL_declared_inlined = 0x03
+};
+
+enum ArrayDimensionOrdering {
+ // Array ordering
+ DW_ORD_row_major = 0x00,
+ DW_ORD_col_major = 0x01
+};
+
+enum DiscriminantList {
+ // Discriminant descriptor values
+ DW_DSC_label = 0x00,
+ DW_DSC_range = 0x01
+};
+
+enum LineNumberOps {
+ // Line Number Standard Opcode Encodings
+ DW_LNS_extended_op = 0x00,
+ DW_LNS_copy = 0x01,
+ DW_LNS_advance_pc = 0x02,
+ DW_LNS_advance_line = 0x03,
+ DW_LNS_set_file = 0x04,
+ DW_LNS_set_column = 0x05,
+ DW_LNS_negate_stmt = 0x06,
+ DW_LNS_set_basic_block = 0x07,
+ DW_LNS_const_add_pc = 0x08,
+ DW_LNS_fixed_advance_pc = 0x09,
+ DW_LNS_set_prologue_end = 0x0a,
+ DW_LNS_set_epilogue_begin = 0x0b,
+ DW_LNS_set_isa = 0x0c
+};
+
+enum LineNumberExtendedOps {
+ // Line Number Extended Opcode Encodings
+ DW_LNE_end_sequence = 0x01,
+ DW_LNE_set_address = 0x02,
+ DW_LNE_define_file = 0x03,
+ DW_LNE_set_discriminator = 0x04,
+ DW_LNE_lo_user = 0x80,
+ DW_LNE_hi_user = 0xff
+};
+
+enum MacinfoRecordType {
+ // Macinfo Type Encodings
+ DW_MACINFO_define = 0x01,
+ DW_MACINFO_undef = 0x02,
+ DW_MACINFO_start_file = 0x03,
+ DW_MACINFO_end_file = 0x04,
+ DW_MACINFO_vendor_ext = 0xff
+};
+
+enum MacroEntryType {
+ // Macro Information Entry Type Encodings
+ DW_MACRO_define = 0x01,
+ DW_MACRO_undef = 0x02,
+ DW_MACRO_start_file = 0x03,
+ DW_MACRO_end_file = 0x04,
+ DW_MACRO_define_indirect = 0x05,
+ DW_MACRO_undef_indirect = 0x06,
+ DW_MACRO_transparent_include = 0x07,
+ DW_MACRO_define_indirect_sup = 0x08,
+ DW_MACRO_undef_indirect_sup = 0x09,
+ DW_MACRO_transparent_include_sup = 0x0a,
+ DW_MACRO_define_indirectx = 0x0b,
+ DW_MACRO_undef_indirectx = 0x0c,
+ DW_MACRO_lo_user = 0xe0,
+ DW_MACRO_hi_user = 0xff
+};
+
+enum CallFrameInfo {
+ // Call frame instruction encodings
+ DW_CFA_extended = 0x00,
+ DW_CFA_nop = 0x00,
+ DW_CFA_advance_loc = 0x40,
+ DW_CFA_offset = 0x80,
+ DW_CFA_restore = 0xc0,
+ DW_CFA_set_loc = 0x01,
+ DW_CFA_advance_loc1 = 0x02,
+ DW_CFA_advance_loc2 = 0x03,
+ DW_CFA_advance_loc4 = 0x04,
+ DW_CFA_offset_extended = 0x05,
+ DW_CFA_restore_extended = 0x06,
+ DW_CFA_undefined = 0x07,
+ DW_CFA_same_value = 0x08,
+ DW_CFA_register = 0x09,
+ DW_CFA_remember_state = 0x0a,
+ DW_CFA_restore_state = 0x0b,
+ DW_CFA_def_cfa = 0x0c,
+ DW_CFA_def_cfa_register = 0x0d,
+ DW_CFA_def_cfa_offset = 0x0e,
+ DW_CFA_def_cfa_expression = 0x0f,
+ DW_CFA_expression = 0x10,
+ DW_CFA_offset_extended_sf = 0x11,
+ DW_CFA_def_cfa_sf = 0x12,
+ DW_CFA_def_cfa_offset_sf = 0x13,
+ DW_CFA_val_offset = 0x14,
+ DW_CFA_val_offset_sf = 0x15,
+ DW_CFA_val_expression = 0x16,
+ DW_CFA_MIPS_advance_loc8 = 0x1d,
+ DW_CFA_GNU_window_save = 0x2d,
+ DW_CFA_GNU_args_size = 0x2e,
+ DW_CFA_lo_user = 0x1c,
+ DW_CFA_hi_user = 0x3f
+};
+
+enum Constants {
+ // Children flag
+ DW_CHILDREN_no = 0x00,
+ DW_CHILDREN_yes = 0x01,
+
+ DW_EH_PE_absptr = 0x00,
+ DW_EH_PE_omit = 0xff,
+ DW_EH_PE_uleb128 = 0x01,
+ DW_EH_PE_udata2 = 0x02,
+ DW_EH_PE_udata4 = 0x03,
+ DW_EH_PE_udata8 = 0x04,
+ DW_EH_PE_sleb128 = 0x09,
+ DW_EH_PE_sdata2 = 0x0A,
+ DW_EH_PE_sdata4 = 0x0B,
+ DW_EH_PE_sdata8 = 0x0C,
+ DW_EH_PE_signed = 0x08,
+ DW_EH_PE_pcrel = 0x10,
+ DW_EH_PE_textrel = 0x20,
+ DW_EH_PE_datarel = 0x30,
+ DW_EH_PE_funcrel = 0x40,
+ DW_EH_PE_aligned = 0x50,
+ DW_EH_PE_indirect = 0x80
+};
+
+// Constants for debug_loc.dwo in the DWARF5 Split Debug Info Proposal
+enum LocationListEntry : unsigned char {
+ DW_LLE_end_of_list_entry,
+ DW_LLE_base_address_selection_entry,
+ DW_LLE_start_end_entry,
+ DW_LLE_start_length_entry,
+ DW_LLE_offset_pair_entry
+};
+
+/// Constants for the DW_APPLE_PROPERTY_attributes attribute.
+/// Keep this list in sync with clang's DeclSpec.h ObjCPropertyAttributeKind.
+enum ApplePropertyAttributes {
+ // Apple Objective-C Property Attributes
+ DW_APPLE_PROPERTY_readonly = 0x01,
+ DW_APPLE_PROPERTY_getter = 0x02,
+ DW_APPLE_PROPERTY_assign = 0x04,
+ DW_APPLE_PROPERTY_readwrite = 0x08,
+ DW_APPLE_PROPERTY_retain = 0x10,
+ DW_APPLE_PROPERTY_copy = 0x20,
+ DW_APPLE_PROPERTY_nonatomic = 0x40,
+ DW_APPLE_PROPERTY_setter = 0x80,
+ DW_APPLE_PROPERTY_atomic = 0x100,
+ DW_APPLE_PROPERTY_weak = 0x200,
+ DW_APPLE_PROPERTY_strong = 0x400,
+ DW_APPLE_PROPERTY_unsafe_unretained = 0x800
+};
+
+// Constants for the DWARF5 Accelerator Table Proposal
+enum AcceleratorTable {
+ // Data layout descriptors.
+  DW_ATOM_null = 0u,       // Marker for the end of a list of atoms.
+ DW_ATOM_die_offset = 1u, // DIE offset in the debug_info section.
+ DW_ATOM_cu_offset = 2u, // Offset of the compile unit header that contains the
+ // item in question.
+ DW_ATOM_die_tag = 3u, // A tag entry.
+ DW_ATOM_type_flags = 4u, // Set of flags for a type.
+
+ // DW_ATOM_type_flags values.
+
+ // Always set for C++, only set for ObjC if this is the @implementation for a
+ // class.
+ DW_FLAG_type_implementation = 2u,
+
+ // Hash functions.
+
+ // Daniel J. Bernstein hash.
+ DW_hash_function_djb = 0u
+};
+
+// Constants for the GNU pubnames/pubtypes extensions supporting gdb index.
+enum GDBIndexEntryKind {
+ GIEK_NONE,
+ GIEK_TYPE,
+ GIEK_VARIABLE,
+ GIEK_FUNCTION,
+ GIEK_OTHER,
+ GIEK_UNUSED5,
+ GIEK_UNUSED6,
+ GIEK_UNUSED7
+};
+
+enum GDBIndexEntryLinkage {
+ GIEL_EXTERNAL,
+ GIEL_STATIC
+};
+
+/// \defgroup DwarfConstantsDumping Dwarf constants dumping functions
+///
+/// All these functions map their argument's value back to the
+/// corresponding enumerator name or return nullptr if the value isn't
+/// known.
+///
+/// @{
+const char *TagString(unsigned Tag);
+const char *ChildrenString(unsigned Children);
+const char *AttributeString(unsigned Attribute);
+const char *FormEncodingString(unsigned Encoding);
+const char *OperationEncodingString(unsigned Encoding);
+const char *AttributeEncodingString(unsigned Encoding);
+const char *DecimalSignString(unsigned Sign);
+const char *EndianityString(unsigned Endian);
+const char *AccessibilityString(unsigned Access);
+const char *VisibilityString(unsigned Visibility);
+const char *VirtualityString(unsigned Virtuality);
+const char *LanguageString(unsigned Language);
+const char *CaseString(unsigned Case);
+const char *ConventionString(unsigned Convention);
+const char *InlineCodeString(unsigned Code);
+const char *ArrayOrderString(unsigned Order);
+const char *DiscriminantString(unsigned Discriminant);
+const char *LNStandardString(unsigned Standard);
+const char *LNExtendedString(unsigned Encoding);
+const char *MacinfoString(unsigned Encoding);
+const char *CallFrameString(unsigned Encoding);
+const char *ApplePropertyString(unsigned);
+const char *AtomTypeString(unsigned Atom);
+const char *GDBIndexEntryKindString(GDBIndexEntryKind Kind);
+const char *GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage);
+/// @}
+
+/// \defgroup DwarfConstantsParsing Dwarf constants parsing functions
+///
+/// These functions map their strings back to the corresponding enumeration
+/// value, or return 0 if there is none, with the following exceptions:
+///
+/// \li \a getTag() returns \a DW_TAG_invalid on invalid input.
+/// \li \a getVirtuality() returns \a DW_VIRTUALITY_invalid on invalid input.
+/// \li \a getMacinfo() returns \a DW_MACINFO_invalid on invalid input.
+///
+/// @{
+unsigned getTag(StringRef TagString);
+unsigned getOperationEncoding(StringRef OperationEncodingString);
+unsigned getVirtuality(StringRef VirtualityString);
+unsigned getLanguage(StringRef LanguageString);
+unsigned getAttributeEncoding(StringRef EncodingString);
+unsigned getMacinfo(StringRef MacinfoString);
+/// @}
+
+/// \brief Returns the symbolic string representing Val when used as a value
+/// for attribute Attr.
+const char *AttributeValueString(uint16_t Attr, unsigned Val);
+
+/// \brief Describes an entry of the various gnu_pub* debug sections.
+///
+/// The gnu_pub* kind looks like:
+///
+/// 0-3 reserved
+/// 4-6 symbol kind
+/// 7 0 == global, 1 == static
+///
+/// A gdb_index descriptor stores the above kind shifted up by 24 bits, with
+/// the offset of the CU within the debug_info section stored in the low 24
+/// bits.
+struct PubIndexEntryDescriptor {
+ GDBIndexEntryKind Kind;
+ GDBIndexEntryLinkage Linkage;
+ PubIndexEntryDescriptor(GDBIndexEntryKind Kind, GDBIndexEntryLinkage Linkage)
+ : Kind(Kind), Linkage(Linkage) {}
+ /* implicit */ PubIndexEntryDescriptor(GDBIndexEntryKind Kind)
+ : Kind(Kind), Linkage(GIEL_EXTERNAL) {}
+ explicit PubIndexEntryDescriptor(uint8_t Value)
+ : Kind(static_cast<GDBIndexEntryKind>((Value & KIND_MASK) >>
+ KIND_OFFSET)),
+ Linkage(static_cast<GDBIndexEntryLinkage>((Value & LINKAGE_MASK) >>
+ LINKAGE_OFFSET)) {}
+ uint8_t toBits() { return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET; }
+
+private:
+ enum {
+ KIND_OFFSET = 4,
+ KIND_MASK = 7 << KIND_OFFSET,
+ LINKAGE_OFFSET = 7,
+ LINKAGE_MASK = 1 << LINKAGE_OFFSET
+ };
+};
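+
+// A round-trip sketch of the packing above, using the enums from this header:
+//
+//   PubIndexEntryDescriptor D(GIEK_FUNCTION, GIEL_STATIC);
+//   uint8_t Bits = D.toBits();       // (3 << 4) | (1 << 7) == 0xb0.
+//   PubIndexEntryDescriptor E(Bits); // Recovers GIEK_FUNCTION, GIEL_STATIC.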
+
+} // End of namespace dwarf
+
+} // End of namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/DynamicLibrary.h b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
new file mode 100644
index 0000000..a7d2221
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/DynamicLibrary.h
@@ -0,0 +1,105 @@
+//===-- llvm/Support/DynamicLibrary.h - Portable Dynamic Library -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the sys::DynamicLibrary class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DYNAMICLIBRARY_H
+#define LLVM_SUPPORT_DYNAMICLIBRARY_H
+
+#include <string>
+
+namespace llvm {
+
+class StringRef;
+
+namespace sys {
+
+ /// This class provides a portable interface to dynamic libraries which also
+ /// might be known as shared libraries, shared objects, dynamic shared
+ /// objects, or dynamic link libraries. Regardless of the terminology or the
+ /// operating system interface, this class provides a portable interface that
+ /// allows dynamic libraries to be loaded and searched for externally
+ /// defined symbols. This is typically used to provide "plug-in" support.
+ /// It also allows for symbols to be defined which don't live in any library,
+ /// but rather the main program itself, useful on Windows where the main
+ /// executable cannot be searched.
+ ///
+ /// Note: there is currently no interface for temporarily loading a library,
+ /// or for unloading libraries when the LLVM library is unloaded.
+ class DynamicLibrary {
+ // Placeholder whose address represents an invalid library.
+ // We use this instead of NULL or a pointer-int pair because the OS library
+ // might define 0 or 1 to be "special" handles, such as "search all".
+ static char Invalid;
+
+ // Opaque data used to interface with OS-specific dynamic library handling.
+ void *Data;
+
+ public:
+ explicit DynamicLibrary(void *data = &Invalid) : Data(data) {}
+
+ /// Returns true if the object refers to a valid library.
+ bool isValid() const { return Data != &Invalid; }
+
+ /// Searches through the library for the symbol \p symbolName. If it is
+ /// found, the address of that symbol is returned. If not, NULL is returned.
+ /// Note that NULL will also be returned if the library failed to load.
+ /// Use isValid() to distinguish these cases if it is important.
+ /// Note that this will \e not search symbols explicitly registered by
+ /// AddSymbol().
+ void *getAddressOfSymbol(const char *symbolName);
+
+ /// This function permanently loads the dynamic library at the given path.
+ /// The library will only be unloaded when the program terminates.
+ /// This returns a valid DynamicLibrary instance on success and an invalid
+ /// instance on failure (see isValid()). \p *errMsg will only be modified
+ /// if the library fails to load.
+ ///
+ /// It is safe to call this function multiple times for the same library.
+ /// @brief Open a dynamic library permanently.
+ static DynamicLibrary getPermanentLibrary(const char *filename,
+ std::string *errMsg = nullptr);
+
+ /// This function permanently loads the dynamic library at the given path.
+ /// Use this instead of getPermanentLibrary() when you won't need to get
+ /// symbols from the library itself.
+ ///
+ /// It is safe to call this function multiple times for the same library.
+ static bool LoadLibraryPermanently(const char *Filename,
+ std::string *ErrMsg = nullptr) {
+ return !getPermanentLibrary(Filename, ErrMsg).isValid();
+ }
+
+ /// This function will search through all previously loaded dynamic
+ /// libraries for the symbol \p symbolName. If it is found, the address of
+ /// that symbol is returned. If not, null is returned. Note that this will
+ /// search permanently loaded libraries (getPermanentLibrary()) as well
+ /// as explicitly registered symbols (AddSymbol()).
+ /// @throws std::string on error.
+ /// @brief Search through libraries for address of a symbol
+ static void *SearchForAddressOfSymbol(const char *symbolName);
+
+ /// @brief Convenience function for C++ophiles.
+ static void *SearchForAddressOfSymbol(const std::string &symbolName) {
+ return SearchForAddressOfSymbol(symbolName.c_str());
+ }
+
+    /// This function permanently adds the symbol \p symbolName with the
+ /// value \p symbolValue. These symbols are searched before any
+ /// libraries.
+ /// @brief Add searchable symbol/value pair.
+ static void AddSymbol(StringRef symbolName, void *symbolValue);
+ };
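+
+  // A usage sketch (the path, symbol name, and reportError are hypothetical):
+  //
+  //   std::string Err;
+  //   sys::DynamicLibrary DL =
+  //       sys::DynamicLibrary::getPermanentLibrary("plugin.so", &Err);
+  //   if (!DL.isValid())
+  //     return reportError(Err); // Err is set only when loading fails.
+  //   void *Init = DL.getAddressOfSymbol("plugin_init");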
+
+} // End sys namespace
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ELF.h b/contrib/llvm/include/llvm/Support/ELF.h
new file mode 100644
index 0000000..97708a7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELF.h
@@ -0,0 +1,1288 @@
+//===-- llvm/Support/ELF.h - ELF constants and data structures --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains common, non-processor-specific data structures and
+// constants for the ELF file format.
+//
+// The details of the ELF32 bits in this file are largely based on the Tool
+// Interface Standard (TIS) Executable and Linking Format (ELF) Specification
+// Version 1.2, May 1995. The ELF64 stuff is based on ELF-64 Object File Format
+// Version 1.5, Draft 2, May 1998 as well as OpenBSD header files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ELF_H
+#define LLVM_SUPPORT_ELF_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include <cstring>
+
+namespace llvm {
+
+namespace ELF {
+
+typedef uint32_t Elf32_Addr; // Program address
+typedef uint32_t Elf32_Off; // File offset
+typedef uint16_t Elf32_Half;
+typedef uint32_t Elf32_Word;
+typedef int32_t Elf32_Sword;
+
+typedef uint64_t Elf64_Addr;
+typedef uint64_t Elf64_Off;
+typedef uint16_t Elf64_Half;
+typedef uint32_t Elf64_Word;
+typedef int32_t Elf64_Sword;
+typedef uint64_t Elf64_Xword;
+typedef int64_t Elf64_Sxword;
+
+// Object file magic string.
+static const char ElfMagic[] = { 0x7f, 'E', 'L', 'F', '\0' };
+
+// e_ident size and indices.
+enum {
+ EI_MAG0 = 0, // File identification index.
+ EI_MAG1 = 1, // File identification index.
+ EI_MAG2 = 2, // File identification index.
+ EI_MAG3 = 3, // File identification index.
+ EI_CLASS = 4, // File class.
+ EI_DATA = 5, // Data encoding.
+ EI_VERSION = 6, // File version.
+ EI_OSABI = 7, // OS/ABI identification.
+ EI_ABIVERSION = 8, // ABI version.
+ EI_PAD = 9, // Start of padding bytes.
+ EI_NIDENT = 16 // Number of bytes in e_ident.
+};
+
+struct Elf32_Ehdr {
+ unsigned char e_ident[EI_NIDENT]; // ELF Identification bytes
+ Elf32_Half e_type; // Type of file (see ET_* below)
+ Elf32_Half e_machine; // Required architecture for this file (see EM_*)
+ Elf32_Word e_version; // Must be equal to 1
+ Elf32_Addr e_entry; // Address to jump to in order to start program
+ Elf32_Off e_phoff; // Program header table's file offset, in bytes
+ Elf32_Off e_shoff; // Section header table's file offset, in bytes
+ Elf32_Word e_flags; // Processor-specific flags
+ Elf32_Half e_ehsize; // Size of ELF header, in bytes
+ Elf32_Half e_phentsize; // Size of an entry in the program header table
+ Elf32_Half e_phnum; // Number of entries in the program header table
+ Elf32_Half e_shentsize; // Size of an entry in the section header table
+ Elf32_Half e_shnum; // Number of entries in the section header table
+ Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
+};
+
+// 64-bit ELF header. Fields are the same as for ELF32, but with different
+// types (see above).
+struct Elf64_Ehdr {
+ unsigned char e_ident[EI_NIDENT];
+ Elf64_Half e_type;
+ Elf64_Half e_machine;
+ Elf64_Word e_version;
+ Elf64_Addr e_entry;
+ Elf64_Off e_phoff;
+ Elf64_Off e_shoff;
+ Elf64_Word e_flags;
+ Elf64_Half e_ehsize;
+ Elf64_Half e_phentsize;
+ Elf64_Half e_phnum;
+ Elf64_Half e_shentsize;
+ Elf64_Half e_shnum;
+ Elf64_Half e_shstrndx;
+ bool checkMagic() const {
+ return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
+ }
+ unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
+ unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
+};
+
+// File types
+enum {
+ ET_NONE = 0, // No file type
+ ET_REL = 1, // Relocatable file
+ ET_EXEC = 2, // Executable file
+ ET_DYN = 3, // Shared object file
+ ET_CORE = 4, // Core file
+ ET_LOPROC = 0xff00, // Beginning of processor-specific codes
+ ET_HIPROC = 0xffff // End of processor-specific codes
+};
+
+// Versioning
+enum {
+ EV_NONE = 0,
+ EV_CURRENT = 1
+};
+
+// Machine architectures
+// See current registered ELF machine architectures at:
+// http://www.uxsglobal.com/developers/gabi/latest/ch4.eheader.html
+enum {
+ EM_NONE = 0, // No machine
+ EM_M32 = 1, // AT&T WE 32100
+ EM_SPARC = 2, // SPARC
+ EM_386 = 3, // Intel 386
+ EM_68K = 4, // Motorola 68000
+ EM_88K = 5, // Motorola 88000
+ EM_IAMCU = 6, // Intel MCU
+ EM_860 = 7, // Intel 80860
+ EM_MIPS = 8, // MIPS R3000
+ EM_S370 = 9, // IBM System/370
+ EM_MIPS_RS3_LE = 10, // MIPS RS3000 Little-endian
+ EM_PARISC = 15, // Hewlett-Packard PA-RISC
+ EM_VPP500 = 17, // Fujitsu VPP500
+ EM_SPARC32PLUS = 18, // Enhanced instruction set SPARC
+ EM_960 = 19, // Intel 80960
+ EM_PPC = 20, // PowerPC
+ EM_PPC64 = 21, // PowerPC64
+ EM_S390 = 22, // IBM System/390
+ EM_SPU = 23, // IBM SPU/SPC
+ EM_V800 = 36, // NEC V800
+ EM_FR20 = 37, // Fujitsu FR20
+ EM_RH32 = 38, // TRW RH-32
+ EM_RCE = 39, // Motorola RCE
+ EM_ARM = 40, // ARM
+ EM_ALPHA = 41, // DEC Alpha
+ EM_SH = 42, // Hitachi SH
+ EM_SPARCV9 = 43, // SPARC V9
+ EM_TRICORE = 44, // Siemens TriCore
+ EM_ARC = 45, // Argonaut RISC Core
+ EM_H8_300 = 46, // Hitachi H8/300
+ EM_H8_300H = 47, // Hitachi H8/300H
+ EM_H8S = 48, // Hitachi H8S
+ EM_H8_500 = 49, // Hitachi H8/500
+ EM_IA_64 = 50, // Intel IA-64 processor architecture
+ EM_MIPS_X = 51, // Stanford MIPS-X
+ EM_COLDFIRE = 52, // Motorola ColdFire
+ EM_68HC12 = 53, // Motorola M68HC12
+ EM_MMA = 54, // Fujitsu MMA Multimedia Accelerator
+ EM_PCP = 55, // Siemens PCP
+ EM_NCPU = 56, // Sony nCPU embedded RISC processor
+ EM_NDR1 = 57, // Denso NDR1 microprocessor
+ EM_STARCORE = 58, // Motorola Star*Core processor
+ EM_ME16 = 59, // Toyota ME16 processor
+ EM_ST100 = 60, // STMicroelectronics ST100 processor
+ EM_TINYJ = 61, // Advanced Logic Corp. TinyJ embedded processor family
+ EM_X86_64 = 62, // AMD x86-64 architecture
+ EM_PDSP = 63, // Sony DSP Processor
+ EM_PDP10 = 64, // Digital Equipment Corp. PDP-10
+ EM_PDP11 = 65, // Digital Equipment Corp. PDP-11
+ EM_FX66 = 66, // Siemens FX66 microcontroller
+ EM_ST9PLUS = 67, // STMicroelectronics ST9+ 8/16 bit microcontroller
+ EM_ST7 = 68, // STMicroelectronics ST7 8-bit microcontroller
+ EM_68HC16 = 69, // Motorola MC68HC16 Microcontroller
+ EM_68HC11 = 70, // Motorola MC68HC11 Microcontroller
+ EM_68HC08 = 71, // Motorola MC68HC08 Microcontroller
+ EM_68HC05 = 72, // Motorola MC68HC05 Microcontroller
+ EM_SVX = 73, // Silicon Graphics SVx
+ EM_ST19 = 74, // STMicroelectronics ST19 8-bit microcontroller
+ EM_VAX = 75, // Digital VAX
+ EM_CRIS = 76, // Axis Communications 32-bit embedded processor
+ EM_JAVELIN = 77, // Infineon Technologies 32-bit embedded processor
+ EM_FIREPATH = 78, // Element 14 64-bit DSP Processor
+ EM_ZSP = 79, // LSI Logic 16-bit DSP Processor
+ EM_MMIX = 80, // Donald Knuth's educational 64-bit processor
+ EM_HUANY = 81, // Harvard University machine-independent object files
+ EM_PRISM = 82, // SiTera Prism
+ EM_AVR = 83, // Atmel AVR 8-bit microcontroller
+ EM_FR30 = 84, // Fujitsu FR30
+ EM_D10V = 85, // Mitsubishi D10V
+ EM_D30V = 86, // Mitsubishi D30V
+ EM_V850 = 87, // NEC v850
+ EM_M32R = 88, // Mitsubishi M32R
+ EM_MN10300 = 89, // Matsushita MN10300
+ EM_MN10200 = 90, // Matsushita MN10200
+ EM_PJ = 91, // picoJava
+ EM_OPENRISC = 92, // OpenRISC 32-bit embedded processor
+ EM_ARC_COMPACT = 93, // ARC International ARCompact processor (old
+ // spelling/synonym: EM_ARC_A5)
+ EM_XTENSA = 94, // Tensilica Xtensa Architecture
+ EM_VIDEOCORE = 95, // Alphamosaic VideoCore processor
+ EM_TMM_GPP = 96, // Thompson Multimedia General Purpose Processor
+ EM_NS32K = 97, // National Semiconductor 32000 series
+ EM_TPC = 98, // Tenor Network TPC processor
+ EM_SNP1K = 99, // Trebia SNP 1000 processor
+ EM_ST200 = 100, // STMicroelectronics (www.st.com) ST200
+ EM_IP2K = 101, // Ubicom IP2xxx microcontroller family
+ EM_MAX = 102, // MAX Processor
+ EM_CR = 103, // National Semiconductor CompactRISC microprocessor
+ EM_F2MC16 = 104, // Fujitsu F2MC16
+ EM_MSP430 = 105, // Texas Instruments embedded microcontroller msp430
+ EM_BLACKFIN = 106, // Analog Devices Blackfin (DSP) processor
+ EM_SE_C33 = 107, // S1C33 Family of Seiko Epson processors
+ EM_SEP = 108, // Sharp embedded microprocessor
+ EM_ARCA = 109, // Arca RISC Microprocessor
+ EM_UNICORE = 110, // Microprocessor series from PKU-Unity Ltd. and MPRC
+ // of Peking University
+ EM_EXCESS = 111, // eXcess: 16/32/64-bit configurable embedded CPU
+ EM_DXP = 112, // Icera Semiconductor Inc. Deep Execution Processor
+ EM_ALTERA_NIOS2 = 113, // Altera Nios II soft-core processor
+ EM_CRX = 114, // National Semiconductor CompactRISC CRX
+ EM_XGATE = 115, // Motorola XGATE embedded processor
+ EM_C166 = 116, // Infineon C16x/XC16x processor
+ EM_M16C = 117, // Renesas M16C series microprocessors
+ EM_DSPIC30F = 118, // Microchip Technology dsPIC30F Digital Signal
+ // Controller
+ EM_CE = 119, // Freescale Communication Engine RISC core
+ EM_M32C = 120, // Renesas M32C series microprocessors
+ EM_TSK3000 = 131, // Altium TSK3000 core
+ EM_RS08 = 132, // Freescale RS08 embedded processor
+ EM_SHARC = 133, // Analog Devices SHARC family of 32-bit DSP
+ // processors
+ EM_ECOG2 = 134, // Cyan Technology eCOG2 microprocessor
+ EM_SCORE7 = 135, // Sunplus S+core7 RISC processor
+ EM_DSP24 = 136, // New Japan Radio (NJR) 24-bit DSP Processor
+ EM_VIDEOCORE3 = 137, // Broadcom VideoCore III processor
+ EM_LATTICEMICO32 = 138, // RISC processor for Lattice FPGA architecture
+ EM_SE_C17 = 139, // Seiko Epson C17 family
+ EM_TI_C6000 = 140, // The Texas Instruments TMS320C6000 DSP family
+ EM_TI_C2000 = 141, // The Texas Instruments TMS320C2000 DSP family
+ EM_TI_C5500 = 142, // The Texas Instruments TMS320C55x DSP family
+ EM_MMDSP_PLUS = 160, // STMicroelectronics 64bit VLIW Data Signal Processor
+ EM_CYPRESS_M8C = 161, // Cypress M8C microprocessor
+ EM_R32C = 162, // Renesas R32C series microprocessors
+ EM_TRIMEDIA = 163, // NXP Semiconductors TriMedia architecture family
+ EM_HEXAGON = 164, // Qualcomm Hexagon processor
+ EM_8051 = 165, // Intel 8051 and variants
+ EM_STXP7X = 166, // STMicroelectronics STxP7x family of configurable
+ // and extensible RISC processors
+ EM_NDS32 = 167, // Andes Technology compact code size embedded RISC
+ // processor family
+ EM_ECOG1 = 168, // Cyan Technology eCOG1X family
+ EM_ECOG1X = 168, // Cyan Technology eCOG1X family
+ EM_MAXQ30 = 169, // Dallas Semiconductor MAXQ30 Core Micro-controllers
+ EM_XIMO16 = 170, // New Japan Radio (NJR) 16-bit DSP Processor
+ EM_MANIK = 171, // M2000 Reconfigurable RISC Microprocessor
+ EM_CRAYNV2 = 172, // Cray Inc. NV2 vector architecture
+ EM_RX = 173, // Renesas RX family
+ EM_METAG = 174, // Imagination Technologies META processor
+ // architecture
+ EM_MCST_ELBRUS = 175, // MCST Elbrus general purpose hardware architecture
+ EM_ECOG16 = 176, // Cyan Technology eCOG16 family
+ EM_CR16 = 177, // National Semiconductor CompactRISC CR16 16-bit
+ // microprocessor
+ EM_ETPU = 178, // Freescale Extended Time Processing Unit
+ EM_SLE9X = 179, // Infineon Technologies SLE9X core
+ EM_L10M = 180, // Intel L10M
+ EM_K10M = 181, // Intel K10M
+ EM_AARCH64 = 183, // ARM AArch64
+ EM_AVR32 = 185, // Atmel Corporation 32-bit microprocessor family
+ EM_STM8 = 186, // STMicroelectronics STM8 8-bit microcontroller
+ EM_TILE64 = 187, // Tilera TILE64 multicore architecture family
+ EM_TILEPRO = 188, // Tilera TILEPro multicore architecture family
+ EM_CUDA = 190, // NVIDIA CUDA architecture
+ EM_TILEGX = 191, // Tilera TILE-Gx multicore architecture family
+ EM_CLOUDSHIELD = 192, // CloudShield architecture family
+ EM_COREA_1ST = 193, // KIPO-KAIST Core-A 1st generation processor family
+ EM_COREA_2ND = 194, // KIPO-KAIST Core-A 2nd generation processor family
+ EM_ARC_COMPACT2 = 195, // Synopsys ARCompact V2
+ EM_OPEN8 = 196, // Open8 8-bit RISC soft processor core
+ EM_RL78 = 197, // Renesas RL78 family
+ EM_VIDEOCORE5 = 198, // Broadcom VideoCore V processor
+ EM_78KOR = 199, // Renesas 78KOR family
+ EM_56800EX = 200, // Freescale 56800EX Digital Signal Controller (DSC)
+ EM_BA1 = 201, // Beyond BA1 CPU architecture
+ EM_BA2 = 202, // Beyond BA2 CPU architecture
+ EM_XCORE = 203, // XMOS xCORE processor family
+ EM_MCHP_PIC = 204, // Microchip 8-bit PIC(r) family
+ EM_INTEL205 = 205, // Reserved by Intel
+ EM_INTEL206 = 206, // Reserved by Intel
+ EM_INTEL207 = 207, // Reserved by Intel
+ EM_INTEL208 = 208, // Reserved by Intel
+ EM_INTEL209 = 209, // Reserved by Intel
+ EM_KM32 = 210, // KM211 KM32 32-bit processor
+ EM_KMX32 = 211, // KM211 KMX32 32-bit processor
+ EM_KMX16 = 212, // KM211 KMX16 16-bit processor
+ EM_KMX8 = 213, // KM211 KMX8 8-bit processor
+ EM_KVARC = 214, // KM211 KVARC processor
+ EM_CDP = 215, // Paneve CDP architecture family
+ EM_COGE = 216, // Cognitive Smart Memory Processor
+ EM_COOL = 217, // iCelero CoolEngine
+ EM_NORC = 218, // Nanoradio Optimized RISC
+ EM_CSR_KALIMBA = 219, // CSR Kalimba architecture family
+ EM_AMDGPU = 224 // AMD GPU architecture
+};
+
+// Object file classes.
+enum {
+ ELFCLASSNONE = 0,
+ ELFCLASS32 = 1, // 32-bit object file
+ ELFCLASS64 = 2 // 64-bit object file
+};
+
+// Object file byte orderings.
+enum {
+ ELFDATANONE = 0, // Invalid data encoding.
+ ELFDATA2LSB = 1, // Little-endian object file
+ ELFDATA2MSB = 2 // Big-endian object file
+};
+
+// OS ABI identification.
+enum {
+ ELFOSABI_NONE = 0, // UNIX System V ABI
+ ELFOSABI_HPUX = 1, // HP-UX operating system
+ ELFOSABI_NETBSD = 2, // NetBSD
+ ELFOSABI_GNU = 3, // GNU/Linux
+ ELFOSABI_LINUX = 3, // Historical alias for ELFOSABI_GNU.
+ ELFOSABI_HURD = 4, // GNU/Hurd
+ ELFOSABI_SOLARIS = 6, // Solaris
+ ELFOSABI_AIX = 7, // AIX
+ ELFOSABI_IRIX = 8, // IRIX
+ ELFOSABI_FREEBSD = 9, // FreeBSD
+ ELFOSABI_TRU64 = 10, // TRU64 UNIX
+ ELFOSABI_MODESTO = 11, // Novell Modesto
+ ELFOSABI_OPENBSD = 12, // OpenBSD
+ ELFOSABI_OPENVMS = 13, // OpenVMS
+ ELFOSABI_NSK = 14, // Hewlett-Packard Non-Stop Kernel
+ ELFOSABI_AROS = 15, // AROS
+ ELFOSABI_FENIXOS = 16, // FenixOS
+ ELFOSABI_CLOUDABI = 17, // Nuxi CloudABI
+ ELFOSABI_C6000_ELFABI = 64, // Bare-metal TMS320C6000
+ ELFOSABI_AMDGPU_HSA = 64, // AMD HSA runtime
+ ELFOSABI_C6000_LINUX = 65, // Linux TMS320C6000
+ ELFOSABI_ARM = 97, // ARM
+ ELFOSABI_STANDALONE = 255 // Standalone (embedded) application
+};
+
+#define ELF_RELOC(name, value) name = value,
+
+// X86_64 relocations.
+enum {
+#include "ELFRelocs/x86_64.def"
+};
+
+// i386 relocations.
+enum {
+#include "ELFRelocs/i386.def"
+};
+
+// ELF Relocation types for PPC32
+enum {
+#include "ELFRelocs/PowerPC.def"
+};
+
+// Specific e_flags for PPC64
+enum {
+ // e_flags bits specifying ABI:
+ // 1 for original ABI using function descriptors,
+ // 2 for revised ABI without function descriptors,
+ // 0 for unspecified or not using any features affected by the differences.
+ EF_PPC64_ABI = 3
+};
+
+// Special values for the st_other field in the symbol table entry for PPC64.
+enum {
+ STO_PPC64_LOCAL_BIT = 5,
+ STO_PPC64_LOCAL_MASK = (7 << STO_PPC64_LOCAL_BIT)
+};
+static inline int64_t
+decodePPC64LocalEntryOffset(unsigned Other) {
+ unsigned Val = (Other & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT;
+ return ((1 << Val) >> 2) << 2;
+}
+static inline unsigned
+encodePPC64LocalEntryOffset(int64_t Offset) {
+ unsigned Val = (Offset >= 4 * 4
+ ? (Offset >= 8 * 4
+ ? (Offset >= 16 * 4 ? 6 : 5)
+ : 4)
+ : (Offset >= 2 * 4
+ ? 3
+ : (Offset >= 1 * 4 ? 2 : 0)));
+ return Val << STO_PPC64_LOCAL_BIT;
+}
+
+// ELF Relocation types for PPC64
+enum {
+#include "ELFRelocs/PowerPC64.def"
+};
+
+// ELF Relocation types for AArch64
+enum {
+#include "ELFRelocs/AArch64.def"
+};
+
+// ARM Specific e_flags
+enum : unsigned {
+ EF_ARM_SOFT_FLOAT = 0x00000200U,
+ EF_ARM_VFP_FLOAT = 0x00000400U,
+ EF_ARM_EABI_UNKNOWN = 0x00000000U,
+ EF_ARM_EABI_VER1 = 0x01000000U,
+ EF_ARM_EABI_VER2 = 0x02000000U,
+ EF_ARM_EABI_VER3 = 0x03000000U,
+ EF_ARM_EABI_VER4 = 0x04000000U,
+ EF_ARM_EABI_VER5 = 0x05000000U,
+ EF_ARM_EABIMASK = 0xFF000000U
+};
+
+// ELF Relocation types for ARM
+enum {
+#include "ELFRelocs/ARM.def"
+};
+
+// AVR specific e_flags
+enum : unsigned {
+ EF_AVR_ARCH_AVR1 = 1,
+ EF_AVR_ARCH_AVR2 = 2,
+ EF_AVR_ARCH_AVR25 = 25,
+ EF_AVR_ARCH_AVR3 = 3,
+ EF_AVR_ARCH_AVR31 = 31,
+ EF_AVR_ARCH_AVR35 = 35,
+ EF_AVR_ARCH_AVR4 = 4,
+ EF_AVR_ARCH_AVR5 = 5,
+ EF_AVR_ARCH_AVR51 = 51,
+ EF_AVR_ARCH_AVR6 = 6,
+ EF_AVR_ARCH_AVRTINY = 100,
+ EF_AVR_ARCH_XMEGA1 = 101,
+ EF_AVR_ARCH_XMEGA2 = 102,
+ EF_AVR_ARCH_XMEGA3 = 103,
+ EF_AVR_ARCH_XMEGA4 = 104,
+ EF_AVR_ARCH_XMEGA5 = 105,
+ EF_AVR_ARCH_XMEGA6 = 106,
+ EF_AVR_ARCH_XMEGA7 = 107
+};
+
+// ELF Relocation types for AVR
+enum {
+#include "ELFRelocs/AVR.def"
+};
+
+// Mips Specific e_flags
+enum : unsigned {
+ EF_MIPS_NOREORDER = 0x00000001, // Don't reorder instructions
+ EF_MIPS_PIC = 0x00000002, // Position independent code
+ EF_MIPS_CPIC = 0x00000004, // Call object with position-independent code
+ EF_MIPS_ABI2 = 0x00000020, // File uses N32 ABI
+ EF_MIPS_32BITMODE = 0x00000100, // Code compiled for a 64-bit machine
+ // in 32-bit mode
+ EF_MIPS_FP64 = 0x00000200, // Code compiled for a 32-bit machine
+ // but uses 64-bit FP registers
+ EF_MIPS_NAN2008 = 0x00000400, // Uses IEEE 754-2008 NaN encoding
+
+ // ABI flags
+ EF_MIPS_ABI_O32 = 0x00001000, // This file follows the first MIPS 32 bit ABI
+ EF_MIPS_ABI_O64 = 0x00002000, // O32 ABI extended for 64-bit architecture.
+ EF_MIPS_ABI_EABI32 = 0x00003000, // EABI in 32 bit mode.
+ EF_MIPS_ABI_EABI64 = 0x00004000, // EABI in 64 bit mode.
+ EF_MIPS_ABI = 0x0000f000, // Mask for selecting EF_MIPS_ABI_ variant.
+
+ // MIPS machine variant
+ EF_MIPS_MACH_3900 = 0x00810000, // Toshiba R3900
+ EF_MIPS_MACH_4010 = 0x00820000, // LSI R4010
+ EF_MIPS_MACH_4100 = 0x00830000, // NEC VR4100
+ EF_MIPS_MACH_4650 = 0x00850000, // MIPS R4650
+ EF_MIPS_MACH_4120 = 0x00870000, // NEC VR4120
+ EF_MIPS_MACH_4111 = 0x00880000, // NEC VR4111/VR4181
+ EF_MIPS_MACH_SB1 = 0x008a0000, // Broadcom SB-1
+ EF_MIPS_MACH_OCTEON = 0x008b0000, // Cavium Networks Octeon
+ EF_MIPS_MACH_XLR = 0x008c0000, // RMI Xlr
+ EF_MIPS_MACH_OCTEON2 = 0x008d0000, // Cavium Networks Octeon2
+ EF_MIPS_MACH_OCTEON3 = 0x008e0000, // Cavium Networks Octeon3
+ EF_MIPS_MACH_5400 = 0x00910000, // NEC VR5400
+ EF_MIPS_MACH_5900 = 0x00920000, // MIPS R5900
+ EF_MIPS_MACH_5500 = 0x00980000, // NEC VR5500
+ EF_MIPS_MACH_9000 = 0x00990000, // Unknown
+ EF_MIPS_MACH_LS2E = 0x00a00000, // ST Microelectronics Loongson 2E
+ EF_MIPS_MACH_LS2F = 0x00a10000, // ST Microelectronics Loongson 2F
+ EF_MIPS_MACH_LS3A = 0x00a20000, // Loongson 3A
+ EF_MIPS_MACH = 0x00ff0000, // EF_MIPS_MACH_xxx selection mask
+
+ // ARCH_ASE
+ EF_MIPS_MICROMIPS = 0x02000000, // microMIPS
+ EF_MIPS_ARCH_ASE_M16 = 0x04000000, // Has Mips-16 ISA extensions
+ EF_MIPS_ARCH_ASE_MDMX = 0x08000000, // Has MDMX multimedia extensions
+ EF_MIPS_ARCH_ASE = 0x0f000000, // Mask for EF_MIPS_ARCH_ASE_xxx flags
+
+ // ARCH
+ EF_MIPS_ARCH_1 = 0x00000000, // MIPS1 instruction set
+ EF_MIPS_ARCH_2 = 0x10000000, // MIPS2 instruction set
+ EF_MIPS_ARCH_3 = 0x20000000, // MIPS3 instruction set
+ EF_MIPS_ARCH_4 = 0x30000000, // MIPS4 instruction set
+ EF_MIPS_ARCH_5 = 0x40000000, // MIPS5 instruction set
+ EF_MIPS_ARCH_32 = 0x50000000, // MIPS32 instruction set per Linux, not elf.h
+ EF_MIPS_ARCH_64 = 0x60000000, // MIPS64 instruction set per Linux, not elf.h
+ EF_MIPS_ARCH_32R2 = 0x70000000, // mips32r2, mips32r3, mips32r5
+ EF_MIPS_ARCH_64R2 = 0x80000000, // mips64r2, mips64r3, mips64r5
+ EF_MIPS_ARCH_32R6 = 0x90000000, // mips32r6
+ EF_MIPS_ARCH_64R6 = 0xa0000000, // mips64r6
+ EF_MIPS_ARCH = 0xf0000000 // Mask for applying EF_MIPS_ARCH_ variant
+};
+
+// ELF Relocation types for Mips
+enum {
+#include "ELFRelocs/Mips.def"
+};
+
+// Special values for the st_other field in the symbol table entry for MIPS.
+enum {
+ STO_MIPS_OPTIONAL = 0x04, // Symbol whose definition is optional
+ STO_MIPS_PLT = 0x08, // PLT entry related dynamic table record
+ STO_MIPS_PIC = 0x20, // PIC func in an object mixes PIC/non-PIC
+ STO_MIPS_MICROMIPS = 0x80, // MIPS Specific ISA for MicroMips
+ STO_MIPS_MIPS16 = 0xf0 // MIPS Specific ISA for Mips16
+};
+
+// .MIPS.options section descriptor kinds
+enum {
+ ODK_NULL = 0, // Undefined
+ ODK_REGINFO = 1, // Register usage information
+ ODK_EXCEPTIONS = 2, // Exception processing options
+ ODK_PAD = 3, // Section padding options
+ ODK_HWPATCH = 4, // Hardware patches applied
+ ODK_FILL = 5, // Linker fill value
+ ODK_TAGS = 6, // Space for tool identification
+ ODK_HWAND = 7, // Hardware AND patches applied
+ ODK_HWOR = 8, // Hardware OR patches applied
+ ODK_GP_GROUP = 9, // GP group to use for text/data sections
+ ODK_IDENT = 10, // ID information
+ ODK_PAGESIZE = 11 // Page size information
+};
+
+// Hexagon-specific e_flags
+enum {
+ // Object processor version flags, bits[11:0]
+ EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2
+ EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3
+ EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4
+ EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5
+ EF_HEXAGON_MACH_V55 = 0x00000005, // Hexagon V55
+ EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60
+
+ // Highest ISA version flags
+ EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0]
+ // of e_flags
+ EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA
+ EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA
+ EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA
+ EF_HEXAGON_ISA_V5 = 0x00000040, // Hexagon V5 ISA
+ EF_HEXAGON_ISA_V55 = 0x00000050, // Hexagon V55 ISA
+ EF_HEXAGON_ISA_V60 = 0x00000060 // Hexagon V60 ISA
+};
+
+// Hexagon-specific section indexes for common small data
+enum {
+ SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes
+ SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access
+ SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access
+ SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access
+ SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-sized access
+};
+
+// ELF Relocation types for Hexagon
+enum {
+#include "ELFRelocs/Hexagon.def"
+};
+
+// ELF Relocation types for S390/zSeries
+enum {
+#include "ELFRelocs/SystemZ.def"
+};
+
+// ELF Relocation type for Sparc.
+enum {
+#include "ELFRelocs/Sparc.def"
+};
+
+#undef ELF_RELOC
+
+// Section header.
+struct Elf32_Shdr {
+ Elf32_Word sh_name; // Section name (index into string table)
+ Elf32_Word sh_type; // Section type (SHT_*)
+ Elf32_Word sh_flags; // Section flags (SHF_*)
+ Elf32_Addr sh_addr; // Address where section is to be loaded
+ Elf32_Off sh_offset; // File offset of section data, in bytes
+ Elf32_Word sh_size; // Size of section, in bytes
+ Elf32_Word sh_link; // Section type-specific header table index link
+ Elf32_Word sh_info; // Section type-specific extra information
+ Elf32_Word sh_addralign; // Section address alignment
+ Elf32_Word sh_entsize; // Size of records contained within the section
+};
+
+// Section header for ELF64 - same fields as ELF32, different types.
+struct Elf64_Shdr {
+ Elf64_Word sh_name;
+ Elf64_Word sh_type;
+ Elf64_Xword sh_flags;
+ Elf64_Addr sh_addr;
+ Elf64_Off sh_offset;
+ Elf64_Xword sh_size;
+ Elf64_Word sh_link;
+ Elf64_Word sh_info;
+ Elf64_Xword sh_addralign;
+ Elf64_Xword sh_entsize;
+};
+
+// Special section indices.
+enum {
+ SHN_UNDEF = 0, // Undefined, missing, irrelevant, or meaningless
+ SHN_LORESERVE = 0xff00, // Lowest reserved index
+ SHN_LOPROC = 0xff00, // Lowest processor-specific index
+ SHN_HIPROC = 0xff1f, // Highest processor-specific index
+ SHN_LOOS = 0xff20, // Lowest operating system-specific index
+ SHN_HIOS = 0xff3f, // Highest operating system-specific index
+ SHN_ABS = 0xfff1, // Symbol has absolute value; does not need relocation
+ SHN_COMMON = 0xfff2, // FORTRAN COMMON or C external global variables
+ SHN_XINDEX = 0xffff, // Mark that the index is >= SHN_LORESERVE
+ SHN_HIRESERVE = 0xffff // Highest reserved index
+};
+
+// Section types.
+enum : unsigned {
+ SHT_NULL = 0, // No associated section (inactive entry).
+ SHT_PROGBITS = 1, // Program-defined contents.
+ SHT_SYMTAB = 2, // Symbol table.
+ SHT_STRTAB = 3, // String table.
+ SHT_RELA = 4, // Relocation entries; explicit addends.
+ SHT_HASH = 5, // Symbol hash table.
+ SHT_DYNAMIC = 6, // Information for dynamic linking.
+ SHT_NOTE = 7, // Information about the file.
+ SHT_NOBITS = 8, // Data occupies no space in the file.
+ SHT_REL = 9, // Relocation entries; no explicit addends.
+ SHT_SHLIB = 10, // Reserved.
+ SHT_DYNSYM = 11, // Symbol table.
+ SHT_INIT_ARRAY = 14, // Pointers to initialization functions.
+ SHT_FINI_ARRAY = 15, // Pointers to termination functions.
+ SHT_PREINIT_ARRAY = 16, // Pointers to pre-init functions.
+ SHT_GROUP = 17, // Section group.
+ SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
+ SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
+ SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
+ SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
+ SHT_GNU_verneed = 0x6ffffffe, // GNU version references.
+ SHT_GNU_versym = 0x6fffffff, // GNU symbol versions table.
+ SHT_HIOS = 0x6fffffff, // Highest operating system-specific type.
+ SHT_LOPROC = 0x70000000, // Lowest processor arch-specific type.
+ // Fixme: All this is duplicated in MCSectionELF. Why??
+ // Exception Index table
+ SHT_ARM_EXIDX = 0x70000001U,
+ // BPABI DLL dynamic linking pre-emption map
+ SHT_ARM_PREEMPTMAP = 0x70000002U,
+ // Object file compatibility attributes
+ SHT_ARM_ATTRIBUTES = 0x70000003U,
+ SHT_ARM_DEBUGOVERLAY = 0x70000004U,
+ SHT_ARM_OVERLAYSECTION = 0x70000005U,
+ SHT_HEX_ORDERED = 0x70000000, // Link editor is to sort the entries in
+ // this section based on their sizes
+ SHT_X86_64_UNWIND = 0x70000001, // Unwind information
+
+ SHT_MIPS_REGINFO = 0x70000006, // Register usage information
+ SHT_MIPS_OPTIONS = 0x7000000d, // General options
+ SHT_MIPS_ABIFLAGS = 0x7000002a, // ABI information.
+
+ SHT_HIPROC = 0x7fffffff, // Highest processor arch-specific type.
+ SHT_LOUSER = 0x80000000, // Lowest type reserved for applications.
+ SHT_HIUSER = 0xffffffff // Highest type reserved for applications.
+};
+
+// Section flags.
+enum : unsigned {
+ // Section data should be writable during execution.
+ SHF_WRITE = 0x1,
+
+ // Section occupies memory during program execution.
+ SHF_ALLOC = 0x2,
+
+ // Section contains executable machine instructions.
+ SHF_EXECINSTR = 0x4,
+
+ // The data in this section may be merged.
+ SHF_MERGE = 0x10,
+
+ // The data in this section is null-terminated strings.
+ SHF_STRINGS = 0x20,
+
+ // A field in this section holds a section header table index.
+ SHF_INFO_LINK = 0x40U,
+
+ // Adds special ordering requirements for link editors.
+ SHF_LINK_ORDER = 0x80U,
+
+ // This section requires special OS-specific processing to avoid incorrect
+ // behavior.
+ SHF_OS_NONCONFORMING = 0x100U,
+
+ // This section is a member of a section group.
+ SHF_GROUP = 0x200U,
+
+ // This section holds Thread-Local Storage.
+ SHF_TLS = 0x400U,
+
+ // This section is excluded from the final executable or shared library.
+ SHF_EXCLUDE = 0x80000000U,
+
+ // Start of target-specific flags.
+
+ /// XCORE_SHF_CP_SECTION - All sections with the "c" flag are grouped
+ /// together by the linker to form the constant pool and the cp register is
+ /// set to the start of the constant pool by the boot code.
+ XCORE_SHF_CP_SECTION = 0x800U,
+
+ /// XCORE_SHF_DP_SECTION - All sections with the "d" flag are grouped
+ /// together by the linker to form the data section and the dp register is
+ /// set to the start of the section by the boot code.
+ XCORE_SHF_DP_SECTION = 0x1000U,
+
+ SHF_MASKOS = 0x0ff00000,
+
+ // Bits indicating processor-specific flags.
+ SHF_MASKPROC = 0xf0000000,
+
+ // If an object file section does not have this flag set, then it may not hold
+ // more than 2GB and can be freely referred to in objects using smaller code
+ // models. Otherwise, only objects using larger code models can refer to them.
+ // For example, a medium code model object can refer to data in a section that
+ // sets this flag besides being able to refer to data in a section that does
+ // not set it; likewise, a small code model object can refer only to code in a
+ // section that does not set this flag.
+ SHF_X86_64_LARGE = 0x10000000,
+
+ // All sections with the GPREL flag are grouped into a global data area
+ // for faster access.
+ SHF_HEX_GPREL = 0x10000000,
+
+ // Section contains text/data which may be replicated in other sections.
+ // Linker must retain only one copy.
+ SHF_MIPS_NODUPES = 0x01000000,
+
+ // Linker must generate implicit hidden weak names.
+ SHF_MIPS_NAMES = 0x02000000,
+
+ // Section data local to process.
+ SHF_MIPS_LOCAL = 0x04000000,
+
+ // Do not strip this section.
+ SHF_MIPS_NOSTRIP = 0x08000000,
+
+ // Section must be part of global data area.
+ SHF_MIPS_GPREL = 0x10000000,
+
+ // This section should be merged.
+ SHF_MIPS_MERGE = 0x20000000,
+
+ // Address size to be inferred from section entry size.
+ SHF_MIPS_ADDR = 0x40000000,
+
+ // Section data is string data by default.
+ SHF_MIPS_STRING = 0x80000000,
+
+ SHF_AMDGPU_HSA_GLOBAL = 0x00100000,
+ SHF_AMDGPU_HSA_READONLY = 0x00200000,
+ SHF_AMDGPU_HSA_CODE = 0x00400000,
+ SHF_AMDGPU_HSA_AGENT = 0x00800000
+};
+
+// Section Group Flags
+enum : unsigned {
+ GRP_COMDAT = 0x1,
+ GRP_MASKOS = 0x0ff00000,
+ GRP_MASKPROC = 0xf0000000
+};
+
+// Symbol table entries for ELF32.
+struct Elf32_Sym {
+ Elf32_Word st_name; // Symbol name (index into string table)
+ Elf32_Addr st_value; // Value or address associated with the symbol
+ Elf32_Word st_size; // Size of the symbol
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf32_Half st_shndx; // Which section (header table index) it's defined in
+
+ // These accessors and mutators correspond to the ELF32_ST_BIND,
+ // ELF32_ST_TYPE, and ELF32_ST_INFO macros defined in the ELF specification:
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+
+// Symbol table entries for ELF64.
+struct Elf64_Sym {
+ Elf64_Word st_name; // Symbol name (index into string table)
+ unsigned char st_info; // Symbol's type and binding attributes
+ unsigned char st_other; // Must be zero; reserved
+ Elf64_Half st_shndx; // Which section (header tbl index) it's defined in
+ Elf64_Addr st_value; // Value or address associated with the symbol
+ Elf64_Xword st_size; // Size of the symbol
+
+ // These accessors and mutators are identical to those defined for ELF32
+ // symbol table entries.
+ unsigned char getBinding() const { return st_info >> 4; }
+ unsigned char getType() const { return st_info & 0x0f; }
+ void setBinding(unsigned char b) { setBindingAndType(b, getType()); }
+ void setType(unsigned char t) { setBindingAndType(getBinding(), t); }
+ void setBindingAndType(unsigned char b, unsigned char t) {
+ st_info = (b << 4) + (t & 0x0f);
+ }
+};
+
+// The size (in bytes) of symbol table entries.
+enum {
+ SYMENTRY_SIZE32 = 16, // 32-bit symbol entry size
+ SYMENTRY_SIZE64 = 24 // 64-bit symbol entry size.
+};
+
+// Symbol bindings.
+enum {
+ STB_LOCAL = 0, // Local symbol, not visible outside obj file containing def
+ STB_GLOBAL = 1, // Global symbol, visible to all object files being combined
+ STB_WEAK = 2, // Weak symbol, like global but lower-precedence
+ STB_GNU_UNIQUE = 10,
+ STB_LOOS = 10, // Lowest operating system-specific binding type
+ STB_HIOS = 12, // Highest operating system-specific binding type
+ STB_LOPROC = 13, // Lowest processor-specific binding type
+ STB_HIPROC = 15 // Highest processor-specific binding type
+};
+
+// Symbol types.
+enum {
+ STT_NOTYPE = 0, // Symbol's type is not specified
+ STT_OBJECT = 1, // Symbol is a data object (variable, array, etc.)
+ STT_FUNC = 2, // Symbol is executable code (function, etc.)
+ STT_SECTION = 3, // Symbol refers to a section
+ STT_FILE = 4, // Local, absolute symbol that refers to a file
+ STT_COMMON = 5, // An uninitialized common block
+ STT_TLS = 6, // Thread local data object
+ STT_GNU_IFUNC = 10, // GNU indirect function
+ STT_LOOS = 10, // Lowest operating system-specific symbol type
+ STT_HIOS = 12, // Highest operating system-specific symbol type
+ STT_LOPROC = 13, // Lowest processor-specific symbol type
+ STT_HIPROC = 15, // Highest processor-specific symbol type
+
+ // AMDGPU symbol types
+ STT_AMDGPU_HSA_KERNEL = 10,
+ STT_AMDGPU_HSA_INDIRECT_FUNCTION = 11,
+ STT_AMDGPU_HSA_METADATA = 12
+};
+
+enum {
+ STV_DEFAULT = 0, // Visibility is specified by binding type
+ STV_INTERNAL = 1, // Defined by processor supplements
+ STV_HIDDEN = 2, // Not visible to other components
+ STV_PROTECTED = 3 // Visible in other components but not preemptable
+};
+
+// Symbol number.
+enum {
+ STN_UNDEF = 0
+};
+
+// Special relocation symbols used in the MIPS64 ELF relocation entries
+enum {
+ RSS_UNDEF = 0, // None
+ RSS_GP = 1, // Value of gp
+ RSS_GP0 = 2, // Value of gp used to create object being relocated
+ RSS_LOC = 3 // Address of location being relocated
+};
+
+// Relocation entry, without explicit addend.
+struct Elf32_Rel {
+ Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf32_Word r_info; // Symbol table index and type of relocation to apply
+
+ // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+ // and ELF32_R_INFO macros defined in the ELF specification:
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
+ r_info = (s << 8) + t;
+ }
+};
+
+// Relocation entry with explicit addend.
+struct Elf32_Rela {
+ Elf32_Addr r_offset; // Location (file byte offset, or program virtual addr)
+ Elf32_Word r_info; // Symbol table index and type of relocation to apply
+ Elf32_Sword r_addend; // Compute value for relocatable field by adding this
+
+ // These accessors and mutators correspond to the ELF32_R_SYM, ELF32_R_TYPE,
+ // and ELF32_R_INFO macros defined in the ELF specification:
+ Elf32_Word getSymbol() const { return (r_info >> 8); }
+ unsigned char getType() const { return (unsigned char) (r_info & 0x0ff); }
+ void setSymbol(Elf32_Word s) { setSymbolAndType(s, getType()); }
+ void setType(unsigned char t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf32_Word s, unsigned char t) {
+ r_info = (s << 8) + t;
+ }
+};
+
+// Relocation entry, without explicit addend.
+struct Elf64_Rel {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Word getSymbol() const { return (r_info >> 32); }
+ Elf64_Word getType() const {
+ return (Elf64_Word) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
+ void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
+ r_info = ((Elf64_Xword)s << 32) + (t&0xffffffffL);
+ }
+};
+
+// Relocation entry with explicit addend.
+struct Elf64_Rela {
+ Elf64_Addr r_offset; // Location (file byte offset, or program virtual addr).
+ Elf64_Xword r_info; // Symbol table index and type of relocation to apply.
+ Elf64_Sxword r_addend; // Compute value for relocatable field by adding this.
+
+ // These accessors and mutators correspond to the ELF64_R_SYM, ELF64_R_TYPE,
+ // and ELF64_R_INFO macros defined in the ELF specification:
+ Elf64_Word getSymbol() const { return (r_info >> 32); }
+ Elf64_Word getType() const {
+ return (Elf64_Word) (r_info & 0xffffffffL);
+ }
+ void setSymbol(Elf64_Word s) { setSymbolAndType(s, getType()); }
+ void setType(Elf64_Word t) { setSymbolAndType(getSymbol(), t); }
+ void setSymbolAndType(Elf64_Word s, Elf64_Word t) {
+ r_info = ((Elf64_Xword)s << 32) + (t&0xffffffffL);
+ }
+};
+
+// Program header for ELF32.
+struct Elf32_Phdr {
+ Elf32_Word p_type; // Type of segment
+ Elf32_Off p_offset; // File offset where segment is located, in bytes
+ Elf32_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf32_Addr p_paddr; // Physical address of beginning of segment (OS-specific)
+ Elf32_Word p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf32_Word p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf32_Word p_flags; // Segment flags
+ Elf32_Word p_align; // Segment alignment constraint
+};
+
+// Program header for ELF64.
+struct Elf64_Phdr {
+ Elf64_Word p_type; // Type of segment
+ Elf64_Word p_flags; // Segment flags
+ Elf64_Off p_offset; // File offset where segment is located, in bytes
+ Elf64_Addr p_vaddr; // Virtual address of beginning of segment
+ Elf64_Addr p_paddr; // Physical addr of beginning of segment (OS-specific)
+ Elf64_Xword p_filesz; // Num. of bytes in file image of segment (may be zero)
+ Elf64_Xword p_memsz; // Num. of bytes in mem image of segment (may be zero)
+ Elf64_Xword p_align; // Segment alignment constraint
+};
+
+// Segment types.
+enum {
+ PT_NULL = 0, // Unused segment.
+ PT_LOAD = 1, // Loadable segment.
+ PT_DYNAMIC = 2, // Dynamic linking information.
+ PT_INTERP = 3, // Interpreter pathname.
+ PT_NOTE = 4, // Auxiliary information.
+ PT_SHLIB = 5, // Reserved.
+ PT_PHDR = 6, // The program header table itself.
+ PT_TLS = 7, // The thread-local storage template.
+ PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type.
+ PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type.
+ PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type.
+ PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type.
+
+ // x86-64 program header types.
+ // These all contain stack unwind tables.
+ PT_GNU_EH_FRAME = 0x6474e550,
+ PT_SUNW_EH_FRAME = 0x6474e550,
+ PT_SUNW_UNWIND = 0x6464e550,
+
+ PT_GNU_STACK = 0x6474e551, // Indicates stack executability.
+ PT_GNU_RELRO = 0x6474e552, // Read-only after relocation.
+
+ // ARM program header types.
+ PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility info
+ // These all contain stack unwind tables.
+ PT_ARM_EXIDX = 0x70000001,
+ PT_ARM_UNWIND = 0x70000001,
+
+ // MIPS program header types.
+ PT_MIPS_REGINFO = 0x70000000, // Register usage information.
+ PT_MIPS_RTPROC = 0x70000001, // Runtime procedure table.
+ PT_MIPS_OPTIONS = 0x70000002, // Options segment.
+ PT_MIPS_ABIFLAGS = 0x70000003, // Abiflags segment.
+
+ // AMDGPU program header types.
+ PT_AMDGPU_HSA_LOAD_GLOBAL_PROGRAM = 0x60000000,
+ PT_AMDGPU_HSA_LOAD_GLOBAL_AGENT = 0x60000001,
+ PT_AMDGPU_HSA_LOAD_READONLY_AGENT = 0x60000002,
+ PT_AMDGPU_HSA_LOAD_CODE_AGENT = 0x60000003
+};
+
+// Segment flag bits.
+enum : unsigned {
+ PF_X = 1, // Execute
+ PF_W = 2, // Write
+ PF_R = 4, // Read
+ PF_MASKOS = 0x0ff00000, // Bits for operating system-specific semantics.
+ PF_MASKPROC = 0xf0000000 // Bits for processor-specific semantics.
+};
+
+// Dynamic table entry for ELF32.
+ struct Elf32_Dyn {
+ Elf32_Sword d_tag; // Type of dynamic table entry.
+ union {
+ Elf32_Word d_val; // Integer value of entry.
+ Elf32_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+ };
+
+// Dynamic table entry for ELF64.
+ struct Elf64_Dyn {
+ Elf64_Sxword d_tag; // Type of dynamic table entry.
+ union {
+ Elf64_Xword d_val; // Integer value of entry.
+ Elf64_Addr d_ptr; // Pointer value of entry.
+ } d_un;
+ };
+
+// Dynamic table entry tags.
+enum {
+ DT_NULL = 0, // Marks end of dynamic array.
+ DT_NEEDED = 1, // String table offset of needed library.
+ DT_PLTRELSZ = 2, // Size of relocation entries in PLT.
+ DT_PLTGOT = 3, // Address associated with linkage table.
+ DT_HASH = 4, // Address of symbolic hash table.
+ DT_STRTAB = 5, // Address of dynamic string table.
+ DT_SYMTAB = 6, // Address of dynamic symbol table.
+ DT_RELA = 7, // Address of relocation table (Rela entries).
+ DT_RELASZ = 8, // Size of Rela relocation table.
+ DT_RELAENT = 9, // Size of a Rela relocation entry.
+ DT_STRSZ = 10, // Total size of the string table.
+ DT_SYMENT = 11, // Size of a symbol table entry.
+ DT_INIT = 12, // Address of initialization function.
+ DT_FINI = 13, // Address of termination function.
+ DT_SONAME = 14, // String table offset of a shared objects name.
+ DT_RPATH = 15, // String table offset of library search path.
+ DT_SYMBOLIC = 16, // Changes symbol resolution algorithm.
+ DT_REL = 17, // Address of relocation table (Rel entries).
+ DT_RELSZ = 18, // Size of Rel relocation table.
+ DT_RELENT = 19, // Size of a Rel relocation entry.
+ DT_PLTREL = 20, // Type of relocation entry used for linking.
+ DT_DEBUG = 21, // Reserved for debugger.
+ DT_TEXTREL = 22, // Relocations exist for non-writable segments.
+ DT_JMPREL = 23, // Address of relocations associated with PLT.
+ DT_BIND_NOW = 24, // Process all relocations before execution.
+ DT_INIT_ARRAY = 25, // Pointer to array of initialization functions.
+ DT_FINI_ARRAY = 26, // Pointer to array of termination functions.
+ DT_INIT_ARRAYSZ = 27, // Size of DT_INIT_ARRAY.
+ DT_FINI_ARRAYSZ = 28, // Size of DT_FINI_ARRAY.
+ DT_RUNPATH = 29, // String table offset of lib search path.
+ DT_FLAGS = 30, // Flags.
+ DT_ENCODING = 32, // Values from here to DT_LOOS follow the rules
+ // for the interpretation of the d_un union.
+
+ DT_PREINIT_ARRAY = 32, // Pointer to array of preinit functions.
+ DT_PREINIT_ARRAYSZ = 33, // Size of the DT_PREINIT_ARRAY array.
+
+ DT_LOOS = 0x60000000, // Start of environment specific tags.
+ DT_HIOS = 0x6FFFFFFF, // End of environment specific tags.
+ DT_LOPROC = 0x70000000, // Start of processor specific tags.
+ DT_HIPROC = 0x7FFFFFFF, // End of processor specific tags.
+
+ DT_GNU_HASH = 0x6FFFFEF5, // Reference to the GNU hash table.
+ DT_RELACOUNT = 0x6FFFFFF9, // ELF32_Rela count.
+ DT_RELCOUNT = 0x6FFFFFFA, // ELF32_Rel count.
+
+ DT_FLAGS_1 = 0x6FFFFFFB, // Flags_1.
+ DT_VERSYM = 0x6FFFFFF0, // The address of the .gnu.version section.
+ DT_VERDEF = 0x6FFFFFFC, // The address of the version definition table.
+ DT_VERDEFNUM = 0x6FFFFFFD, // The number of entries in DT_VERDEF.
+ DT_VERNEED = 0x6FFFFFFE, // The address of the version dependency table.
+ DT_VERNEEDNUM = 0x6FFFFFFF, // The number of entries in DT_VERNEED.
+
+ // Mips specific dynamic table entry tags.
+ DT_MIPS_RLD_VERSION = 0x70000001, // 32 bit version number for runtime
+ // linker interface.
+ DT_MIPS_TIME_STAMP = 0x70000002, // Time stamp.
+ DT_MIPS_ICHECKSUM = 0x70000003, // Checksum of external strings
+ // and common sizes.
+ DT_MIPS_IVERSION = 0x70000004, // Index of version string
+ // in string table.
+ DT_MIPS_FLAGS = 0x70000005, // 32 bits of flags.
+ DT_MIPS_BASE_ADDRESS = 0x70000006, // Base address of the segment.
+ DT_MIPS_MSYM = 0x70000007, // Address of .msym section.
+ DT_MIPS_CONFLICT = 0x70000008, // Address of .conflict section.
+ DT_MIPS_LIBLIST = 0x70000009, // Address of .liblist section.
+ DT_MIPS_LOCAL_GOTNO = 0x7000000a, // Number of local global offset
+ // table entries.
+ DT_MIPS_CONFLICTNO = 0x7000000b, // Number of entries
+ // in the .conflict section.
+ DT_MIPS_LIBLISTNO = 0x70000010, // Number of entries
+ // in the .liblist section.
+ DT_MIPS_SYMTABNO = 0x70000011, // Number of entries
+ // in the .dynsym section.
+ DT_MIPS_UNREFEXTNO = 0x70000012, // Index of first external dynamic symbol
+ // not referenced locally.
+ DT_MIPS_GOTSYM = 0x70000013, // Index of first dynamic symbol
+ // in global offset table.
+ DT_MIPS_HIPAGENO = 0x70000014, // Number of page table entries
+ // in global offset table.
+ DT_MIPS_RLD_MAP = 0x70000016, // Address of run time loader map,
+ // used for debugging.
+ DT_MIPS_DELTA_CLASS = 0x70000017, // Delta C++ class definition.
+ DT_MIPS_DELTA_CLASS_NO = 0x70000018, // Number of entries
+ // in DT_MIPS_DELTA_CLASS.
+ DT_MIPS_DELTA_INSTANCE = 0x70000019, // Delta C++ class instances.
+ DT_MIPS_DELTA_INSTANCE_NO = 0x7000001A, // Number of entries
+ // in DT_MIPS_DELTA_INSTANCE.
+ DT_MIPS_DELTA_RELOC = 0x7000001B, // Delta relocations.
+ DT_MIPS_DELTA_RELOC_NO = 0x7000001C, // Number of entries
+ // in DT_MIPS_DELTA_RELOC.
+ DT_MIPS_DELTA_SYM = 0x7000001D, // Delta symbols that Delta
+ // relocations refer to.
+ DT_MIPS_DELTA_SYM_NO = 0x7000001E, // Number of entries
+ // in DT_MIPS_DELTA_SYM.
+ DT_MIPS_DELTA_CLASSSYM = 0x70000020, // Delta symbols that hold
+ // class declarations.
+ DT_MIPS_DELTA_CLASSSYM_NO = 0x70000021, // Number of entries
+ // in DT_MIPS_DELTA_CLASSSYM.
+ DT_MIPS_CXX_FLAGS = 0x70000022, // Flags indicating information
+ // about C++ flavor.
+ DT_MIPS_PIXIE_INIT = 0x70000023, // Pixie information.
+ DT_MIPS_SYMBOL_LIB = 0x70000024, // Address of .MIPS.symlib
+ DT_MIPS_LOCALPAGE_GOTIDX = 0x70000025, // The GOT index of the first PTE
+ // for a segment
+ DT_MIPS_LOCAL_GOTIDX = 0x70000026, // The GOT index of the first PTE
+ // for a local symbol
+ DT_MIPS_HIDDEN_GOTIDX = 0x70000027, // The GOT index of the first PTE
+ // for a hidden symbol
+ DT_MIPS_PROTECTED_GOTIDX = 0x70000028, // The GOT index of the first PTE
+ // for a protected symbol
+ DT_MIPS_OPTIONS = 0x70000029, // Address of `.MIPS.options'.
+ DT_MIPS_INTERFACE = 0x7000002A, // Address of `.interface'.
+ DT_MIPS_DYNSTR_ALIGN = 0x7000002B, // Unknown.
+ DT_MIPS_INTERFACE_SIZE = 0x7000002C, // Size of the .interface section.
+ DT_MIPS_RLD_TEXT_RESOLVE_ADDR = 0x7000002D, // Size of rld_text_resolve
+ // function stored in the GOT.
+ DT_MIPS_PERF_SUFFIX = 0x7000002E, // Default suffix of DSO to be added
+ // by rld on dlopen() calls.
+ DT_MIPS_COMPACT_SIZE = 0x7000002F, // Size of compact relocation
+ // section (O32).
+ DT_MIPS_GP_VALUE = 0x70000030, // GP value for auxiliary GOTs.
+ DT_MIPS_AUX_DYNAMIC = 0x70000031, // Address of auxiliary .dynamic.
+ DT_MIPS_PLTGOT = 0x70000032, // Address of the base of the PLTGOT.
+ DT_MIPS_RWPLT = 0x70000034, // Points to the base
+ // of a writable PLT.
+ DT_MIPS_RLD_MAP_REL = 0x70000035 // Relative offset of run time loader
+ // map, used for debugging.
+};
+
+// DT_FLAGS values.
+enum {
+ DF_ORIGIN = 0x01, // The object may reference $ORIGIN.
+ DF_SYMBOLIC = 0x02, // Search the shared lib before searching the exe.
+ DF_TEXTREL = 0x04, // Relocations may modify a non-writable segment.
+ DF_BIND_NOW = 0x08, // Process all relocations on load.
+ DF_STATIC_TLS = 0x10 // Reject attempts to load dynamically.
+};
+
+// State flags selectable in the `d_un.d_val' element of the DT_FLAGS_1 entry.
+enum {
+ DF_1_NOW = 0x00000001, // Set RTLD_NOW for this object.
+ DF_1_GLOBAL = 0x00000002, // Set RTLD_GLOBAL for this object.
+ DF_1_GROUP = 0x00000004, // Set RTLD_GROUP for this object.
+ DF_1_NODELETE = 0x00000008, // Set RTLD_NODELETE for this object.
+ DF_1_LOADFLTR = 0x00000010, // Trigger filtee loading at runtime.
+ DF_1_INITFIRST = 0x00000020, // Set RTLD_INITFIRST for this object.
+ DF_1_NOOPEN = 0x00000040, // Set RTLD_NOOPEN for this object.
+ DF_1_ORIGIN = 0x00000080, // $ORIGIN must be handled.
+ DF_1_DIRECT = 0x00000100, // Direct binding enabled.
+ DF_1_TRANS = 0x00000200,
+ DF_1_INTERPOSE = 0x00000400, // Object is used to interpose.
+ DF_1_NODEFLIB = 0x00000800, // Ignore default lib search path.
+ DF_1_NODUMP = 0x00001000, // Object can't be dldump'ed.
+ DF_1_CONFALT = 0x00002000, // Configuration alternative created.
+ DF_1_ENDFILTEE = 0x00004000, // Filtee terminates filters search.
+ DF_1_DISPRELDNE = 0x00008000, // Disp reloc applied at build time.
+ DF_1_DISPRELPND = 0x00010000, // Disp reloc applied at run-time.
+ DF_1_NODIRECT = 0x00020000, // Object has no-direct binding.
+ DF_1_IGNMULDEF = 0x00040000,
+ DF_1_NOKSYMS = 0x00080000,
+ DF_1_NOHDR = 0x00100000,
+ DF_1_EDITED = 0x00200000, // Object is modified after built.
+ DF_1_NORELOC = 0x00400000,
+ DF_1_SYMINTPOSE = 0x00800000, // Object has individual interposers.
+ DF_1_GLOBAUDIT = 0x01000000, // Global auditing required.
+ DF_1_SINGLETON = 0x02000000 // Singleton symbols are used.
+};
+
+// DT_MIPS_FLAGS values.
+enum {
+ RHF_NONE = 0x00000000, // No flags.
+ RHF_QUICKSTART = 0x00000001, // Uses shortcut pointers.
+ RHF_NOTPOT = 0x00000002, // Hash size is not a power of two.
+ RHS_NO_LIBRARY_REPLACEMENT = 0x00000004, // Ignore LD_LIBRARY_PATH.
+ RHF_NO_MOVE = 0x00000008, // DSO address may not be relocated.
+ RHF_SGI_ONLY = 0x00000010, // SGI specific features.
+ RHF_GUARANTEE_INIT = 0x00000020, // Guarantee that .init will finish
+ // executing before any non-init
+ // code in DSO is called.
+ RHF_DELTA_C_PLUS_PLUS = 0x00000040, // Contains Delta C++ code.
+ RHF_GUARANTEE_START_INIT = 0x00000080, // Guarantee that .init will start
+ // executing before any non-init
+ // code in DSO is called.
+ RHF_PIXIE = 0x00000100, // Generated by pixie.
+ RHF_DEFAULT_DELAY_LOAD = 0x00000200, // Delay-load DSO by default.
+ RHF_REQUICKSTART = 0x00000400, // Object may be requickstarted.
+ RHF_REQUICKSTARTED = 0x00000800, // Object has been requickstarted.
+ RHF_CORD = 0x00001000, // Generated by cord.
+ RHF_NO_UNRES_UNDEF = 0x00002000, // Object contains no unresolved
+ // undef symbols.
+ RHF_RLD_ORDER_SAFE = 0x00004000 // Symbol table is in a safe order.
+};
+
+// ElfXX_VerDef structure version (GNU versioning)
+enum {
+ VER_DEF_NONE = 0,
+ VER_DEF_CURRENT = 1
+};
+
+// VerDef Flags (ElfXX_VerDef::vd_flags)
+enum {
+ VER_FLG_BASE = 0x1,
+ VER_FLG_WEAK = 0x2,
+ VER_FLG_INFO = 0x4
+};
+
+// Special constants for the version table. (SHT_GNU_versym/.gnu.version)
+enum {
+ VER_NDX_LOCAL = 0, // Unversioned local symbol
+ VER_NDX_GLOBAL = 1, // Unversioned global symbol
+ VERSYM_VERSION = 0x7fff, // Version Index mask
+ VERSYM_HIDDEN = 0x8000 // Hidden bit (non-default version)
+};
+
+// ElfXX_VerNeed structure version (GNU versioning)
+enum {
+ VER_NEED_NONE = 0,
+ VER_NEED_CURRENT = 1
+};
+
+} // end namespace ELF
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/AArch64.def b/contrib/llvm/include/llvm/Support/ELFRelocs/AArch64.def
new file mode 100644
index 0000000..aa0c560
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/AArch64.def
@@ -0,0 +1,147 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// ABI release 1.0
+ELF_RELOC(R_AARCH64_NONE, 0)
+
+ELF_RELOC(R_AARCH64_ABS64, 0x101)
+ELF_RELOC(R_AARCH64_ABS32, 0x102)
+ELF_RELOC(R_AARCH64_ABS16, 0x103)
+ELF_RELOC(R_AARCH64_PREL64, 0x104)
+ELF_RELOC(R_AARCH64_PREL32, 0x105)
+ELF_RELOC(R_AARCH64_PREL16, 0x106)
+
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0, 0x107)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G0_NC, 0x108)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1, 0x109)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G1_NC, 0x10a)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2, 0x10b)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G2_NC, 0x10c)
+ELF_RELOC(R_AARCH64_MOVW_UABS_G3, 0x10d)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G0, 0x10e)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G1, 0x10f)
+ELF_RELOC(R_AARCH64_MOVW_SABS_G2, 0x110)
+
+ELF_RELOC(R_AARCH64_LD_PREL_LO19, 0x111)
+ELF_RELOC(R_AARCH64_ADR_PREL_LO21, 0x112)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21, 0x113)
+ELF_RELOC(R_AARCH64_ADR_PREL_PG_HI21_NC, 0x114)
+ELF_RELOC(R_AARCH64_ADD_ABS_LO12_NC, 0x115)
+ELF_RELOC(R_AARCH64_LDST8_ABS_LO12_NC, 0x116)
+
+ELF_RELOC(R_AARCH64_TSTBR14, 0x117)
+ELF_RELOC(R_AARCH64_CONDBR19, 0x118)
+ELF_RELOC(R_AARCH64_JUMP26, 0x11a)
+ELF_RELOC(R_AARCH64_CALL26, 0x11b)
+
+ELF_RELOC(R_AARCH64_LDST16_ABS_LO12_NC, 0x11c)
+ELF_RELOC(R_AARCH64_LDST32_ABS_LO12_NC, 0x11d)
+ELF_RELOC(R_AARCH64_LDST64_ABS_LO12_NC, 0x11e)
+
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0, 0x11f)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G0_NC, 0x120)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1, 0x121)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G1_NC, 0x122)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2, 0x123)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G2_NC, 0x124)
+ELF_RELOC(R_AARCH64_MOVW_PREL_G3, 0x125)
+
+ELF_RELOC(R_AARCH64_LDST128_ABS_LO12_NC, 0x12b)
+
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0, 0x12c)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G0_NC, 0x12d)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1, 0x12e)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G1_NC, 0x12f)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2, 0x130)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G2_NC, 0x131)
+ELF_RELOC(R_AARCH64_MOVW_GOTOFF_G3, 0x132)
+
+ELF_RELOC(R_AARCH64_GOTREL64, 0x133)
+ELF_RELOC(R_AARCH64_GOTREL32, 0x134)
+
+ELF_RELOC(R_AARCH64_GOT_LD_PREL19, 0x135)
+ELF_RELOC(R_AARCH64_LD64_GOTOFF_LO15, 0x136)
+ELF_RELOC(R_AARCH64_ADR_GOT_PAGE, 0x137)
+ELF_RELOC(R_AARCH64_LD64_GOT_LO12_NC, 0x138)
+ELF_RELOC(R_AARCH64_LD64_GOTPAGE_LO15, 0x139)
+
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PREL21, 0x200)
+ELF_RELOC(R_AARCH64_TLSGD_ADR_PAGE21, 0x201)
+ELF_RELOC(R_AARCH64_TLSGD_ADD_LO12_NC, 0x202)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G1, 0x203)
+ELF_RELOC(R_AARCH64_TLSGD_MOVW_G0_NC, 0x204)
+
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PREL21, 0x205)
+ELF_RELOC(R_AARCH64_TLSLD_ADR_PAGE21, 0x206)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_LO12_NC, 0x207)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G1, 0x208)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_G0_NC, 0x209)
+ELF_RELOC(R_AARCH64_TLSLD_LD_PREL19, 0x20a)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G2, 0x20b)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1, 0x20c)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC, 0x20d)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0, 0x20e)
+ELF_RELOC(R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC, 0x20f)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_HI12, 0x210)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12, 0x211)
+ELF_RELOC(R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC, 0x212)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12, 0x213)
+ELF_RELOC(R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC, 0x214)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12, 0x215)
+ELF_RELOC(R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC, 0x216)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12, 0x217)
+ELF_RELOC(R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC, 0x218)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12, 0x219)
+ELF_RELOC(R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC, 0x21a)
+
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, 0x21b)
+ELF_RELOC(R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, 0x21c)
+ELF_RELOC(R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, 0x21d)
+ELF_RELOC(R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, 0x21e)
+ELF_RELOC(R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, 0x21f)
+
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G2, 0x220)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1, 0x221)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, 0x222)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0, 0x223)
+ELF_RELOC(R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, 0x224)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_HI12, 0x225)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12, 0x226)
+ELF_RELOC(R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, 0x227)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12, 0x228)
+ELF_RELOC(R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC, 0x229)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12, 0x22a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC, 0x22b)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12, 0x22c)
+ELF_RELOC(R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC, 0x22d)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12, 0x22e)
+ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, 0x22f)
+
+ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19, 0x230)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21, 0x231)
+ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21, 0x232)
+ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12_NC, 0x233)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12_NC, 0x234)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1, 0x235)
+ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC, 0x236)
+ELF_RELOC(R_AARCH64_TLSDESC_LDR, 0x237)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD, 0x238)
+ELF_RELOC(R_AARCH64_TLSDESC_CALL, 0x239)
+
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12, 0x23a)
+ELF_RELOC(R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC, 0x23b)
+
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12, 0x23c)
+ELF_RELOC(R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC, 0x23d)
+
+ELF_RELOC(R_AARCH64_COPY, 0x400)
+ELF_RELOC(R_AARCH64_GLOB_DAT, 0x401)
+ELF_RELOC(R_AARCH64_JUMP_SLOT, 0x402)
+ELF_RELOC(R_AARCH64_RELATIVE, 0x403)
+ELF_RELOC(R_AARCH64_TLS_DTPREL64, 0x404)
+ELF_RELOC(R_AARCH64_TLS_DTPMOD64, 0x405)
+ELF_RELOC(R_AARCH64_TLS_TPREL64, 0x406)
+ELF_RELOC(R_AARCH64_TLSDESC, 0x407)
+ELF_RELOC(R_AARCH64_IRELATIVE, 0x408)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/ARM.def b/contrib/llvm/include/llvm/Support/ELFRelocs/ARM.def
new file mode 100644
index 0000000..730fc5b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/ARM.def
@@ -0,0 +1,138 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ // Conforms to release 2.09 of the ARM ABI specification.
+ELF_RELOC(R_ARM_NONE, 0x00)
+ELF_RELOC(R_ARM_PC24, 0x01)
+ELF_RELOC(R_ARM_ABS32, 0x02)
+ELF_RELOC(R_ARM_REL32, 0x03)
+ELF_RELOC(R_ARM_LDR_PC_G0, 0x04)
+ELF_RELOC(R_ARM_ABS16, 0x05)
+ELF_RELOC(R_ARM_ABS12, 0x06)
+ELF_RELOC(R_ARM_THM_ABS5, 0x07)
+ELF_RELOC(R_ARM_ABS8, 0x08)
+ELF_RELOC(R_ARM_SBREL32, 0x09)
+ELF_RELOC(R_ARM_THM_CALL, 0x0a)
+ELF_RELOC(R_ARM_THM_PC8, 0x0b)
+ELF_RELOC(R_ARM_BREL_ADJ, 0x0c)
+ELF_RELOC(R_ARM_TLS_DESC, 0x0d)
+ELF_RELOC(R_ARM_THM_SWI8, 0x0e)
+ELF_RELOC(R_ARM_XPC25, 0x0f)
+ELF_RELOC(R_ARM_THM_XPC22, 0x10)
+ELF_RELOC(R_ARM_TLS_DTPMOD32, 0x11)
+ELF_RELOC(R_ARM_TLS_DTPOFF32, 0x12)
+ELF_RELOC(R_ARM_TLS_TPOFF32, 0x13)
+ELF_RELOC(R_ARM_COPY, 0x14)
+ELF_RELOC(R_ARM_GLOB_DAT, 0x15)
+ELF_RELOC(R_ARM_JUMP_SLOT, 0x16)
+ELF_RELOC(R_ARM_RELATIVE, 0x17)
+ELF_RELOC(R_ARM_GOTOFF32, 0x18)
+ELF_RELOC(R_ARM_BASE_PREL, 0x19)
+ELF_RELOC(R_ARM_GOT_BREL, 0x1a)
+ELF_RELOC(R_ARM_PLT32, 0x1b)
+ELF_RELOC(R_ARM_CALL, 0x1c)
+ELF_RELOC(R_ARM_JUMP24, 0x1d)
+ELF_RELOC(R_ARM_THM_JUMP24, 0x1e)
+ELF_RELOC(R_ARM_BASE_ABS, 0x1f)
+ELF_RELOC(R_ARM_ALU_PCREL_7_0, 0x20)
+ELF_RELOC(R_ARM_ALU_PCREL_15_8, 0x21)
+ELF_RELOC(R_ARM_ALU_PCREL_23_15, 0x22)
+ELF_RELOC(R_ARM_LDR_SBREL_11_0_NC, 0x23)
+ELF_RELOC(R_ARM_ALU_SBREL_19_12_NC, 0x24)
+ELF_RELOC(R_ARM_ALU_SBREL_27_20_CK, 0x25)
+ELF_RELOC(R_ARM_TARGET1, 0x26)
+ELF_RELOC(R_ARM_SBREL31, 0x27)
+ELF_RELOC(R_ARM_V4BX, 0x28)
+ELF_RELOC(R_ARM_TARGET2, 0x29)
+ELF_RELOC(R_ARM_PREL31, 0x2a)
+ELF_RELOC(R_ARM_MOVW_ABS_NC, 0x2b)
+ELF_RELOC(R_ARM_MOVT_ABS, 0x2c)
+ELF_RELOC(R_ARM_MOVW_PREL_NC, 0x2d)
+ELF_RELOC(R_ARM_MOVT_PREL, 0x2e)
+ELF_RELOC(R_ARM_THM_MOVW_ABS_NC, 0x2f)
+ELF_RELOC(R_ARM_THM_MOVT_ABS, 0x30)
+ELF_RELOC(R_ARM_THM_MOVW_PREL_NC, 0x31)
+ELF_RELOC(R_ARM_THM_MOVT_PREL, 0x32)
+ELF_RELOC(R_ARM_THM_JUMP19, 0x33)
+ELF_RELOC(R_ARM_THM_JUMP6, 0x34)
+ELF_RELOC(R_ARM_THM_ALU_PREL_11_0, 0x35)
+ELF_RELOC(R_ARM_THM_PC12, 0x36)
+ELF_RELOC(R_ARM_ABS32_NOI, 0x37)
+ELF_RELOC(R_ARM_REL32_NOI, 0x38)
+ELF_RELOC(R_ARM_ALU_PC_G0_NC, 0x39)
+ELF_RELOC(R_ARM_ALU_PC_G0, 0x3a)
+ELF_RELOC(R_ARM_ALU_PC_G1_NC, 0x3b)
+ELF_RELOC(R_ARM_ALU_PC_G1, 0x3c)
+ELF_RELOC(R_ARM_ALU_PC_G2, 0x3d)
+ELF_RELOC(R_ARM_LDR_PC_G1, 0x3e)
+ELF_RELOC(R_ARM_LDR_PC_G2, 0x3f)
+ELF_RELOC(R_ARM_LDRS_PC_G0, 0x40)
+ELF_RELOC(R_ARM_LDRS_PC_G1, 0x41)
+ELF_RELOC(R_ARM_LDRS_PC_G2, 0x42)
+ELF_RELOC(R_ARM_LDC_PC_G0, 0x43)
+ELF_RELOC(R_ARM_LDC_PC_G1, 0x44)
+ELF_RELOC(R_ARM_LDC_PC_G2, 0x45)
+ELF_RELOC(R_ARM_ALU_SB_G0_NC, 0x46)
+ELF_RELOC(R_ARM_ALU_SB_G0, 0x47)
+ELF_RELOC(R_ARM_ALU_SB_G1_NC, 0x48)
+ELF_RELOC(R_ARM_ALU_SB_G1, 0x49)
+ELF_RELOC(R_ARM_ALU_SB_G2, 0x4a)
+ELF_RELOC(R_ARM_LDR_SB_G0, 0x4b)
+ELF_RELOC(R_ARM_LDR_SB_G1, 0x4c)
+ELF_RELOC(R_ARM_LDR_SB_G2, 0x4d)
+ELF_RELOC(R_ARM_LDRS_SB_G0, 0x4e)
+ELF_RELOC(R_ARM_LDRS_SB_G1, 0x4f)
+ELF_RELOC(R_ARM_LDRS_SB_G2, 0x50)
+ELF_RELOC(R_ARM_LDC_SB_G0, 0x51)
+ELF_RELOC(R_ARM_LDC_SB_G1, 0x52)
+ELF_RELOC(R_ARM_LDC_SB_G2, 0x53)
+ELF_RELOC(R_ARM_MOVW_BREL_NC, 0x54)
+ELF_RELOC(R_ARM_MOVT_BREL, 0x55)
+ELF_RELOC(R_ARM_MOVW_BREL, 0x56)
+ELF_RELOC(R_ARM_THM_MOVW_BREL_NC, 0x57)
+ELF_RELOC(R_ARM_THM_MOVT_BREL, 0x58)
+ELF_RELOC(R_ARM_THM_MOVW_BREL, 0x59)
+ELF_RELOC(R_ARM_TLS_GOTDESC, 0x5a)
+ELF_RELOC(R_ARM_TLS_CALL, 0x5b)
+ELF_RELOC(R_ARM_TLS_DESCSEQ, 0x5c)
+ELF_RELOC(R_ARM_THM_TLS_CALL, 0x5d)
+ELF_RELOC(R_ARM_PLT32_ABS, 0x5e)
+ELF_RELOC(R_ARM_GOT_ABS, 0x5f)
+ELF_RELOC(R_ARM_GOT_PREL, 0x60)
+ELF_RELOC(R_ARM_GOT_BREL12, 0x61)
+ELF_RELOC(R_ARM_GOTOFF12, 0x62)
+ELF_RELOC(R_ARM_GOTRELAX, 0x63)
+ELF_RELOC(R_ARM_GNU_VTENTRY, 0x64)
+ELF_RELOC(R_ARM_GNU_VTINHERIT, 0x65)
+ELF_RELOC(R_ARM_THM_JUMP11, 0x66)
+ELF_RELOC(R_ARM_THM_JUMP8, 0x67)
+ELF_RELOC(R_ARM_TLS_GD32, 0x68)
+ELF_RELOC(R_ARM_TLS_LDM32, 0x69)
+ELF_RELOC(R_ARM_TLS_LDO32, 0x6a)
+ELF_RELOC(R_ARM_TLS_IE32, 0x6b)
+ELF_RELOC(R_ARM_TLS_LE32, 0x6c)
+ELF_RELOC(R_ARM_TLS_LDO12, 0x6d)
+ELF_RELOC(R_ARM_TLS_LE12, 0x6e)
+ELF_RELOC(R_ARM_TLS_IE12GP, 0x6f)
+ELF_RELOC(R_ARM_PRIVATE_0, 0x70)
+ELF_RELOC(R_ARM_PRIVATE_1, 0x71)
+ELF_RELOC(R_ARM_PRIVATE_2, 0x72)
+ELF_RELOC(R_ARM_PRIVATE_3, 0x73)
+ELF_RELOC(R_ARM_PRIVATE_4, 0x74)
+ELF_RELOC(R_ARM_PRIVATE_5, 0x75)
+ELF_RELOC(R_ARM_PRIVATE_6, 0x76)
+ELF_RELOC(R_ARM_PRIVATE_7, 0x77)
+ELF_RELOC(R_ARM_PRIVATE_8, 0x78)
+ELF_RELOC(R_ARM_PRIVATE_9, 0x79)
+ELF_RELOC(R_ARM_PRIVATE_10, 0x7a)
+ELF_RELOC(R_ARM_PRIVATE_11, 0x7b)
+ELF_RELOC(R_ARM_PRIVATE_12, 0x7c)
+ELF_RELOC(R_ARM_PRIVATE_13, 0x7d)
+ELF_RELOC(R_ARM_PRIVATE_14, 0x7e)
+ELF_RELOC(R_ARM_PRIVATE_15, 0x7f)
+ELF_RELOC(R_ARM_ME_TOO, 0x80)
+ELF_RELOC(R_ARM_THM_TLS_DESCSEQ16, 0x81)
+ELF_RELOC(R_ARM_THM_TLS_DESCSEQ32, 0x82)
+ELF_RELOC(R_ARM_IRELATIVE, 0xa0)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/AVR.def b/contrib/llvm/include/llvm/Support/ELFRelocs/AVR.def
new file mode 100644
index 0000000..5692d6c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/AVR.def
@@ -0,0 +1,40 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_AVR_NONE, 0)
+ELF_RELOC(R_AVR_32, 1)
+ELF_RELOC(R_AVR_7_PCREL, 2)
+ELF_RELOC(R_AVR_13_PCREL, 3)
+ELF_RELOC(R_AVR_16, 4)
+ELF_RELOC(R_AVR_16_PM, 5)
+ELF_RELOC(R_AVR_LO8_LDI, 6)
+ELF_RELOC(R_AVR_HI8_LDI, 7)
+ELF_RELOC(R_AVR_HH8_LDI, 8)
+ELF_RELOC(R_AVR_LO8_LDI_NEG, 9)
+ELF_RELOC(R_AVR_HI8_LDI_NEG, 10)
+ELF_RELOC(R_AVR_HH8_LDI_NEG, 11)
+ELF_RELOC(R_AVR_LO8_LDI_PM, 12)
+ELF_RELOC(R_AVR_HI8_LDI_PM, 13)
+ELF_RELOC(R_AVR_HH8_LDI_PM, 14)
+ELF_RELOC(R_AVR_LO8_LDI_PM_NEG, 15)
+ELF_RELOC(R_AVR_HI8_LDI_PM_NEG, 16)
+ELF_RELOC(R_AVR_HH8_LDI_PM_NEG, 17)
+ELF_RELOC(R_AVR_CALL, 18)
+ELF_RELOC(R_AVR_LDI, 19)
+ELF_RELOC(R_AVR_6, 20)
+ELF_RELOC(R_AVR_6_ADIW, 21)
+ELF_RELOC(R_AVR_MS8_LDI, 22)
+ELF_RELOC(R_AVR_MS8_LDI_NEG, 23)
+ELF_RELOC(R_AVR_LO8_LDI_GS, 24)
+ELF_RELOC(R_AVR_HI8_LDI_GS, 25)
+ELF_RELOC(R_AVR_8, 26)
+ELF_RELOC(R_AVR_8_LO8, 27)
+ELF_RELOC(R_AVR_8_HI8, 28)
+ELF_RELOC(R_AVR_8_HLO8, 29)
+ELF_RELOC(R_AVR_SYM_DIFF, 30)
+ELF_RELOC(R_AVR_16_LDST, 31)
+ELF_RELOC(R_AVR_LDS_STS_16, 33)
+ELF_RELOC(R_AVR_PORT6, 34)
+ELF_RELOC(R_AVR_PORT5, 35)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/Hexagon.def b/contrib/llvm/include/llvm/Support/ELFRelocs/Hexagon.def
new file mode 100644
index 0000000..a698ecb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/Hexagon.def
@@ -0,0 +1,100 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// Release 5 ABI
+ELF_RELOC(R_HEX_NONE, 0)
+ELF_RELOC(R_HEX_B22_PCREL, 1)
+ELF_RELOC(R_HEX_B15_PCREL, 2)
+ELF_RELOC(R_HEX_B7_PCREL, 3)
+ELF_RELOC(R_HEX_LO16, 4)
+ELF_RELOC(R_HEX_HI16, 5)
+ELF_RELOC(R_HEX_32, 6)
+ELF_RELOC(R_HEX_16, 7)
+ELF_RELOC(R_HEX_8, 8)
+ELF_RELOC(R_HEX_GPREL16_0, 9)
+ELF_RELOC(R_HEX_GPREL16_1, 10)
+ELF_RELOC(R_HEX_GPREL16_2, 11)
+ELF_RELOC(R_HEX_GPREL16_3, 12)
+ELF_RELOC(R_HEX_HL16, 13)
+ELF_RELOC(R_HEX_B13_PCREL, 14)
+ELF_RELOC(R_HEX_B9_PCREL, 15)
+ELF_RELOC(R_HEX_B32_PCREL_X, 16)
+ELF_RELOC(R_HEX_32_6_X, 17)
+ELF_RELOC(R_HEX_B22_PCREL_X, 18)
+ELF_RELOC(R_HEX_B15_PCREL_X, 19)
+ELF_RELOC(R_HEX_B13_PCREL_X, 20)
+ELF_RELOC(R_HEX_B9_PCREL_X, 21)
+ELF_RELOC(R_HEX_B7_PCREL_X, 22)
+ELF_RELOC(R_HEX_16_X, 23)
+ELF_RELOC(R_HEX_12_X, 24)
+ELF_RELOC(R_HEX_11_X, 25)
+ELF_RELOC(R_HEX_10_X, 26)
+ELF_RELOC(R_HEX_9_X, 27)
+ELF_RELOC(R_HEX_8_X, 28)
+ELF_RELOC(R_HEX_7_X, 29)
+ELF_RELOC(R_HEX_6_X, 30)
+ELF_RELOC(R_HEX_32_PCREL, 31)
+ELF_RELOC(R_HEX_COPY, 32)
+ELF_RELOC(R_HEX_GLOB_DAT, 33)
+ELF_RELOC(R_HEX_JMP_SLOT, 34)
+ELF_RELOC(R_HEX_RELATIVE, 35)
+ELF_RELOC(R_HEX_PLT_B22_PCREL, 36)
+ELF_RELOC(R_HEX_GOTREL_LO16, 37)
+ELF_RELOC(R_HEX_GOTREL_HI16, 38)
+ELF_RELOC(R_HEX_GOTREL_32, 39)
+ELF_RELOC(R_HEX_GOT_LO16, 40)
+ELF_RELOC(R_HEX_GOT_HI16, 41)
+ELF_RELOC(R_HEX_GOT_32, 42)
+ELF_RELOC(R_HEX_GOT_16, 43)
+ELF_RELOC(R_HEX_DTPMOD_32, 44)
+ELF_RELOC(R_HEX_DTPREL_LO16, 45)
+ELF_RELOC(R_HEX_DTPREL_HI16, 46)
+ELF_RELOC(R_HEX_DTPREL_32, 47)
+ELF_RELOC(R_HEX_DTPREL_16, 48)
+ELF_RELOC(R_HEX_GD_PLT_B22_PCREL, 49)
+ELF_RELOC(R_HEX_GD_GOT_LO16, 50)
+ELF_RELOC(R_HEX_GD_GOT_HI16, 51)
+ELF_RELOC(R_HEX_GD_GOT_32, 52)
+ELF_RELOC(R_HEX_GD_GOT_16, 53)
+ELF_RELOC(R_HEX_IE_LO16, 54)
+ELF_RELOC(R_HEX_IE_HI16, 55)
+ELF_RELOC(R_HEX_IE_32, 56)
+ELF_RELOC(R_HEX_IE_GOT_LO16, 57)
+ELF_RELOC(R_HEX_IE_GOT_HI16, 58)
+ELF_RELOC(R_HEX_IE_GOT_32, 59)
+ELF_RELOC(R_HEX_IE_GOT_16, 60)
+ELF_RELOC(R_HEX_TPREL_LO16, 61)
+ELF_RELOC(R_HEX_TPREL_HI16, 62)
+ELF_RELOC(R_HEX_TPREL_32, 63)
+ELF_RELOC(R_HEX_TPREL_16, 64)
+ELF_RELOC(R_HEX_6_PCREL_X, 65)
+ELF_RELOC(R_HEX_GOTREL_32_6_X, 66)
+ELF_RELOC(R_HEX_GOTREL_16_X, 67)
+ELF_RELOC(R_HEX_GOTREL_11_X, 68)
+ELF_RELOC(R_HEX_GOT_32_6_X, 69)
+ELF_RELOC(R_HEX_GOT_16_X, 70)
+ELF_RELOC(R_HEX_GOT_11_X, 71)
+ELF_RELOC(R_HEX_DTPREL_32_6_X, 72)
+ELF_RELOC(R_HEX_DTPREL_16_X, 73)
+ELF_RELOC(R_HEX_DTPREL_11_X, 74)
+ELF_RELOC(R_HEX_GD_GOT_32_6_X, 75)
+ELF_RELOC(R_HEX_GD_GOT_16_X, 76)
+ELF_RELOC(R_HEX_GD_GOT_11_X, 77)
+ELF_RELOC(R_HEX_IE_32_6_X, 78)
+ELF_RELOC(R_HEX_IE_16_X, 79)
+ELF_RELOC(R_HEX_IE_GOT_32_6_X, 80)
+ELF_RELOC(R_HEX_IE_GOT_16_X, 81)
+ELF_RELOC(R_HEX_IE_GOT_11_X, 82)
+ELF_RELOC(R_HEX_TPREL_32_6_X, 83)
+ELF_RELOC(R_HEX_TPREL_16_X, 84)
+ELF_RELOC(R_HEX_TPREL_11_X, 85)
+ELF_RELOC(R_HEX_LD_PLT_B22_PCREL, 86)
+ELF_RELOC(R_HEX_LD_GOT_LO16, 87)
+ELF_RELOC(R_HEX_LD_GOT_HI16, 88)
+ELF_RELOC(R_HEX_LD_GOT_32, 89)
+ELF_RELOC(R_HEX_LD_GOT_16, 90)
+ELF_RELOC(R_HEX_LD_GOT_32_6_X, 91)
+ELF_RELOC(R_HEX_LD_GOT_16_X, 92)
+ELF_RELOC(R_HEX_LD_GOT_11_X, 93)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/Mips.def b/contrib/llvm/include/llvm/Support/ELFRelocs/Mips.def
new file mode 100644
index 0000000..77e7f8e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/Mips.def
@@ -0,0 +1,117 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_MIPS_NONE, 0)
+ELF_RELOC(R_MIPS_16, 1)
+ELF_RELOC(R_MIPS_32, 2)
+ELF_RELOC(R_MIPS_REL32, 3)
+ELF_RELOC(R_MIPS_26, 4)
+ELF_RELOC(R_MIPS_HI16, 5)
+ELF_RELOC(R_MIPS_LO16, 6)
+ELF_RELOC(R_MIPS_GPREL16, 7)
+ELF_RELOC(R_MIPS_LITERAL, 8)
+ELF_RELOC(R_MIPS_GOT16, 9)
+ELF_RELOC(R_MIPS_PC16, 10)
+ELF_RELOC(R_MIPS_CALL16, 11)
+ELF_RELOC(R_MIPS_GPREL32, 12)
+ELF_RELOC(R_MIPS_UNUSED1, 13)
+ELF_RELOC(R_MIPS_UNUSED2, 14)
+ELF_RELOC(R_MIPS_UNUSED3, 15)
+ELF_RELOC(R_MIPS_SHIFT5, 16)
+ELF_RELOC(R_MIPS_SHIFT6, 17)
+ELF_RELOC(R_MIPS_64, 18)
+ELF_RELOC(R_MIPS_GOT_DISP, 19)
+ELF_RELOC(R_MIPS_GOT_PAGE, 20)
+ELF_RELOC(R_MIPS_GOT_OFST, 21)
+ELF_RELOC(R_MIPS_GOT_HI16, 22)
+ELF_RELOC(R_MIPS_GOT_LO16, 23)
+ELF_RELOC(R_MIPS_SUB, 24)
+ELF_RELOC(R_MIPS_INSERT_A, 25)
+ELF_RELOC(R_MIPS_INSERT_B, 26)
+ELF_RELOC(R_MIPS_DELETE, 27)
+ELF_RELOC(R_MIPS_HIGHER, 28)
+ELF_RELOC(R_MIPS_HIGHEST, 29)
+ELF_RELOC(R_MIPS_CALL_HI16, 30)
+ELF_RELOC(R_MIPS_CALL_LO16, 31)
+ELF_RELOC(R_MIPS_SCN_DISP, 32)
+ELF_RELOC(R_MIPS_REL16, 33)
+ELF_RELOC(R_MIPS_ADD_IMMEDIATE, 34)
+ELF_RELOC(R_MIPS_PJUMP, 35)
+ELF_RELOC(R_MIPS_RELGOT, 36)
+ELF_RELOC(R_MIPS_JALR, 37)
+ELF_RELOC(R_MIPS_TLS_DTPMOD32, 38)
+ELF_RELOC(R_MIPS_TLS_DTPREL32, 39)
+ELF_RELOC(R_MIPS_TLS_DTPMOD64, 40)
+ELF_RELOC(R_MIPS_TLS_DTPREL64, 41)
+ELF_RELOC(R_MIPS_TLS_GD, 42)
+ELF_RELOC(R_MIPS_TLS_LDM, 43)
+ELF_RELOC(R_MIPS_TLS_DTPREL_HI16, 44)
+ELF_RELOC(R_MIPS_TLS_DTPREL_LO16, 45)
+ELF_RELOC(R_MIPS_TLS_GOTTPREL, 46)
+ELF_RELOC(R_MIPS_TLS_TPREL32, 47)
+ELF_RELOC(R_MIPS_TLS_TPREL64, 48)
+ELF_RELOC(R_MIPS_TLS_TPREL_HI16, 49)
+ELF_RELOC(R_MIPS_TLS_TPREL_LO16, 50)
+ELF_RELOC(R_MIPS_GLOB_DAT, 51)
+ELF_RELOC(R_MIPS_PC21_S2, 60)
+ELF_RELOC(R_MIPS_PC26_S2, 61)
+ELF_RELOC(R_MIPS_PC18_S3, 62)
+ELF_RELOC(R_MIPS_PC19_S2, 63)
+ELF_RELOC(R_MIPS_PCHI16, 64)
+ELF_RELOC(R_MIPS_PCLO16, 65)
+ELF_RELOC(R_MIPS16_26, 100)
+ELF_RELOC(R_MIPS16_GPREL, 101)
+ELF_RELOC(R_MIPS16_GOT16, 102)
+ELF_RELOC(R_MIPS16_CALL16, 103)
+ELF_RELOC(R_MIPS16_HI16, 104)
+ELF_RELOC(R_MIPS16_LO16, 105)
+ELF_RELOC(R_MIPS16_TLS_GD, 106)
+ELF_RELOC(R_MIPS16_TLS_LDM, 107)
+ELF_RELOC(R_MIPS16_TLS_DTPREL_HI16, 108)
+ELF_RELOC(R_MIPS16_TLS_DTPREL_LO16, 109)
+ELF_RELOC(R_MIPS16_TLS_GOTTPREL, 110)
+ELF_RELOC(R_MIPS16_TLS_TPREL_HI16, 111)
+ELF_RELOC(R_MIPS16_TLS_TPREL_LO16, 112)
+ELF_RELOC(R_MIPS_COPY, 126)
+ELF_RELOC(R_MIPS_JUMP_SLOT, 127)
+ELF_RELOC(R_MICROMIPS_26_S1, 133)
+ELF_RELOC(R_MICROMIPS_HI16, 134)
+ELF_RELOC(R_MICROMIPS_LO16, 135)
+ELF_RELOC(R_MICROMIPS_GPREL16, 136)
+ELF_RELOC(R_MICROMIPS_LITERAL, 137)
+ELF_RELOC(R_MICROMIPS_GOT16, 138)
+ELF_RELOC(R_MICROMIPS_PC7_S1, 139)
+ELF_RELOC(R_MICROMIPS_PC10_S1, 140)
+ELF_RELOC(R_MICROMIPS_PC16_S1, 141)
+ELF_RELOC(R_MICROMIPS_CALL16, 142)
+ELF_RELOC(R_MICROMIPS_GOT_DISP, 145)
+ELF_RELOC(R_MICROMIPS_GOT_PAGE, 146)
+ELF_RELOC(R_MICROMIPS_GOT_OFST, 147)
+ELF_RELOC(R_MICROMIPS_GOT_HI16, 148)
+ELF_RELOC(R_MICROMIPS_GOT_LO16, 149)
+ELF_RELOC(R_MICROMIPS_SUB, 150)
+ELF_RELOC(R_MICROMIPS_HIGHER, 151)
+ELF_RELOC(R_MICROMIPS_HIGHEST, 152)
+ELF_RELOC(R_MICROMIPS_CALL_HI16, 153)
+ELF_RELOC(R_MICROMIPS_CALL_LO16, 154)
+ELF_RELOC(R_MICROMIPS_SCN_DISP, 155)
+ELF_RELOC(R_MICROMIPS_JALR, 156)
+ELF_RELOC(R_MICROMIPS_HI0_LO16, 157)
+ELF_RELOC(R_MICROMIPS_TLS_GD, 162)
+ELF_RELOC(R_MICROMIPS_TLS_LDM, 163)
+ELF_RELOC(R_MICROMIPS_TLS_DTPREL_HI16, 164)
+ELF_RELOC(R_MICROMIPS_TLS_DTPREL_LO16, 165)
+ELF_RELOC(R_MICROMIPS_TLS_GOTTPREL, 166)
+ELF_RELOC(R_MICROMIPS_TLS_TPREL_HI16, 169)
+ELF_RELOC(R_MICROMIPS_TLS_TPREL_LO16, 170)
+ELF_RELOC(R_MICROMIPS_GPREL7_S2, 172)
+ELF_RELOC(R_MICROMIPS_PC23_S2, 173)
+ELF_RELOC(R_MICROMIPS_PC21_S2, 174)
+ELF_RELOC(R_MICROMIPS_PC26_S2, 175)
+ELF_RELOC(R_MICROMIPS_PC18_S3, 176)
+ELF_RELOC(R_MICROMIPS_PC19_S2, 177)
+ELF_RELOC(R_MIPS_NUM, 218)
+ELF_RELOC(R_MIPS_PC32, 248)
+ELF_RELOC(R_MIPS_EH, 249)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC.def b/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC.def
new file mode 100644
index 0000000..e4f8ee0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC.def
@@ -0,0 +1,123 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
+// unfortunate behavior of including asm/elf.h, which defines R_PPC_NONE, etc.
+// to their corresponding integer values. As a result, we need to undef them
+// here before continuing.
+
+#undef R_PPC_NONE
+#undef R_PPC_ADDR32
+#undef R_PPC_ADDR24
+#undef R_PPC_ADDR16
+#undef R_PPC_ADDR16_LO
+#undef R_PPC_ADDR16_HI
+#undef R_PPC_ADDR16_HA
+#undef R_PPC_ADDR14
+#undef R_PPC_ADDR14_BRTAKEN
+#undef R_PPC_ADDR14_BRNTAKEN
+#undef R_PPC_REL24
+#undef R_PPC_REL14
+#undef R_PPC_REL14_BRTAKEN
+#undef R_PPC_REL14_BRNTAKEN
+#undef R_PPC_GOT16
+#undef R_PPC_GOT16_LO
+#undef R_PPC_GOT16_HI
+#undef R_PPC_GOT16_HA
+#undef R_PPC_PLTREL24
+#undef R_PPC_JMP_SLOT
+#undef R_PPC_LOCAL24PC
+#undef R_PPC_REL32
+#undef R_PPC_TLS
+#undef R_PPC_DTPMOD32
+#undef R_PPC_TPREL16
+#undef R_PPC_TPREL16_LO
+#undef R_PPC_TPREL16_HI
+#undef R_PPC_TPREL16_HA
+#undef R_PPC_TPREL32
+#undef R_PPC_DTPREL16
+#undef R_PPC_DTPREL16_LO
+#undef R_PPC_DTPREL16_HI
+#undef R_PPC_DTPREL16_HA
+#undef R_PPC_DTPREL32
+#undef R_PPC_GOT_TLSGD16
+#undef R_PPC_GOT_TLSGD16_LO
+#undef R_PPC_GOT_TLSGD16_HI
+#undef R_PPC_GOT_TLSGD16_HA
+#undef R_PPC_GOT_TLSLD16
+#undef R_PPC_GOT_TLSLD16_LO
+#undef R_PPC_GOT_TLSLD16_HI
+#undef R_PPC_GOT_TLSLD16_HA
+#undef R_PPC_GOT_TPREL16
+#undef R_PPC_GOT_TPREL16_LO
+#undef R_PPC_GOT_TPREL16_HI
+#undef R_PPC_GOT_TPREL16_HA
+#undef R_PPC_GOT_DTPREL16
+#undef R_PPC_GOT_DTPREL16_LO
+#undef R_PPC_GOT_DTPREL16_HI
+#undef R_PPC_GOT_DTPREL16_HA
+#undef R_PPC_TLSGD
+#undef R_PPC_TLSLD
+#undef R_PPC_REL16
+#undef R_PPC_REL16_LO
+#undef R_PPC_REL16_HI
+#undef R_PPC_REL16_HA
+
+ELF_RELOC(R_PPC_NONE, 0) /* No relocation. */
+ELF_RELOC(R_PPC_ADDR32, 1)
+ELF_RELOC(R_PPC_ADDR24, 2)
+ELF_RELOC(R_PPC_ADDR16, 3)
+ELF_RELOC(R_PPC_ADDR16_LO, 4)
+ELF_RELOC(R_PPC_ADDR16_HI, 5)
+ELF_RELOC(R_PPC_ADDR16_HA, 6)
+ELF_RELOC(R_PPC_ADDR14, 7)
+ELF_RELOC(R_PPC_ADDR14_BRTAKEN, 8)
+ELF_RELOC(R_PPC_ADDR14_BRNTAKEN, 9)
+ELF_RELOC(R_PPC_REL24, 10)
+ELF_RELOC(R_PPC_REL14, 11)
+ELF_RELOC(R_PPC_REL14_BRTAKEN, 12)
+ELF_RELOC(R_PPC_REL14_BRNTAKEN, 13)
+ELF_RELOC(R_PPC_GOT16, 14)
+ELF_RELOC(R_PPC_GOT16_LO, 15)
+ELF_RELOC(R_PPC_GOT16_HI, 16)
+ELF_RELOC(R_PPC_GOT16_HA, 17)
+ELF_RELOC(R_PPC_PLTREL24, 18)
+ELF_RELOC(R_PPC_JMP_SLOT, 21)
+ELF_RELOC(R_PPC_LOCAL24PC, 23)
+ELF_RELOC(R_PPC_REL32, 26)
+ELF_RELOC(R_PPC_TLS, 67)
+ELF_RELOC(R_PPC_DTPMOD32, 68)
+ELF_RELOC(R_PPC_TPREL16, 69)
+ELF_RELOC(R_PPC_TPREL16_LO, 70)
+ELF_RELOC(R_PPC_TPREL16_HI, 71)
+ELF_RELOC(R_PPC_TPREL16_HA, 72)
+ELF_RELOC(R_PPC_TPREL32, 73)
+ELF_RELOC(R_PPC_DTPREL16, 74)
+ELF_RELOC(R_PPC_DTPREL16_LO, 75)
+ELF_RELOC(R_PPC_DTPREL16_HI, 76)
+ELF_RELOC(R_PPC_DTPREL16_HA, 77)
+ELF_RELOC(R_PPC_DTPREL32, 78)
+ELF_RELOC(R_PPC_GOT_TLSGD16, 79)
+ELF_RELOC(R_PPC_GOT_TLSGD16_LO, 80)
+ELF_RELOC(R_PPC_GOT_TLSGD16_HI, 81)
+ELF_RELOC(R_PPC_GOT_TLSGD16_HA, 82)
+ELF_RELOC(R_PPC_GOT_TLSLD16, 83)
+ELF_RELOC(R_PPC_GOT_TLSLD16_LO, 84)
+ELF_RELOC(R_PPC_GOT_TLSLD16_HI, 85)
+ELF_RELOC(R_PPC_GOT_TLSLD16_HA, 86)
+ELF_RELOC(R_PPC_GOT_TPREL16, 87)
+ELF_RELOC(R_PPC_GOT_TPREL16_LO, 88)
+ELF_RELOC(R_PPC_GOT_TPREL16_HI, 89)
+ELF_RELOC(R_PPC_GOT_TPREL16_HA, 90)
+ELF_RELOC(R_PPC_GOT_DTPREL16, 91)
+ELF_RELOC(R_PPC_GOT_DTPREL16_LO, 92)
+ELF_RELOC(R_PPC_GOT_DTPREL16_HI, 93)
+ELF_RELOC(R_PPC_GOT_DTPREL16_HA, 94)
+ELF_RELOC(R_PPC_TLSGD, 95)
+ELF_RELOC(R_PPC_TLSLD, 96)
+ELF_RELOC(R_PPC_REL16, 249)
+ELF_RELOC(R_PPC_REL16_LO, 250)
+ELF_RELOC(R_PPC_REL16_HI, 251)
+ELF_RELOC(R_PPC_REL16_HA, 252)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC64.def b/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC64.def
new file mode 100644
index 0000000..3a47c5a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/PowerPC64.def
@@ -0,0 +1,181 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// glibc's PowerPC asm/sigcontext.h, when compiling for PPC64, has the
+// unfortunate behavior of including asm/elf.h, which defines R_PPC_NONE, etc.
+// to their corresponding integer values. As a result, we need to undef them
+// here before continuing.
+
+#undef R_PPC64_NONE
+#undef R_PPC64_ADDR32
+#undef R_PPC64_ADDR24
+#undef R_PPC64_ADDR16
+#undef R_PPC64_ADDR16_LO
+#undef R_PPC64_ADDR16_HI
+#undef R_PPC64_ADDR16_HA
+#undef R_PPC64_ADDR14
+#undef R_PPC64_ADDR14_BRTAKEN
+#undef R_PPC64_ADDR14_BRNTAKEN
+#undef R_PPC64_REL24
+#undef R_PPC64_REL14
+#undef R_PPC64_REL14_BRTAKEN
+#undef R_PPC64_REL14_BRNTAKEN
+#undef R_PPC64_GOT16
+#undef R_PPC64_GOT16_LO
+#undef R_PPC64_GOT16_HI
+#undef R_PPC64_GOT16_HA
+#undef R_PPC64_GLOB_DAT
+#undef R_PPC64_JMP_SLOT
+#undef R_PPC64_RELATIVE
+#undef R_PPC64_REL32
+#undef R_PPC64_ADDR64
+#undef R_PPC64_ADDR16_HIGHER
+#undef R_PPC64_ADDR16_HIGHERA
+#undef R_PPC64_ADDR16_HIGHEST
+#undef R_PPC64_ADDR16_HIGHESTA
+#undef R_PPC64_REL64
+#undef R_PPC64_TOC16
+#undef R_PPC64_TOC16_LO
+#undef R_PPC64_TOC16_HI
+#undef R_PPC64_TOC16_HA
+#undef R_PPC64_TOC
+#undef R_PPC64_ADDR16_DS
+#undef R_PPC64_ADDR16_LO_DS
+#undef R_PPC64_GOT16_DS
+#undef R_PPC64_GOT16_LO_DS
+#undef R_PPC64_TOC16_DS
+#undef R_PPC64_TOC16_LO_DS
+#undef R_PPC64_TLS
+#undef R_PPC64_DTPMOD64
+#undef R_PPC64_TPREL16
+#undef R_PPC64_TPREL16_LO
+#undef R_PPC64_TPREL16_HI
+#undef R_PPC64_TPREL16_HA
+#undef R_PPC64_TPREL64
+#undef R_PPC64_DTPREL16
+#undef R_PPC64_DTPREL16_LO
+#undef R_PPC64_DTPREL16_HI
+#undef R_PPC64_DTPREL16_HA
+#undef R_PPC64_DTPREL64
+#undef R_PPC64_GOT_TLSGD16
+#undef R_PPC64_GOT_TLSGD16_LO
+#undef R_PPC64_GOT_TLSGD16_HI
+#undef R_PPC64_GOT_TLSGD16_HA
+#undef R_PPC64_GOT_TLSLD16
+#undef R_PPC64_GOT_TLSLD16_LO
+#undef R_PPC64_GOT_TLSLD16_HI
+#undef R_PPC64_GOT_TLSLD16_HA
+#undef R_PPC64_GOT_TPREL16_DS
+#undef R_PPC64_GOT_TPREL16_LO_DS
+#undef R_PPC64_GOT_TPREL16_HI
+#undef R_PPC64_GOT_TPREL16_HA
+#undef R_PPC64_GOT_DTPREL16_DS
+#undef R_PPC64_GOT_DTPREL16_LO_DS
+#undef R_PPC64_GOT_DTPREL16_HI
+#undef R_PPC64_GOT_DTPREL16_HA
+#undef R_PPC64_TPREL16_DS
+#undef R_PPC64_TPREL16_LO_DS
+#undef R_PPC64_TPREL16_HIGHER
+#undef R_PPC64_TPREL16_HIGHERA
+#undef R_PPC64_TPREL16_HIGHEST
+#undef R_PPC64_TPREL16_HIGHESTA
+#undef R_PPC64_DTPREL16_DS
+#undef R_PPC64_DTPREL16_LO_DS
+#undef R_PPC64_DTPREL16_HIGHER
+#undef R_PPC64_DTPREL16_HIGHERA
+#undef R_PPC64_DTPREL16_HIGHEST
+#undef R_PPC64_DTPREL16_HIGHESTA
+#undef R_PPC64_TLSGD
+#undef R_PPC64_TLSLD
+#undef R_PPC64_REL16
+#undef R_PPC64_REL16_LO
+#undef R_PPC64_REL16_HI
+#undef R_PPC64_REL16_HA
+
+ELF_RELOC(R_PPC64_NONE, 0)
+ELF_RELOC(R_PPC64_ADDR32, 1)
+ELF_RELOC(R_PPC64_ADDR24, 2)
+ELF_RELOC(R_PPC64_ADDR16, 3)
+ELF_RELOC(R_PPC64_ADDR16_LO, 4)
+ELF_RELOC(R_PPC64_ADDR16_HI, 5)
+ELF_RELOC(R_PPC64_ADDR16_HA, 6)
+ELF_RELOC(R_PPC64_ADDR14, 7)
+ELF_RELOC(R_PPC64_ADDR14_BRTAKEN, 8)
+ELF_RELOC(R_PPC64_ADDR14_BRNTAKEN, 9)
+ELF_RELOC(R_PPC64_REL24, 10)
+ELF_RELOC(R_PPC64_REL14, 11)
+ELF_RELOC(R_PPC64_REL14_BRTAKEN, 12)
+ELF_RELOC(R_PPC64_REL14_BRNTAKEN, 13)
+ELF_RELOC(R_PPC64_GOT16, 14)
+ELF_RELOC(R_PPC64_GOT16_LO, 15)
+ELF_RELOC(R_PPC64_GOT16_HI, 16)
+ELF_RELOC(R_PPC64_GOT16_HA, 17)
+ELF_RELOC(R_PPC64_GLOB_DAT, 20)
+ELF_RELOC(R_PPC64_JMP_SLOT, 21)
+ELF_RELOC(R_PPC64_RELATIVE, 22)
+ELF_RELOC(R_PPC64_REL32, 26)
+ELF_RELOC(R_PPC64_ADDR64, 38)
+ELF_RELOC(R_PPC64_ADDR16_HIGHER, 39)
+ELF_RELOC(R_PPC64_ADDR16_HIGHERA, 40)
+ELF_RELOC(R_PPC64_ADDR16_HIGHEST, 41)
+ELF_RELOC(R_PPC64_ADDR16_HIGHESTA, 42)
+ELF_RELOC(R_PPC64_REL64, 44)
+ELF_RELOC(R_PPC64_TOC16, 47)
+ELF_RELOC(R_PPC64_TOC16_LO, 48)
+ELF_RELOC(R_PPC64_TOC16_HI, 49)
+ELF_RELOC(R_PPC64_TOC16_HA, 50)
+ELF_RELOC(R_PPC64_TOC, 51)
+ELF_RELOC(R_PPC64_ADDR16_DS, 56)
+ELF_RELOC(R_PPC64_ADDR16_LO_DS, 57)
+ELF_RELOC(R_PPC64_GOT16_DS, 58)
+ELF_RELOC(R_PPC64_GOT16_LO_DS, 59)
+ELF_RELOC(R_PPC64_TOC16_DS, 63)
+ELF_RELOC(R_PPC64_TOC16_LO_DS, 64)
+ELF_RELOC(R_PPC64_TLS, 67)
+ELF_RELOC(R_PPC64_DTPMOD64, 68)
+ELF_RELOC(R_PPC64_TPREL16, 69)
+ELF_RELOC(R_PPC64_TPREL16_LO, 70)
+ELF_RELOC(R_PPC64_TPREL16_HI, 71)
+ELF_RELOC(R_PPC64_TPREL16_HA, 72)
+ELF_RELOC(R_PPC64_TPREL64, 73)
+ELF_RELOC(R_PPC64_DTPREL16, 74)
+ELF_RELOC(R_PPC64_DTPREL16_LO, 75)
+ELF_RELOC(R_PPC64_DTPREL16_HI, 76)
+ELF_RELOC(R_PPC64_DTPREL16_HA, 77)
+ELF_RELOC(R_PPC64_DTPREL64, 78)
+ELF_RELOC(R_PPC64_GOT_TLSGD16, 79)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_LO, 80)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_HI, 81)
+ELF_RELOC(R_PPC64_GOT_TLSGD16_HA, 82)
+ELF_RELOC(R_PPC64_GOT_TLSLD16, 83)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_LO, 84)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_HI, 85)
+ELF_RELOC(R_PPC64_GOT_TLSLD16_HA, 86)
+ELF_RELOC(R_PPC64_GOT_TPREL16_DS, 87)
+ELF_RELOC(R_PPC64_GOT_TPREL16_LO_DS, 88)
+ELF_RELOC(R_PPC64_GOT_TPREL16_HI, 89)
+ELF_RELOC(R_PPC64_GOT_TPREL16_HA, 90)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_DS, 91)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_LO_DS, 92)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_HI, 93)
+ELF_RELOC(R_PPC64_GOT_DTPREL16_HA, 94)
+ELF_RELOC(R_PPC64_TPREL16_DS, 95)
+ELF_RELOC(R_PPC64_TPREL16_LO_DS, 96)
+ELF_RELOC(R_PPC64_TPREL16_HIGHER, 97)
+ELF_RELOC(R_PPC64_TPREL16_HIGHERA, 98)
+ELF_RELOC(R_PPC64_TPREL16_HIGHEST, 99)
+ELF_RELOC(R_PPC64_TPREL16_HIGHESTA, 100)
+ELF_RELOC(R_PPC64_DTPREL16_DS, 101)
+ELF_RELOC(R_PPC64_DTPREL16_LO_DS, 102)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHER, 103)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHERA, 104)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHEST, 105)
+ELF_RELOC(R_PPC64_DTPREL16_HIGHESTA, 106)
+ELF_RELOC(R_PPC64_TLSGD, 107)
+ELF_RELOC(R_PPC64_TLSLD, 108)
+ELF_RELOC(R_PPC64_REL16, 249)
+ELF_RELOC(R_PPC64_REL16_LO, 250)
+ELF_RELOC(R_PPC64_REL16_HI, 251)
+ELF_RELOC(R_PPC64_REL16_HA, 252)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/Sparc.def b/contrib/llvm/include/llvm/Support/ELFRelocs/Sparc.def
new file mode 100644
index 0000000..7e01a4a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/Sparc.def
@@ -0,0 +1,89 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_SPARC_NONE, 0)
+ELF_RELOC(R_SPARC_8, 1)
+ELF_RELOC(R_SPARC_16, 2)
+ELF_RELOC(R_SPARC_32, 3)
+ELF_RELOC(R_SPARC_DISP8, 4)
+ELF_RELOC(R_SPARC_DISP16, 5)
+ELF_RELOC(R_SPARC_DISP32, 6)
+ELF_RELOC(R_SPARC_WDISP30, 7)
+ELF_RELOC(R_SPARC_WDISP22, 8)
+ELF_RELOC(R_SPARC_HI22, 9)
+ELF_RELOC(R_SPARC_22, 10)
+ELF_RELOC(R_SPARC_13, 11)
+ELF_RELOC(R_SPARC_LO10, 12)
+ELF_RELOC(R_SPARC_GOT10, 13)
+ELF_RELOC(R_SPARC_GOT13, 14)
+ELF_RELOC(R_SPARC_GOT22, 15)
+ELF_RELOC(R_SPARC_PC10, 16)
+ELF_RELOC(R_SPARC_PC22, 17)
+ELF_RELOC(R_SPARC_WPLT30, 18)
+ELF_RELOC(R_SPARC_COPY, 19)
+ELF_RELOC(R_SPARC_GLOB_DAT, 20)
+ELF_RELOC(R_SPARC_JMP_SLOT, 21)
+ELF_RELOC(R_SPARC_RELATIVE, 22)
+ELF_RELOC(R_SPARC_UA32, 23)
+ELF_RELOC(R_SPARC_PLT32, 24)
+ELF_RELOC(R_SPARC_HIPLT22, 25)
+ELF_RELOC(R_SPARC_LOPLT10, 26)
+ELF_RELOC(R_SPARC_PCPLT32, 27)
+ELF_RELOC(R_SPARC_PCPLT22, 28)
+ELF_RELOC(R_SPARC_PCPLT10, 29)
+ELF_RELOC(R_SPARC_10, 30)
+ELF_RELOC(R_SPARC_11, 31)
+ELF_RELOC(R_SPARC_64, 32)
+ELF_RELOC(R_SPARC_OLO10, 33)
+ELF_RELOC(R_SPARC_HH22, 34)
+ELF_RELOC(R_SPARC_HM10, 35)
+ELF_RELOC(R_SPARC_LM22, 36)
+ELF_RELOC(R_SPARC_PC_HH22, 37)
+ELF_RELOC(R_SPARC_PC_HM10, 38)
+ELF_RELOC(R_SPARC_PC_LM22, 39)
+ELF_RELOC(R_SPARC_WDISP16, 40)
+ELF_RELOC(R_SPARC_WDISP19, 41)
+ELF_RELOC(R_SPARC_7, 43)
+ELF_RELOC(R_SPARC_5, 44)
+ELF_RELOC(R_SPARC_6, 45)
+ELF_RELOC(R_SPARC_DISP64, 46)
+ELF_RELOC(R_SPARC_PLT64, 47)
+ELF_RELOC(R_SPARC_HIX22, 48)
+ELF_RELOC(R_SPARC_LOX10, 49)
+ELF_RELOC(R_SPARC_H44, 50)
+ELF_RELOC(R_SPARC_M44, 51)
+ELF_RELOC(R_SPARC_L44, 52)
+ELF_RELOC(R_SPARC_REGISTER, 53)
+ELF_RELOC(R_SPARC_UA64, 54)
+ELF_RELOC(R_SPARC_UA16, 55)
+ELF_RELOC(R_SPARC_TLS_GD_HI22, 56)
+ELF_RELOC(R_SPARC_TLS_GD_LO10, 57)
+ELF_RELOC(R_SPARC_TLS_GD_ADD, 58)
+ELF_RELOC(R_SPARC_TLS_GD_CALL, 59)
+ELF_RELOC(R_SPARC_TLS_LDM_HI22, 60)
+ELF_RELOC(R_SPARC_TLS_LDM_LO10, 61)
+ELF_RELOC(R_SPARC_TLS_LDM_ADD, 62)
+ELF_RELOC(R_SPARC_TLS_LDM_CALL, 63)
+ELF_RELOC(R_SPARC_TLS_LDO_HIX22, 64)
+ELF_RELOC(R_SPARC_TLS_LDO_LOX10, 65)
+ELF_RELOC(R_SPARC_TLS_LDO_ADD, 66)
+ELF_RELOC(R_SPARC_TLS_IE_HI22, 67)
+ELF_RELOC(R_SPARC_TLS_IE_LO10, 68)
+ELF_RELOC(R_SPARC_TLS_IE_LD, 69)
+ELF_RELOC(R_SPARC_TLS_IE_LDX, 70)
+ELF_RELOC(R_SPARC_TLS_IE_ADD, 71)
+ELF_RELOC(R_SPARC_TLS_LE_HIX22, 72)
+ELF_RELOC(R_SPARC_TLS_LE_LOX10, 73)
+ELF_RELOC(R_SPARC_TLS_DTPMOD32, 74)
+ELF_RELOC(R_SPARC_TLS_DTPMOD64, 75)
+ELF_RELOC(R_SPARC_TLS_DTPOFF32, 76)
+ELF_RELOC(R_SPARC_TLS_DTPOFF64, 77)
+ELF_RELOC(R_SPARC_TLS_TPOFF32, 78)
+ELF_RELOC(R_SPARC_TLS_TPOFF64, 79)
+ELF_RELOC(R_SPARC_GOTDATA_HIX22, 80)
+ELF_RELOC(R_SPARC_GOTDATA_LOX10, 81)
+ELF_RELOC(R_SPARC_GOTDATA_OP_HIX22, 82)
+ELF_RELOC(R_SPARC_GOTDATA_OP_LOX10, 83)
+ELF_RELOC(R_SPARC_GOTDATA_OP, 84)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/SystemZ.def b/contrib/llvm/include/llvm/Support/ELFRelocs/SystemZ.def
new file mode 100644
index 0000000..711f940
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/SystemZ.def
@@ -0,0 +1,67 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_390_NONE, 0)
+ELF_RELOC(R_390_8, 1)
+ELF_RELOC(R_390_12, 2)
+ELF_RELOC(R_390_16, 3)
+ELF_RELOC(R_390_32, 4)
+ELF_RELOC(R_390_PC32, 5)
+ELF_RELOC(R_390_GOT12, 6)
+ELF_RELOC(R_390_GOT32, 7)
+ELF_RELOC(R_390_PLT32, 8)
+ELF_RELOC(R_390_COPY, 9)
+ELF_RELOC(R_390_GLOB_DAT, 10)
+ELF_RELOC(R_390_JMP_SLOT, 11)
+ELF_RELOC(R_390_RELATIVE, 12)
+ELF_RELOC(R_390_GOTOFF, 13)
+ELF_RELOC(R_390_GOTPC, 14)
+ELF_RELOC(R_390_GOT16, 15)
+ELF_RELOC(R_390_PC16, 16)
+ELF_RELOC(R_390_PC16DBL, 17)
+ELF_RELOC(R_390_PLT16DBL, 18)
+ELF_RELOC(R_390_PC32DBL, 19)
+ELF_RELOC(R_390_PLT32DBL, 20)
+ELF_RELOC(R_390_GOTPCDBL, 21)
+ELF_RELOC(R_390_64, 22)
+ELF_RELOC(R_390_PC64, 23)
+ELF_RELOC(R_390_GOT64, 24)
+ELF_RELOC(R_390_PLT64, 25)
+ELF_RELOC(R_390_GOTENT, 26)
+ELF_RELOC(R_390_GOTOFF16, 27)
+ELF_RELOC(R_390_GOTOFF64, 28)
+ELF_RELOC(R_390_GOTPLT12, 29)
+ELF_RELOC(R_390_GOTPLT16, 30)
+ELF_RELOC(R_390_GOTPLT32, 31)
+ELF_RELOC(R_390_GOTPLT64, 32)
+ELF_RELOC(R_390_GOTPLTENT, 33)
+ELF_RELOC(R_390_PLTOFF16, 34)
+ELF_RELOC(R_390_PLTOFF32, 35)
+ELF_RELOC(R_390_PLTOFF64, 36)
+ELF_RELOC(R_390_TLS_LOAD, 37)
+ELF_RELOC(R_390_TLS_GDCALL, 38)
+ELF_RELOC(R_390_TLS_LDCALL, 39)
+ELF_RELOC(R_390_TLS_GD32, 40)
+ELF_RELOC(R_390_TLS_GD64, 41)
+ELF_RELOC(R_390_TLS_GOTIE12, 42)
+ELF_RELOC(R_390_TLS_GOTIE32, 43)
+ELF_RELOC(R_390_TLS_GOTIE64, 44)
+ELF_RELOC(R_390_TLS_LDM32, 45)
+ELF_RELOC(R_390_TLS_LDM64, 46)
+ELF_RELOC(R_390_TLS_IE32, 47)
+ELF_RELOC(R_390_TLS_IE64, 48)
+ELF_RELOC(R_390_TLS_IEENT, 49)
+ELF_RELOC(R_390_TLS_LE32, 50)
+ELF_RELOC(R_390_TLS_LE64, 51)
+ELF_RELOC(R_390_TLS_LDO32, 52)
+ELF_RELOC(R_390_TLS_LDO64, 53)
+ELF_RELOC(R_390_TLS_DTPMOD, 54)
+ELF_RELOC(R_390_TLS_DTPOFF, 55)
+ELF_RELOC(R_390_TLS_TPOFF, 56)
+ELF_RELOC(R_390_20, 57)
+ELF_RELOC(R_390_GOT20, 58)
+ELF_RELOC(R_390_GOTPLT20, 59)
+ELF_RELOC(R_390_TLS_GOTIE20, 60)
+ELF_RELOC(R_390_IRELATIVE, 61)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/i386.def b/contrib/llvm/include/llvm/Support/ELFRelocs/i386.def
new file mode 100644
index 0000000..45eae7f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/i386.def
@@ -0,0 +1,47 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+// TODO: this is just a subset
+ELF_RELOC(R_386_NONE, 0)
+ELF_RELOC(R_386_32, 1)
+ELF_RELOC(R_386_PC32, 2)
+ELF_RELOC(R_386_GOT32, 3)
+ELF_RELOC(R_386_PLT32, 4)
+ELF_RELOC(R_386_COPY, 5)
+ELF_RELOC(R_386_GLOB_DAT, 6)
+ELF_RELOC(R_386_JUMP_SLOT, 7)
+ELF_RELOC(R_386_RELATIVE, 8)
+ELF_RELOC(R_386_GOTOFF, 9)
+ELF_RELOC(R_386_GOTPC, 10)
+ELF_RELOC(R_386_32PLT, 11)
+ELF_RELOC(R_386_TLS_TPOFF, 14)
+ELF_RELOC(R_386_TLS_IE, 15)
+ELF_RELOC(R_386_TLS_GOTIE, 16)
+ELF_RELOC(R_386_TLS_LE, 17)
+ELF_RELOC(R_386_TLS_GD, 18)
+ELF_RELOC(R_386_TLS_LDM, 19)
+ELF_RELOC(R_386_16, 20)
+ELF_RELOC(R_386_PC16, 21)
+ELF_RELOC(R_386_8, 22)
+ELF_RELOC(R_386_PC8, 23)
+ELF_RELOC(R_386_TLS_GD_32, 24)
+ELF_RELOC(R_386_TLS_GD_PUSH, 25)
+ELF_RELOC(R_386_TLS_GD_CALL, 26)
+ELF_RELOC(R_386_TLS_GD_POP, 27)
+ELF_RELOC(R_386_TLS_LDM_32, 28)
+ELF_RELOC(R_386_TLS_LDM_PUSH, 29)
+ELF_RELOC(R_386_TLS_LDM_CALL, 30)
+ELF_RELOC(R_386_TLS_LDM_POP, 31)
+ELF_RELOC(R_386_TLS_LDO_32, 32)
+ELF_RELOC(R_386_TLS_IE_32, 33)
+ELF_RELOC(R_386_TLS_LE_32, 34)
+ELF_RELOC(R_386_TLS_DTPMOD32, 35)
+ELF_RELOC(R_386_TLS_DTPOFF32, 36)
+ELF_RELOC(R_386_TLS_TPOFF32, 37)
+ELF_RELOC(R_386_TLS_GOTDESC, 39)
+ELF_RELOC(R_386_TLS_DESC_CALL, 40)
+ELF_RELOC(R_386_TLS_DESC, 41)
+ELF_RELOC(R_386_IRELATIVE, 42)
+ELF_RELOC(R_386_NUM, 43)
diff --git a/contrib/llvm/include/llvm/Support/ELFRelocs/x86_64.def b/contrib/llvm/include/llvm/Support/ELFRelocs/x86_64.def
new file mode 100644
index 0000000..36ad061
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ELFRelocs/x86_64.def
@@ -0,0 +1,44 @@
+
+#ifndef ELF_RELOC
+#error "ELF_RELOC must be defined"
+#endif
+
+ELF_RELOC(R_X86_64_NONE, 0)
+ELF_RELOC(R_X86_64_64, 1)
+ELF_RELOC(R_X86_64_PC32, 2)
+ELF_RELOC(R_X86_64_GOT32, 3)
+ELF_RELOC(R_X86_64_PLT32, 4)
+ELF_RELOC(R_X86_64_COPY, 5)
+ELF_RELOC(R_X86_64_GLOB_DAT, 6)
+ELF_RELOC(R_X86_64_JUMP_SLOT, 7)
+ELF_RELOC(R_X86_64_RELATIVE, 8)
+ELF_RELOC(R_X86_64_GOTPCREL, 9)
+ELF_RELOC(R_X86_64_32, 10)
+ELF_RELOC(R_X86_64_32S, 11)
+ELF_RELOC(R_X86_64_16, 12)
+ELF_RELOC(R_X86_64_PC16, 13)
+ELF_RELOC(R_X86_64_8, 14)
+ELF_RELOC(R_X86_64_PC8, 15)
+ELF_RELOC(R_X86_64_DTPMOD64, 16)
+ELF_RELOC(R_X86_64_DTPOFF64, 17)
+ELF_RELOC(R_X86_64_TPOFF64, 18)
+ELF_RELOC(R_X86_64_TLSGD, 19)
+ELF_RELOC(R_X86_64_TLSLD, 20)
+ELF_RELOC(R_X86_64_DTPOFF32, 21)
+ELF_RELOC(R_X86_64_GOTTPOFF, 22)
+ELF_RELOC(R_X86_64_TPOFF32, 23)
+ELF_RELOC(R_X86_64_PC64, 24)
+ELF_RELOC(R_X86_64_GOTOFF64, 25)
+ELF_RELOC(R_X86_64_GOTPC32, 26)
+ELF_RELOC(R_X86_64_GOT64, 27)
+ELF_RELOC(R_X86_64_GOTPCREL64, 28)
+ELF_RELOC(R_X86_64_GOTPC64, 29)
+ELF_RELOC(R_X86_64_GOTPLT64, 30)
+ELF_RELOC(R_X86_64_PLTOFF64, 31)
+ELF_RELOC(R_X86_64_SIZE32, 32)
+ELF_RELOC(R_X86_64_SIZE64, 33)
+ELF_RELOC(R_X86_64_GOTPC32_TLSDESC, 34)
+ELF_RELOC(R_X86_64_TLSDESC_CALL, 35)
+ELF_RELOC(R_X86_64_TLSDESC, 36)
+ELF_RELOC(R_X86_64_IRELATIVE, 37)
+
diff --git a/contrib/llvm/include/llvm/Support/Endian.h b/contrib/llvm/include/llvm/Support/Endian.h
new file mode 100644
index 0000000..bc93c9a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Endian.h
@@ -0,0 +1,345 @@
+//===- Endian.h - Utilities for IO with endian specific data ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic functions to read and write endian specific data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ENDIAN_H
+#define LLVM_SUPPORT_ENDIAN_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/SwapByteOrder.h"
+
+namespace llvm {
+namespace support {
+enum endianness {big, little, native};
+
+// These are named values for common alignments.
+enum {aligned = 0, unaligned = 1};
+
+namespace detail {
+ /// \brief ::value is either alignment, or alignof(T) if alignment is 0.
+ template<class T, int alignment>
+ struct PickAlignment {
+ enum {value = alignment == 0 ? AlignOf<T>::Alignment : alignment};
+ };
+} // end namespace detail
+
+namespace endian {
+/// Swap the bytes of value to match the given endianness.
+template<typename value_type, endianness endian>
+inline value_type byte_swap(value_type value) {
+ if (endian != native && sys::IsBigEndianHost != (endian == big))
+ sys::swapByteOrder(value);
+ return value;
+}
+
+/// Read a value of a particular endianness from memory.
+template<typename value_type,
+ endianness endian,
+ std::size_t alignment>
+inline value_type read(const void *memory) {
+ value_type ret;
+
+ memcpy(&ret,
+ LLVM_ASSUME_ALIGNED(memory,
+ (detail::PickAlignment<value_type, alignment>::value)),
+ sizeof(value_type));
+ return byte_swap<value_type, endian>(ret);
+}
+
+/// Read a value of a particular endianness from a buffer, and increment the
+/// buffer past that value.
+template<typename value_type, endianness endian, std::size_t alignment,
+ typename CharT>
+inline value_type readNext(const CharT *&memory) {
+ value_type ret = read<value_type, endian, alignment>(memory);
+ memory += sizeof(value_type);
+ return ret;
+}
+
+/// Write a value to memory with a particular endianness.
+template<typename value_type,
+ endianness endian,
+ std::size_t alignment>
+inline void write(void *memory, value_type value) {
+ value = byte_swap<value_type, endian>(value);
+ memcpy(LLVM_ASSUME_ALIGNED(memory,
+ (detail::PickAlignment<value_type, alignment>::value)),
+ &value,
+ sizeof(value_type));
+}
+
+template <typename value_type>
+using make_unsigned_t = typename std::make_unsigned<value_type>::type;
+
+/// Read a value of a particular endianness from memory, for a location
+/// that starts at the given bit offset within the first byte.
+template <typename value_type, endianness endian, std::size_t alignment>
+inline value_type readAtBitAlignment(const void *memory, uint64_t startBit) {
+ assert(startBit < 8);
+ if (startBit == 0)
+ return read<value_type, endian, alignment>(memory);
+ else {
+ // Read two values and compose the result from them.
+ value_type val[2];
+ memcpy(&val[0],
+ LLVM_ASSUME_ALIGNED(
+ memory, (detail::PickAlignment<value_type, alignment>::value)),
+ sizeof(value_type) * 2);
+ val[0] = byte_swap<value_type, endian>(val[0]);
+ val[1] = byte_swap<value_type, endian>(val[1]);
+
+ // Shift bits from the lower value into place.
+ make_unsigned_t<value_type> lowerVal = val[0] >> startBit;
+ // Mask off upper bits after right shift in case of signed type.
+ make_unsigned_t<value_type> numBitsFirstVal =
+ (sizeof(value_type) * 8) - startBit;
+ lowerVal &= ((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1;
+
+ // Get the bits from the upper value.
+ make_unsigned_t<value_type> upperVal =
+ val[1] & (((make_unsigned_t<value_type>)1 << startBit) - 1);
+ // Shift them in to place.
+ upperVal <<= numBitsFirstVal;
+
+ return lowerVal | upperVal;
+ }
+}
+
+/// Write a value to memory with a particular endianness, for a location
+/// that starts at the given bit offset within the first byte.
+template <typename value_type, endianness endian, std::size_t alignment>
+inline void writeAtBitAlignment(void *memory, value_type value,
+ uint64_t startBit) {
+ assert(startBit < 8);
+ if (startBit == 0)
+ write<value_type, endian, alignment>(memory, value);
+ else {
+    // Read two values and shift the new bits into them.
+ value_type val[2];
+ memcpy(&val[0],
+ LLVM_ASSUME_ALIGNED(
+ memory, (detail::PickAlignment<value_type, alignment>::value)),
+ sizeof(value_type) * 2);
+ val[0] = byte_swap<value_type, endian>(val[0]);
+ val[1] = byte_swap<value_type, endian>(val[1]);
+
+ // Mask off any existing bits in the upper part of the lower value that
+ // we want to replace.
+ val[0] &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
+ make_unsigned_t<value_type> numBitsFirstVal =
+ (sizeof(value_type) * 8) - startBit;
+ make_unsigned_t<value_type> lowerVal = value;
+ if (startBit > 0) {
+ // Mask off the upper bits in the new value that are not going to go into
+ // the lower value. This avoids a left shift of a negative value, which
+ // is undefined behavior.
+ lowerVal &= (((make_unsigned_t<value_type>)1 << numBitsFirstVal) - 1);
+ // Now shift the new bits into place
+ lowerVal <<= startBit;
+ }
+ val[0] |= lowerVal;
+
+ // Mask off any existing bits in the lower part of the upper value that
+ // we want to replace.
+ val[1] &= ~(((make_unsigned_t<value_type>)1 << startBit) - 1);
+ // Next shift the bits that go into the upper value into position.
+ make_unsigned_t<value_type> upperVal = value >> numBitsFirstVal;
+ // Mask off upper bits after right shift in case of signed type.
+ upperVal &= ((make_unsigned_t<value_type>)1 << startBit) - 1;
+ val[1] |= upperVal;
+
+ // Finally, rewrite values.
+ val[0] = byte_swap<value_type, endian>(val[0]);
+ val[1] = byte_swap<value_type, endian>(val[1]);
+ memcpy(LLVM_ASSUME_ALIGNED(
+ memory, (detail::PickAlignment<value_type, alignment>::value)),
+ &val[0], sizeof(value_type) * 2);
+ }
+}
+} // end namespace endian
+
+namespace detail {
+template<typename value_type,
+ endianness endian,
+ std::size_t alignment>
+struct packed_endian_specific_integral {
+ operator value_type() const {
+ return endian::read<value_type, endian, alignment>(
+ (const void*)Value.buffer);
+ }
+
+ void operator=(value_type newValue) {
+ endian::write<value_type, endian, alignment>(
+ (void*)Value.buffer, newValue);
+ }
+
+ packed_endian_specific_integral &operator+=(value_type newValue) {
+ *this = *this + newValue;
+ return *this;
+ }
+
+ packed_endian_specific_integral &operator-=(value_type newValue) {
+ *this = *this - newValue;
+ return *this;
+ }
+
+ packed_endian_specific_integral &operator|=(value_type newValue) {
+ *this = *this | newValue;
+ return *this;
+ }
+
+ packed_endian_specific_integral &operator&=(value_type newValue) {
+ *this = *this & newValue;
+ return *this;
+ }
+
+private:
+ AlignedCharArray<PickAlignment<value_type, alignment>::value,
+ sizeof(value_type)> Value;
+
+public:
+ struct ref {
+ explicit ref(void *Ptr) : Ptr(Ptr) {}
+
+ operator value_type() const {
+ return endian::read<value_type, endian, alignment>(Ptr);
+ }
+
+ void operator=(value_type NewValue) {
+ endian::write<value_type, endian, alignment>(Ptr, NewValue);
+ }
+
+ private:
+ void *Ptr;
+ };
+};
+
+} // end namespace detail
+
+typedef detail::packed_endian_specific_integral
+ <uint16_t, little, unaligned> ulittle16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, little, unaligned> ulittle32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, little, unaligned> ulittle64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int16_t, little, unaligned> little16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, little, unaligned> little32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, little, unaligned> little64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint16_t, little, aligned> aligned_ulittle16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, little, aligned> aligned_ulittle32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, little, aligned> aligned_ulittle64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int16_t, little, aligned> aligned_little16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, little, aligned> aligned_little32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, little, aligned> aligned_little64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint16_t, big, unaligned> ubig16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, big, unaligned> ubig32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, big, unaligned> ubig64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int16_t, big, unaligned> big16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, big, unaligned> big32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, big, unaligned> big64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint16_t, big, aligned> aligned_ubig16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, big, aligned> aligned_ubig32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, big, aligned> aligned_ubig64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int16_t, big, aligned> aligned_big16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, big, aligned> aligned_big32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, big, aligned> aligned_big64_t;
+
+typedef detail::packed_endian_specific_integral
+ <uint16_t, native, unaligned> unaligned_uint16_t;
+typedef detail::packed_endian_specific_integral
+ <uint32_t, native, unaligned> unaligned_uint32_t;
+typedef detail::packed_endian_specific_integral
+ <uint64_t, native, unaligned> unaligned_uint64_t;
+
+typedef detail::packed_endian_specific_integral
+ <int16_t, native, unaligned> unaligned_int16_t;
+typedef detail::packed_endian_specific_integral
+ <int32_t, native, unaligned> unaligned_int32_t;
+typedef detail::packed_endian_specific_integral
+ <int64_t, native, unaligned> unaligned_int64_t;
+
+namespace endian {
+template <typename T, endianness E> inline T read(const void *P) {
+ return *(const detail::packed_endian_specific_integral<T, E, unaligned> *)P;
+}
+
+template <endianness E> inline uint16_t read16(const void *P) {
+ return read<uint16_t, E>(P);
+}
+template <endianness E> inline uint32_t read32(const void *P) {
+ return read<uint32_t, E>(P);
+}
+template <endianness E> inline uint64_t read64(const void *P) {
+ return read<uint64_t, E>(P);
+}
+
+inline uint16_t read16le(const void *P) { return read16<little>(P); }
+inline uint32_t read32le(const void *P) { return read32<little>(P); }
+inline uint64_t read64le(const void *P) { return read64<little>(P); }
+inline uint16_t read16be(const void *P) { return read16<big>(P); }
+inline uint32_t read32be(const void *P) { return read32<big>(P); }
+inline uint64_t read64be(const void *P) { return read64<big>(P); }
+
+template <typename T, endianness E> inline void write(void *P, T V) {
+ *(detail::packed_endian_specific_integral<T, E, unaligned> *)P = V;
+}
+
+template <endianness E> inline void write16(void *P, uint16_t V) {
+ write<uint16_t, E>(P, V);
+}
+template <endianness E> inline void write32(void *P, uint32_t V) {
+ write<uint32_t, E>(P, V);
+}
+template <endianness E> inline void write64(void *P, uint64_t V) {
+ write<uint64_t, E>(P, V);
+}
+
+inline void write16le(void *P, uint16_t V) { write16<little>(P, V); }
+inline void write32le(void *P, uint32_t V) { write32<little>(P, V); }
+inline void write64le(void *P, uint64_t V) { write64<little>(P, V); }
+inline void write16be(void *P, uint16_t V) { write16<big>(P, V); }
+inline void write32be(void *P, uint32_t V) { write32<big>(P, V); }
+inline void write64be(void *P, uint64_t V) { write64<big>(P, V); }
+} // end namespace endian
+} // end namespace support
+} // end namespace llvm
+
+#endif
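A short usage sketch for the header above: the free functions handle one-off reads and writes in a fixed byte order, while the packed_endian_specific_integral typedefs let a struct overlay describe an on-disk layout directly. The struct, field names, and buffer here are hypothetical.

#include "llvm/Support/Endian.h"
using namespace llvm::support;

// Overlay type for a little-endian record; each field converts to a plain
// integer on read and byte-swaps on write as needed.
struct RecordHeader {
  ulittle32_t Magic;
  ulittle16_t Version;
};

uint32_t magicOf(const char *Buf) {
  // Unaligned little-endian read, independent of the host byte order.
  return endian::read32le(Buf);
}

void setVersionBE(char *Buf, uint16_t V) {
  // Big-endian counterpart for writes.
  endian::write16be(Buf, V);
}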
diff --git a/contrib/llvm/include/llvm/Support/EndianStream.h b/contrib/llvm/include/llvm/Support/EndianStream.h
new file mode 100644
index 0000000..d44a9b3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/EndianStream.h
@@ -0,0 +1,64 @@
+//===- EndianStream.h - Stream ops with endian specific data ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities for operating on streams that have endian
+// specific data.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ENDIANSTREAM_H
+#define LLVM_SUPPORT_ENDIANSTREAM_H
+
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace support {
+
+namespace endian {
+/// Adapter to write values to a stream in a particular byte order.
+template <endianness endian> struct Writer {
+ raw_ostream &OS;
+ Writer(raw_ostream &OS) : OS(OS) {}
+ template <typename value_type> void write(value_type Val) {
+ Val = byte_swap<value_type, endian>(Val);
+ OS.write((const char *)&Val, sizeof(value_type));
+ }
+};
+
+template <>
+template <>
+inline void Writer<little>::write<float>(float Val) {
+ write(FloatToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<little>::write<double>(double Val) {
+ write(DoubleToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<big>::write<float>(float Val) {
+ write(FloatToBits(Val));
+}
+
+template <>
+template <>
+inline void Writer<big>::write<double>(double Val) {
+ write(DoubleToBits(Val));
+}
+
+} // end namespace endian
+
+} // end namespace support
+} // end namespace llvm
+
+#endif
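A sketch of the Writer adapter above, serializing a hypothetical record in little-endian order; the float/double overloads route through the FloatToBits/DoubleToBits specializations defined in the header.

#include "llvm/Support/EndianStream.h"
#include "llvm/Support/raw_ostream.h"

void emitRecord(llvm::raw_ostream &OS, uint32_t Id, double Weight) {
  llvm::support::endian::Writer<llvm::support::little> W(OS);
  W.write(Id);     // byte-swapped on big-endian hosts
  W.write(Weight); // uses the double specialization above
}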
diff --git a/contrib/llvm/include/llvm/Support/Errc.h b/contrib/llvm/include/llvm/Support/Errc.h
new file mode 100644
index 0000000..80bfe2a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Errc.h
@@ -0,0 +1,86 @@
+//===- llvm/Support/Errc.h - Defines the llvm::errc enum --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// While std::error_code works OK on all platforms we use, there are some
+// problems with std::errc that can be avoided by using our own
+// enumeration:
+//
+// * std::errc is a namespace in some implementations. That means that ADL
+// doesn't work and it is sometimes necessary to write std::make_error_code
+// or in templates:
+// using std::make_error_code;
+// make_error_code(...);
+//
+// With this enum, it is safe to always just use make_error_code.
+//
+// * Some implementations define fewer names than others. This header has
+// the intersection of all the ones we support.
+//
+// * std::errc is only marked with is_error_condition_enum. This means that
+// common patterns like AnErrorCode == errc::no_such_file_or_directory take
+// four virtual calls instead of two comparisons.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRC_H
+#define LLVM_SUPPORT_ERRC_H
+
+#include <system_error>
+
+namespace llvm {
+enum class errc {
+ argument_list_too_long = int(std::errc::argument_list_too_long),
+ argument_out_of_domain = int(std::errc::argument_out_of_domain),
+ bad_address = int(std::errc::bad_address),
+ bad_file_descriptor = int(std::errc::bad_file_descriptor),
+ broken_pipe = int(std::errc::broken_pipe),
+ device_or_resource_busy = int(std::errc::device_or_resource_busy),
+ directory_not_empty = int(std::errc::directory_not_empty),
+ executable_format_error = int(std::errc::executable_format_error),
+ file_exists = int(std::errc::file_exists),
+ file_too_large = int(std::errc::file_too_large),
+ filename_too_long = int(std::errc::filename_too_long),
+ function_not_supported = int(std::errc::function_not_supported),
+ illegal_byte_sequence = int(std::errc::illegal_byte_sequence),
+ inappropriate_io_control_operation =
+ int(std::errc::inappropriate_io_control_operation),
+ interrupted = int(std::errc::interrupted),
+ invalid_argument = int(std::errc::invalid_argument),
+ invalid_seek = int(std::errc::invalid_seek),
+ io_error = int(std::errc::io_error),
+ is_a_directory = int(std::errc::is_a_directory),
+ no_child_process = int(std::errc::no_child_process),
+ no_lock_available = int(std::errc::no_lock_available),
+ no_space_on_device = int(std::errc::no_space_on_device),
+ no_such_device_or_address = int(std::errc::no_such_device_or_address),
+ no_such_device = int(std::errc::no_such_device),
+ no_such_file_or_directory = int(std::errc::no_such_file_or_directory),
+ no_such_process = int(std::errc::no_such_process),
+ not_a_directory = int(std::errc::not_a_directory),
+ not_enough_memory = int(std::errc::not_enough_memory),
+ operation_not_permitted = int(std::errc::operation_not_permitted),
+ permission_denied = int(std::errc::permission_denied),
+ read_only_file_system = int(std::errc::read_only_file_system),
+ resource_deadlock_would_occur = int(std::errc::resource_deadlock_would_occur),
+ resource_unavailable_try_again =
+ int(std::errc::resource_unavailable_try_again),
+ result_out_of_range = int(std::errc::result_out_of_range),
+ too_many_files_open_in_system = int(std::errc::too_many_files_open_in_system),
+ too_many_files_open = int(std::errc::too_many_files_open),
+ too_many_links = int(std::errc::too_many_links)
+};
+
+inline std::error_code make_error_code(errc E) {
+ return std::error_code(static_cast<int>(E), std::generic_category());
+}
+}
+
+namespace std {
+template <> struct is_error_code_enum<llvm::errc> : std::true_type {};
+}
+#endif
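A sketch showing why the enum above is convenient: llvm::errc values convert implicitly to std::error_code, unqualified make_error_code resolves through ADL, and equality against an error_code compiles to plain comparisons. Both functions are hypothetical.

#include "llvm/Support/Errc.h"
#include <system_error>

std::error_code findConfig(bool Exists) {
  if (!Exists)
    return llvm::errc::no_such_file_or_directory; // implicit conversion
  return std::error_code();                       // success
}

bool isMissing(std::error_code EC) {
  // Resolves via is_error_code_enum; no error_condition machinery needed.
  return EC == llvm::errc::no_such_file_or_directory;
}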
diff --git a/contrib/llvm/include/llvm/Support/Errno.h b/contrib/llvm/include/llvm/Support/Errno.h
new file mode 100644
index 0000000..8e145c7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Errno.h
@@ -0,0 +1,34 @@
+//===- llvm/Support/Errno.h - Portable+convenient errno handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some portable and convenient functions to deal with errno.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRNO_H
+#define LLVM_SUPPORT_ERRNO_H
+
+#include <string>
+
+namespace llvm {
+namespace sys {
+
+/// Returns a string representation of the errno value, using whatever
+/// thread-safe variant of strerror() is available. Be sure to call this
+/// immediately after the function that set errno, or errno may have been
+/// overwritten by an intervening call.
+std::string StrError();
+
+/// Like the no-argument version above, but uses \p errnum instead of errno.
+std::string StrError(int errnum);
+
+} // namespace sys
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_ERRNO_H
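A sketch of the intended call pattern: capture the errno string immediately after the failing call, before anything else can overwrite errno. The wrapper function is hypothetical.

#include "llvm/Support/Errno.h"
#include <cstdio>
#include <string>

FILE *openOrExplain(const char *Path, std::string &Err) {
  FILE *F = std::fopen(Path, "rb");
  if (!F)
    Err = llvm::sys::StrError(); // must run before errno is clobbered
  return F;
}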
diff --git a/contrib/llvm/include/llvm/Support/ErrorHandling.h b/contrib/llvm/include/llvm/Support/ErrorHandling.h
new file mode 100644
index 0000000..32f05e0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ErrorHandling.h
@@ -0,0 +1,106 @@
+//===- llvm/Support/ErrorHandling.h - Fatal error handling ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an API used to indicate fatal error conditions. Non-fatal
+// errors (most of them) should be handled through LLVMContext.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERRORHANDLING_H
+#define LLVM_SUPPORT_ERRORHANDLING_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <string>
+
+namespace llvm {
+ class Twine;
+
+ /// An error handler callback.
+ typedef void (*fatal_error_handler_t)(void *user_data,
+ const std::string& reason,
+ bool gen_crash_diag);
+
+ /// install_fatal_error_handler - Installs a new error handler to be used
+ /// whenever a serious (non-recoverable) error is encountered by LLVM.
+ ///
+  /// If no error handler is installed, the default is to print the error message
+  /// to stderr and call exit(1). If an error handler is installed, then it is
+  /// the handler's responsibility to log the message; it will no longer be
+ /// printed to stderr. If the error handler returns, then exit(1) will be
+ /// called.
+ ///
+ /// It is dangerous to naively use an error handler which throws an exception.
+ /// Even though some applications desire to gracefully recover from arbitrary
+ /// faults, blindly throwing exceptions through unfamiliar code isn't a way to
+ /// achieve this.
+ ///
+  /// \param user_data - An argument which will be passed to the installed error
+ /// handler.
+ void install_fatal_error_handler(fatal_error_handler_t handler,
+ void *user_data = nullptr);
+
+ /// Restores default error handling behaviour.
+ void remove_fatal_error_handler();
+
+ /// ScopedFatalErrorHandler - This is a simple helper class which just
+ /// calls install_fatal_error_handler in its constructor and
+ /// remove_fatal_error_handler in its destructor.
+ struct ScopedFatalErrorHandler {
+ explicit ScopedFatalErrorHandler(fatal_error_handler_t handler,
+ void *user_data = nullptr) {
+ install_fatal_error_handler(handler, user_data);
+ }
+
+ ~ScopedFatalErrorHandler() { remove_fatal_error_handler(); }
+ };
+
+/// Reports a serious error, calling any installed error handler. These
+/// functions are intended to be used for error conditions which are outside
+/// the control of the compiler (I/O errors, invalid user input, etc.)
+///
+/// If no error handler is installed, the default is to print the message to
+/// standard error, followed by a newline.
+/// After the error handler is called, this function will call exit(1); it
+/// does not return.
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason,
+ bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const std::string &reason,
+ bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(StringRef reason,
+ bool gen_crash_diag = true);
+LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const Twine &reason,
+ bool gen_crash_diag = true);
+
+ /// This function calls abort(), and prints the optional message to stderr.
+ /// Use the llvm_unreachable macro (that adds location info), instead of
+ /// calling this function directly.
+ LLVM_ATTRIBUTE_NORETURN void
+ llvm_unreachable_internal(const char *msg=nullptr, const char *file=nullptr,
+ unsigned line=0);
+}
+
+/// Marks that the current location is not supposed to be reachable.
+/// In !NDEBUG builds, prints the message and location info to stderr.
+/// In NDEBUG builds, becomes an optimizer hint that the current location
+/// is not supposed to be reachable. On compilers that don't support
+/// such hints, prints a reduced message instead.
+///
+/// Use this instead of assert(0). It conveys intent more clearly and
+/// allows compilers to omit some unnecessary code.
+#ifndef NDEBUG
+#define llvm_unreachable(msg) \
+ ::llvm::llvm_unreachable_internal(msg, __FILE__, __LINE__)
+#elif defined(LLVM_BUILTIN_UNREACHABLE)
+#define llvm_unreachable(msg) LLVM_BUILTIN_UNREACHABLE
+#else
+#define llvm_unreachable(msg) ::llvm::llvm_unreachable_internal()
+#endif
+
+#endif
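A sketch of both facilities above: installing a custom fatal error handler and using llvm_unreachable to mark an impossible switch case. The handler body and the switch are placeholders.

#include "llvm/Support/ErrorHandling.h"
#include <string>

static void logFatal(void *UserData, const std::string &Reason,
                     bool GenCrashDiag) {
  (void)UserData; (void)GenCrashDiag;
  // ... record Reason somewhere; exit(1) still follows if this returns.
}

void configureErrors() {
  llvm::install_fatal_error_handler(logFatal);
}

int widthOf(int Kind) {
  switch (Kind) {
  case 0: return 8;
  case 1: return 16;
  default:
    llvm_unreachable("unhandled Kind"); // optimizer hint in NDEBUG builds
  }
}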
diff --git a/contrib/llvm/include/llvm/Support/ErrorOr.h b/contrib/llvm/include/llvm/Support/ErrorOr.h
new file mode 100644
index 0000000..ca6ede7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ErrorOr.h
@@ -0,0 +1,298 @@
+//===- llvm/Support/ErrorOr.h - Error Smart Pointer -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+///
+/// Provides ErrorOr<T> smart pointer.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_ERROROR_H
+#define LLVM_SUPPORT_ERROROR_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/AlignOf.h"
+#include <cassert>
+#include <system_error>
+#include <type_traits>
+
+namespace llvm {
+template<class T, class V>
+typename std::enable_if< std::is_constructible<T, V>::value
+ , typename std::remove_reference<V>::type>::type &&
+ moveIfMoveConstructible(V &Val) {
+ return std::move(Val);
+}
+
+template<class T, class V>
+typename std::enable_if< !std::is_constructible<T, V>::value
+ , typename std::remove_reference<V>::type>::type &
+moveIfMoveConstructible(V &Val) {
+ return Val;
+}
+
+/// \brief Stores a reference that can be changed.
+template <typename T>
+class ReferenceStorage {
+ T *Storage;
+
+public:
+ ReferenceStorage(T &Ref) : Storage(&Ref) {}
+
+ operator T &() const { return *Storage; }
+ T &get() const { return *Storage; }
+};
+
+/// \brief Represents either an error or a value T.
+///
+/// ErrorOr<T> is a pointer-like class that represents the result of an
+/// operation. The result is either an error, or a value of type T. This is
+/// designed to emulate the usage of returning a pointer where nullptr indicates
+/// failure. However, instead of just knowing that the operation failed, we also
+/// have an error_code and optional user data that describe why it failed.
+///
+/// It is used like the following.
+/// \code
+/// ErrorOr<Buffer> getBuffer();
+///
+/// auto buffer = getBuffer();
+/// if (std::error_code ec = buffer.getError())
+/// return ec;
+/// buffer->write("adena");
+/// \endcode
+///
+///
+/// Implicit conversion to bool returns true if there is a usable value. The
+/// unary * and -> operators provide pointer-like access to the value. Accessing
+/// the value when there is an error has undefined behavior.
+///
+/// When T is a reference type the behavior is slightly different. The reference
+/// is held in a ReferenceStorage<std::remove_reference<T>::type>, and
+/// there is special handling to make operator -> work as if T were not a
+/// reference.
+///
+/// T cannot be an rvalue reference.
+template<class T>
+class ErrorOr {
+ template <class OtherT> friend class ErrorOr;
+ static const bool isRef = std::is_reference<T>::value;
+ typedef ReferenceStorage<typename std::remove_reference<T>::type> wrap;
+
+public:
+ typedef typename std::conditional<isRef, wrap, T>::type storage_type;
+
+private:
+ typedef typename std::remove_reference<T>::type &reference;
+ typedef const typename std::remove_reference<T>::type &const_reference;
+ typedef typename std::remove_reference<T>::type *pointer;
+ typedef const typename std::remove_reference<T>::type *const_pointer;
+
+public:
+ template <class E>
+ ErrorOr(E ErrorCode,
+ typename std::enable_if<std::is_error_code_enum<E>::value ||
+ std::is_error_condition_enum<E>::value,
+ void *>::type = 0)
+ : HasError(true) {
+ new (getErrorStorage()) std::error_code(make_error_code(ErrorCode));
+ }
+
+ ErrorOr(std::error_code EC) : HasError(true) {
+ new (getErrorStorage()) std::error_code(EC);
+ }
+
+ ErrorOr(T Val) : HasError(false) {
+ new (getStorage()) storage_type(moveIfMoveConstructible<storage_type>(Val));
+ }
+
+ ErrorOr(const ErrorOr &Other) {
+ copyConstruct(Other);
+ }
+
+ template <class OtherT>
+ ErrorOr(
+ const ErrorOr<OtherT> &Other,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr) {
+ copyConstruct(Other);
+ }
+
+ template <class OtherT>
+ explicit ErrorOr(
+ const ErrorOr<OtherT> &Other,
+ typename std::enable_if<
+ !std::is_convertible<OtherT, const T &>::value>::type * = nullptr) {
+ copyConstruct(Other);
+ }
+
+ ErrorOr(ErrorOr &&Other) {
+ moveConstruct(std::move(Other));
+ }
+
+ template <class OtherT>
+ ErrorOr(
+ ErrorOr<OtherT> &&Other,
+ typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+ nullptr) {
+ moveConstruct(std::move(Other));
+ }
+
+ // This might eventually need proper SFINAE, but that is more complex than
+ // is_convertible; the simple check suffices for now.
+ template <class OtherT>
+ explicit ErrorOr(
+ ErrorOr<OtherT> &&Other,
+ typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * =
+ nullptr) {
+ moveConstruct(std::move(Other));
+ }
+
+ ErrorOr &operator=(const ErrorOr &Other) {
+ copyAssign(Other);
+ return *this;
+ }
+
+ ErrorOr &operator=(ErrorOr &&Other) {
+ moveAssign(std::move(Other));
+ return *this;
+ }
+
+ ~ErrorOr() {
+ if (!HasError)
+ getStorage()->~storage_type();
+ }
+
+ /// \brief Return false if there is an error.
+ explicit operator bool() const {
+ return !HasError;
+ }
+
+ reference get() { return *getStorage(); }
+ const_reference get() const { return const_cast<ErrorOr<T> *>(this)->get(); }
+
+ std::error_code getError() const {
+ return HasError ? *getErrorStorage() : std::error_code();
+ }
+
+ pointer operator ->() {
+ return toPointer(getStorage());
+ }
+
+ const_pointer operator->() const { return toPointer(getStorage()); }
+
+ reference operator *() {
+ return *getStorage();
+ }
+
+ const_reference operator*() const { return *getStorage(); }
+
+private:
+ template <class OtherT>
+ void copyConstruct(const ErrorOr<OtherT> &Other) {
+ if (!Other.HasError) {
+ // Get the other value.
+ HasError = false;
+ new (getStorage()) storage_type(*Other.getStorage());
+ } else {
+ // Get other's error.
+ HasError = true;
+ new (getErrorStorage()) std::error_code(Other.getError());
+ }
+ }
+
+ template <class T1>
+ static bool compareThisIfSameType(const T1 &a, const T1 &b) {
+ return &a == &b;
+ }
+
+ template <class T1, class T2>
+ static bool compareThisIfSameType(const T1 &a, const T2 &b) {
+ return false;
+ }
+
+ template <class OtherT>
+ void copyAssign(const ErrorOr<OtherT> &Other) {
+ if (compareThisIfSameType(*this, Other))
+ return;
+
+ this->~ErrorOr();
+ new (this) ErrorOr(Other);
+ }
+
+ template <class OtherT>
+ void moveConstruct(ErrorOr<OtherT> &&Other) {
+ if (!Other.HasError) {
+ // Get the other value.
+ HasError = false;
+ new (getStorage()) storage_type(std::move(*Other.getStorage()));
+ } else {
+ // Get other's error.
+ HasError = true;
+ new (getErrorStorage()) std::error_code(Other.getError());
+ }
+ }
+
+ template <class OtherT>
+ void moveAssign(ErrorOr<OtherT> &&Other) {
+ if (compareThisIfSameType(*this, Other))
+ return;
+
+ this->~ErrorOr();
+ new (this) ErrorOr(std::move(Other));
+ }
+
+ pointer toPointer(pointer Val) {
+ return Val;
+ }
+
+ const_pointer toPointer(const_pointer Val) const { return Val; }
+
+ pointer toPointer(wrap *Val) {
+ return &Val->get();
+ }
+
+ const_pointer toPointer(const wrap *Val) const { return &Val->get(); }
+
+ storage_type *getStorage() {
+ assert(!HasError && "Cannot get value when an error exists!");
+ return reinterpret_cast<storage_type*>(TStorage.buffer);
+ }
+
+ const storage_type *getStorage() const {
+ assert(!HasError && "Cannot get value when an error exists!");
+ return reinterpret_cast<const storage_type*>(TStorage.buffer);
+ }
+
+ std::error_code *getErrorStorage() {
+ assert(HasError && "Cannot get error when a value exists!");
+ return reinterpret_cast<std::error_code *>(ErrorStorage.buffer);
+ }
+
+ const std::error_code *getErrorStorage() const {
+ return const_cast<ErrorOr<T> *>(this)->getErrorStorage();
+ }
+
+ union {
+ AlignedCharArrayUnion<storage_type> TStorage;
+ AlignedCharArrayUnion<std::error_code> ErrorStorage;
+ };
+ bool HasError : 1;
+};
+
+template <class T, class E>
+typename std::enable_if<std::is_error_code_enum<E>::value ||
+ std::is_error_condition_enum<E>::value,
+ bool>::type
+operator==(const ErrorOr<T> &Err, E Code) {
+ return Err.getError() == Code;
+}
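+
+// For illustration, a hedged sketch of comparing against a specific error
+// condition (getBuffer() is the hypothetical factory from the example above):
+//   auto buffer = getBuffer();
+//   if (buffer == std::errc::no_such_file_or_directory)
+//     ... the operation failed with that specific condition ...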
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FileOutputBuffer.h b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
new file mode 100644
index 0000000..3bcf64a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileOutputBuffer.h
@@ -0,0 +1,89 @@
+//=== FileOutputBuffer.h - File Output Buffer -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility for creating an in-memory buffer that will be written to a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+#define LLVM_SUPPORT_FILEOUTPUTBUFFER_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FileSystem.h"
+
+namespace llvm {
+/// FileOutputBuffer - This interface provides a simple way to create an in-memory
+/// buffer which will be written to a file. During the lifetime of these
+/// objects, the content or existence of the specified file is undefined. That
+/// is, creating an OutputBuffer for a file may immediately remove the file.
+/// If the FileOutputBuffer is committed, the target file's content will become
+/// the buffer content at the time of the commit. If the FileOutputBuffer is
+/// not committed, the file will be deleted in the FileOutputBuffer destructor.
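+///
+/// A typical use might look like this (a sketch; the path, size, and data
+/// are illustrative):
+/// \code
+///   auto BufOrErr = FileOutputBuffer::create("out.bin", 16);
+///   if (BufOrErr) {
+///     memcpy((*BufOrErr)->getBufferStart(), "hello", 5);
+///     (*BufOrErr)->commit();
+///   }
+/// \endcode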
+class FileOutputBuffer {
+public:
+
+ enum {
+ F_executable = 1 ///< Set the 'x' bit on the resulting file.
+ };
+
+ /// Factory method to create an OutputBuffer object which manages a read/write
+ /// buffer of the specified size. When committed, the buffer will be written
+ /// to the file at the specified path.
+ static ErrorOr<std::unique_ptr<FileOutputBuffer>>
+ create(StringRef FilePath, size_t Size, unsigned Flags = 0);
+
+ /// Returns a pointer to the start of the buffer.
+ uint8_t *getBufferStart() {
+ return (uint8_t*)Region->data();
+ }
+
+ /// Returns a pointer to the end of the buffer.
+ uint8_t *getBufferEnd() {
+ return (uint8_t*)Region->data() + Region->size();
+ }
+
+ /// Returns size of the buffer.
+ size_t getBufferSize() const {
+ return Region->size();
+ }
+
+ /// Returns path where file will show up if buffer is committed.
+ StringRef getPath() const {
+ return FinalPath;
+ }
+
+ /// Flushes the content of the buffer to its file and deallocates the
+ /// buffer. If commit() is not called before this object's destructor
+ /// is called, the file is deleted in the destructor.
+ std::error_code commit();
+
+ /// If this object was previously committed, the destructor just deletes
+ /// this object. If this object was not committed, the destructor
+ /// deallocates the buffer and the target file is never written.
+ ~FileOutputBuffer();
+
+private:
+ FileOutputBuffer(const FileOutputBuffer &) = delete;
+ FileOutputBuffer &operator=(const FileOutputBuffer &) = delete;
+
+ FileOutputBuffer(std::unique_ptr<llvm::sys::fs::mapped_file_region> R,
+ StringRef Path, StringRef TempPath);
+
+ std::unique_ptr<llvm::sys::fs::mapped_file_region> Region;
+ SmallString<128> FinalPath;
+ SmallString<128> TempPath;
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FileSystem.h b/contrib/llvm/include/llvm/Support/FileSystem.h
new file mode 100644
index 0000000..4733ddb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileSystem.h
@@ -0,0 +1,903 @@
+//===- llvm/Support/FileSystem.h - File System OS Concept -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::fs namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+// All functions return an error_code; their actual result is delivered via
+// the last out argument. The out argument is defined if and only if
+// errc::success is returned. A function may return any error code in the
+// generic or system category. However, they shall be equivalent to any error
+// conditions listed in each function's respective documentation if the
+// condition applies. [ note:
+// this does not guarantee that error_code will be in the set of explicitly
+// listed codes, but it does guarantee that if any of the explicitly listed
+// errors occur, the correct error_code will be used ]. All functions may
+// return errc::not_enough_memory if there is not enough memory to complete the
+// operation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILESYSTEM_H
+#define LLVM_SUPPORT_FILESYSTEM_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TimeValue.h"
+#include <ctime>
+#include <iterator>
+#include <stack>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <vector>
+
+#ifdef HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif
+
+namespace llvm {
+namespace sys {
+namespace fs {
+
+/// An enumeration for the file system's view of the type.
+enum class file_type {
+ status_error,
+ file_not_found,
+ regular_file,
+ directory_file,
+ symlink_file,
+ block_file,
+ character_file,
+ fifo_file,
+ socket_file,
+ type_unknown
+};
+
+/// space_info - Disk space information for a volume, in bytes.
+struct space_info {
+ uint64_t capacity;
+ uint64_t free;
+ uint64_t available;
+};
+
+enum perms {
+ no_perms = 0,
+ owner_read = 0400,
+ owner_write = 0200,
+ owner_exe = 0100,
+ owner_all = owner_read | owner_write | owner_exe,
+ group_read = 040,
+ group_write = 020,
+ group_exe = 010,
+ group_all = group_read | group_write | group_exe,
+ others_read = 04,
+ others_write = 02,
+ others_exe = 01,
+ others_all = others_read | others_write | others_exe,
+ all_read = owner_read | group_read | others_read,
+ all_write = owner_write | group_write | others_write,
+ all_exe = owner_exe | group_exe | others_exe,
+ all_all = owner_all | group_all | others_all,
+ set_uid_on_exe = 04000,
+ set_gid_on_exe = 02000,
+ sticky_bit = 01000,
+ perms_not_known = 0xFFFF
+};
+
+// Helper functions so that you can use & and | to manipulate perms bits:
+inline perms operator|(perms l, perms r) {
+ return static_cast<perms>(static_cast<unsigned short>(l) |
+ static_cast<unsigned short>(r));
+}
+inline perms operator&(perms l, perms r) {
+ return static_cast<perms>(static_cast<unsigned short>(l) &
+ static_cast<unsigned short>(r));
+}
+inline perms &operator|=(perms &l, perms r) {
+ l = l | r;
+ return l;
+}
+inline perms &operator&=(perms &l, perms r) {
+ l = l & r;
+ return l;
+}
+inline perms operator~(perms x) {
+ return static_cast<perms>(~static_cast<unsigned short>(x));
+}
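+
+// For example (an illustrative combination):
+//   perms P = all_read | owner_write; // 0644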
+
+class UniqueID {
+ uint64_t Device;
+ uint64_t File;
+
+public:
+ UniqueID() = default;
+ UniqueID(uint64_t Device, uint64_t File) : Device(Device), File(File) {}
+ bool operator==(const UniqueID &Other) const {
+ return Device == Other.Device && File == Other.File;
+ }
+ bool operator!=(const UniqueID &Other) const { return !(*this == Other); }
+ bool operator<(const UniqueID &Other) const {
+ return std::tie(Device, File) < std::tie(Other.Device, Other.File);
+ }
+ uint64_t getDevice() const { return Device; }
+ uint64_t getFile() const { return File; }
+};
+
+/// file_status - Represents the result of a call to stat and friends. It has
+/// a platform-specific member to store the result.
+class file_status
+{
+ #if defined(LLVM_ON_UNIX)
+ dev_t fs_st_dev;
+ ino_t fs_st_ino;
+ time_t fs_st_mtime;
+ uid_t fs_st_uid;
+ gid_t fs_st_gid;
+ off_t fs_st_size;
+ #elif defined (LLVM_ON_WIN32)
+ uint32_t LastWriteTimeHigh;
+ uint32_t LastWriteTimeLow;
+ uint32_t VolumeSerialNumber;
+ uint32_t FileSizeHigh;
+ uint32_t FileSizeLow;
+ uint32_t FileIndexHigh;
+ uint32_t FileIndexLow;
+ #endif
+ friend bool equivalent(file_status A, file_status B);
+ file_type Type;
+ perms Perms;
+
+public:
+ #if defined(LLVM_ON_UNIX)
+ file_status() : fs_st_dev(0), fs_st_ino(0), fs_st_mtime(0),
+ fs_st_uid(0), fs_st_gid(0), fs_st_size(0),
+ Type(file_type::status_error), Perms(perms_not_known) {}
+
+ file_status(file_type Type) : fs_st_dev(0), fs_st_ino(0), fs_st_mtime(0),
+ fs_st_uid(0), fs_st_gid(0), fs_st_size(0), Type(Type),
+ Perms(perms_not_known) {}
+
+ file_status(file_type Type, perms Perms, dev_t Dev, ino_t Ino, time_t MTime,
+ uid_t UID, gid_t GID, off_t Size)
+ : fs_st_dev(Dev), fs_st_ino(Ino), fs_st_mtime(MTime), fs_st_uid(UID),
+ fs_st_gid(GID), fs_st_size(Size), Type(Type), Perms(Perms) {}
+ #elif defined(LLVM_ON_WIN32)
+ file_status() : LastWriteTimeHigh(0), LastWriteTimeLow(0),
+ VolumeSerialNumber(0), FileSizeHigh(0), FileSizeLow(0),
+ FileIndexHigh(0), FileIndexLow(0), Type(file_type::status_error),
+ Perms(perms_not_known) {}
+
+ file_status(file_type Type) : LastWriteTimeHigh(0), LastWriteTimeLow(0),
+ VolumeSerialNumber(0), FileSizeHigh(0), FileSizeLow(0),
+ FileIndexHigh(0), FileIndexLow(0), Type(Type),
+ Perms(perms_not_known) {}
+
+ file_status(file_type Type, uint32_t LastWriteTimeHigh,
+ uint32_t LastWriteTimeLow, uint32_t VolumeSerialNumber,
+ uint32_t FileSizeHigh, uint32_t FileSizeLow,
+ uint32_t FileIndexHigh, uint32_t FileIndexLow)
+ : LastWriteTimeHigh(LastWriteTimeHigh),
+ LastWriteTimeLow(LastWriteTimeLow),
+ VolumeSerialNumber(VolumeSerialNumber), FileSizeHigh(FileSizeHigh),
+ FileSizeLow(FileSizeLow), FileIndexHigh(FileIndexHigh),
+ FileIndexLow(FileIndexLow), Type(Type), Perms(perms_not_known) {}
+ #endif
+
+ // getters
+ file_type type() const { return Type; }
+ perms permissions() const { return Perms; }
+ TimeValue getLastModificationTime() const;
+ UniqueID getUniqueID() const;
+
+ #if defined(LLVM_ON_UNIX)
+ uint32_t getUser() const { return fs_st_uid; }
+ uint32_t getGroup() const { return fs_st_gid; }
+ uint64_t getSize() const { return fs_st_size; }
+ #elif defined (LLVM_ON_WIN32)
+ uint32_t getUser() const {
+ return 9999; // Not applicable to Windows; return a placeholder.
+ }
+ uint32_t getGroup() const {
+ return 9999; // Not applicable to Windows; return a placeholder.
+ }
+ uint64_t getSize() const {
+ return (uint64_t(FileSizeHigh) << 32) + FileSizeLow;
+ }
+ #endif
+
+ // setters
+ void type(file_type v) { Type = v; }
+ void permissions(perms p) { Perms = p; }
+};
+
+/// file_magic - An "enum class" enumeration of file types based on magic (the first
+/// N bytes of the file).
+struct file_magic {
+ enum Impl {
+ unknown = 0, ///< Unrecognized file
+ bitcode, ///< Bitcode file
+ archive, ///< ar style archive file
+ elf, ///< ELF Unknown type
+ elf_relocatable, ///< ELF Relocatable object file
+ elf_executable, ///< ELF Executable image
+ elf_shared_object, ///< ELF dynamically linked shared lib
+ elf_core, ///< ELF core image
+ macho_object, ///< Mach-O Object file
+ macho_executable, ///< Mach-O Executable
+ macho_fixed_virtual_memory_shared_lib, ///< Mach-O Shared Lib, FVM
+ macho_core, ///< Mach-O Core File
+ macho_preload_executable, ///< Mach-O Preloaded Executable
+ macho_dynamically_linked_shared_lib, ///< Mach-O dynlinked shared lib
+ macho_dynamic_linker, ///< The Mach-O dynamic linker
+ macho_bundle, ///< Mach-O Bundle file
+ macho_dynamically_linked_shared_lib_stub, ///< Mach-O Shared lib stub
+ macho_dsym_companion, ///< Mach-O dSYM companion file
+ macho_kext_bundle, ///< Mach-O kext bundle file
+ macho_universal_binary, ///< Mach-O universal binary
+ coff_object, ///< COFF object file
+ coff_import_library, ///< COFF import library
+ pecoff_executable, ///< PECOFF executable file
+ windows_resource ///< Windows compiled resource file (.rc)
+ };
+
+ bool is_object() const {
+ return V != unknown;
+ }
+
+ file_magic() : V(unknown) {}
+ file_magic(Impl V) : V(V) {}
+ operator Impl() const { return V; }
+
+private:
+ Impl V;
+};
+
+/// @name Physical Operators
+/// @{
+
+/// @brief Make \a path an absolute path.
+///
+/// Makes \a path absolute using the \a current_directory if it is not already.
+/// An empty \a path will result in the \a current_directory.
+///
+/// /absolute/path => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+/// @returns errc::success if \a path has been made absolute, otherwise a
+/// platform-specific error_code.
+std::error_code make_absolute(const Twine &current_directory,
+ SmallVectorImpl<char> &path);
+
+/// @brief Make \a path an absolute path.
+///
+/// Makes \a path absolute using the current directory if it is not already. An
+/// empty \a path will result in the current directory.
+///
+/// /absolute/path => /absolute/path
+/// relative/../path => <current-directory>/relative/../path
+///
+/// @param path A path that is modified to be an absolute path.
+/// @returns errc::success if \a path has been made absolute, otherwise a
+/// platform-specific error_code.
+std::error_code make_absolute(SmallVectorImpl<char> &path);
+
+/// @brief Create all the non-existent directories in path.
+///
+/// @param path Directories to create.
+/// @returns errc::success if is_directory(path), otherwise a
+/// platform-specific error_code. If IgnoreExisting is false, also returns an
+/// error if the directory already existed.
+std::error_code create_directories(const Twine &path,
+ bool IgnoreExisting = true,
+ perms Perms = owner_all | group_all);
+
+/// @brief Create the directory in path.
+///
+/// @param path Directory to create.
+/// @returns errc::success if is_directory(path), otherwise a
+/// platform-specific error_code. If IgnoreExisting is false, also returns an
+/// error if the directory already existed.
+std::error_code create_directory(const Twine &path, bool IgnoreExisting = true,
+ perms Perms = owner_all | group_all);
+
+/// @brief Create a link from \a from to \a to.
+///
+/// The link may be a soft or a hard link, depending on the platform. The caller
+/// may not assume which one. Currently on Windows it creates a hard link since
+/// soft links require extra privileges. On Unix, it creates a soft link since
+/// hard links don't work on SMB file systems.
+///
+/// @param to The path to link to.
+/// @param from The path to link from. This is created.
+/// @returns errc::success if the link was created, otherwise a
+/// platform-specific error_code.
+std::error_code create_link(const Twine &to, const Twine &from);
+
+/// @brief Get the current path.
+///
+/// @param result Holds the current path on return.
+/// @returns errc::success if the current path has been stored in result,
+/// otherwise a platform-specific error_code.
+std::error_code current_path(SmallVectorImpl<char> &result);
+
+/// @brief Remove path. Equivalent to POSIX remove().
+///
+/// @param path Input path.
+/// @returns errc::success if path has been removed or didn't exist, otherwise a
+/// platform-specific error code. If IgnoreNonExisting is false, also
+/// returns error if the file didn't exist.
+std::error_code remove(const Twine &path, bool IgnoreNonExisting = true);
+
+/// @brief Rename \a from to \a to. Files are renamed as if by POSIX rename().
+///
+/// @param from The path to rename from.
+/// @param to The path to rename to. This is created.
+std::error_code rename(const Twine &from, const Twine &to);
+
+/// @brief Copy the contents of \a From to \a To.
+///
+/// @param From The path to copy from.
+/// @param To The path to copy to. This is created.
+std::error_code copy_file(const Twine &From, const Twine &To);
+
+/// @brief Resize \a FD to \a Size. The file is resized as if by POSIX
+/// ftruncate().
+///
+/// @param FD Input file descriptor.
+/// @param Size Size to resize to.
+/// @returns errc::success if the file has been resized to \a Size, otherwise a
+/// platform-specific error_code.
+std::error_code resize_file(int FD, uint64_t Size);
+
+/// @}
+/// @name Physical Observers
+/// @{
+
+/// @brief Does file exist?
+///
+/// @param status A file_status previously returned from stat.
+/// @returns True if the file represented by status exists, false if it does
+/// not.
+bool exists(file_status status);
+
+enum class AccessMode { Exist, Write, Execute };
+
+/// @brief Can the file be accessed?
+///
+/// @param Path Input path.
+/// @returns errc::success if the path can be accessed, otherwise a
+/// platform-specific error_code.
+std::error_code access(const Twine &Path, AccessMode Mode);
+
+/// @brief Does file exist?
+///
+/// @param Path Input path.
+/// @returns True if it exists, false otherwise.
+inline bool exists(const Twine &Path) {
+ return !access(Path, AccessMode::Exist);
+}
+
+/// @brief Can we execute this file?
+///
+/// @param Path Input path.
+/// @returns True if we can execute it, false otherwise.
+bool can_execute(const Twine &Path);
+
+/// @brief Can we write this file?
+///
+/// @param Path Input path.
+/// @returns True if we can write to it, false otherwise.
+inline bool can_write(const Twine &Path) {
+ return !access(Path, AccessMode::Write);
+}
+
+/// @brief Do two file_status objects represent the same file system entity?
+///
+/// @param A Input file_status.
+/// @param B Input file_status.
+///
+/// assert(status_known(A) || status_known(B));
+///
+/// @returns True if A and B both represent the same file system entity, false
+/// otherwise.
+bool equivalent(file_status A, file_status B);
+
+/// @brief Do paths represent the same thing?
+///
+/// assert(status_known(A) || status_known(B));
+///
+/// @param A Input path A.
+/// @param B Input path B.
+/// @param result Set to true if stat(A) and stat(B) have the same device and
+/// inode (or equivalent).
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code equivalent(const Twine &A, const Twine &B, bool &result);
+
+/// @brief Simpler version of equivalent for clients that don't need to
+/// differentiate between an error and false.
+inline bool equivalent(const Twine &A, const Twine &B) {
+ bool result;
+ return !equivalent(A, B, result) && result;
+}
+
+/// @brief Does status represent a directory?
+///
+/// @param status A file_status previously returned from status.
+/// @returns status.type() == file_type::directory_file.
+bool is_directory(file_status status);
+
+/// @brief Is path a directory?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a directory, false if it is not.
+/// Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code is_directory(const Twine &path, bool &result);
+
+/// @brief Simpler version of is_directory for clients that don't need to
+/// differentiate between an error and false.
+inline bool is_directory(const Twine &Path) {
+ bool Result;
+ return !is_directory(Path, Result) && Result;
+}
+
+/// @brief Does status represent a regular file?
+///
+/// @param status A file_status previously returned from status.
+/// @returns status_known(status) && status.type() == file_type::regular_file.
+bool is_regular_file(file_status status);
+
+/// @brief Is path a regular file?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path is a regular file, false if it is not.
+/// Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code is_regular_file(const Twine &path, bool &result);
+
+/// @brief Simpler version of is_regular_file for clients that don't need to
+/// differentiate between an error and false.
+inline bool is_regular_file(const Twine &Path) {
+ bool Result;
+ if (is_regular_file(Path, Result))
+ return false;
+ return Result;
+}
+
+/// @brief Does this status represent something that exists but is not a
+/// directory, regular file, or symlink?
+///
+/// @param status A file_status previously returned from status.
+/// @returns exists(status) && !is_regular_file(status) && !is_directory(status)
+bool is_other(file_status status);
+
+/// @brief Is path something that exists but is not a directory,
+/// regular file, or symlink?
+///
+/// @param path Input path.
+/// @param result Set to true if \a path exists, but is not a directory, regular
+/// file, or a symlink, false if it does not. Undefined otherwise.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code is_other(const Twine &path, bool &result);
+
+/// @brief Get file status as if by POSIX stat().
+///
+/// @param path Input path.
+/// @param result Set to the file status.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code status(const Twine &path, file_status &result);
+
+/// @brief A version for when a file descriptor is already available.
+std::error_code status(int FD, file_status &Result);
+
+/// @brief Get file size.
+///
+/// @param Path Input path.
+/// @param Result Set to the size of the file in \a Path.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+inline std::error_code file_size(const Twine &Path, uint64_t &Result) {
+ file_status Status;
+ std::error_code EC = status(Path, Status);
+ if (EC)
+ return EC;
+ Result = Status.getSize();
+ return std::error_code();
+}
+
+/// @brief Set the file modification and access time.
+///
+/// @returns errc::success if the file times were successfully set, otherwise a
+/// platform-specific error_code or errc::function_not_supported on
+/// platforms where the functionality isn't available.
+std::error_code setLastModificationAndAccessTime(int FD, TimeValue Time);
+
+/// @brief Is status available?
+///
+/// @param s Input file status.
+/// @returns True if s.type() != file_type::status_error.
+bool status_known(file_status s);
+
+/// @brief Is status available?
+///
+/// @param path Input path.
+/// @param result Set to true if the status of \a path is known.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code status_known(const Twine &path, bool &result);
+
+/// @brief Create a uniquely named file.
+///
+/// Generates a unique path suitable for a temporary file and then opens it as a
+/// file. The name is based on \a model with '%' replaced by a random char in
+/// [0-9a-f]. If \a model is not an absolute path, the temporary file will be
+/// created in the current directory.
+///
+/// Example: clang-%%-%%-%%-%%-%%.s => clang-a0-b1-c2-d3-e4.s
+///
+/// This is an atomic operation. Either the file is created and opened, or the
+/// file system is left untouched.
+///
+/// The intended use is for files that are to be kept, possibly after
+/// renaming them. For example, when running 'clang -c foo.c', the file can
+/// be first created as foo-abc123.o and then renamed.
+///
+/// @param Model Name to base unique path off of.
+/// @param ResultFD Set to the opened file's file descriptor.
+/// @param ResultPath Set to the opened file's absolute path.
+/// @returns errc::success if Result{FD,Path} have been successfully set,
+/// otherwise a platform-specific error_code.
+std::error_code createUniqueFile(const Twine &Model, int &ResultFD,
+ SmallVectorImpl<char> &ResultPath,
+ unsigned Mode = all_read | all_write);
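+
+// A hedged usage sketch ("foo-%%%%%%.o" is an illustrative model string):
+//   int FD;
+//   SmallString<128> ResultPath;
+//   if (std::error_code EC = createUniqueFile("foo-%%%%%%.o", FD, ResultPath))
+//     ... handle EC ...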
+
+/// @brief Simpler version for clients that don't want an open file.
+std::error_code createUniqueFile(const Twine &Model,
+ SmallVectorImpl<char> &ResultPath);
+
+/// @brief Create a file in the system temporary directory.
+///
+/// The filename is of the form prefix-random_chars.suffix. Since the directory
+/// is not known to the caller, Prefix and Suffix cannot have path separators.
+/// The files are created with mode 0600.
+///
+/// This should be used for things like a temporary .s that is removed after
+/// running the assembler.
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+ int &ResultFD,
+ SmallVectorImpl<char> &ResultPath);
+
+/// @brief Simpler version for clients that don't want an open file.
+std::error_code createTemporaryFile(const Twine &Prefix, StringRef Suffix,
+ SmallVectorImpl<char> &ResultPath);
+
+std::error_code createUniqueDirectory(const Twine &Prefix,
+ SmallVectorImpl<char> &ResultPath);
+
+enum OpenFlags : unsigned {
+ F_None = 0,
+
+ /// F_Excl - When opening a file, this flag makes raw_fd_ostream
+ /// report an error if the file already exists.
+ F_Excl = 1,
+
+ /// F_Append - When opening a file, if it already exists append to the
+ /// existing file instead of returning an error. This may not be specified
+ /// with F_Excl.
+ F_Append = 2,
+
+ /// The file should be opened in text mode on platforms that make this
+ /// distinction.
+ F_Text = 4,
+
+ /// Open the file for read and write.
+ F_RW = 8
+};
+
+inline OpenFlags operator|(OpenFlags A, OpenFlags B) {
+ return OpenFlags(unsigned(A) | unsigned(B));
+}
+
+inline OpenFlags &operator|=(OpenFlags &A, OpenFlags B) {
+ A = A | B;
+ return A;
+}
+
+std::error_code openFileForWrite(const Twine &Name, int &ResultFD,
+ OpenFlags Flags, unsigned Mode = 0666);
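+
+// For example (a sketch; "out.o" is a placeholder name):
+//   int FD;
+//   if (std::error_code EC = openFileForWrite("out.o", FD, F_None))
+//     ... handle EC ...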
+
+std::error_code openFileForRead(const Twine &Name, int &ResultFD);
+
+/// @brief Identify the type of a binary file based on its magic bytes.
+file_magic identify_magic(StringRef magic);
+
+/// @brief Get and identify \a path's type based on its content.
+///
+/// @param path Input path.
+/// @param result Set to the type of file, or file_magic::unknown.
+/// @returns errc::success if result has been successfully set, otherwise a
+/// platform-specific error_code.
+std::error_code identify_magic(const Twine &path, file_magic &result);
+
+std::error_code getUniqueID(const Twine Path, UniqueID &Result);
+
+/// This class represents a memory mapped file. It is based on
+/// boost::iostreams::mapped_file.
+class mapped_file_region {
+ mapped_file_region() = delete;
+ mapped_file_region(mapped_file_region&) = delete;
+ mapped_file_region &operator =(mapped_file_region&) = delete;
+
+public:
+ enum mapmode {
+ readonly, ///< May only access map via const_data as read only.
+ readwrite, ///< May access map via data and modify it. Written to path.
+ priv ///< May modify via data, but changes are lost on destruction.
+ };
+
+private:
+ /// Platform-specific mapping state.
+ uint64_t Size;
+ void *Mapping;
+
+ std::error_code init(int FD, uint64_t Offset, mapmode Mode);
+
+public:
+ /// \param fd An open file descriptor to map. It must have been opened in
+ /// the correct mode.
+ mapped_file_region(int fd, mapmode mode, uint64_t length, uint64_t offset,
+ std::error_code &ec);
+
+ ~mapped_file_region();
+
+ uint64_t size() const;
+ char *data() const;
+
+ /// Get a const view of the data. Modifying this memory has undefined
+ /// behavior.
+ const char *const_data() const;
+
+ /// \returns The minimum alignment that an offset must have.
+ static int alignment();
+};
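+
+// A minimal sketch (assumes FD is a descriptor opened for read/write and at
+// least Size bytes are available; Src is illustrative):
+//   std::error_code EC;
+//   mapped_file_region M(FD, mapped_file_region::readwrite, Size, 0, EC);
+//   if (!EC)
+//     memcpy(M.data(), Src, Size);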
+
+/// Return the path to the main executable, given the value of argv[0] from
+/// program startup and the address of main itself. In extremis, this function
+/// may fail and return an empty path.
+std::string getMainExecutable(const char *argv0, void *MainExecAddr);
+
+/// @}
+/// @name Iterators
+/// @{
+
+/// directory_entry - A single entry in a directory. Caches the status either
+/// from the result of the iteration syscall, or the first time status is
+/// called.
+class directory_entry {
+ std::string Path;
+ mutable file_status Status;
+
+public:
+ explicit directory_entry(const Twine &path, file_status st = file_status())
+ : Path(path.str())
+ , Status(st) {}
+
+ directory_entry() {}
+
+ void assign(const Twine &path, file_status st = file_status()) {
+ Path = path.str();
+ Status = st;
+ }
+
+ void replace_filename(const Twine &filename, file_status st = file_status());
+
+ const std::string &path() const { return Path; }
+ std::error_code status(file_status &result) const;
+
+ bool operator==(const directory_entry& rhs) const { return Path == rhs.Path; }
+ bool operator!=(const directory_entry& rhs) const { return !(*this == rhs); }
+ bool operator< (const directory_entry& rhs) const;
+ bool operator<=(const directory_entry& rhs) const;
+ bool operator> (const directory_entry& rhs) const;
+ bool operator>=(const directory_entry& rhs) const;
+};
+
+namespace detail {
+ struct DirIterState;
+
+ std::error_code directory_iterator_construct(DirIterState &, StringRef);
+ std::error_code directory_iterator_increment(DirIterState &);
+ std::error_code directory_iterator_destruct(DirIterState &);
+
+ /// DirIterState - Keeps state for the directory_iterator. It is reference
+ /// counted in order to preserve InputIterator semantics on copy.
+ struct DirIterState : public RefCountedBase<DirIterState> {
+ DirIterState()
+ : IterationHandle(0) {}
+
+ ~DirIterState() {
+ directory_iterator_destruct(*this);
+ }
+
+ intptr_t IterationHandle;
+ directory_entry CurrentEntry;
+ };
+}
+
+/// directory_iterator - Iterates through the entries in path. There is no
+/// operator++ because we need an error_code. If it's really needed we can make
+/// it call report_fatal_error on error.
+class directory_iterator {
+ IntrusiveRefCntPtr<detail::DirIterState> State;
+
+public:
+ explicit directory_iterator(const Twine &path, std::error_code &ec) {
+ State = new detail::DirIterState;
+ SmallString<128> path_storage;
+ ec = detail::directory_iterator_construct(*State,
+ path.toStringRef(path_storage));
+ }
+
+ explicit directory_iterator(const directory_entry &de, std::error_code &ec) {
+ State = new detail::DirIterState;
+ ec = detail::directory_iterator_construct(*State, de.path());
+ }
+
+ /// Construct end iterator.
+ directory_iterator() : State(nullptr) {}
+
+ // No operator++ because we need error_code.
+ directory_iterator &increment(std::error_code &ec) {
+ ec = directory_iterator_increment(*State);
+ return *this;
+ }
+
+ const directory_entry &operator*() const { return State->CurrentEntry; }
+ const directory_entry *operator->() const { return &State->CurrentEntry; }
+
+ bool operator==(const directory_iterator &RHS) const {
+ if (State == RHS.State)
+ return true;
+ if (!RHS.State)
+ return State->CurrentEntry == directory_entry();
+ if (!State)
+ return RHS.State->CurrentEntry == directory_entry();
+ return State->CurrentEntry == RHS.State->CurrentEntry;
+ }
+
+ bool operator!=(const directory_iterator &RHS) const {
+ return !(*this == RHS);
+ }
+ // Other members as required by
+ // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
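+
+// A minimal iteration sketch ("dir" is a placeholder path):
+//   std::error_code EC;
+//   for (directory_iterator I("dir", EC), E; I != E && !EC; I.increment(EC))
+//     ... use I->path() ...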
+
+namespace detail {
+ /// RecDirIterState - Keeps state for the recursive_directory_iterator. It is
+ /// reference counted in order to preserve InputIterator semantics on copy.
+ struct RecDirIterState : public RefCountedBase<RecDirIterState> {
+ RecDirIterState()
+ : Level(0)
+ , HasNoPushRequest(false) {}
+
+ std::stack<directory_iterator, std::vector<directory_iterator> > Stack;
+ uint16_t Level;
+ bool HasNoPushRequest;
+ };
+}
+
+/// recursive_directory_iterator - Same as directory_iterator except that it
+/// recurses down into child directories.
+class recursive_directory_iterator {
+ IntrusiveRefCntPtr<detail::RecDirIterState> State;
+
+public:
+ recursive_directory_iterator() {}
+ explicit recursive_directory_iterator(const Twine &path, std::error_code &ec)
+ : State(new detail::RecDirIterState) {
+ State->Stack.push(directory_iterator(path, ec));
+ if (State->Stack.top() == directory_iterator())
+ State.reset();
+ }
+ // No operator++ because we need error_code.
+ recursive_directory_iterator &increment(std::error_code &ec) {
+ const directory_iterator end_itr;
+
+ if (State->HasNoPushRequest)
+ State->HasNoPushRequest = false;
+ else {
+ file_status st;
+ if ((ec = State->Stack.top()->status(st))) return *this;
+ if (is_directory(st)) {
+ State->Stack.push(directory_iterator(*State->Stack.top(), ec));
+ if (ec) return *this;
+ if (State->Stack.top() != end_itr) {
+ ++State->Level;
+ return *this;
+ }
+ State->Stack.pop();
+ }
+ }
+
+ while (!State->Stack.empty()
+ && State->Stack.top().increment(ec) == end_itr) {
+ State->Stack.pop();
+ --State->Level;
+ }
+
+ // Check if we are done. If so, create an end iterator.
+ if (State->Stack.empty())
+ State.reset();
+
+ return *this;
+ }
+
+ const directory_entry &operator*() const { return *State->Stack.top(); }
+ const directory_entry *operator->() const { return &*State->Stack.top(); }
+
+ // observers
+ /// Gets the current level. Starting path is at level 0.
+ int level() const { return State->Level; }
+
+ /// Returns true if no_push has been called for this directory_entry.
+ bool no_push_request() const { return State->HasNoPushRequest; }
+
+ // modifiers
+ /// Goes up one level if Level > 0.
+ void pop() {
+ assert(State && "Cannot pop an end iterator!");
+ assert(State->Level > 0 && "Cannot pop an iterator with level < 1");
+
+ const directory_iterator end_itr;
+ std::error_code ec;
+ do {
+ if (ec)
+ report_fatal_error("Error incrementing directory iterator.");
+ State->Stack.pop();
+ --State->Level;
+ } while (!State->Stack.empty()
+ && State->Stack.top().increment(ec) == end_itr);
+
+ // Check if we are done. If so, create an end iterator.
+ if (State->Stack.empty())
+ State.reset();
+ }
+
+ /// Does not go down into the current directory_entry.
+ void no_push() { State->HasNoPushRequest = true; }
+
+ bool operator==(const recursive_directory_iterator &RHS) const {
+ return State == RHS.State;
+ }
+
+ bool operator!=(const recursive_directory_iterator &RHS) const {
+ return !(*this == RHS);
+ }
+ // Other members as required by
+ // C++ Std, 24.1.1 Input iterators [input.iterators]
+};
+
+/// @}
+
+} // end namespace fs
+} // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FileUtilities.h b/contrib/llvm/include/llvm/Support/FileUtilities.h
new file mode 100644
index 0000000..2ee2c60
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FileUtilities.h
@@ -0,0 +1,78 @@
+//===- llvm/Support/FileUtilities.h - File System Utilities -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of utility functions for working with files,
+// such as comparing them with tolerance and removing them automatically.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FILEUTILITIES_H
+#define LLVM_SUPPORT_FILEUTILITIES_H
+
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+
+namespace llvm {
+
+ /// DiffFilesWithTolerance - Compare the two files specified, returning 0 if
+ /// the files match, 1 if they are different, and 2 if there is a file error.
+ /// This function allows you to specify an absolute and relative FP error that
+ /// is allowed to exist. If you specify a string to fill in for the error
+ /// option, it will set the string to an error message if an error occurs, or
+ /// if the files are different.
+ ///
+ int DiffFilesWithTolerance(StringRef FileA,
+ StringRef FileB,
+ double AbsTol, double RelTol,
+ std::string *Error = nullptr);
+
+
+ /// FileRemover - This class is a simple object meant to be stack allocated.
+ /// When it goes out of scope (normally or via an exception), it removes the
+ /// specified filename (if deleteIt is true).
+ ///
+ class FileRemover {
+ SmallString<128> Filename;
+ bool DeleteIt;
+ public:
+ FileRemover() : DeleteIt(false) {}
+
+ explicit FileRemover(const Twine& filename, bool deleteIt = true)
+ : DeleteIt(deleteIt) {
+ filename.toVector(Filename);
+ }
+
+ ~FileRemover() {
+ if (DeleteIt) {
+ // Ignore problems deleting the file.
+ sys::fs::remove(Filename);
+ }
+ }
+
+ /// setFile - Give ownership of the file to the FileRemover so it will
+ /// be removed when the object is destroyed. If the FileRemover already
+ /// had ownership of a file, remove it first.
+ void setFile(const Twine& filename, bool deleteIt = true) {
+ if (DeleteIt) {
+ // Ignore problems deleting the file.
+ sys::fs::remove(Filename);
+ }
+
+ Filename.clear();
+ filename.toVector(Filename);
+ DeleteIt = deleteIt;
+ }
+
+ /// releaseFile - Take ownership of the file away from the FileRemover so it
+ /// will not be removed when the object is destroyed.
+ void releaseFile() { DeleteIt = false; }
+ };
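+
+  // A typical use (a sketch; "tmp.o" is illustrative):
+  //   FileRemover Cleanup("tmp.o");
+  //   ... work with the file; on success, keep it:
+  //   Cleanup.releaseFile();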
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Format.h b/contrib/llvm/include/llvm/Support/Format.h
new file mode 100644
index 0000000..f0b437a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Format.h
@@ -0,0 +1,195 @@
+//===- Format.h - Efficient printf-style formatting for streams -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the format() function, which can be used with other
+// LLVM subsystems to provide printf-style formatting. This gives all the power
+// and risk of printf. This can be used like this (with raw_ostreams as an
+// example):
+//
+// OS << "mynumber: " << format("%4.5f", 1234.412) << '\n';
+//
+// Or if you prefer:
+//
+// OS << format("mynumber: %4.5f\n", 1234.412);
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMAT_H
+#define LLVM_SUPPORT_FORMAT_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstdio>
+#include <tuple>
+
+namespace llvm {
+
+/// This is a helper class used for handling formatted output. It is the
+/// abstract base class of a templated derived class.
+class format_object_base {
+protected:
+ const char *Fmt;
+ ~format_object_base() = default; // Disallow polymorphic deletion.
+ format_object_base(const format_object_base &) = default;
+ virtual void home(); // Out of line virtual method.
+
+ /// Call snprintf() for this object, on the given buffer and size.
+ virtual int snprint(char *Buffer, unsigned BufferSize) const = 0;
+
+public:
+ format_object_base(const char *fmt) : Fmt(fmt) {}
+
+ /// Format the object into the specified buffer. On success, this returns
+ /// the length of the formatted string. If the buffer is too small, this
+ /// returns a length to retry with, which will be larger than BufferSize.
+ unsigned print(char *Buffer, unsigned BufferSize) const {
+ assert(BufferSize && "Invalid buffer size!");
+
+ // Print the string, leaving room for the terminating null.
+ int N = snprint(Buffer, BufferSize);
+
+ // VC++ and old GlibC return negative on overflow; just double the size.
+ if (N < 0)
+ return BufferSize * 2;
+
+ // Other implementations yield number of bytes needed, not including the
+ // final '\0'.
+ if (unsigned(N) >= BufferSize)
+ return N + 1;
+
+ // Otherwise N is the length of output (not including the final '\0').
+ return N;
+ }
+};
+
+/// These are templated helper classes used by the format function that
+/// capture the object to be formatted and the format string. When actually
+/// printed, this synthesizes the string into a temporary buffer provided and
+/// returns whether or not it is big enough.
+
+template <typename... Ts>
+class format_object final : public format_object_base {
+ std::tuple<Ts...> Vals;
+
+ template <std::size_t... Is>
+ int snprint_tuple(char *Buffer, unsigned BufferSize,
+ index_sequence<Is...>) const {
+#ifdef _MSC_VER
+ return _snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
+#else
+ return snprintf(Buffer, BufferSize, Fmt, std::get<Is>(Vals)...);
+#endif
+ }
+
+public:
+ format_object(const char *fmt, const Ts &... vals)
+ : format_object_base(fmt), Vals(vals...) {}
+
+ int snprint(char *Buffer, unsigned BufferSize) const override {
+ return snprint_tuple(Buffer, BufferSize, index_sequence_for<Ts...>());
+ }
+};
+
+/// These are helper functions used to produce formatted output. They use
+/// template type deduction to construct the appropriate instance of the
+/// format_object class to simplify their construction.
+///
+/// This is typically used like:
+/// \code
+/// OS << format("%0.4f", myfloat) << '\n';
+/// \endcode
+
+template <typename... Ts>
+inline format_object<Ts...> format(const char *Fmt, const Ts &... Vals) {
+ return format_object<Ts...>(Fmt, Vals...);
+}
+
+/// This is a helper class used for left_justify() and right_justify().
+class FormattedString {
+ StringRef Str;
+ unsigned Width;
+ bool RightJustify;
+ friend class raw_ostream;
+
+public:
+ FormattedString(StringRef S, unsigned W, bool R)
+ : Str(S), Width(W), RightJustify(R) { }
+};
+
+/// left_justify - append spaces after string so total output is
+/// \p Width characters. If \p Str is larger than \p Width, the full string
+/// is written with no padding.
+inline FormattedString left_justify(StringRef Str, unsigned Width) {
+ return FormattedString(Str, Width, false);
+}
+
+/// right_justify - add spaces before string so total output is
+/// \p Width characters. If \p Str is larger than \p Width, the full string
+/// is written with no padding.
+inline FormattedString right_justify(StringRef Str, unsigned Width) {
+ return FormattedString(Str, Width, true);
+}
+
+/// This is a helper class used for format_hex() and format_decimal().
+class FormattedNumber {
+ uint64_t HexValue;
+ int64_t DecValue;
+ unsigned Width;
+ bool Hex;
+ bool Upper;
+ bool HexPrefix;
+ friend class raw_ostream;
+
+public:
+ FormattedNumber(uint64_t HV, int64_t DV, unsigned W, bool H, bool U,
+ bool Prefix)
+ : HexValue(HV), DecValue(DV), Width(W), Hex(H), Upper(U),
+ HexPrefix(Prefix) {}
+};
+
+/// format_hex - Output \p N as a fixed width hexadecimal. If the number will
+/// not fit in the width, the full number is still printed. Examples:
+/// OS << format_hex(255, 4) => 0xff
+/// OS << format_hex(255, 4, true) => 0xFF
+/// OS << format_hex(255, 6) => 0x00ff
+/// OS << format_hex(255, 2) => 0xff
+inline FormattedNumber format_hex(uint64_t N, unsigned Width,
+ bool Upper = false) {
+ assert(Width <= 18 && "hex width must be <= 18");
+ return FormattedNumber(N, 0, Width, true, Upper, true);
+}
+
+/// format_hex_no_prefix - Output \p N as a fixed width hexadecimal. Does not
+/// prepend '0x' to the output string. If the number will not fit in the
+/// width, the full number is still printed. Examples:
+/// OS << format_hex_no_prefix(255, 4) => ff
+/// OS << format_hex_no_prefix(255, 4, true) => FF
+/// OS << format_hex_no_prefix(255, 6) => 00ff
+/// OS << format_hex_no_prefix(255, 2) => ff
+inline FormattedNumber format_hex_no_prefix(uint64_t N, unsigned Width,
+ bool Upper = false) {
+ assert(Width <= 18 && "hex width must be <= 18");
+ return FormattedNumber(N, 0, Width, true, Upper, false);
+}
+
+/// format_decimal - Output \p N as a right-justified, fixed-width decimal. If
+/// the number will not fit in the width, the full number is still printed.
+/// Examples:
+/// OS << format_decimal(0, 5) => " 0"
+/// OS << format_decimal(255, 5) => " 255"
+/// OS << format_decimal(-1, 3) => " -1"
+/// OS << format_decimal(12345, 3) => "12345"
+inline FormattedNumber format_decimal(int64_t N, unsigned Width) {
+ return FormattedNumber(0, N, Width, false, false, false);
+}
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/FormattedStream.h b/contrib/llvm/include/llvm/Support/FormattedStream.h
new file mode 100644
index 0000000..4a135cd
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/FormattedStream.h
@@ -0,0 +1,162 @@
+//===-- llvm/Support/FormattedStream.h - Formatted streams ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains raw_ostream implementations for streams to do
+// things like pretty-print comments.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_FORMATTEDSTREAM_H
+#define LLVM_SUPPORT_FORMATTEDSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <utility>
+
+namespace llvm {
+
+/// formatted_raw_ostream - A raw_ostream that wraps another one and keeps track
+/// of line and column position, allowing padding out to specific column
+/// boundaries and querying the number of lines written to the stream.
+///
+class formatted_raw_ostream : public raw_ostream {
+ /// TheStream - The real stream we output to. We set it to be
+ /// unbuffered, since we're already doing our own buffering.
+ ///
+ raw_ostream *TheStream;
+
+ /// Position - The current output column and line of the data that's
+ /// been flushed and the portion of the buffer that's been
+ /// scanned. The line and column scheme is zero-based.
+ ///
+ std::pair<unsigned, unsigned> Position;
+
+ /// Scanned - This points to one past the last character in the
+ /// buffer we've scanned.
+ ///
+ const char *Scanned;
+
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ /// current_pos - Return the current position within the stream,
+ /// not counting the bytes currently in the buffer.
+ uint64_t current_pos() const override {
+ // Our current position in the stream is all the contents which have been
+ // written to the underlying stream (*not* the current position of the
+ // underlying stream).
+ return TheStream->tell();
+ }
+
+ /// ComputePosition - Examine the given output buffer and figure out the new
+ /// position after output.
+ ///
+ void ComputePosition(const char *Ptr, size_t size);
+
+ void setStream(raw_ostream &Stream) {
+ releaseStream();
+
+ TheStream = &Stream;
+
+ // This formatted_raw_ostream inherits from raw_ostream, so it'll do its
+ // own buffering, and it doesn't need or want TheStream to do another
+ // layer of buffering underneath. Resize the buffer to what TheStream
+ // had been using, and tell TheStream not to do its own buffering.
+ if (size_t BufferSize = TheStream->GetBufferSize())
+ SetBufferSize(BufferSize);
+ else
+ SetUnbuffered();
+ TheStream->SetUnbuffered();
+
+ Scanned = nullptr;
+ }
+
+public:
+ /// formatted_raw_ostream - Wrap the given stream, tracking the line and
+ /// column position of everything written to it.
+ ///
+ /// As a side effect, the given Stream is set to be Unbuffered.
+ /// This is because formatted_raw_ostream does its own buffering,
+ /// so it doesn't want another layer of buffering to be happening
+ /// underneath it.
+ ///
+ formatted_raw_ostream(raw_ostream &Stream)
+ : TheStream(nullptr), Position(0, 0) {
+ setStream(Stream);
+ }
+ explicit formatted_raw_ostream() : TheStream(nullptr), Position(0, 0) {
+ Scanned = nullptr;
+ }
+
+ ~formatted_raw_ostream() override {
+ flush();
+ releaseStream();
+ }
+
+ /// PadToColumn - Align the output to some column number. If the current
+ /// column is already equal to or more than NewCol, PadToColumn inserts one
+ /// space.
+ ///
+ /// \param NewCol - The column to move to.
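+ ///
+ /// For example (a sketch):
+ /// \code
+ ///   OS << "label:"; OS.PadToColumn(20); OS << "value\n";
+ /// \endcode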
+ formatted_raw_ostream &PadToColumn(unsigned NewCol);
+
+ /// getColumn - Return the current column number.
+ unsigned getColumn() { return Position.first; }
+
+ /// getLine - Return the current line number.
+ unsigned getLine() { return Position.second; }
+
+ raw_ostream &resetColor() override {
+ TheStream->resetColor();
+ return *this;
+ }
+
+ raw_ostream &reverseColor() override {
+ TheStream->reverseColor();
+ return *this;
+ }
+
+ raw_ostream &changeColor(enum Colors Color, bool Bold, bool BG) override {
+ TheStream->changeColor(Color, Bold, BG);
+ return *this;
+ }
+
+ bool is_displayed() const override {
+ return TheStream->is_displayed();
+ }
+
+private:
+ void releaseStream() {
+ // Transfer the buffer settings from this raw_ostream back to the underlying
+ // stream.
+ if (!TheStream)
+ return;
+ if (size_t BufferSize = GetBufferSize())
+ TheStream->SetBufferSize(BufferSize);
+ else
+ TheStream->SetUnbuffered();
+ }
+};
+
+/// fouts() - This returns a reference to a formatted_raw_ostream for
+/// standard output. Use it like: fouts() << "foo" << "bar";
+formatted_raw_ostream &fouts();
+
+/// ferrs() - This returns a reference to a formatted_raw_ostream for
+/// standard error. Use it like: ferrs() << "foo" << "bar";
+formatted_raw_ostream &ferrs();
+
+/// fdbgs() - This returns a reference to a formatted_raw_ostream for
+/// debug output. Use it like: fdbgs() << "foo" << "bar";
+formatted_raw_ostream &fdbgs();
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/GCOV.h b/contrib/llvm/include/llvm/Support/GCOV.h
new file mode 100644
index 0000000..544434f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/GCOV.h
@@ -0,0 +1,445 @@
+//===- GCOV.h - LLVM coverage tool ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header provides the interface to read and write coverage files in the
+// 'gcov' format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GCOV_H
+#define LLVM_SUPPORT_GCOV_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+class GCOVFunction;
+class GCOVBlock;
+class FileInfo;
+
+namespace GCOV {
+enum GCOVVersion { V402, V404, V704 };
+
+/// \brief A struct for passing gcov options between functions.
+struct Options {
+ Options(bool A, bool B, bool C, bool F, bool P, bool U, bool L, bool N)
+ : AllBlocks(A), BranchInfo(B), BranchCount(C), FuncCoverage(F),
+ PreservePaths(P), UncondBranch(U), LongFileNames(L), NoOutput(N) {}
+
+ bool AllBlocks;
+ bool BranchInfo;
+ bool BranchCount;
+ bool FuncCoverage;
+ bool PreservePaths;
+ bool UncondBranch;
+ bool LongFileNames;
+ bool NoOutput;
+};
+} // end GCOV namespace
+
+/// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV-specific
+/// read operations.
+class GCOVBuffer {
+public:
+ GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {}
+
+ /// readGCNOFormat - Check that a valid GCNO signature starts the buffer.
+ bool readGCNOFormat() {
+ StringRef File = Buffer->getBuffer().slice(0, 4);
+ if (File != "oncg") {
+ errs() << "Unexpected file type: " << File << ".\n";
+ return false;
+ }
+ Cursor = 4;
+ return true;
+ }
+
+  /// readGCDAFormat - Check that the GCDA signature is valid at the
+  /// beginning of the buffer.
+ bool readGCDAFormat() {
+ StringRef File = Buffer->getBuffer().slice(0, 4);
+ if (File != "adcg") {
+ errs() << "Unexpected file type: " << File << ".\n";
+ return false;
+ }
+ Cursor = 4;
+ return true;
+ }
+
+ /// readGCOVVersion - Read GCOV version.
+ bool readGCOVVersion(GCOV::GCOVVersion &Version) {
+ StringRef VersionStr = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (VersionStr == "*204") {
+ Cursor += 4;
+ Version = GCOV::V402;
+ return true;
+ }
+ if (VersionStr == "*404") {
+ Cursor += 4;
+ Version = GCOV::V404;
+ return true;
+ }
+ if (VersionStr == "*704") {
+ Cursor += 4;
+ Version = GCOV::V704;
+ return true;
+ }
+ errs() << "Unexpected version: " << VersionStr << ".\n";
+ return false;
+ }
+
+  /// readFunctionTag - If the cursor points to a function tag then increment
+  /// the cursor and return true; otherwise return false.
+ bool readFunctionTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+ Tag[3] != '\1') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readBlockTag - If the cursor points to a block tag then increment the
+  /// cursor and return true; otherwise return false.
+ bool readBlockTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x41' ||
+ Tag[3] != '\x01') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readEdgeTag - If the cursor points to an edge tag then increment the
+  /// cursor and return true; otherwise return false.
+ bool readEdgeTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x43' ||
+ Tag[3] != '\x01') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readLineTag - If the cursor points to a line tag then increment the
+  /// cursor and return true; otherwise return false.
+ bool readLineTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\x45' ||
+ Tag[3] != '\x01') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readArcTag - If the cursor points to a GCDA arc tag then increment the
+  /// cursor and return true; otherwise return false.
+ bool readArcTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\xa1' ||
+ Tag[3] != '\1') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readObjectTag - If the cursor points to an object summary tag then
+  /// increment the cursor and return true; otherwise return false.
+ bool readObjectTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+ Tag[3] != '\xa1') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+  /// readProgramTag - If the cursor points to a program summary tag then
+  /// increment the cursor and return true; otherwise return false.
+ bool readProgramTag() {
+ StringRef Tag = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ if (Tag.empty() || Tag[0] != '\0' || Tag[1] != '\0' || Tag[2] != '\0' ||
+ Tag[3] != '\xa3') {
+ return false;
+ }
+ Cursor += 4;
+ return true;
+ }
+
+ bool readInt(uint32_t &Val) {
+ if (Buffer->getBuffer().size() < Cursor + 4) {
+ errs() << "Unexpected end of memory buffer: " << Cursor + 4 << ".\n";
+ return false;
+ }
+ StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor + 4);
+ Cursor += 4;
+ Val = *(const uint32_t *)(Str.data());
+ return true;
+ }
+
+ bool readInt64(uint64_t &Val) {
+ uint32_t Lo, Hi;
+ if (!readInt(Lo) || !readInt(Hi))
+ return false;
+ Val = ((uint64_t)Hi << 32) | Lo;
+ return true;
+ }
+
+ bool readString(StringRef &Str) {
+ uint32_t Len = 0;
+    // Keep reading until we find a non-zero length; this appears to match
+    // gcov's own behaviour.
+ while (Len == 0)
+ if (!readInt(Len))
+ return false;
+ Len *= 4;
+ if (Buffer->getBuffer().size() < Cursor + Len) {
+ errs() << "Unexpected end of memory buffer: " << Cursor + Len << ".\n";
+ return false;
+ }
+ Str = Buffer->getBuffer().slice(Cursor, Cursor + Len).split('\0').first;
+ Cursor += Len;
+ return true;
+ }
+
+ uint64_t getCursor() const { return Cursor; }
+ void advanceCursor(uint32_t n) { Cursor += n * 4; }
+
+private:
+ MemoryBuffer *Buffer;
+ uint64_t Cursor;
+};
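+
+// Illustrative sketch: validating a .gcno buffer and reading its version.
+// "Buf" is an assumed std::unique_ptr<MemoryBuffer> holding the file data.
+//
+//   GCOVBuffer GB(Buf.get());
+//   GCOV::GCOVVersion Version;
+//   if (!GB.readGCNOFormat() || !GB.readGCOVVersion(Version))
+//     return false; // the failing reader already printed to errs()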
+
+/// GCOVFile - Collects coverage information for one pair of coverage files
+/// (.gcno and .gcda).
+class GCOVFile {
+public:
+ GCOVFile()
+ : GCNOInitialized(false), Checksum(0), Functions(), RunCount(0),
+ ProgramCount(0) {}
+ bool readGCNO(GCOVBuffer &Buffer);
+ bool readGCDA(GCOVBuffer &Buffer);
+ uint32_t getChecksum() const { return Checksum; }
+ void dump() const;
+ void collectLineCounts(FileInfo &FI);
+
+private:
+ bool GCNOInitialized;
+ GCOV::GCOVVersion Version;
+ uint32_t Checksum;
+ SmallVector<std::unique_ptr<GCOVFunction>, 16> Functions;
+ uint32_t RunCount;
+ uint32_t ProgramCount;
+};
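+
+// Illustrative sketch of the usual read/report sequence for one
+// (.gcno, .gcda) pair; GCNOBuf, GCDABuf, and Opts are assumed to be two
+// GCOVBuffers and a GCOV::Options.
+//
+//   GCOVFile GF;
+//   if (!GF.readGCNO(GCNOBuf) || !GF.readGCDA(GCDABuf))
+//     return false;
+//   FileInfo FI(Opts);
+//   GF.collectLineCounts(FI);
+//   FI.print(outs(), "main.c", "main.gcno", "main.gcda");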
+
+/// GCOVEdge - Collects edge information.
+struct GCOVEdge {
+ GCOVEdge(GCOVBlock &S, GCOVBlock &D) : Src(S), Dst(D), Count(0) {}
+
+ GCOVBlock &Src;
+ GCOVBlock &Dst;
+ uint64_t Count;
+};
+
+/// GCOVFunction - Collects function information.
+class GCOVFunction {
+public:
+ typedef pointee_iterator<SmallVectorImpl<
+ std::unique_ptr<GCOVBlock>>::const_iterator> BlockIterator;
+
+ GCOVFunction(GCOVFile &P) : Parent(P), Ident(0), LineNumber(0) {}
+ bool readGCNO(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
+ bool readGCDA(GCOVBuffer &Buffer, GCOV::GCOVVersion Version);
+ StringRef getName() const { return Name; }
+ StringRef getFilename() const { return Filename; }
+ size_t getNumBlocks() const { return Blocks.size(); }
+ uint64_t getEntryCount() const;
+ uint64_t getExitCount() const;
+
+ BlockIterator block_begin() const { return Blocks.begin(); }
+ BlockIterator block_end() const { return Blocks.end(); }
+ iterator_range<BlockIterator> blocks() const {
+ return make_range(block_begin(), block_end());
+ }
+
+ void dump() const;
+ void collectLineCounts(FileInfo &FI);
+
+private:
+ GCOVFile &Parent;
+ uint32_t Ident;
+ uint32_t Checksum;
+ uint32_t LineNumber;
+ StringRef Name;
+ StringRef Filename;
+ SmallVector<std::unique_ptr<GCOVBlock>, 16> Blocks;
+ SmallVector<std::unique_ptr<GCOVEdge>, 16> Edges;
+};
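+
+// Illustrative sketch: walking a function's blocks through the pointee
+// iterator range above ("Func" is an assumed GCOVFunction).
+//
+//   for (const GCOVBlock &Block : Func.blocks())
+//     if (Block.getCount())
+//       ...the block was executed at least once...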
+
+/// GCOVBlock - Collects block information.
+class GCOVBlock {
+ struct EdgeWeight {
+ EdgeWeight(GCOVBlock *D) : Dst(D), Count(0) {}
+
+ GCOVBlock *Dst;
+ uint64_t Count;
+ };
+
+ struct SortDstEdgesFunctor {
+ bool operator()(const GCOVEdge *E1, const GCOVEdge *E2) {
+ return E1->Dst.Number < E2->Dst.Number;
+ }
+ };
+
+public:
+ typedef SmallVectorImpl<GCOVEdge *>::const_iterator EdgeIterator;
+
+ GCOVBlock(GCOVFunction &P, uint32_t N)
+ : Parent(P), Number(N), Counter(0), DstEdgesAreSorted(true), SrcEdges(),
+ DstEdges(), Lines() {}
+ ~GCOVBlock();
+ const GCOVFunction &getParent() const { return Parent; }
+ void addLine(uint32_t N) { Lines.push_back(N); }
+ uint32_t getLastLine() const { return Lines.back(); }
+ void addCount(size_t DstEdgeNo, uint64_t N);
+ uint64_t getCount() const { return Counter; }
+
+ void addSrcEdge(GCOVEdge *Edge) {
+ assert(&Edge->Dst == this); // up to caller to ensure edge is valid
+ SrcEdges.push_back(Edge);
+ }
+ void addDstEdge(GCOVEdge *Edge) {
+ assert(&Edge->Src == this); // up to caller to ensure edge is valid
+ // Check if adding this edge causes list to become unsorted.
+ if (DstEdges.size() && DstEdges.back()->Dst.Number > Edge->Dst.Number)
+ DstEdgesAreSorted = false;
+ DstEdges.push_back(Edge);
+ }
+ size_t getNumSrcEdges() const { return SrcEdges.size(); }
+ size_t getNumDstEdges() const { return DstEdges.size(); }
+ void sortDstEdges();
+
+ EdgeIterator src_begin() const { return SrcEdges.begin(); }
+ EdgeIterator src_end() const { return SrcEdges.end(); }
+ iterator_range<EdgeIterator> srcs() const {
+ return make_range(src_begin(), src_end());
+ }
+
+ EdgeIterator dst_begin() const { return DstEdges.begin(); }
+ EdgeIterator dst_end() const { return DstEdges.end(); }
+ iterator_range<EdgeIterator> dsts() const {
+ return make_range(dst_begin(), dst_end());
+ }
+
+ void dump() const;
+ void collectLineCounts(FileInfo &FI);
+
+private:
+ GCOVFunction &Parent;
+ uint32_t Number;
+ uint64_t Counter;
+ bool DstEdgesAreSorted;
+ SmallVector<GCOVEdge *, 16> SrcEdges;
+ SmallVector<GCOVEdge *, 16> DstEdges;
+ SmallVector<uint32_t, 16> Lines;
+};
+
+class FileInfo {
+  // It is unlikely, but possible, for multiple functions to be on the same
+  // line. Therefore this typedef allows LineData.Functions to store multiple
+  // functions per instance. This is rare, however, so optimize for the
+  // common case.
+ typedef SmallVector<const GCOVFunction *, 1> FunctionVector;
+ typedef DenseMap<uint32_t, FunctionVector> FunctionLines;
+ typedef SmallVector<const GCOVBlock *, 4> BlockVector;
+ typedef DenseMap<uint32_t, BlockVector> BlockLines;
+
+ struct LineData {
+ LineData() : LastLine(0) {}
+ BlockLines Blocks;
+ FunctionLines Functions;
+ uint32_t LastLine;
+ };
+
+ struct GCOVCoverage {
+ GCOVCoverage(StringRef Name)
+ : Name(Name), LogicalLines(0), LinesExec(0), Branches(0),
+ BranchesExec(0), BranchesTaken(0) {}
+
+ StringRef Name;
+
+ uint32_t LogicalLines;
+ uint32_t LinesExec;
+
+ uint32_t Branches;
+ uint32_t BranchesExec;
+ uint32_t BranchesTaken;
+ };
+
+public:
+ FileInfo(const GCOV::Options &Options)
+ : Options(Options), LineInfo(), RunCount(0), ProgramCount(0) {}
+
+ void addBlockLine(StringRef Filename, uint32_t Line, const GCOVBlock *Block) {
+ if (Line > LineInfo[Filename].LastLine)
+ LineInfo[Filename].LastLine = Line;
+ LineInfo[Filename].Blocks[Line - 1].push_back(Block);
+ }
+ void addFunctionLine(StringRef Filename, uint32_t Line,
+ const GCOVFunction *Function) {
+ if (Line > LineInfo[Filename].LastLine)
+ LineInfo[Filename].LastLine = Line;
+ LineInfo[Filename].Functions[Line - 1].push_back(Function);
+ }
+ void setRunCount(uint32_t Runs) { RunCount = Runs; }
+ void setProgramCount(uint32_t Programs) { ProgramCount = Programs; }
+ void print(raw_ostream &OS, StringRef MainFilename, StringRef GCNOFile,
+ StringRef GCDAFile);
+
+private:
+ std::string getCoveragePath(StringRef Filename, StringRef MainFilename);
+ std::unique_ptr<raw_ostream> openCoveragePath(StringRef CoveragePath);
+ void printFunctionSummary(raw_ostream &OS, const FunctionVector &Funcs) const;
+ void printBlockInfo(raw_ostream &OS, const GCOVBlock &Block,
+ uint32_t LineIndex, uint32_t &BlockNo) const;
+ void printBranchInfo(raw_ostream &OS, const GCOVBlock &Block,
+ GCOVCoverage &Coverage, uint32_t &EdgeNo);
+ void printUncondBranchInfo(raw_ostream &OS, uint32_t &EdgeNo,
+ uint64_t Count) const;
+
+ void printCoverage(raw_ostream &OS, const GCOVCoverage &Coverage) const;
+ void printFuncCoverage(raw_ostream &OS) const;
+ void printFileCoverage(raw_ostream &OS) const;
+
+ const GCOV::Options &Options;
+ StringMap<LineData> LineInfo;
+ uint32_t RunCount;
+ uint32_t ProgramCount;
+
+ typedef SmallVector<std::pair<std::string, GCOVCoverage>, 4> FileCoverageList;
+ typedef MapVector<const GCOVFunction *, GCOVCoverage> FuncCoverageMap;
+
+ FileCoverageList FileCoverages;
+ FuncCoverageMap FuncCoverages;
+};
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/GenericDomTree.h b/contrib/llvm/include/llvm/Support/GenericDomTree.h
new file mode 100644
index 0000000..8751f27
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/GenericDomTree.h
@@ -0,0 +1,779 @@
+//===- GenericDomTree.h - Generic dominator trees for graphs ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a set of templates that efficiently compute a dominator
+/// tree over a generic graph. This is used typically in LLVM for fast
+/// dominance queries on the CFG, but is fully generic w.r.t. the underlying
+/// graph types.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GENERICDOMTREE_H
+#define LLVM_SUPPORT_GENERICDOMTREE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+
+/// \brief Base class that other, more interesting dominator analyses
+/// inherit from.
+template <class NodeT> class DominatorBase {
+protected:
+ std::vector<NodeT *> Roots;
+ bool IsPostDominators;
+ explicit DominatorBase(bool isPostDom)
+ : Roots(), IsPostDominators(isPostDom) {}
+ DominatorBase(DominatorBase &&Arg)
+ : Roots(std::move(Arg.Roots)),
+ IsPostDominators(std::move(Arg.IsPostDominators)) {
+ Arg.Roots.clear();
+ }
+ DominatorBase &operator=(DominatorBase &&RHS) {
+ Roots = std::move(RHS.Roots);
+ IsPostDominators = std::move(RHS.IsPostDominators);
+ RHS.Roots.clear();
+ return *this;
+ }
+
+public:
+ /// getRoots - Return the root blocks of the current CFG. This may include
+ /// multiple blocks if we are computing post dominators. For forward
+ /// dominators, this will always be a single block (the entry node).
+ ///
+ const std::vector<NodeT *> &getRoots() const { return Roots; }
+
+  /// isPostDominator - Returns true if this analysis is based on
+  /// postdominators.
+ ///
+ bool isPostDominator() const { return IsPostDominators; }
+};
+
+template <class NodeT> class DominatorTreeBase;
+struct PostDominatorTree;
+
+/// \brief Base class for the actual dominator tree node.
+template <class NodeT> class DomTreeNodeBase {
+ NodeT *TheBB;
+ DomTreeNodeBase<NodeT> *IDom;
+ std::vector<DomTreeNodeBase<NodeT> *> Children;
+ mutable int DFSNumIn, DFSNumOut;
+
+ template <class N> friend class DominatorTreeBase;
+ friend struct PostDominatorTree;
+
+public:
+ typedef typename std::vector<DomTreeNodeBase<NodeT> *>::iterator iterator;
+ typedef typename std::vector<DomTreeNodeBase<NodeT> *>::const_iterator
+ const_iterator;
+
+ iterator begin() { return Children.begin(); }
+ iterator end() { return Children.end(); }
+ const_iterator begin() const { return Children.begin(); }
+ const_iterator end() const { return Children.end(); }
+
+ NodeT *getBlock() const { return TheBB; }
+ DomTreeNodeBase<NodeT> *getIDom() const { return IDom; }
+ const std::vector<DomTreeNodeBase<NodeT> *> &getChildren() const {
+ return Children;
+ }
+
+ DomTreeNodeBase(NodeT *BB, DomTreeNodeBase<NodeT> *iDom)
+ : TheBB(BB), IDom(iDom), DFSNumIn(-1), DFSNumOut(-1) {}
+
+ std::unique_ptr<DomTreeNodeBase<NodeT>>
+ addChild(std::unique_ptr<DomTreeNodeBase<NodeT>> C) {
+ Children.push_back(C.get());
+ return C;
+ }
+
+ size_t getNumChildren() const { return Children.size(); }
+
+ void clearAllChildren() { Children.clear(); }
+
+ bool compare(const DomTreeNodeBase<NodeT> *Other) const {
+ if (getNumChildren() != Other->getNumChildren())
+ return true;
+
+ SmallPtrSet<const NodeT *, 4> OtherChildren;
+ for (const_iterator I = Other->begin(), E = Other->end(); I != E; ++I) {
+ const NodeT *Nd = (*I)->getBlock();
+ OtherChildren.insert(Nd);
+ }
+
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ const NodeT *N = (*I)->getBlock();
+ if (OtherChildren.count(N) == 0)
+ return true;
+ }
+ return false;
+ }
+
+ void setIDom(DomTreeNodeBase<NodeT> *NewIDom) {
+ assert(IDom && "No immediate dominator?");
+ if (IDom != NewIDom) {
+ typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I =
+ std::find(IDom->Children.begin(), IDom->Children.end(), this);
+ assert(I != IDom->Children.end() &&
+ "Not in immediate dominator children set!");
+ // I am no longer your child...
+ IDom->Children.erase(I);
+
+ // Switch to new dominator
+ IDom = NewIDom;
+ IDom->Children.push_back(this);
+ }
+ }
+
+  /// getDFSNumIn/getDFSNumOut - These are an internal implementation detail;
+  /// do not call them.
+ unsigned getDFSNumIn() const { return DFSNumIn; }
+ unsigned getDFSNumOut() const { return DFSNumOut; }
+
+private:
+ // Return true if this node is dominated by other. Use this only if DFS info
+ // is valid.
+ bool DominatedBy(const DomTreeNodeBase<NodeT> *other) const {
+ return this->DFSNumIn >= other->DFSNumIn &&
+ this->DFSNumOut <= other->DFSNumOut;
+ }
+};
+
+template <class NodeT>
+raw_ostream &operator<<(raw_ostream &o, const DomTreeNodeBase<NodeT> *Node) {
+ if (Node->getBlock())
+ Node->getBlock()->printAsOperand(o, false);
+ else
+ o << " <<exit node>>";
+
+ o << " {" << Node->getDFSNumIn() << "," << Node->getDFSNumOut() << "}";
+
+ return o << "\n";
+}
+
+template <class NodeT>
+void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
+ unsigned Lev) {
+ o.indent(2 * Lev) << "[" << Lev << "] " << N;
+ for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
+ E = N->end();
+ I != E; ++I)
+ PrintDomTree<NodeT>(*I, o, Lev + 1);
+}
+
+// The calculate routine is provided in a separate header but referenced here.
+template <class FuncT, class N>
+void Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT,
+ FuncT &F);
+
+/// \brief Core dominator tree base class.
+///
+/// This class is a generic template over graph nodes. It is instantiated for
+/// various graphs in the LLVM IR or in the code generator.
+template <class NodeT> class DominatorTreeBase : public DominatorBase<NodeT> {
+ DominatorTreeBase(const DominatorTreeBase &) = delete;
+ DominatorTreeBase &operator=(const DominatorTreeBase &) = delete;
+
+ bool dominatedBySlowTreeWalk(const DomTreeNodeBase<NodeT> *A,
+ const DomTreeNodeBase<NodeT> *B) const {
+ assert(A != B);
+ assert(isReachableFromEntry(B));
+ assert(isReachableFromEntry(A));
+
+ const DomTreeNodeBase<NodeT> *IDom;
+ while ((IDom = B->getIDom()) != nullptr && IDom != A && IDom != B)
+ B = IDom; // Walk up the tree
+ return IDom != nullptr;
+ }
+
+ /// \brief Wipe this tree's state without releasing any resources.
+ ///
+ /// This is essentially a post-move helper only. It leaves the object in an
+ /// assignable and destroyable state, but otherwise invalid.
+ void wipe() {
+ DomTreeNodes.clear();
+ IDoms.clear();
+ Vertex.clear();
+ Info.clear();
+ RootNode = nullptr;
+ }
+
+protected:
+ typedef DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>
+ DomTreeNodeMapType;
+ DomTreeNodeMapType DomTreeNodes;
+ DomTreeNodeBase<NodeT> *RootNode;
+
+ mutable bool DFSInfoValid;
+ mutable unsigned int SlowQueries;
+ // Information record used during immediate dominators computation.
+ struct InfoRec {
+ unsigned DFSNum;
+ unsigned Parent;
+ unsigned Semi;
+ NodeT *Label;
+
+ InfoRec() : DFSNum(0), Parent(0), Semi(0), Label(nullptr) {}
+ };
+
+ DenseMap<NodeT *, NodeT *> IDoms;
+
+ // Vertex - Map the DFS number to the NodeT*
+ std::vector<NodeT *> Vertex;
+
+ // Info - Collection of information used during the computation of idoms.
+ DenseMap<NodeT *, InfoRec> Info;
+
+ void reset() {
+ DomTreeNodes.clear();
+ IDoms.clear();
+ this->Roots.clear();
+ Vertex.clear();
+ RootNode = nullptr;
+ DFSInfoValid = false;
+ SlowQueries = 0;
+ }
+
+ // NewBB is split and now it has one successor. Update dominator tree to
+ // reflect this change.
+ template <class N, class GraphT>
+ void Split(DominatorTreeBase<typename GraphT::NodeType> &DT,
+ typename GraphT::NodeType *NewBB) {
+ assert(std::distance(GraphT::child_begin(NewBB),
+ GraphT::child_end(NewBB)) == 1 &&
+ "NewBB should have a single successor!");
+ typename GraphT::NodeType *NewBBSucc = *GraphT::child_begin(NewBB);
+
+ std::vector<typename GraphT::NodeType *> PredBlocks;
+ typedef GraphTraits<Inverse<N>> InvTraits;
+ for (typename InvTraits::ChildIteratorType
+ PI = InvTraits::child_begin(NewBB),
+ PE = InvTraits::child_end(NewBB);
+ PI != PE; ++PI)
+ PredBlocks.push_back(*PI);
+
+ assert(!PredBlocks.empty() && "No predblocks?");
+
+ bool NewBBDominatesNewBBSucc = true;
+ for (typename InvTraits::ChildIteratorType
+ PI = InvTraits::child_begin(NewBBSucc),
+ E = InvTraits::child_end(NewBBSucc);
+ PI != E; ++PI) {
+ typename InvTraits::NodeType *ND = *PI;
+ if (ND != NewBB && !DT.dominates(NewBBSucc, ND) &&
+ DT.isReachableFromEntry(ND)) {
+ NewBBDominatesNewBBSucc = false;
+ break;
+ }
+ }
+
+ // Find NewBB's immediate dominator and create new dominator tree node for
+ // NewBB.
+ NodeT *NewBBIDom = nullptr;
+ unsigned i = 0;
+ for (i = 0; i < PredBlocks.size(); ++i)
+ if (DT.isReachableFromEntry(PredBlocks[i])) {
+ NewBBIDom = PredBlocks[i];
+ break;
+ }
+
+ // It's possible that none of the predecessors of NewBB are reachable;
+ // in that case, NewBB itself is unreachable, so nothing needs to be
+ // changed.
+ if (!NewBBIDom)
+ return;
+
+ for (i = i + 1; i < PredBlocks.size(); ++i) {
+ if (DT.isReachableFromEntry(PredBlocks[i]))
+ NewBBIDom = DT.findNearestCommonDominator(NewBBIDom, PredBlocks[i]);
+ }
+
+ // Create the new dominator tree node... and set the idom of NewBB.
+ DomTreeNodeBase<NodeT> *NewBBNode = DT.addNewBlock(NewBB, NewBBIDom);
+
+ // If NewBB strictly dominates other blocks, then it is now the immediate
+ // dominator of NewBBSucc. Update the dominator tree as appropriate.
+ if (NewBBDominatesNewBBSucc) {
+ DomTreeNodeBase<NodeT> *NewBBSuccNode = DT.getNode(NewBBSucc);
+ DT.changeImmediateDominator(NewBBSuccNode, NewBBNode);
+ }
+ }
+
+public:
+ explicit DominatorTreeBase(bool isPostDom)
+ : DominatorBase<NodeT>(isPostDom), DFSInfoValid(false), SlowQueries(0) {}
+
+ DominatorTreeBase(DominatorTreeBase &&Arg)
+ : DominatorBase<NodeT>(
+ std::move(static_cast<DominatorBase<NodeT> &>(Arg))),
+ DomTreeNodes(std::move(Arg.DomTreeNodes)),
+ RootNode(std::move(Arg.RootNode)),
+ DFSInfoValid(std::move(Arg.DFSInfoValid)),
+ SlowQueries(std::move(Arg.SlowQueries)), IDoms(std::move(Arg.IDoms)),
+ Vertex(std::move(Arg.Vertex)), Info(std::move(Arg.Info)) {
+ Arg.wipe();
+ }
+ DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
+ DominatorBase<NodeT>::operator=(
+ std::move(static_cast<DominatorBase<NodeT> &>(RHS)));
+ DomTreeNodes = std::move(RHS.DomTreeNodes);
+ RootNode = std::move(RHS.RootNode);
+ DFSInfoValid = std::move(RHS.DFSInfoValid);
+ SlowQueries = std::move(RHS.SlowQueries);
+ IDoms = std::move(RHS.IDoms);
+ Vertex = std::move(RHS.Vertex);
+ Info = std::move(RHS.Info);
+ RHS.wipe();
+ return *this;
+ }
+
+ /// compare - Return false if the other dominator tree base matches this
+ /// dominator tree base. Otherwise return true.
+ bool compare(const DominatorTreeBase &Other) const {
+
+ const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
+ if (DomTreeNodes.size() != OtherDomTreeNodes.size())
+ return true;
+
+ for (typename DomTreeNodeMapType::const_iterator
+ I = this->DomTreeNodes.begin(),
+ E = this->DomTreeNodes.end();
+ I != E; ++I) {
+ NodeT *BB = I->first;
+ typename DomTreeNodeMapType::const_iterator OI =
+ OtherDomTreeNodes.find(BB);
+ if (OI == OtherDomTreeNodes.end())
+ return true;
+
+ DomTreeNodeBase<NodeT> &MyNd = *I->second;
+ DomTreeNodeBase<NodeT> &OtherNd = *OI->second;
+
+ if (MyNd.compare(&OtherNd))
+ return true;
+ }
+
+ return false;
+ }
+
+ void releaseMemory() { reset(); }
+
+  /// getNode - Return the (Post)DominatorTree node for the specified basic
+  /// block. This is the same as using operator[] on this class. The result
+  /// may (but is not required to) be null for a forward (backward)
+  /// statically unreachable block.
+ DomTreeNodeBase<NodeT> *getNode(NodeT *BB) const {
+ auto I = DomTreeNodes.find(BB);
+ if (I != DomTreeNodes.end())
+ return I->second.get();
+ return nullptr;
+ }
+
+ /// See getNode.
+ DomTreeNodeBase<NodeT> *operator[](NodeT *BB) const { return getNode(BB); }
+
+ /// getRootNode - This returns the entry node for the CFG of the function. If
+ /// this tree represents the post-dominance relations for a function, however,
+  /// this root may be a node whose block is null. This is the case when
+ /// there are multiple exit nodes from a particular function. Consumers of
+ /// post-dominance information must be capable of dealing with this
+ /// possibility.
+ ///
+ DomTreeNodeBase<NodeT> *getRootNode() { return RootNode; }
+ const DomTreeNodeBase<NodeT> *getRootNode() const { return RootNode; }
+
+ /// Get all nodes dominated by R, including R itself.
+ void getDescendants(NodeT *R, SmallVectorImpl<NodeT *> &Result) const {
+ Result.clear();
+ const DomTreeNodeBase<NodeT> *RN = getNode(R);
+ if (!RN)
+ return; // If R is unreachable, it will not be present in the DOM tree.
+ SmallVector<const DomTreeNodeBase<NodeT> *, 8> WL;
+ WL.push_back(RN);
+
+ while (!WL.empty()) {
+ const DomTreeNodeBase<NodeT> *N = WL.pop_back_val();
+ Result.push_back(N->getBlock());
+ WL.append(N->begin(), N->end());
+ }
+ }
+
+ /// properlyDominates - Returns true iff A dominates B and A != B.
+ /// Note that this is not a constant time operation!
+ ///
+ bool properlyDominates(const DomTreeNodeBase<NodeT> *A,
+ const DomTreeNodeBase<NodeT> *B) const {
+ if (!A || !B)
+ return false;
+ if (A == B)
+ return false;
+ return dominates(A, B);
+ }
+
+ bool properlyDominates(const NodeT *A, const NodeT *B) const;
+
+ /// isReachableFromEntry - Return true if A is dominated by the entry
+ /// block of the function containing it.
+ bool isReachableFromEntry(const NodeT *A) const {
+ assert(!this->isPostDominator() &&
+ "This is not implemented for post dominators");
+ return isReachableFromEntry(getNode(const_cast<NodeT *>(A)));
+ }
+
+ bool isReachableFromEntry(const DomTreeNodeBase<NodeT> *A) const { return A; }
+
+ /// dominates - Returns true iff A dominates B. Note that this is not a
+ /// constant time operation!
+ ///
+ bool dominates(const DomTreeNodeBase<NodeT> *A,
+ const DomTreeNodeBase<NodeT> *B) const {
+ // A node trivially dominates itself.
+ if (B == A)
+ return true;
+
+ // An unreachable node is dominated by anything.
+ if (!isReachableFromEntry(B))
+ return true;
+
+ // And dominates nothing.
+ if (!isReachableFromEntry(A))
+ return false;
+
+ // Compare the result of the tree walk and the dfs numbers, if expensive
+ // checks are enabled.
+#ifdef XDEBUG
+ assert((!DFSInfoValid ||
+ (dominatedBySlowTreeWalk(A, B) == B->DominatedBy(A))) &&
+ "Tree walk disagrees with dfs numbers!");
+#endif
+
+ if (DFSInfoValid)
+ return B->DominatedBy(A);
+
+ // If we end up with too many slow queries, just update the
+ // DFS numbers on the theory that we are going to keep querying.
+ SlowQueries++;
+ if (SlowQueries > 32) {
+ updateDFSNumbers();
+ return B->DominatedBy(A);
+ }
+
+ return dominatedBySlowTreeWalk(A, B);
+ }
+
+ bool dominates(const NodeT *A, const NodeT *B) const;
+
+ NodeT *getRoot() const {
+ assert(this->Roots.size() == 1 && "Should always have entry node!");
+ return this->Roots[0];
+ }
+
+  /// findNearestCommonDominator - Find the nearest common dominator basic
+  /// block for basic blocks A and B. If there is no such block then return
+  /// NULL.
+ NodeT *findNearestCommonDominator(NodeT *A, NodeT *B) {
+ assert(A->getParent() == B->getParent() &&
+ "Two blocks are not in same function");
+
+    // If either A or B is the entry block then it is the nearest common
+    // dominator (for forward dominators).
+ if (!this->isPostDominator()) {
+ NodeT &Entry = A->getParent()->front();
+ if (A == &Entry || B == &Entry)
+ return &Entry;
+ }
+
+    // If B dominates A then B is the nearest common dominator.
+ if (dominates(B, A))
+ return B;
+
+    // If A dominates B then A is the nearest common dominator.
+ if (dominates(A, B))
+ return A;
+
+ DomTreeNodeBase<NodeT> *NodeA = getNode(A);
+ DomTreeNodeBase<NodeT> *NodeB = getNode(B);
+
+ // If we have DFS info, then we can avoid all allocations by just querying
+ // it from each IDom. Note that because we call 'dominates' twice above, we
+ // expect to call through this code at most 16 times in a row without
+ // building valid DFS information. This is important as below is a *very*
+ // slow tree walk.
+ if (DFSInfoValid) {
+ DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom();
+ while (IDomA) {
+ if (NodeB->DominatedBy(IDomA))
+ return IDomA->getBlock();
+ IDomA = IDomA->getIDom();
+ }
+ return nullptr;
+ }
+
+ // Collect NodeA dominators set.
+ SmallPtrSet<DomTreeNodeBase<NodeT> *, 16> NodeADoms;
+ NodeADoms.insert(NodeA);
+ DomTreeNodeBase<NodeT> *IDomA = NodeA->getIDom();
+ while (IDomA) {
+ NodeADoms.insert(IDomA);
+ IDomA = IDomA->getIDom();
+ }
+
+ // Walk NodeB immediate dominators chain and find common dominator node.
+ DomTreeNodeBase<NodeT> *IDomB = NodeB->getIDom();
+ while (IDomB) {
+ if (NodeADoms.count(IDomB) != 0)
+ return IDomB->getBlock();
+
+ IDomB = IDomB->getIDom();
+ }
+
+ return nullptr;
+ }
+
+ const NodeT *findNearestCommonDominator(const NodeT *A, const NodeT *B) {
+ // Cast away the const qualifiers here. This is ok since
+ // const is re-introduced on the return type.
+ return findNearestCommonDominator(const_cast<NodeT *>(A),
+ const_cast<NodeT *>(B));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // API to update (Post)DominatorTree information based on modifications to
+ // the CFG...
+
+  /// addNewBlock - Add a new node to the dominator tree information. This
+  /// creates a new node as a child of DomBB's dominator node, linking it
+  /// into the children list of the immediate dominator.
+ DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
+ assert(getNode(BB) == nullptr && "Block already in dominator tree!");
+ DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
+    assert(IDomNode && "No immediate dominator specified for block!");
+ DFSInfoValid = false;
+ return (DomTreeNodes[BB] = IDomNode->addChild(
+ llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
+ }
+
+ /// changeImmediateDominator - This method is used to update the dominator
+ /// tree information when a node's immediate dominator changes.
+ ///
+ void changeImmediateDominator(DomTreeNodeBase<NodeT> *N,
+ DomTreeNodeBase<NodeT> *NewIDom) {
+ assert(N && NewIDom && "Cannot change null node pointers!");
+ DFSInfoValid = false;
+ N->setIDom(NewIDom);
+ }
+
+ void changeImmediateDominator(NodeT *BB, NodeT *NewBB) {
+ changeImmediateDominator(getNode(BB), getNode(NewBB));
+ }
+
+ /// eraseNode - Removes a node from the dominator tree. Block must not
+ /// dominate any other blocks. Removes node from its immediate dominator's
+ /// children list. Deletes dominator node associated with basic block BB.
+ void eraseNode(NodeT *BB) {
+ DomTreeNodeBase<NodeT> *Node = getNode(BB);
+ assert(Node && "Removing node that isn't in dominator tree.");
+ assert(Node->getChildren().empty() && "Node is not a leaf node.");
+
+ // Remove node from immediate dominator's children list.
+ DomTreeNodeBase<NodeT> *IDom = Node->getIDom();
+ if (IDom) {
+ typename std::vector<DomTreeNodeBase<NodeT> *>::iterator I =
+ std::find(IDom->Children.begin(), IDom->Children.end(), Node);
+ assert(I != IDom->Children.end() &&
+ "Not in immediate dominator children set!");
+ // I am no longer your child...
+ IDom->Children.erase(I);
+ }
+
+ DomTreeNodes.erase(BB);
+ }
+
+ /// splitBlock - BB is split and now it has one successor. Update dominator
+ /// tree to reflect this change.
+ void splitBlock(NodeT *NewBB) {
+ if (this->IsPostDominators)
+ this->Split<Inverse<NodeT *>, GraphTraits<Inverse<NodeT *>>>(*this,
+ NewBB);
+ else
+ this->Split<NodeT *, GraphTraits<NodeT *>>(*this, NewBB);
+ }
+
+  /// print - Convert to human-readable form.
+ ///
+ void print(raw_ostream &o) const {
+ o << "=============================--------------------------------\n";
+ if (this->isPostDominator())
+ o << "Inorder PostDominator Tree: ";
+ else
+ o << "Inorder Dominator Tree: ";
+ if (!this->DFSInfoValid)
+ o << "DFSNumbers invalid: " << SlowQueries << " slow queries.";
+ o << "\n";
+
+ // The postdom tree can have a null root if there are no returns.
+ if (getRootNode())
+ PrintDomTree<NodeT>(getRootNode(), o, 1);
+ }
+
+protected:
+ template <class GraphT>
+ friend typename GraphT::NodeType *
+ Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
+ typename GraphT::NodeType *V, unsigned LastLinked);
+
+ template <class GraphT>
+ friend unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType> &DT,
+ typename GraphT::NodeType *V, unsigned N);
+
+ template <class FuncT, class N>
+ friend void
+ Calculate(DominatorTreeBase<typename GraphTraits<N>::NodeType> &DT, FuncT &F);
+
+ DomTreeNodeBase<NodeT> *getNodeForBlock(NodeT *BB) {
+ if (DomTreeNodeBase<NodeT> *Node = getNode(BB))
+ return Node;
+
+ // Haven't calculated this node yet? Get or calculate the node for the
+ // immediate dominator.
+ NodeT *IDom = getIDom(BB);
+
+ assert(IDom || this->DomTreeNodes[nullptr]);
+ DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom);
+
+ // Add a new tree node for this NodeT, and link it as a child of
+ // IDomNode
+ return (this->DomTreeNodes[BB] = IDomNode->addChild(
+ llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
+ }
+
+ NodeT *getIDom(NodeT *BB) const { return IDoms.lookup(BB); }
+
+ void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
+
+public:
+  /// updateDFSNumbers - Assign In and Out numbers to the nodes while walking
+  /// the dominator tree in DFS order.
+ void updateDFSNumbers() const {
+
+ if (DFSInfoValid) {
+ SlowQueries = 0;
+ return;
+ }
+
+ unsigned DFSNum = 0;
+
+ SmallVector<std::pair<const DomTreeNodeBase<NodeT> *,
+ typename DomTreeNodeBase<NodeT>::const_iterator>,
+ 32> WorkStack;
+
+ const DomTreeNodeBase<NodeT> *ThisRoot = getRootNode();
+
+ if (!ThisRoot)
+ return;
+
+    // Even in the case of multiple exits that form the post dominator root
+    // nodes, do not iterate over all exits, but start from the virtual root
+    // node. Otherwise BBs that are not post-dominated by any exit, but only
+    // by the virtual root node, will never be assigned a DFS number.
+ WorkStack.push_back(std::make_pair(ThisRoot, ThisRoot->begin()));
+ ThisRoot->DFSNumIn = DFSNum++;
+
+ while (!WorkStack.empty()) {
+ const DomTreeNodeBase<NodeT> *Node = WorkStack.back().first;
+ typename DomTreeNodeBase<NodeT>::const_iterator ChildIt =
+ WorkStack.back().second;
+
+      // If we visited all of the children of this node, "recurse" back up the
+      // stack setting the DFSNumOut.
+ if (ChildIt == Node->end()) {
+ Node->DFSNumOut = DFSNum++;
+ WorkStack.pop_back();
+ } else {
+ // Otherwise, recursively visit this child.
+ const DomTreeNodeBase<NodeT> *Child = *ChildIt;
+ ++WorkStack.back().second;
+
+ WorkStack.push_back(std::make_pair(Child, Child->begin()));
+ Child->DFSNumIn = DFSNum++;
+ }
+ }
+
+ SlowQueries = 0;
+ DFSInfoValid = true;
+ }
+
+  /// recalculate - Compute a dominator tree for the given function.
+ template <class FT> void recalculate(FT &F) {
+ typedef GraphTraits<FT *> TraitsTy;
+ reset();
+ this->Vertex.push_back(nullptr);
+
+ if (!this->IsPostDominators) {
+ // Initialize root
+ NodeT *entry = TraitsTy::getEntryNode(&F);
+ this->Roots.push_back(entry);
+ this->IDoms[entry] = nullptr;
+ this->DomTreeNodes[entry] = nullptr;
+
+ Calculate<FT, NodeT *>(*this, F);
+ } else {
+ // Initialize the roots list
+ for (typename TraitsTy::nodes_iterator I = TraitsTy::nodes_begin(&F),
+ E = TraitsTy::nodes_end(&F);
+ I != E; ++I) {
+ if (TraitsTy::child_begin(&*I) == TraitsTy::child_end(&*I))
+ addRoot(&*I);
+
+ // Prepopulate maps so that we don't get iterator invalidation issues
+ // later.
+ this->IDoms[&*I] = nullptr;
+ this->DomTreeNodes[&*I] = nullptr;
+ }
+
+ Calculate<FT, Inverse<NodeT *>>(*this, F);
+ }
+ }
+};
+
+// These two functions are declared out of line as a workaround for building
+// with old (< r147295) versions of clang because of PR11642.
+template <class NodeT>
+bool DominatorTreeBase<NodeT>::dominates(const NodeT *A, const NodeT *B) const {
+ if (A == B)
+ return true;
+
+ // Cast away the const qualifiers here. This is ok since
+ // this function doesn't actually return the values returned
+ // from getNode.
+ return dominates(getNode(const_cast<NodeT *>(A)),
+ getNode(const_cast<NodeT *>(B)));
+}
+template <class NodeT>
+bool DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A,
+ const NodeT *B) const {
+ if (A == B)
+ return false;
+
+ // Cast away the const qualifiers here. This is ok since
+ // this function doesn't actually return the values returned
+ // from getNode.
+ return dominates(getNode(const_cast<NodeT *>(A)),
+ getNode(const_cast<NodeT *>(B)));
+}
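+
+// Illustrative sketch (BasicBlock, Function F, and blocks A and B are
+// assumptions; any node type with GraphTraits works): build a forward
+// dominator tree and query it.
+//
+//   DominatorTreeBase<BasicBlock> DT(/*isPostDom=*/false);
+//   DT.recalculate(F);
+//   if (DT.properlyDominates(A, B))
+//     ...every path from the entry to B passes through A...
+//   BasicBlock *NCD = DT.findNearestCommonDominator(A, B);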
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h b/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h
new file mode 100644
index 0000000..3e867dc
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/GenericDomTreeConstruction.h
@@ -0,0 +1,291 @@
+//===- GenericDomTreeConstruction.h - Dominator Calculation ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Generic dominator tree construction - This file provides routines to
+/// construct immediate dominator information for a flow-graph based on the
+/// algorithm described in this document:
+///
+/// A Fast Algorithm for Finding Dominators in a Flowgraph
+///     T. Lengauer & R. Tarjan, ACM TOPLAS, July 1979, pp. 121-141.
+///
+/// This implements the O(n*log(n)) versions of EVAL and LINK, because it turns
+/// out that the theoretically slower O(n*log(n)) implementation is actually
+/// faster than the almost-linear O(n*alpha(n)) version, even for large CFGs.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
+#define LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/GenericDomTree.h"
+
+namespace llvm {
+
+template<class GraphT>
+unsigned DFSPass(DominatorTreeBase<typename GraphT::NodeType>& DT,
+ typename GraphT::NodeType* V, unsigned N) {
+ // This is more understandable as a recursive algorithm, but we can't use the
+ // recursive algorithm due to stack depth issues. Keep it here for
+ // documentation purposes.
+#if 0
+ InfoRec &VInfo = DT.Info[DT.Roots[i]];
+ VInfo.DFSNum = VInfo.Semi = ++N;
+ VInfo.Label = V;
+
+ Vertex.push_back(V); // Vertex[n] = V;
+
+ for (succ_iterator SI = succ_begin(V), E = succ_end(V); SI != E; ++SI) {
+ InfoRec &SuccVInfo = DT.Info[*SI];
+ if (SuccVInfo.Semi == 0) {
+ SuccVInfo.Parent = V;
+ N = DTDFSPass(DT, *SI, N);
+ }
+ }
+#else
+ bool IsChildOfArtificialExit = (N != 0);
+
+ SmallVector<std::pair<typename GraphT::NodeType*,
+ typename GraphT::ChildIteratorType>, 32> Worklist;
+ Worklist.push_back(std::make_pair(V, GraphT::child_begin(V)));
+ while (!Worklist.empty()) {
+ typename GraphT::NodeType* BB = Worklist.back().first;
+ typename GraphT::ChildIteratorType NextSucc = Worklist.back().second;
+
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
+ DT.Info[BB];
+
+ // First time we visited this BB?
+ if (NextSucc == GraphT::child_begin(BB)) {
+ BBInfo.DFSNum = BBInfo.Semi = ++N;
+ BBInfo.Label = BB;
+
+ DT.Vertex.push_back(BB); // Vertex[n] = V;
+
+ if (IsChildOfArtificialExit)
+ BBInfo.Parent = 1;
+
+ IsChildOfArtificialExit = false;
+ }
+
+    // Store the DFS number of the current BB - the reference to BBInfo might
+    // get invalidated when processing the successors.
+ unsigned BBDFSNum = BBInfo.DFSNum;
+
+ // If we are done with this block, remove it from the worklist.
+ if (NextSucc == GraphT::child_end(BB)) {
+ Worklist.pop_back();
+ continue;
+ }
+
+ // Increment the successor number for the next time we get to it.
+ ++Worklist.back().second;
+
+ // Visit the successor next, if it isn't already visited.
+ typename GraphT::NodeType* Succ = *NextSucc;
+
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &SuccVInfo =
+ DT.Info[Succ];
+ if (SuccVInfo.Semi == 0) {
+ SuccVInfo.Parent = BBDFSNum;
+ Worklist.push_back(std::make_pair(Succ, GraphT::child_begin(Succ)));
+ }
+ }
+#endif
+ return N;
+}
+
+template <class GraphT>
+typename GraphT::NodeType *
+Eval(DominatorTreeBase<typename GraphT::NodeType> &DT,
+ typename GraphT::NodeType *VIn, unsigned LastLinked) {
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInInfo =
+ DT.Info[VIn];
+ if (VInInfo.DFSNum < LastLinked)
+ return VIn;
+
+ SmallVector<typename GraphT::NodeType*, 32> Work;
+ SmallPtrSet<typename GraphT::NodeType*, 32> Visited;
+
+ if (VInInfo.Parent >= LastLinked)
+ Work.push_back(VIn);
+
+ while (!Work.empty()) {
+ typename GraphT::NodeType* V = Work.back();
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VInfo =
+ DT.Info[V];
+ typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Parent];
+
+ // Process Ancestor first
+ if (Visited.insert(VAncestor).second && VInfo.Parent >= LastLinked) {
+ Work.push_back(VAncestor);
+ continue;
+ }
+ Work.pop_back();
+
+ // Update VInfo based on Ancestor info
+ if (VInfo.Parent < LastLinked)
+ continue;
+
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &VAInfo =
+ DT.Info[VAncestor];
+ typename GraphT::NodeType* VAncestorLabel = VAInfo.Label;
+ typename GraphT::NodeType* VLabel = VInfo.Label;
+ if (DT.Info[VAncestorLabel].Semi < DT.Info[VLabel].Semi)
+ VInfo.Label = VAncestorLabel;
+ VInfo.Parent = VAInfo.Parent;
+ }
+
+ return VInInfo.Label;
+}
+
+template<class FuncT, class NodeT>
+void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
+ FuncT& F) {
+ typedef GraphTraits<NodeT> GraphT;
+
+ unsigned N = 0;
+ bool MultipleRoots = (DT.Roots.size() > 1);
+ if (MultipleRoots) {
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &BBInfo =
+ DT.Info[nullptr];
+ BBInfo.DFSNum = BBInfo.Semi = ++N;
+ BBInfo.Label = nullptr;
+
+ DT.Vertex.push_back(nullptr); // Vertex[n] = V;
+ }
+
+ // Step #1: Number blocks in depth-first order and initialize variables used
+ // in later stages of the algorithm.
+ for (unsigned i = 0, e = static_cast<unsigned>(DT.Roots.size());
+ i != e; ++i)
+ N = DFSPass<GraphT>(DT, DT.Roots[i], N);
+
+  // It might be that some blocks did not get a DFS number (e.g., blocks of
+  // infinite loops). In these cases an artificial exit node is required.
+ MultipleRoots |= (DT.isPostDominator() && N != GraphTraits<FuncT*>::size(&F));
+
+ // When naively implemented, the Lengauer-Tarjan algorithm requires a separate
+ // bucket for each vertex. However, this is unnecessary, because each vertex
+ // is only placed into a single bucket (that of its semidominator), and each
+ // vertex's bucket is processed before it is added to any bucket itself.
+ //
+ // Instead of using a bucket per vertex, we use a single array Buckets that
+ // has two purposes. Before the vertex V with preorder number i is processed,
+ // Buckets[i] stores the index of the first element in V's bucket. After V's
+ // bucket is processed, Buckets[i] stores the index of the next element in the
+ // bucket containing V, if any.
+ SmallVector<unsigned, 32> Buckets;
+ Buckets.resize(N + 1);
+ for (unsigned i = 1; i <= N; ++i)
+ Buckets[i] = i;
+
+ for (unsigned i = N; i >= 2; --i) {
+ typename GraphT::NodeType* W = DT.Vertex[i];
+ typename DominatorTreeBase<typename GraphT::NodeType>::InfoRec &WInfo =
+ DT.Info[W];
+
+ // Step #2: Implicitly define the immediate dominator of vertices
+ for (unsigned j = i; Buckets[j] != i; j = Buckets[j]) {
+ typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
+ typename GraphT::NodeType* U = Eval<GraphT>(DT, V, i + 1);
+ DT.IDoms[V] = DT.Info[U].Semi < i ? U : W;
+ }
+
+ // Step #3: Calculate the semidominators of all vertices
+
+    // Initialize the semidominator to point to the parent node.
+ WInfo.Semi = WInfo.Parent;
+ typedef GraphTraits<Inverse<NodeT> > InvTraits;
+ for (typename InvTraits::ChildIteratorType CI =
+ InvTraits::child_begin(W),
+ E = InvTraits::child_end(W); CI != E; ++CI) {
+ typename InvTraits::NodeType *N = *CI;
+ if (DT.Info.count(N)) { // Only if this predecessor is reachable!
+ unsigned SemiU = DT.Info[Eval<GraphT>(DT, N, i + 1)].Semi;
+ if (SemiU < WInfo.Semi)
+ WInfo.Semi = SemiU;
+ }
+ }
+
+ // If V is a non-root vertex and sdom(V) = parent(V), then idom(V) is
+ // necessarily parent(V). In this case, set idom(V) here and avoid placing
+ // V into a bucket.
+ if (WInfo.Semi == WInfo.Parent) {
+ DT.IDoms[W] = DT.Vertex[WInfo.Parent];
+ } else {
+ Buckets[i] = Buckets[WInfo.Semi];
+ Buckets[WInfo.Semi] = i;
+ }
+ }
+
+ if (N >= 1) {
+ typename GraphT::NodeType* Root = DT.Vertex[1];
+ for (unsigned j = 1; Buckets[j] != 1; j = Buckets[j]) {
+ typename GraphT::NodeType* V = DT.Vertex[Buckets[j]];
+ DT.IDoms[V] = Root;
+ }
+ }
+
+ // Step #4: Explicitly define the immediate dominator of each vertex
+ for (unsigned i = 2; i <= N; ++i) {
+ typename GraphT::NodeType* W = DT.Vertex[i];
+ typename GraphT::NodeType*& WIDom = DT.IDoms[W];
+ if (WIDom != DT.Vertex[DT.Info[W].Semi])
+ WIDom = DT.IDoms[WIDom];
+ }
+
+ if (DT.Roots.empty()) return;
+
+ // Add a node for the root. This node might be the actual root, if there is
+ // one exit block, or it may be the virtual exit (denoted by (BasicBlock *)0)
+ // which postdominates all real exits if there are multiple exit blocks, or
+ // an infinite loop.
+ typename GraphT::NodeType* Root = !MultipleRoots ? DT.Roots[0] : nullptr;
+
+ DT.RootNode =
+ (DT.DomTreeNodes[Root] =
+ llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
+ Root, nullptr)).get();
+
+ // Loop over all of the reachable blocks in the function...
+ for (unsigned i = 2; i <= N; ++i) {
+ typename GraphT::NodeType* W = DT.Vertex[i];
+
+    // Don't replace this with 'count'; the insertion side effect is
+    // important.
+    if (DT.DomTreeNodes[W])
+      continue; // Already created, e.g., as another block's idom.
+
+ typename GraphT::NodeType* ImmDom = DT.getIDom(W);
+
+ assert(ImmDom || DT.DomTreeNodes[nullptr]);
+
+ // Get or calculate the node for the immediate dominator
+ DomTreeNodeBase<typename GraphT::NodeType> *IDomNode =
+ DT.getNodeForBlock(ImmDom);
+
+ // Add a new tree node for this BasicBlock, and link it as a child of
+ // IDomNode
+ DT.DomTreeNodes[W] = IDomNode->addChild(
+ llvm::make_unique<DomTreeNodeBase<typename GraphT::NodeType>>(
+ W, IDomNode));
+ }
+
+ // Free temporary memory used to construct idom's
+ DT.IDoms.clear();
+ DT.Info.clear();
+ DT.Vertex.clear();
+ DT.Vertex.shrink_to_fit();
+
+ DT.updateDFSNumbers();
+}
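+
+// Illustrative note: these routines are not normally called directly. A
+// DominatorTreeBase reaches Calculate<>() through recalculate(), e.g.
+// (BasicBlock and Function are assumptions):
+//
+//   DominatorTreeBase<BasicBlock> DT(/*isPostDom=*/false);
+//   DT.recalculate(F); // dispatches to Calculate<Function, BasicBlock *>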
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/GraphWriter.h b/contrib/llvm/include/llvm/Support/GraphWriter.h
new file mode 100644
index 0000000..86985c5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/GraphWriter.h
@@ -0,0 +1,363 @@
+//===-- llvm/Support/GraphWriter.h - Write graph to a .dot file -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a simple interface that can be used to print out generic
+// LLVM graphs to ".dot" files. "dot" is a tool that is part of the AT&T
+// graphviz package (http://www.research.att.com/sw/tools/graphviz/) which can
+// be used to turn the files output by this interface into a variety of
+// different graphics formats.
+//
+// Graphs do not need to implement any interface past what is already required
+// by the GraphTraits template, but they can choose to implement specializations
+// of the DOTGraphTraits template if they want to customize the graphs output in
+// any way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_GRAPHWRITER_H
+#define LLVM_SUPPORT_GRAPHWRITER_H
+
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DOTGraphTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector>
+
+namespace llvm {
+
+namespace DOT { // Private functions...
+ std::string EscapeString(const std::string &Label);
+
+ /// \brief Get a color string for this node number. Simply round-robin selects
+ /// from a reasonable number of colors.
+ StringRef getColorString(unsigned NodeNumber);
+}
+
+namespace GraphProgram {
+ enum Name {
+ DOT,
+ FDP,
+ NEATO,
+ TWOPI,
+ CIRCO
+ };
+}
+
+bool DisplayGraph(StringRef Filename, bool wait = true,
+ GraphProgram::Name program = GraphProgram::DOT);
+
+template<typename GraphType>
+class GraphWriter {
+ raw_ostream &O;
+ const GraphType &G;
+
+ typedef DOTGraphTraits<GraphType> DOTTraits;
+ typedef GraphTraits<GraphType> GTraits;
+ typedef typename GTraits::NodeType NodeType;
+ typedef typename GTraits::nodes_iterator node_iterator;
+ typedef typename GTraits::ChildIteratorType child_iterator;
+ DOTTraits DTraits;
+
+ // Writes the edge labels of the node to O and returns true if there are any
+ // edge labels not equal to the empty string "".
+ bool getEdgeSourceLabels(raw_ostream &O, NodeType *Node) {
+ child_iterator EI = GTraits::child_begin(Node);
+ child_iterator EE = GTraits::child_end(Node);
+ bool hasEdgeSourceLabels = false;
+
+ for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i) {
+ std::string label = DTraits.getEdgeSourceLabel(Node, EI);
+
+ if (label.empty())
+ continue;
+
+ hasEdgeSourceLabels = true;
+
+ if (i)
+ O << "|";
+
+ O << "<s" << i << ">" << DOT::EscapeString(label);
+ }
+
+ if (EI != EE && hasEdgeSourceLabels)
+ O << "|<s64>truncated...";
+
+ return hasEdgeSourceLabels;
+ }
+
+public:
+ GraphWriter(raw_ostream &o, const GraphType &g, bool SN) : O(o), G(g) {
+ DTraits = DOTTraits(SN);
+ }
+
+ void writeGraph(const std::string &Title = "") {
+ // Output the header for the graph...
+ writeHeader(Title);
+
+ // Emit all of the nodes in the graph...
+ writeNodes();
+
+ // Output any customizations on the graph
+ DOTGraphTraits<GraphType>::addCustomGraphFeatures(G, *this);
+
+ // Output the end of the graph
+ writeFooter();
+ }
+
+ void writeHeader(const std::string &Title) {
+ std::string GraphName = DTraits.getGraphName(G);
+
+ if (!Title.empty())
+ O << "digraph \"" << DOT::EscapeString(Title) << "\" {\n";
+ else if (!GraphName.empty())
+ O << "digraph \"" << DOT::EscapeString(GraphName) << "\" {\n";
+ else
+ O << "digraph unnamed {\n";
+
+ if (DTraits.renderGraphFromBottomUp())
+ O << "\trankdir=\"BT\";\n";
+
+ if (!Title.empty())
+ O << "\tlabel=\"" << DOT::EscapeString(Title) << "\";\n";
+ else if (!GraphName.empty())
+ O << "\tlabel=\"" << DOT::EscapeString(GraphName) << "\";\n";
+ O << DTraits.getGraphProperties(G);
+ O << "\n";
+ }
+
+ void writeFooter() {
+ // Finish off the graph
+ O << "}\n";
+ }
+
+ void writeNodes() {
+ // Loop over the graph, printing it out...
+ for (node_iterator I = GTraits::nodes_begin(G), E = GTraits::nodes_end(G);
+ I != E; ++I)
+ if (!isNodeHidden(*I))
+ writeNode(*I);
+ }
+
+ bool isNodeHidden(NodeType &Node) {
+ return isNodeHidden(&Node);
+ }
+
+ bool isNodeHidden(NodeType *const *Node) {
+ return isNodeHidden(*Node);
+ }
+
+ bool isNodeHidden(NodeType *Node) {
+ return DTraits.isNodeHidden(Node);
+ }
+
+ void writeNode(NodeType& Node) {
+ writeNode(&Node);
+ }
+
+ void writeNode(NodeType *const *Node) {
+ writeNode(*Node);
+ }
+
+ void writeNode(NodeType *Node) {
+ std::string NodeAttributes = DTraits.getNodeAttributes(Node, G);
+
+ O << "\tNode" << static_cast<const void*>(Node) << " [shape=record,";
+ if (!NodeAttributes.empty()) O << NodeAttributes << ",";
+ O << "label=\"{";
+
+ if (!DTraits.renderGraphFromBottomUp()) {
+ O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));
+
+ // If we should include the address of the node in the label, do so now.
+ std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
+ if (!Id.empty())
+ O << "|" << DOT::EscapeString(Id);
+
+ std::string NodeDesc = DTraits.getNodeDescription(Node, G);
+ if (!NodeDesc.empty())
+ O << "|" << DOT::EscapeString(NodeDesc);
+ }
+
+ std::string edgeSourceLabels;
+ raw_string_ostream EdgeSourceLabels(edgeSourceLabels);
+ bool hasEdgeSourceLabels = getEdgeSourceLabels(EdgeSourceLabels, Node);
+
+ if (hasEdgeSourceLabels) {
+ if (!DTraits.renderGraphFromBottomUp()) O << "|";
+
+ O << "{" << EdgeSourceLabels.str() << "}";
+
+ if (DTraits.renderGraphFromBottomUp()) O << "|";
+ }
+
+ if (DTraits.renderGraphFromBottomUp()) {
+ O << DOT::EscapeString(DTraits.getNodeLabel(Node, G));
+
+ // If we should include the address of the node in the label, do so now.
+ std::string Id = DTraits.getNodeIdentifierLabel(Node, G);
+ if (!Id.empty())
+ O << "|" << DOT::EscapeString(Id);
+
+ std::string NodeDesc = DTraits.getNodeDescription(Node, G);
+ if (!NodeDesc.empty())
+ O << "|" << DOT::EscapeString(NodeDesc);
+ }
+
+ if (DTraits.hasEdgeDestLabels()) {
+ O << "|{";
+
+ unsigned i = 0, e = DTraits.numEdgeDestLabels(Node);
+ for (; i != e && i != 64; ++i) {
+ if (i) O << "|";
+ O << "<d" << i << ">"
+ << DOT::EscapeString(DTraits.getEdgeDestLabel(Node, i));
+ }
+
+ if (i != e)
+ O << "|<d64>truncated...";
+ O << "}";
+ }
+
+ O << "}\"];\n"; // Finish printing the "node" line
+
+ // Output all of the edges now
+ child_iterator EI = GTraits::child_begin(Node);
+ child_iterator EE = GTraits::child_end(Node);
+ for (unsigned i = 0; EI != EE && i != 64; ++EI, ++i)
+ if (!DTraits.isNodeHidden(*EI))
+ writeEdge(Node, i, EI);
+ for (; EI != EE; ++EI)
+ if (!DTraits.isNodeHidden(*EI))
+ writeEdge(Node, 64, EI);
+ }
+
+ void writeEdge(NodeType *Node, unsigned edgeidx, child_iterator EI) {
+ if (NodeType *TargetNode = *EI) {
+ int DestPort = -1;
+ if (DTraits.edgeTargetsEdgeSource(Node, EI)) {
+ child_iterator TargetIt = DTraits.getEdgeTarget(Node, EI);
+
+ // Figure out which edge this targets...
+ unsigned Offset =
+ (unsigned)std::distance(GTraits::child_begin(TargetNode), TargetIt);
+ DestPort = static_cast<int>(Offset);
+ }
+
+ if (DTraits.getEdgeSourceLabel(Node, EI).empty())
+ edgeidx = -1;
+
+ emitEdge(static_cast<const void*>(Node), edgeidx,
+ static_cast<const void*>(TargetNode), DestPort,
+ DTraits.getEdgeAttributes(Node, EI, G));
+ }
+ }
+
+ /// emitSimpleNode - Outputs a simple (non-record) node
+ void emitSimpleNode(const void *ID, const std::string &Attr,
+ const std::string &Label, unsigned NumEdgeSources = 0,
+ const std::vector<std::string> *EdgeSourceLabels = nullptr) {
+ O << "\tNode" << ID << "[ ";
+ if (!Attr.empty())
+ O << Attr << ",";
+ O << " label =\"";
+ if (NumEdgeSources) O << "{";
+ O << DOT::EscapeString(Label);
+ if (NumEdgeSources) {
+ O << "|{";
+
+ for (unsigned i = 0; i != NumEdgeSources; ++i) {
+ if (i) O << "|";
+ O << "<s" << i << ">";
+ if (EdgeSourceLabels) O << DOT::EscapeString((*EdgeSourceLabels)[i]);
+ }
+ O << "}}";
+ }
+ O << "\"];\n";
+ }
+
+ /// emitEdge - Output an edge from a simple node into the graph...
+ void emitEdge(const void *SrcNodeID, int SrcNodePort,
+ const void *DestNodeID, int DestNodePort,
+ const std::string &Attrs) {
+    if (SrcNodePort > 64) return; // Emanating from truncated part?
+ if (DestNodePort > 64) DestNodePort = 64; // Targeting the truncated part?
+
+ O << "\tNode" << SrcNodeID;
+ if (SrcNodePort >= 0)
+ O << ":s" << SrcNodePort;
+ O << " -> Node" << DestNodeID;
+ if (DestNodePort >= 0 && DTraits.hasEdgeDestLabels())
+ O << ":d" << DestNodePort;
+
+ if (!Attrs.empty())
+ O << "[" << Attrs << "]";
+ O << ";\n";
+ }
+
+ /// getOStream - Get the raw output stream into the graph file. Useful to
+ /// write fancy things using addCustomGraphFeatures().
+ raw_ostream &getOStream() {
+ return O;
+ }
+};
+
+template<typename GraphType>
+raw_ostream &WriteGraph(raw_ostream &O, const GraphType &G,
+ bool ShortNames = false,
+ const Twine &Title = "") {
+ // Start the graph emission process...
+ GraphWriter<GraphType> W(O, G, ShortNames);
+
+ // Emit the graph.
+ W.writeGraph(Title.str());
+
+ return O;
+}
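+
+// Illustrative sketch: emitting a graph to a named .dot file; "MyG" and its
+// GraphTraits/DOTGraphTraits specializations are assumptions.
+//
+//   std::error_code EC;
+//   raw_fd_ostream OS("out.dot", EC, sys::fs::F_Text);
+//   if (!EC)
+//     WriteGraph(OS, MyG, /*ShortNames=*/false, "My graph");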
+
+std::string createGraphFilename(const Twine &Name, int &FD);
+
+template <typename GraphType>
+std::string WriteGraph(const GraphType &G, const Twine &Name,
+ bool ShortNames = false, const Twine &Title = "") {
+ int FD;
+ // Windows can't always handle long paths, so limit the length of the name.
+ std::string N = Name.str();
+ N = N.substr(0, std::min<std::size_t>(N.size(), 140));
+ std::string Filename = createGraphFilename(N, FD);
+ raw_fd_ostream O(FD, /*shouldClose=*/ true);
+
+ if (FD == -1) {
+ errs() << "error opening file '" << Filename << "' for writing!\n";
+ return "";
+ }
+
+ llvm::WriteGraph(O, G, ShortNames, Title);
+ errs() << " done. \n";
+
+ return Filename;
+}
+
+/// ViewGraph - Emit a dot graph, run 'dot', run 'gv' on the PostScript file,
+/// then cleanup. For use from the debugger.
+///
+template<typename GraphType>
+void ViewGraph(const GraphType &G, const Twine &Name,
+ bool ShortNames = false, const Twine &Title = "",
+ GraphProgram::Name Program = GraphProgram::DOT) {
+ std::string Filename = llvm::WriteGraph(G, Name, ShortNames, Title);
+
+ if (Filename.empty())
+ return;
+
+ DisplayGraph(Filename, false, Program);
+}
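+
+// Illustrative sketch, not part of the upstream header: for any graph type
+// with GraphTraits and DOTGraphTraits specializations (a hypothetical
+// MyGraph here), emitting a dot file to a stream reduces to one call, and
+// ViewGraph additionally runs the configured viewer on the result:
+//
+//   void dumpMyGraph(const MyGraph &G) {
+//     llvm::WriteGraph(llvm::errs(), G, /*ShortNames=*/false, "MyGraph");
+//   }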
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Host.h b/contrib/llvm/include/llvm/Support/Host.h
new file mode 100644
index 0000000..8f4bf3c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Host.h
@@ -0,0 +1,74 @@
+//===- llvm/Support/Host.h - Host machine characteristics --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Methods for querying the nature of the host machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_HOST_H
+#define LLVM_SUPPORT_HOST_H
+
+#include "llvm/ADT/StringMap.h"
+
+#if defined(__linux__) || defined(__GNU__)
+#include <endian.h>
+#else
+#if !defined(BYTE_ORDER) && !defined(LLVM_ON_WIN32)
+#include <machine/endian.h>
+#endif
+#endif
+
+#include <string>
+
+namespace llvm {
+namespace sys {
+
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
+ static const bool IsBigEndianHost = true;
+#else
+ static const bool IsBigEndianHost = false;
+#endif
+
+ static const bool IsLittleEndianHost = !IsBigEndianHost;
+
+ /// getDefaultTargetTriple() - Return the default target triple the compiler
+ /// has been configured to produce code for.
+ ///
+ /// The target triple is a string in the format of:
+ /// CPU_TYPE-VENDOR-OPERATING_SYSTEM
+ /// or
+ /// CPU_TYPE-VENDOR-KERNEL-OPERATING_SYSTEM
+ std::string getDefaultTargetTriple();
+
+ /// getProcessTriple() - Return an appropriate target triple for generating
+ /// code to be loaded into the current process, e.g. when using the JIT.
+ std::string getProcessTriple();
+
+ /// getHostCPUName - Get the LLVM name for the host CPU. The particular format
+ /// of the name is target dependent, and suitable for passing as -mcpu to the
+ /// target which matches the host.
+ ///
+ /// \return - The host CPU name, or empty if the CPU could not be determined.
+ StringRef getHostCPUName();
+
+ /// getHostCPUFeatures - Get the LLVM names for the host CPU features.
+ /// The particular format of the names are target dependent, and suitable for
+ /// passing as -mattr to the target which matches the host.
+ ///
+  /// \param Features - A StringMap mapping feature names to either
+  /// true (if enabled) or false (if disabled). This routine makes no
+  /// guarantees about exactly which features may appear in this map, except
+  /// that they are all valid LLVM feature names.
+ ///
+ /// \return - True on success.
+ bool getHostCPUFeatures(StringMap<bool> &Features);
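+
+  // Illustrative sketch, not part of the upstream header: combining the
+  // queries above to describe the host, e.g. before configuring a target
+  // for the JIT. The "+"/"-" prefixes follow the -mattr convention.
+  //
+  //   llvm::StringMap<bool> Features;
+  //   std::string CPU = llvm::sys::getHostCPUName().str();
+  //   if (llvm::sys::getHostCPUFeatures(Features))
+  //     for (const auto &F : Features)
+  //       llvm::errs() << (F.second ? "+" : "-") << F.first() << " ";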
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/JamCRC.h b/contrib/llvm/include/llvm/Support/JamCRC.h
new file mode 100644
index 0000000..20c28a5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/JamCRC.h
@@ -0,0 +1,48 @@
+//===-- llvm/Support/JamCRC.h - Cyclic Redundancy Check ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of JamCRC.
+//
+// We will use the "Rocksoft^tm Model CRC Algorithm" to describe the properties
+// of this CRC:
+// Width : 32
+// Poly : 04C11DB7
+// Init : FFFFFFFF
+// RefIn : True
+// RefOut : True
+// XorOut : 00000000
+// Check : 340BC6D9 (result of CRC for "123456789")
+//
+// N.B. We permit flexibility in the "Init" value: some consumers of this
+// need it to be zero.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_JAMCRC_H
+#define LLVM_SUPPORT_JAMCRC_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+class JamCRC {
+public:
+ JamCRC(uint32_t Init = 0xFFFFFFFFU) : CRC(Init) {}
+
+  /// \brief Update the CRC calculation with Data.
+ void update(ArrayRef<char> Data);
+
+ uint32_t getCRC() const { return CRC; }
+
+private:
+ uint32_t CRC;
+};
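+
+// Illustrative usage sketch, not part of the upstream header. With the
+// default Init of 0xFFFFFFFF, updating with the bytes of "123456789"
+// produces the documented check value:
+//
+//   llvm::JamCRC CRC;
+//   llvm::StringRef Data = "123456789";
+//   CRC.update(llvm::ArrayRef<char>(Data.data(), Data.size()));
+//   uint32_t Sum = CRC.getCRC();  // Sum == 0x340BC6D9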
+} // End of namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/LEB128.h b/contrib/llvm/include/llvm/Support/LEB128.h
new file mode 100644
index 0000000..6a95432
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/LEB128.h
@@ -0,0 +1,121 @@
+//===- llvm/Support/LEB128.h - [SU]LEB128 utility functions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares some utility functions for encoding SLEB128 and
+// ULEB128 values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_LEB128_H
+#define LLVM_SUPPORT_LEB128_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// Utility function to encode a SLEB128 value to an output stream.
+inline void encodeSLEB128(int64_t Value, raw_ostream &OS) {
+ bool More;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ // NOTE: this assumes that this signed shift is an arithmetic right shift.
+ Value >>= 7;
+    More = !((((Value == 0) && ((Byte & 0x40) == 0)) ||
+              ((Value == -1) && ((Byte & 0x40) != 0))));
+ if (More)
+ Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (More);
+}
+
+/// Utility function to encode a ULEB128 value to an output stream.
+inline void encodeULEB128(uint64_t Value, raw_ostream &OS,
+ unsigned Padding = 0) {
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0 || Padding != 0)
+ Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ OS << char(Byte);
+ } while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ OS << '\x80';
+ OS << '\x00';
+ }
+}
+
+/// Utility function to encode a ULEB128 value to a buffer. Returns
+/// the length in bytes of the encoded value.
+inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
+ unsigned Padding = 0) {
+ uint8_t *orig_p = p;
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value != 0 || Padding != 0)
+ Byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ *p++ = Byte;
+ } while (Value != 0);
+
+ // Pad with 0x80 and emit a null byte at the end.
+ if (Padding != 0) {
+ for (; Padding != 1; --Padding)
+ *p++ = '\x80';
+ *p++ = '\x00';
+ }
+ return (unsigned)(p - orig_p);
+}
+
+
+/// Utility function to decode a ULEB128 value.
+inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr) {
+ const uint8_t *orig_p = p;
+ uint64_t Value = 0;
+ unsigned Shift = 0;
+ do {
+ Value += uint64_t(*p & 0x7f) << Shift;
+ Shift += 7;
+ } while (*p++ >= 128);
+ if (n)
+ *n = (unsigned)(p - orig_p);
+ return Value;
+}
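+
+// Worked example (editorial, not part of the upstream header): 624485
+// encodes as the three ULEB128 bytes 0xE5 0x8E 0x26, since its base-128
+// digits are 0x65, 0x0E and 0x26, emitted low-order first with the high bit
+// marking continuation; decodeULEB128 reverses the process:
+//
+//   uint8_t Buf[10];
+//   unsigned Len = encodeULEB128(624485, Buf);   // Len == 3
+//   unsigned N;
+//   uint64_t V = decodeULEB128(Buf, &N);         // V == 624485, N == 3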
+
+/// Utility function to decode a SLEB128 value.
+inline int64_t decodeSLEB128(const uint8_t *p, unsigned *n = nullptr) {
+ const uint8_t *orig_p = p;
+ int64_t Value = 0;
+ unsigned Shift = 0;
+ uint8_t Byte;
+ do {
+ Byte = *p++;
+    // Widen before shifting so that Shift >= 32 cannot overflow 'int'.
+    Value |= (uint64_t(Byte & 0x7f) << Shift);
+ Shift += 7;
+ } while (Byte >= 128);
+ // Sign extend negative numbers.
+ if (Byte & 0x40)
+ Value |= (-1ULL) << Shift;
+ if (n)
+ *n = (unsigned)(p - orig_p);
+ return Value;
+}
+
+
+/// Utility function to get the size of the ULEB128-encoded value.
+extern unsigned getULEB128Size(uint64_t Value);
+
+/// Utility function to get the size of the SLEB128-encoded value.
+extern unsigned getSLEB128Size(int64_t Value);
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_LEB128_H
diff --git a/contrib/llvm/include/llvm/Support/LineIterator.h b/contrib/llvm/include/llvm/Support/LineIterator.h
new file mode 100644
index 0000000..9d4cd3b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/LineIterator.h
@@ -0,0 +1,88 @@
+//===- LineIterator.h - Iterator to read a text buffer's lines --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_LINEITERATOR_H
+#define LLVM_SUPPORT_LINEITERATOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+
+class MemoryBuffer;
+
+/// \brief A forward iterator which reads text lines from a buffer.
+///
+/// This class provides a forward iterator interface for reading one line at
+/// a time from a buffer. When default constructed the iterator will be the
+/// "end" iterator.
+///
+/// The iterator is aware of what line number it is currently processing. By
+/// default it skips blank lines, and it also skips comment lines when given
+/// a comment-starting character.
+///
+/// Note that this iterator requires the buffer to be nul terminated.
+class line_iterator
+ : public std::iterator<std::forward_iterator_tag, StringRef> {
+ const MemoryBuffer *Buffer;
+ char CommentMarker;
+ bool SkipBlanks;
+
+ unsigned LineNumber;
+ StringRef CurrentLine;
+
+public:
+ /// \brief Default construct an "end" iterator.
+ line_iterator() : Buffer(nullptr) {}
+
+ /// \brief Construct a new iterator around some memory buffer.
+ explicit line_iterator(const MemoryBuffer &Buffer, bool SkipBlanks = true,
+ char CommentMarker = '\0');
+
+ /// \brief Return true if we've reached EOF or are an "end" iterator.
+ bool is_at_eof() const { return !Buffer; }
+
+ /// \brief Return true if we're an "end" iterator or have reached EOF.
+ bool is_at_end() const { return is_at_eof(); }
+
+ /// \brief Return the current line number. May return any number at EOF.
+ int64_t line_number() const { return LineNumber; }
+
+ /// \brief Advance to the next (non-empty, non-comment) line.
+ line_iterator &operator++() {
+ advance();
+ return *this;
+ }
+ line_iterator operator++(int) {
+ line_iterator tmp(*this);
+ advance();
+ return tmp;
+ }
+
+ /// \brief Get the current line as a \c StringRef.
+ StringRef operator*() const { return CurrentLine; }
+ const StringRef *operator->() const { return &CurrentLine; }
+
+ friend bool operator==(const line_iterator &LHS, const line_iterator &RHS) {
+ return LHS.Buffer == RHS.Buffer &&
+ LHS.CurrentLine.begin() == RHS.CurrentLine.begin();
+ }
+
+ friend bool operator!=(const line_iterator &LHS, const line_iterator &RHS) {
+ return !(LHS == RHS);
+ }
+
+private:
+ /// \brief Advance the iterator to the next line.
+ void advance();
+};
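+
+// Illustrative sketch, not part of the upstream header: iterating the
+// non-blank lines of a nul-terminated MemoryBuffer (Buffer below), skipping
+// '#' comment lines:
+//
+//   for (llvm::line_iterator I(Buffer, /*SkipBlanks=*/true, '#'), E;
+//        I != E; ++I)
+//     llvm::errs() << I.line_number() << ": " << *I << "\n";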
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Locale.h b/contrib/llvm/include/llvm/Support/Locale.h
new file mode 100644
index 0000000..b384d58
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Locale.h
@@ -0,0 +1,17 @@
+#ifndef LLVM_SUPPORT_LOCALE_H
+#define LLVM_SUPPORT_LOCALE_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+namespace sys {
+namespace locale {
+
+int columnWidth(StringRef s);
+bool isPrint(int c);
+
+}
+}
+}
+
+#endif // LLVM_SUPPORT_LOCALE_H
diff --git a/contrib/llvm/include/llvm/Support/LockFileManager.h b/contrib/llvm/include/llvm/Support/LockFileManager.h
new file mode 100644
index 0000000..8e88d42
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/LockFileManager.h
@@ -0,0 +1,88 @@
+//===--- LockFileManager.h - File-level locking utility ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_LOCKFILEMANAGER_H
+#define LLVM_SUPPORT_LOCKFILEMANAGER_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
+#include <system_error>
+#include <utility> // for std::pair
+
+namespace llvm {
+/// \brief Class that manages the creation of a lock file to aid
+/// implicit coordination between different processes.
+///
+/// The implicit coordination works by creating a ".lock" file alongside
+/// the file that we're coordinating for, using the atomicity of the file
+/// system to ensure that only a single process can create that ".lock" file.
+/// When the lock file is removed, the owning process has finished the
+/// operation.
+class LockFileManager {
+public:
+ /// \brief Describes the state of a lock file.
+ enum LockFileState {
+ /// \brief The lock file has been created and is owned by this instance
+ /// of the object.
+ LFS_Owned,
+ /// \brief The lock file already exists and is owned by some other
+ /// instance.
+ LFS_Shared,
+ /// \brief An error occurred while trying to create or find the lock
+ /// file.
+ LFS_Error
+ };
+
+ /// \brief Describes the result of waiting for the owner to release the lock.
+ enum WaitForUnlockResult {
+ /// \brief The lock was released successfully.
+ Res_Success,
+ /// \brief Owner died while holding the lock.
+ Res_OwnerDied,
+ /// \brief Reached timeout while waiting for the owner to release the lock.
+ Res_Timeout
+ };
+
+private:
+ SmallString<128> FileName;
+ SmallString<128> LockFileName;
+ SmallString<128> UniqueLockFileName;
+
+ Optional<std::pair<std::string, int> > Owner;
+ Optional<std::error_code> Error;
+
+ LockFileManager(const LockFileManager &) = delete;
+ LockFileManager &operator=(const LockFileManager &) = delete;
+
+ static Optional<std::pair<std::string, int> >
+ readLockFile(StringRef LockFileName);
+
+ static bool processStillExecuting(StringRef Hostname, int PID);
+
+public:
+
+ LockFileManager(StringRef FileName);
+ ~LockFileManager();
+
+ /// \brief Determine the state of the lock file.
+ LockFileState getState() const;
+
+ operator LockFileState() const { return getState(); }
+
+ /// \brief For a shared lock, wait until the owner releases the lock.
+ WaitForUnlockResult waitForUnlock();
+
+ /// \brief Remove the lock file. This may delete a different lock file than
+ /// the one previously read if there is a race.
+ std::error_code unsafeRemoveLockFile();
+};
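+
+// Typical usage sketch (editorial, not part of the upstream header), with
+// OutputPath naming the file whose production is being coordinated:
+//
+//   llvm::LockFileManager Lock(OutputPath);
+//   switch (Lock) {
+//   case llvm::LockFileManager::LFS_Owned:
+//     // Produce the file; the lock is released by ~LockFileManager.
+//     break;
+//   case llvm::LockFileManager::LFS_Shared:
+//     Lock.waitForUnlock(); // Another process is producing the file.
+//     break;
+//   case llvm::LockFileManager::LFS_Error:
+//     // Report the failure to acquire or read the lock.
+//     break;
+//   }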
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_LOCKFILEMANAGER_H
diff --git a/contrib/llvm/include/llvm/Support/MD5.h b/contrib/llvm/include/llvm/Support/MD5.h
new file mode 100644
index 0000000..f6e1e92
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MD5.h
@@ -0,0 +1,70 @@
+/*
+ * This code is derived from (original license follows):
+ *
+ * This is an OpenSSL-compatible implementation of the RSA Data Security, Inc.
+ * MD5 Message-Digest Algorithm (RFC 1321).
+ *
+ * Homepage:
+ * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
+ *
+ * Author:
+ * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
+ *
+ * This software was written by Alexander Peslyak in 2001. No copyright is
+ * claimed, and the software is hereby placed in the public domain.
+ * In case this attempt to disclaim copyright and place the software in the
+ * public domain is deemed null and void, then the software is
+ * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
+ * general public under the following terms:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted.
+ *
+ * There's ABSOLUTELY NO WARRANTY, express or implied.
+ *
+ * See md5.c for more information.
+ */
+
+#ifndef LLVM_SUPPORT_MD5_H
+#define LLVM_SUPPORT_MD5_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MD5 {
+ // Any 32-bit or wider unsigned integer data type will do.
+ typedef uint32_t MD5_u32plus;
+
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus hi, lo;
+ uint8_t buffer[64];
+ MD5_u32plus block[16];
+
+public:
+ typedef uint8_t MD5Result[16];
+
+ MD5();
+
+ /// \brief Updates the hash for the byte stream provided.
+ void update(ArrayRef<uint8_t> Data);
+
+ /// \brief Updates the hash for the StringRef provided.
+ void update(StringRef Str);
+
+  /// \brief Finishes off the hash and puts the result in \p Result.
+ void final(MD5Result &Result);
+
+ /// \brief Translates the bytes in \p Res to a hex string that is
+ /// deposited into \p Str. The result will be of length 32.
+ static void stringifyResult(MD5Result &Result, SmallString<32> &Str);
+
+private:
+ const uint8_t *body(ArrayRef<uint8_t> Data);
+};
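+
+// Illustrative sketch, not part of the upstream header: hashing a string
+// and rendering the 128-bit digest as 32 hex characters:
+//
+//   llvm::MD5 Hash;
+//   Hash.update("hello");
+//   llvm::MD5::MD5Result Result;
+//   Hash.final(Result);
+//   llvm::SmallString<32> Hex;
+//   llvm::MD5::stringifyResult(Result, Hex);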
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/MachO.h b/contrib/llvm/include/llvm/Support/MachO.h
new file mode 100644
index 0000000..54b8745
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MachO.h
@@ -0,0 +1,1675 @@
+//===-- llvm/Support/MachO.h - The MachO file format ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines manifest constants for the MachO object file format.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MACHO_H
+#define LLVM_SUPPORT_MACHO_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Host.h"
+
+namespace llvm {
+ namespace MachO {
+ // Enums from <mach-o/loader.h>
+ enum : uint32_t {
+ // Constants for the "magic" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ MH_MAGIC = 0xFEEDFACEu,
+ MH_CIGAM = 0xCEFAEDFEu,
+ MH_MAGIC_64 = 0xFEEDFACFu,
+ MH_CIGAM_64 = 0xCFFAEDFEu,
+ FAT_MAGIC = 0xCAFEBABEu,
+ FAT_CIGAM = 0xBEBAFECAu
+ };
+
+ enum HeaderFileType {
+ // Constants for the "filetype" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ MH_OBJECT = 0x1u,
+ MH_EXECUTE = 0x2u,
+ MH_FVMLIB = 0x3u,
+ MH_CORE = 0x4u,
+ MH_PRELOAD = 0x5u,
+ MH_DYLIB = 0x6u,
+ MH_DYLINKER = 0x7u,
+ MH_BUNDLE = 0x8u,
+ MH_DYLIB_STUB = 0x9u,
+ MH_DSYM = 0xAu,
+ MH_KEXT_BUNDLE = 0xBu
+ };
+
+ enum {
+ // Constant bits for the "flags" field in llvm::MachO::mach_header and
+ // llvm::MachO::mach_header_64
+ MH_NOUNDEFS = 0x00000001u,
+ MH_INCRLINK = 0x00000002u,
+ MH_DYLDLINK = 0x00000004u,
+ MH_BINDATLOAD = 0x00000008u,
+ MH_PREBOUND = 0x00000010u,
+ MH_SPLIT_SEGS = 0x00000020u,
+ MH_LAZY_INIT = 0x00000040u,
+ MH_TWOLEVEL = 0x00000080u,
+ MH_FORCE_FLAT = 0x00000100u,
+ MH_NOMULTIDEFS = 0x00000200u,
+ MH_NOFIXPREBINDING = 0x00000400u,
+ MH_PREBINDABLE = 0x00000800u,
+ MH_ALLMODSBOUND = 0x00001000u,
+ MH_SUBSECTIONS_VIA_SYMBOLS = 0x00002000u,
+ MH_CANONICAL = 0x00004000u,
+ MH_WEAK_DEFINES = 0x00008000u,
+ MH_BINDS_TO_WEAK = 0x00010000u,
+ MH_ALLOW_STACK_EXECUTION = 0x00020000u,
+ MH_ROOT_SAFE = 0x00040000u,
+ MH_SETUID_SAFE = 0x00080000u,
+ MH_NO_REEXPORTED_DYLIBS = 0x00100000u,
+ MH_PIE = 0x00200000u,
+ MH_DEAD_STRIPPABLE_DYLIB = 0x00400000u,
+ MH_HAS_TLV_DESCRIPTORS = 0x00800000u,
+ MH_NO_HEAP_EXECUTION = 0x01000000u,
+ MH_APP_EXTENSION_SAFE = 0x02000000u
+ };
+
+ enum : uint32_t {
+ // Flags for the "cmd" field in llvm::MachO::load_command
+ LC_REQ_DYLD = 0x80000000u
+ };
+
+ enum LoadCommandType : uint32_t {
+ // Constants for the "cmd" field in llvm::MachO::load_command
+ LC_SEGMENT = 0x00000001u,
+ LC_SYMTAB = 0x00000002u,
+ LC_SYMSEG = 0x00000003u,
+ LC_THREAD = 0x00000004u,
+ LC_UNIXTHREAD = 0x00000005u,
+ LC_LOADFVMLIB = 0x00000006u,
+ LC_IDFVMLIB = 0x00000007u,
+ LC_IDENT = 0x00000008u,
+ LC_FVMFILE = 0x00000009u,
+ LC_PREPAGE = 0x0000000Au,
+ LC_DYSYMTAB = 0x0000000Bu,
+ LC_LOAD_DYLIB = 0x0000000Cu,
+ LC_ID_DYLIB = 0x0000000Du,
+ LC_LOAD_DYLINKER = 0x0000000Eu,
+ LC_ID_DYLINKER = 0x0000000Fu,
+ LC_PREBOUND_DYLIB = 0x00000010u,
+ LC_ROUTINES = 0x00000011u,
+ LC_SUB_FRAMEWORK = 0x00000012u,
+ LC_SUB_UMBRELLA = 0x00000013u,
+ LC_SUB_CLIENT = 0x00000014u,
+ LC_SUB_LIBRARY = 0x00000015u,
+ LC_TWOLEVEL_HINTS = 0x00000016u,
+ LC_PREBIND_CKSUM = 0x00000017u,
+ LC_LOAD_WEAK_DYLIB = 0x80000018u,
+ LC_SEGMENT_64 = 0x00000019u,
+ LC_ROUTINES_64 = 0x0000001Au,
+ LC_UUID = 0x0000001Bu,
+ LC_RPATH = 0x8000001Cu,
+ LC_CODE_SIGNATURE = 0x0000001Du,
+ LC_SEGMENT_SPLIT_INFO = 0x0000001Eu,
+ LC_REEXPORT_DYLIB = 0x8000001Fu,
+ LC_LAZY_LOAD_DYLIB = 0x00000020u,
+ LC_ENCRYPTION_INFO = 0x00000021u,
+ LC_DYLD_INFO = 0x00000022u,
+ LC_DYLD_INFO_ONLY = 0x80000022u,
+ LC_LOAD_UPWARD_DYLIB = 0x80000023u,
+ LC_VERSION_MIN_MACOSX = 0x00000024u,
+ LC_VERSION_MIN_IPHONEOS = 0x00000025u,
+ LC_FUNCTION_STARTS = 0x00000026u,
+ LC_DYLD_ENVIRONMENT = 0x00000027u,
+ LC_MAIN = 0x80000028u,
+ LC_DATA_IN_CODE = 0x00000029u,
+ LC_SOURCE_VERSION = 0x0000002Au,
+ LC_DYLIB_CODE_SIGN_DRS = 0x0000002Bu,
+ LC_ENCRYPTION_INFO_64 = 0x0000002Cu,
+ LC_LINKER_OPTION = 0x0000002Du,
+ LC_LINKER_OPTIMIZATION_HINT = 0x0000002Eu,
+ LC_VERSION_MIN_TVOS = 0x0000002Fu,
+ LC_VERSION_MIN_WATCHOS = 0x00000030u,
+ };
+
+ enum : uint32_t {
+ // Constant bits for the "flags" field in llvm::MachO::segment_command
+ SG_HIGHVM = 0x1u,
+ SG_FVMLIB = 0x2u,
+ SG_NORELOC = 0x4u,
+ SG_PROTECTED_VERSION_1 = 0x8u,
+
+ // Constant masks for the "flags" field in llvm::MachO::section and
+ // llvm::MachO::section_64
+ SECTION_TYPE = 0x000000ffu, // SECTION_TYPE
+ SECTION_ATTRIBUTES = 0xffffff00u, // SECTION_ATTRIBUTES
+ SECTION_ATTRIBUTES_USR = 0xff000000u, // SECTION_ATTRIBUTES_USR
+ SECTION_ATTRIBUTES_SYS = 0x00ffff00u // SECTION_ATTRIBUTES_SYS
+ };
+
+ /// These are the section type and attributes fields. A MachO section can
+ /// have only one Type, but can have any of the attributes specified.
+ enum SectionType : uint32_t {
+ // Constant masks for the "flags[7:0]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_TYPE)
+
+ /// S_REGULAR - Regular section.
+ S_REGULAR = 0x00u,
+ /// S_ZEROFILL - Zero fill on demand section.
+ S_ZEROFILL = 0x01u,
+ /// S_CSTRING_LITERALS - Section with literal C strings.
+ S_CSTRING_LITERALS = 0x02u,
+ /// S_4BYTE_LITERALS - Section with 4 byte literals.
+ S_4BYTE_LITERALS = 0x03u,
+ /// S_8BYTE_LITERALS - Section with 8 byte literals.
+ S_8BYTE_LITERALS = 0x04u,
+ /// S_LITERAL_POINTERS - Section with pointers to literals.
+ S_LITERAL_POINTERS = 0x05u,
+ /// S_NON_LAZY_SYMBOL_POINTERS - Section with non-lazy symbol pointers.
+ S_NON_LAZY_SYMBOL_POINTERS = 0x06u,
+ /// S_LAZY_SYMBOL_POINTERS - Section with lazy symbol pointers.
+ S_LAZY_SYMBOL_POINTERS = 0x07u,
+ /// S_SYMBOL_STUBS - Section with symbol stubs, byte size of stub in
+ /// the Reserved2 field.
+ S_SYMBOL_STUBS = 0x08u,
+ /// S_MOD_INIT_FUNC_POINTERS - Section with only function pointers for
+ /// initialization.
+ S_MOD_INIT_FUNC_POINTERS = 0x09u,
+ /// S_MOD_TERM_FUNC_POINTERS - Section with only function pointers for
+ /// termination.
+ S_MOD_TERM_FUNC_POINTERS = 0x0au,
+ /// S_COALESCED - Section contains symbols that are to be coalesced.
+ S_COALESCED = 0x0bu,
+ /// S_GB_ZEROFILL - Zero fill on demand section (that can be larger than 4
+ /// gigabytes).
+ S_GB_ZEROFILL = 0x0cu,
+ /// S_INTERPOSING - Section with only pairs of function pointers for
+ /// interposing.
+ S_INTERPOSING = 0x0du,
+ /// S_16BYTE_LITERALS - Section with only 16 byte literals.
+ S_16BYTE_LITERALS = 0x0eu,
+ /// S_DTRACE_DOF - Section contains DTrace Object Format.
+ S_DTRACE_DOF = 0x0fu,
+ /// S_LAZY_DYLIB_SYMBOL_POINTERS - Section with lazy symbol pointers to
+ /// lazy loaded dylibs.
+ S_LAZY_DYLIB_SYMBOL_POINTERS = 0x10u,
+ /// S_THREAD_LOCAL_REGULAR - Thread local data section.
+ S_THREAD_LOCAL_REGULAR = 0x11u,
+ /// S_THREAD_LOCAL_ZEROFILL - Thread local zerofill section.
+ S_THREAD_LOCAL_ZEROFILL = 0x12u,
+ /// S_THREAD_LOCAL_VARIABLES - Section with thread local variable
+ /// structure data.
+ S_THREAD_LOCAL_VARIABLES = 0x13u,
+ /// S_THREAD_LOCAL_VARIABLE_POINTERS - Section with pointers to thread
+ /// local structures.
+ S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14u,
+ /// S_THREAD_LOCAL_INIT_FUNCTION_POINTERS - Section with thread local
+ /// variable initialization pointers to functions.
+ S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15u,
+
+ LAST_KNOWN_SECTION_TYPE = S_THREAD_LOCAL_INIT_FUNCTION_POINTERS
+ };
+
+ enum : uint32_t {
+ // Constant masks for the "flags[31:24]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_USR)
+
+ /// S_ATTR_PURE_INSTRUCTIONS - Section contains only true machine
+ /// instructions.
+ S_ATTR_PURE_INSTRUCTIONS = 0x80000000u,
+ /// S_ATTR_NO_TOC - Section contains coalesced symbols that are not to be
+ /// in a ranlib table of contents.
+ S_ATTR_NO_TOC = 0x40000000u,
+ /// S_ATTR_STRIP_STATIC_SYMS - Ok to strip static symbols in this section
+    /// in files with the MH_DYLDLINK flag.
+ S_ATTR_STRIP_STATIC_SYMS = 0x20000000u,
+ /// S_ATTR_NO_DEAD_STRIP - No dead stripping.
+ S_ATTR_NO_DEAD_STRIP = 0x10000000u,
+ /// S_ATTR_LIVE_SUPPORT - Blocks are live if they reference live blocks.
+ S_ATTR_LIVE_SUPPORT = 0x08000000u,
+ /// S_ATTR_SELF_MODIFYING_CODE - Used with i386 code stubs written on by
+ /// dyld.
+ S_ATTR_SELF_MODIFYING_CODE = 0x04000000u,
+ /// S_ATTR_DEBUG - A debug section.
+ S_ATTR_DEBUG = 0x02000000u,
+
+ // Constant masks for the "flags[23:8]" field in llvm::MachO::section and
+ // llvm::MachO::section_64 (mask "flags" with SECTION_ATTRIBUTES_SYS)
+
+ /// S_ATTR_SOME_INSTRUCTIONS - Section contains some machine instructions.
+ S_ATTR_SOME_INSTRUCTIONS = 0x00000400u,
+ /// S_ATTR_EXT_RELOC - Section has external relocation entries.
+ S_ATTR_EXT_RELOC = 0x00000200u,
+ /// S_ATTR_LOC_RELOC - Section has local relocation entries.
+ S_ATTR_LOC_RELOC = 0x00000100u,
+
+ // Constant masks for the value of an indirect symbol in an indirect
+ // symbol table
+ INDIRECT_SYMBOL_LOCAL = 0x80000000u,
+ INDIRECT_SYMBOL_ABS = 0x40000000u
+ };
+
+ enum DataRegionType {
+ // Constants for the "kind" field in a data_in_code_entry structure
+ DICE_KIND_DATA = 1u,
+ DICE_KIND_JUMP_TABLE8 = 2u,
+ DICE_KIND_JUMP_TABLE16 = 3u,
+ DICE_KIND_JUMP_TABLE32 = 4u,
+ DICE_KIND_ABS_JUMP_TABLE32 = 5u
+ };
+
+ enum RebaseType {
+ REBASE_TYPE_POINTER = 1u,
+ REBASE_TYPE_TEXT_ABSOLUTE32 = 2u,
+ REBASE_TYPE_TEXT_PCREL32 = 3u
+ };
+
+ enum {
+ REBASE_OPCODE_MASK = 0xF0u,
+ REBASE_IMMEDIATE_MASK = 0x0Fu
+ };
+
+ enum RebaseOpcode {
+ REBASE_OPCODE_DONE = 0x00u,
+ REBASE_OPCODE_SET_TYPE_IMM = 0x10u,
+ REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x20u,
+ REBASE_OPCODE_ADD_ADDR_ULEB = 0x30u,
+ REBASE_OPCODE_ADD_ADDR_IMM_SCALED = 0x40u,
+ REBASE_OPCODE_DO_REBASE_IMM_TIMES = 0x50u,
+ REBASE_OPCODE_DO_REBASE_ULEB_TIMES = 0x60u,
+ REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB = 0x70u,
+ REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB = 0x80u
+ };
+
+ enum BindType {
+ BIND_TYPE_POINTER = 1u,
+ BIND_TYPE_TEXT_ABSOLUTE32 = 2u,
+ BIND_TYPE_TEXT_PCREL32 = 3u
+ };
+
+ enum BindSpecialDylib {
+ BIND_SPECIAL_DYLIB_SELF = 0,
+ BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE = -1,
+ BIND_SPECIAL_DYLIB_FLAT_LOOKUP = -2
+ };
+
+ enum {
+ BIND_SYMBOL_FLAGS_WEAK_IMPORT = 0x1u,
+ BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION = 0x8u,
+
+ BIND_OPCODE_MASK = 0xF0u,
+ BIND_IMMEDIATE_MASK = 0x0Fu
+ };
+
+ enum BindOpcode {
+ BIND_OPCODE_DONE = 0x00u,
+ BIND_OPCODE_SET_DYLIB_ORDINAL_IMM = 0x10u,
+ BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB = 0x20u,
+ BIND_OPCODE_SET_DYLIB_SPECIAL_IMM = 0x30u,
+ BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM = 0x40u,
+ BIND_OPCODE_SET_TYPE_IMM = 0x50u,
+ BIND_OPCODE_SET_ADDEND_SLEB = 0x60u,
+ BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB = 0x70u,
+ BIND_OPCODE_ADD_ADDR_ULEB = 0x80u,
+ BIND_OPCODE_DO_BIND = 0x90u,
+ BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB = 0xA0u,
+ BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED = 0xB0u,
+ BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB = 0xC0u
+ };
+
+ enum {
+ EXPORT_SYMBOL_FLAGS_KIND_MASK = 0x03u,
+ EXPORT_SYMBOL_FLAGS_WEAK_DEFINITION = 0x04u,
+ EXPORT_SYMBOL_FLAGS_REEXPORT = 0x08u,
+ EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER = 0x10u
+ };
+
+ enum ExportSymbolKind {
+ EXPORT_SYMBOL_FLAGS_KIND_REGULAR = 0x00u,
+ EXPORT_SYMBOL_FLAGS_KIND_THREAD_LOCAL = 0x01u,
+ EXPORT_SYMBOL_FLAGS_KIND_ABSOLUTE = 0x02u
+ };
+
+ enum {
+ // Constant masks for the "n_type" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ N_STAB = 0xe0,
+ N_PEXT = 0x10,
+ N_TYPE = 0x0e,
+ N_EXT = 0x01
+ };
+
+ enum NListType {
+    // Constants for the "n_type & N_TYPE" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ N_UNDF = 0x0u,
+ N_ABS = 0x2u,
+ N_SECT = 0xeu,
+ N_PBUD = 0xcu,
+ N_INDR = 0xau
+ };
+
+ enum SectionOrdinal {
+ // Constants for the "n_sect" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+ NO_SECT = 0u,
+ MAX_SECT = 0xffu
+ };
+
+ enum {
+ // Constant masks for the "n_desc" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64
+    // The low 3 bits are for the REFERENCE_TYPE.
+ REFERENCE_TYPE = 0x7,
+ REFERENCE_FLAG_UNDEFINED_NON_LAZY = 0,
+ REFERENCE_FLAG_UNDEFINED_LAZY = 1,
+ REFERENCE_FLAG_DEFINED = 2,
+ REFERENCE_FLAG_PRIVATE_DEFINED = 3,
+ REFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZY = 4,
+ REFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY = 5,
+ // Flag bits (some overlap with the library ordinal bits).
+ N_ARM_THUMB_DEF = 0x0008u,
+ REFERENCED_DYNAMICALLY = 0x0010u,
+ N_NO_DEAD_STRIP = 0x0020u,
+ N_WEAK_REF = 0x0040u,
+ N_WEAK_DEF = 0x0080u,
+ N_SYMBOL_RESOLVER = 0x0100u,
+ N_ALT_ENTRY = 0x0200u,
+ // For undefined symbols coming from libraries, see GET_LIBRARY_ORDINAL()
+ // as these are in the top 8 bits.
+ SELF_LIBRARY_ORDINAL = 0x0,
+ MAX_LIBRARY_ORDINAL = 0xfd,
+ DYNAMIC_LOOKUP_ORDINAL = 0xfe,
+ EXECUTABLE_ORDINAL = 0xff
+ };
+
+ enum StabType {
+ // Constant values for the "n_type" field in llvm::MachO::nlist and
+ // llvm::MachO::nlist_64 when "(n_type & N_STAB) != 0"
+ N_GSYM = 0x20u,
+ N_FNAME = 0x22u,
+ N_FUN = 0x24u,
+ N_STSYM = 0x26u,
+ N_LCSYM = 0x28u,
+ N_BNSYM = 0x2Eu,
+ N_PC = 0x30u,
+ N_AST = 0x32u,
+ N_OPT = 0x3Cu,
+ N_RSYM = 0x40u,
+ N_SLINE = 0x44u,
+ N_ENSYM = 0x4Eu,
+ N_SSYM = 0x60u,
+ N_SO = 0x64u,
+ N_OSO = 0x66u,
+ N_LSYM = 0x80u,
+ N_BINCL = 0x82u,
+ N_SOL = 0x84u,
+ N_PARAMS = 0x86u,
+ N_VERSION = 0x88u,
+ N_OLEVEL = 0x8Au,
+ N_PSYM = 0xA0u,
+ N_EINCL = 0xA2u,
+ N_ENTRY = 0xA4u,
+ N_LBRAC = 0xC0u,
+ N_EXCL = 0xC2u,
+ N_RBRAC = 0xE0u,
+ N_BCOMM = 0xE2u,
+ N_ECOMM = 0xE4u,
+ N_ECOML = 0xE8u,
+ N_LENG = 0xFEu
+ };
+
+ enum : uint32_t {
+ // Constant values for the r_symbolnum field in an
+ // llvm::MachO::relocation_info structure when r_extern is 0.
+ R_ABS = 0,
+
+ // Constant bits for the r_address field in an
+ // llvm::MachO::relocation_info structure.
+ R_SCATTERED = 0x80000000
+ };
+
+ enum RelocationInfoType {
+ // Constant values for the r_type field in an
+ // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+ // structure.
+ GENERIC_RELOC_VANILLA = 0,
+ GENERIC_RELOC_PAIR = 1,
+ GENERIC_RELOC_SECTDIFF = 2,
+ GENERIC_RELOC_PB_LA_PTR = 3,
+ GENERIC_RELOC_LOCAL_SECTDIFF = 4,
+ GENERIC_RELOC_TLV = 5,
+
+ // Constant values for the r_type field in a PowerPC architecture
+ // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+ // structure.
+ PPC_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
+ PPC_RELOC_PAIR = GENERIC_RELOC_PAIR,
+ PPC_RELOC_BR14 = 2,
+ PPC_RELOC_BR24 = 3,
+ PPC_RELOC_HI16 = 4,
+ PPC_RELOC_LO16 = 5,
+ PPC_RELOC_HA16 = 6,
+ PPC_RELOC_LO14 = 7,
+ PPC_RELOC_SECTDIFF = 8,
+ PPC_RELOC_PB_LA_PTR = 9,
+ PPC_RELOC_HI16_SECTDIFF = 10,
+ PPC_RELOC_LO16_SECTDIFF = 11,
+ PPC_RELOC_HA16_SECTDIFF = 12,
+ PPC_RELOC_JBSR = 13,
+ PPC_RELOC_LO14_SECTDIFF = 14,
+ PPC_RELOC_LOCAL_SECTDIFF = 15,
+
+ // Constant values for the r_type field in an ARM architecture
+ // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+ // structure.
+ ARM_RELOC_VANILLA = GENERIC_RELOC_VANILLA,
+ ARM_RELOC_PAIR = GENERIC_RELOC_PAIR,
+ ARM_RELOC_SECTDIFF = GENERIC_RELOC_SECTDIFF,
+ ARM_RELOC_LOCAL_SECTDIFF = 3,
+ ARM_RELOC_PB_LA_PTR = 4,
+ ARM_RELOC_BR24 = 5,
+ ARM_THUMB_RELOC_BR22 = 6,
+ ARM_THUMB_32BIT_BRANCH = 7, // obsolete
+ ARM_RELOC_HALF = 8,
+ ARM_RELOC_HALF_SECTDIFF = 9,
+
+ // Constant values for the r_type field in an ARM64 architecture
+ // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+ // structure.
+
+ // For pointers.
+ ARM64_RELOC_UNSIGNED = 0,
+ // Must be followed by an ARM64_RELOC_UNSIGNED
+ ARM64_RELOC_SUBTRACTOR = 1,
+ // A B/BL instruction with 26-bit displacement.
+ ARM64_RELOC_BRANCH26 = 2,
+ // PC-rel distance to page of target.
+ ARM64_RELOC_PAGE21 = 3,
+ // Offset within page, scaled by r_length.
+ ARM64_RELOC_PAGEOFF12 = 4,
+ // PC-rel distance to page of GOT slot.
+ ARM64_RELOC_GOT_LOAD_PAGE21 = 5,
+ // Offset within page of GOT slot, scaled by r_length.
+ ARM64_RELOC_GOT_LOAD_PAGEOFF12 = 6,
+ // For pointers to GOT slots.
+ ARM64_RELOC_POINTER_TO_GOT = 7,
+ // PC-rel distance to page of TLVP slot.
+ ARM64_RELOC_TLVP_LOAD_PAGE21 = 8,
+ // Offset within page of TLVP slot, scaled by r_length.
+ ARM64_RELOC_TLVP_LOAD_PAGEOFF12 = 9,
+ // Must be followed by ARM64_RELOC_PAGE21 or ARM64_RELOC_PAGEOFF12.
+ ARM64_RELOC_ADDEND = 10,
+
+ // Constant values for the r_type field in an x86_64 architecture
+ // llvm::MachO::relocation_info or llvm::MachO::scattered_relocation_info
+ // structure
+ X86_64_RELOC_UNSIGNED = 0,
+ X86_64_RELOC_SIGNED = 1,
+ X86_64_RELOC_BRANCH = 2,
+ X86_64_RELOC_GOT_LOAD = 3,
+ X86_64_RELOC_GOT = 4,
+ X86_64_RELOC_SUBTRACTOR = 5,
+ X86_64_RELOC_SIGNED_1 = 6,
+ X86_64_RELOC_SIGNED_2 = 7,
+ X86_64_RELOC_SIGNED_4 = 8,
+ X86_64_RELOC_TLV = 9
+ };
+
+ // Values for segment_command.initprot.
+ // From <mach/vm_prot.h>
+ enum {
+ VM_PROT_READ = 0x1,
+ VM_PROT_WRITE = 0x2,
+ VM_PROT_EXECUTE = 0x4
+ };
+
+ // Structs from <mach-o/loader.h>
+
+ struct mach_header {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+ };
+
+ struct mach_header_64 {
+ uint32_t magic;
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t filetype;
+ uint32_t ncmds;
+ uint32_t sizeofcmds;
+ uint32_t flags;
+ uint32_t reserved;
+ };
+
+ struct load_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct segment_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+ uint32_t vmaddr;
+ uint32_t vmsize;
+ uint32_t fileoff;
+ uint32_t filesize;
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ struct segment_command_64 {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ char segname[16];
+ uint64_t vmaddr;
+ uint64_t vmsize;
+ uint64_t fileoff;
+ uint64_t filesize;
+ uint32_t maxprot;
+ uint32_t initprot;
+ uint32_t nsects;
+ uint32_t flags;
+ };
+
+ struct section {
+ char sectname[16];
+ char segname[16];
+ uint32_t addr;
+ uint32_t size;
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ };
+
+ struct section_64 {
+ char sectname[16];
+ char segname[16];
+ uint64_t addr;
+ uint64_t size;
+ uint32_t offset;
+ uint32_t align;
+ uint32_t reloff;
+ uint32_t nreloc;
+ uint32_t flags;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ };
+
+ struct fvmlib {
+ uint32_t name;
+ uint32_t minor_version;
+ uint32_t header_addr;
+ };
+
+ struct fvmlib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ struct fvmlib fvmlib;
+ };
+
+ struct dylib {
+ uint32_t name;
+ uint32_t timestamp;
+ uint32_t current_version;
+ uint32_t compatibility_version;
+ };
+
+ struct dylib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ struct dylib dylib;
+ };
+
+ struct sub_framework_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t umbrella;
+ };
+
+ struct sub_client_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t client;
+ };
+
+ struct sub_umbrella_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t sub_umbrella;
+ };
+
+ struct sub_library_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t sub_library;
+ };
+
+ struct prebound_dylib_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ uint32_t nmodules;
+ uint32_t linked_modules;
+ };
+
+ struct dylinker_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ };
+
+ struct thread_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct routines_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t init_address;
+ uint32_t init_module;
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+ uint32_t reserved4;
+ uint32_t reserved5;
+ uint32_t reserved6;
+ };
+
+ struct routines_command_64 {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint64_t init_address;
+ uint64_t init_module;
+ uint64_t reserved1;
+ uint64_t reserved2;
+ uint64_t reserved3;
+ uint64_t reserved4;
+ uint64_t reserved5;
+ uint64_t reserved6;
+ };
+
+ struct symtab_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t symoff;
+ uint32_t nsyms;
+ uint32_t stroff;
+ uint32_t strsize;
+ };
+
+ struct dysymtab_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t iundefsym;
+ uint32_t nundefsym;
+ uint32_t tocoff;
+ uint32_t ntoc;
+ uint32_t modtaboff;
+ uint32_t nmodtab;
+ uint32_t extrefsymoff;
+ uint32_t nextrefsyms;
+ uint32_t indirectsymoff;
+ uint32_t nindirectsyms;
+ uint32_t extreloff;
+ uint32_t nextrel;
+ uint32_t locreloff;
+ uint32_t nlocrel;
+ };
+
+ struct dylib_table_of_contents {
+ uint32_t symbol_index;
+ uint32_t module_index;
+ };
+
+ struct dylib_module {
+ uint32_t module_name;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t irefsym;
+ uint32_t nrefsym;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextrel;
+ uint32_t nextrel;
+ uint32_t iinit_iterm;
+ uint32_t ninit_nterm;
+ uint32_t objc_module_info_addr;
+ uint32_t objc_module_info_size;
+ };
+
+ struct dylib_module_64 {
+ uint32_t module_name;
+ uint32_t iextdefsym;
+ uint32_t nextdefsym;
+ uint32_t irefsym;
+ uint32_t nrefsym;
+ uint32_t ilocalsym;
+ uint32_t nlocalsym;
+ uint32_t iextrel;
+ uint32_t nextrel;
+ uint32_t iinit_iterm;
+ uint32_t ninit_nterm;
+ uint32_t objc_module_info_size;
+ uint64_t objc_module_info_addr;
+ };
+
+ struct dylib_reference {
+ uint32_t isym:24,
+ flags:8;
+ };
+
+ struct twolevel_hints_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t offset;
+ uint32_t nhints;
+ };
+
+ struct twolevel_hint {
+ uint32_t isub_image:8,
+ itoc:24;
+ };
+
+ struct prebind_cksum_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t cksum;
+ };
+
+ struct uuid_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint8_t uuid[16];
+ };
+
+ struct rpath_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t path;
+ };
+
+ struct linkedit_data_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t dataoff;
+ uint32_t datasize;
+ };
+
+ struct data_in_code_entry {
+ uint32_t offset;
+ uint16_t length;
+ uint16_t kind;
+ };
+
+ struct source_version_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint64_t version;
+ };
+
+ struct encryption_info_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t cryptoff;
+ uint32_t cryptsize;
+ uint32_t cryptid;
+ };
+
+ struct encryption_info_command_64 {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t cryptoff;
+ uint32_t cryptsize;
+ uint32_t cryptid;
+ uint32_t pad;
+ };
+
+ struct version_min_command {
+    uint32_t cmd;       // LC_VERSION_MIN_MACOSX, LC_VERSION_MIN_IPHONEOS,
+                        //   LC_VERSION_MIN_TVOS or LC_VERSION_MIN_WATCHOS
+ uint32_t cmdsize; // sizeof(struct version_min_command)
+ uint32_t version; // X.Y.Z is encoded in nibbles xxxx.yy.zz
+ uint32_t sdk; // X.Y.Z is encoded in nibbles xxxx.yy.zz
+ };
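+
+  // For example (editorial note), version 10.8.2 is encoded as 0x000A0802:
+  // X occupies the high 16 bits, with Y and Z taking one byte each below it.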
+
+ struct dyld_info_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t rebase_off;
+ uint32_t rebase_size;
+ uint32_t bind_off;
+ uint32_t bind_size;
+ uint32_t weak_bind_off;
+ uint32_t weak_bind_size;
+ uint32_t lazy_bind_off;
+ uint32_t lazy_bind_size;
+ uint32_t export_off;
+ uint32_t export_size;
+ };
+
+ struct linker_option_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t count;
+ };
+
+ struct symseg_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t offset;
+ uint32_t size;
+ };
+
+ struct ident_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ };
+
+ struct fvmfile_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint32_t name;
+ uint32_t header_addr;
+ };
+
+ struct tlv_descriptor_32 {
+ uint32_t thunk;
+ uint32_t key;
+ uint32_t offset;
+ };
+
+ struct tlv_descriptor_64 {
+ uint64_t thunk;
+ uint64_t key;
+ uint64_t offset;
+ };
+
+ struct tlv_descriptor {
+ uintptr_t thunk;
+ uintptr_t key;
+ uintptr_t offset;
+ };
+
+ struct entry_point_command {
+ uint32_t cmd;
+ uint32_t cmdsize;
+ uint64_t entryoff;
+ uint64_t stacksize;
+ };
+
+ // Structs from <mach-o/fat.h>
+ struct fat_header {
+ uint32_t magic;
+ uint32_t nfat_arch;
+ };
+
+ struct fat_arch {
+ uint32_t cputype;
+ uint32_t cpusubtype;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t align;
+ };
+
+ // Structs from <mach-o/reloc.h>
+ struct relocation_info {
+ int32_t r_address;
+ uint32_t r_symbolnum:24,
+ r_pcrel:1,
+ r_length:2,
+ r_extern:1,
+ r_type:4;
+ };
+
+ struct scattered_relocation_info {
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && (BYTE_ORDER == BIG_ENDIAN)
+ uint32_t r_scattered:1,
+ r_pcrel:1,
+ r_length:2,
+ r_type:4,
+ r_address:24;
+#else
+ uint32_t r_address:24,
+ r_type:4,
+ r_length:2,
+ r_pcrel:1,
+ r_scattered:1;
+#endif
+ int32_t r_value;
+ };
+
+ // Structs NOT from <mach-o/reloc.h>, but that make LLVM's life easier
+ struct any_relocation_info {
+ uint32_t r_word0, r_word1;
+ };
+
+ // Structs from <mach-o/nlist.h>
+ struct nlist_base {
+ uint32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ uint16_t n_desc;
+ };
+
+ struct nlist {
+ uint32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ int16_t n_desc;
+ uint32_t n_value;
+ };
+
+ struct nlist_64 {
+ uint32_t n_strx;
+ uint8_t n_type;
+ uint8_t n_sect;
+ uint16_t n_desc;
+ uint64_t n_value;
+ };
+
+ // Byte order swapping functions for MachO structs
+
+ inline void swapStruct(mach_header &mh) {
+ sys::swapByteOrder(mh.magic);
+ sys::swapByteOrder(mh.cputype);
+ sys::swapByteOrder(mh.cpusubtype);
+ sys::swapByteOrder(mh.filetype);
+ sys::swapByteOrder(mh.ncmds);
+ sys::swapByteOrder(mh.sizeofcmds);
+ sys::swapByteOrder(mh.flags);
+ }
+
+ inline void swapStruct(mach_header_64 &H) {
+ sys::swapByteOrder(H.magic);
+ sys::swapByteOrder(H.cputype);
+ sys::swapByteOrder(H.cpusubtype);
+ sys::swapByteOrder(H.filetype);
+ sys::swapByteOrder(H.ncmds);
+ sys::swapByteOrder(H.sizeofcmds);
+ sys::swapByteOrder(H.flags);
+ sys::swapByteOrder(H.reserved);
+ }
+
+ inline void swapStruct(load_command &lc) {
+ sys::swapByteOrder(lc.cmd);
+ sys::swapByteOrder(lc.cmdsize);
+ }
+
+ inline void swapStruct(symtab_command &lc) {
+ sys::swapByteOrder(lc.cmd);
+ sys::swapByteOrder(lc.cmdsize);
+ sys::swapByteOrder(lc.symoff);
+ sys::swapByteOrder(lc.nsyms);
+ sys::swapByteOrder(lc.stroff);
+ sys::swapByteOrder(lc.strsize);
+ }
+
+ inline void swapStruct(segment_command_64 &seg) {
+ sys::swapByteOrder(seg.cmd);
+ sys::swapByteOrder(seg.cmdsize);
+ sys::swapByteOrder(seg.vmaddr);
+ sys::swapByteOrder(seg.vmsize);
+ sys::swapByteOrder(seg.fileoff);
+ sys::swapByteOrder(seg.filesize);
+ sys::swapByteOrder(seg.maxprot);
+ sys::swapByteOrder(seg.initprot);
+ sys::swapByteOrder(seg.nsects);
+ sys::swapByteOrder(seg.flags);
+ }
+
+ inline void swapStruct(segment_command &seg) {
+ sys::swapByteOrder(seg.cmd);
+ sys::swapByteOrder(seg.cmdsize);
+ sys::swapByteOrder(seg.vmaddr);
+ sys::swapByteOrder(seg.vmsize);
+ sys::swapByteOrder(seg.fileoff);
+ sys::swapByteOrder(seg.filesize);
+ sys::swapByteOrder(seg.maxprot);
+ sys::swapByteOrder(seg.initprot);
+ sys::swapByteOrder(seg.nsects);
+ sys::swapByteOrder(seg.flags);
+ }
+
+ inline void swapStruct(section_64 &sect) {
+ sys::swapByteOrder(sect.addr);
+ sys::swapByteOrder(sect.size);
+ sys::swapByteOrder(sect.offset);
+ sys::swapByteOrder(sect.align);
+ sys::swapByteOrder(sect.reloff);
+ sys::swapByteOrder(sect.nreloc);
+ sys::swapByteOrder(sect.flags);
+ sys::swapByteOrder(sect.reserved1);
+ sys::swapByteOrder(sect.reserved2);
+ }
+
+ inline void swapStruct(section &sect) {
+ sys::swapByteOrder(sect.addr);
+ sys::swapByteOrder(sect.size);
+ sys::swapByteOrder(sect.offset);
+ sys::swapByteOrder(sect.align);
+ sys::swapByteOrder(sect.reloff);
+ sys::swapByteOrder(sect.nreloc);
+ sys::swapByteOrder(sect.flags);
+ sys::swapByteOrder(sect.reserved1);
+ sys::swapByteOrder(sect.reserved2);
+ }
+
+ inline void swapStruct(dyld_info_command &info) {
+ sys::swapByteOrder(info.cmd);
+ sys::swapByteOrder(info.cmdsize);
+ sys::swapByteOrder(info.rebase_off);
+ sys::swapByteOrder(info.rebase_size);
+ sys::swapByteOrder(info.bind_off);
+ sys::swapByteOrder(info.bind_size);
+ sys::swapByteOrder(info.weak_bind_off);
+ sys::swapByteOrder(info.weak_bind_size);
+ sys::swapByteOrder(info.lazy_bind_off);
+ sys::swapByteOrder(info.lazy_bind_size);
+ sys::swapByteOrder(info.export_off);
+ sys::swapByteOrder(info.export_size);
+ }
+
+ inline void swapStruct(dylib_command &d) {
+ sys::swapByteOrder(d.cmd);
+ sys::swapByteOrder(d.cmdsize);
+ sys::swapByteOrder(d.dylib.name);
+ sys::swapByteOrder(d.dylib.timestamp);
+ sys::swapByteOrder(d.dylib.current_version);
+ sys::swapByteOrder(d.dylib.compatibility_version);
+ }
+
+ inline void swapStruct(sub_framework_command &s) {
+ sys::swapByteOrder(s.cmd);
+ sys::swapByteOrder(s.cmdsize);
+ sys::swapByteOrder(s.umbrella);
+ }
+
+ inline void swapStruct(sub_umbrella_command &s) {
+ sys::swapByteOrder(s.cmd);
+ sys::swapByteOrder(s.cmdsize);
+ sys::swapByteOrder(s.sub_umbrella);
+ }
+
+ inline void swapStruct(sub_library_command &s) {
+ sys::swapByteOrder(s.cmd);
+ sys::swapByteOrder(s.cmdsize);
+ sys::swapByteOrder(s.sub_library);
+ }
+
+ inline void swapStruct(sub_client_command &s) {
+ sys::swapByteOrder(s.cmd);
+ sys::swapByteOrder(s.cmdsize);
+ sys::swapByteOrder(s.client);
+ }
+
+ inline void swapStruct(routines_command &r) {
+ sys::swapByteOrder(r.cmd);
+ sys::swapByteOrder(r.cmdsize);
+ sys::swapByteOrder(r.init_address);
+ sys::swapByteOrder(r.init_module);
+ sys::swapByteOrder(r.reserved1);
+ sys::swapByteOrder(r.reserved2);
+ sys::swapByteOrder(r.reserved3);
+ sys::swapByteOrder(r.reserved4);
+ sys::swapByteOrder(r.reserved5);
+ sys::swapByteOrder(r.reserved6);
+ }
+
+ inline void swapStruct(routines_command_64 &r) {
+ sys::swapByteOrder(r.cmd);
+ sys::swapByteOrder(r.cmdsize);
+ sys::swapByteOrder(r.init_address);
+ sys::swapByteOrder(r.init_module);
+ sys::swapByteOrder(r.reserved1);
+ sys::swapByteOrder(r.reserved2);
+ sys::swapByteOrder(r.reserved3);
+ sys::swapByteOrder(r.reserved4);
+ sys::swapByteOrder(r.reserved5);
+ sys::swapByteOrder(r.reserved6);
+ }
+
+ inline void swapStruct(thread_command &t) {
+ sys::swapByteOrder(t.cmd);
+ sys::swapByteOrder(t.cmdsize);
+ }
+
+ inline void swapStruct(dylinker_command &d) {
+ sys::swapByteOrder(d.cmd);
+ sys::swapByteOrder(d.cmdsize);
+ sys::swapByteOrder(d.name);
+ }
+
+ inline void swapStruct(uuid_command &u) {
+ sys::swapByteOrder(u.cmd);
+ sys::swapByteOrder(u.cmdsize);
+ }
+
+ inline void swapStruct(rpath_command &r) {
+ sys::swapByteOrder(r.cmd);
+ sys::swapByteOrder(r.cmdsize);
+ sys::swapByteOrder(r.path);
+ }
+
+ inline void swapStruct(source_version_command &s) {
+ sys::swapByteOrder(s.cmd);
+ sys::swapByteOrder(s.cmdsize);
+ sys::swapByteOrder(s.version);
+ }
+
+ inline void swapStruct(entry_point_command &e) {
+ sys::swapByteOrder(e.cmd);
+ sys::swapByteOrder(e.cmdsize);
+ sys::swapByteOrder(e.entryoff);
+ sys::swapByteOrder(e.stacksize);
+ }
+
+ inline void swapStruct(encryption_info_command &e) {
+ sys::swapByteOrder(e.cmd);
+ sys::swapByteOrder(e.cmdsize);
+ sys::swapByteOrder(e.cryptoff);
+ sys::swapByteOrder(e.cryptsize);
+ sys::swapByteOrder(e.cryptid);
+ }
+
+ inline void swapStruct(encryption_info_command_64 &e) {
+ sys::swapByteOrder(e.cmd);
+ sys::swapByteOrder(e.cmdsize);
+ sys::swapByteOrder(e.cryptoff);
+ sys::swapByteOrder(e.cryptsize);
+ sys::swapByteOrder(e.cryptid);
+ sys::swapByteOrder(e.pad);
+ }
+
+ inline void swapStruct(dysymtab_command &dst) {
+ sys::swapByteOrder(dst.cmd);
+ sys::swapByteOrder(dst.cmdsize);
+ sys::swapByteOrder(dst.ilocalsym);
+ sys::swapByteOrder(dst.nlocalsym);
+ sys::swapByteOrder(dst.iextdefsym);
+ sys::swapByteOrder(dst.nextdefsym);
+ sys::swapByteOrder(dst.iundefsym);
+ sys::swapByteOrder(dst.nundefsym);
+ sys::swapByteOrder(dst.tocoff);
+ sys::swapByteOrder(dst.ntoc);
+ sys::swapByteOrder(dst.modtaboff);
+ sys::swapByteOrder(dst.nmodtab);
+ sys::swapByteOrder(dst.extrefsymoff);
+ sys::swapByteOrder(dst.nextrefsyms);
+ sys::swapByteOrder(dst.indirectsymoff);
+ sys::swapByteOrder(dst.nindirectsyms);
+ sys::swapByteOrder(dst.extreloff);
+ sys::swapByteOrder(dst.nextrel);
+ sys::swapByteOrder(dst.locreloff);
+ sys::swapByteOrder(dst.nlocrel);
+ }
+
+ inline void swapStruct(any_relocation_info &reloc) {
+ sys::swapByteOrder(reloc.r_word0);
+ sys::swapByteOrder(reloc.r_word1);
+ }
+
+ inline void swapStruct(nlist_base &S) {
+ sys::swapByteOrder(S.n_strx);
+ sys::swapByteOrder(S.n_desc);
+ }
+
+ inline void swapStruct(nlist &sym) {
+ sys::swapByteOrder(sym.n_strx);
+ sys::swapByteOrder(sym.n_desc);
+ sys::swapByteOrder(sym.n_value);
+ }
+
+ inline void swapStruct(nlist_64 &sym) {
+ sys::swapByteOrder(sym.n_strx);
+ sys::swapByteOrder(sym.n_desc);
+ sys::swapByteOrder(sym.n_value);
+ }
+
+ inline void swapStruct(linkedit_data_command &C) {
+ sys::swapByteOrder(C.cmd);
+ sys::swapByteOrder(C.cmdsize);
+ sys::swapByteOrder(C.dataoff);
+ sys::swapByteOrder(C.datasize);
+ }
+
+ inline void swapStruct(linker_option_command &C) {
+ sys::swapByteOrder(C.cmd);
+ sys::swapByteOrder(C.cmdsize);
+ sys::swapByteOrder(C.count);
+ }
+
+  inline void swapStruct(version_min_command &C) {
+ sys::swapByteOrder(C.cmd);
+ sys::swapByteOrder(C.cmdsize);
+ sys::swapByteOrder(C.version);
+ sys::swapByteOrder(C.sdk);
+ }
+
+ inline void swapStruct(data_in_code_entry &C) {
+ sys::swapByteOrder(C.offset);
+ sys::swapByteOrder(C.length);
+ sys::swapByteOrder(C.kind);
+ }
+
+ inline void swapStruct(uint32_t &C) {
+ sys::swapByteOrder(C);
+ }
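+
+  // Illustrative sketch, not part of the upstream header: a reader that
+  // handles both byte orders checks the magic and swaps once up front.
+  // Buf pointing at the start of the file is an assumption here:
+  //
+  //   mach_header MH;
+  //   memcpy(&MH, Buf, sizeof(MH));
+  //   if (MH.magic == MH_CIGAM)
+  //     swapStruct(MH);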
+
+ // Get/Set functions from <mach-o/nlist.h>
+
+ static inline uint16_t GET_LIBRARY_ORDINAL(uint16_t n_desc) {
+ return (((n_desc) >> 8u) & 0xffu);
+ }
+
+ static inline void SET_LIBRARY_ORDINAL(uint16_t &n_desc, uint8_t ordinal) {
+ n_desc = (((n_desc) & 0x00ff) | (((ordinal) & 0xff) << 8));
+ }
+
+ static inline uint8_t GET_COMM_ALIGN (uint16_t n_desc) {
+ return (n_desc >> 8u) & 0x0fu;
+ }
+
+ static inline void SET_COMM_ALIGN (uint16_t &n_desc, uint8_t align) {
+ n_desc = ((n_desc & 0xf0ffu) | ((align & 0x0fu) << 8u));
+ }
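+
+  // For example (editorial note), an undefined two-level-namespace symbol
+  // with n_desc == 0x0300 has GET_LIBRARY_ORDINAL(n_desc) == 3, binding it
+  // to the third dylib load command; SET_LIBRARY_ORDINAL(n_desc, 3)
+  // produces that encoding.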
+
+ // Enums from <mach/machine.h>
+ enum : uint32_t {
+ // Capability bits used in the definition of cpu_type.
+ CPU_ARCH_MASK = 0xff000000, // Mask for architecture bits
+ CPU_ARCH_ABI64 = 0x01000000 // 64 bit ABI
+ };
+
+ // Constants for the cputype field.
+ enum CPUType {
+ CPU_TYPE_ANY = -1,
+ CPU_TYPE_X86 = 7,
+ CPU_TYPE_I386 = CPU_TYPE_X86,
+ CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64,
+ /* CPU_TYPE_MIPS = 8, */
+ CPU_TYPE_MC98000 = 10, // Old Motorola PowerPC
+ CPU_TYPE_ARM = 12,
+ CPU_TYPE_ARM64 = CPU_TYPE_ARM | CPU_ARCH_ABI64,
+ CPU_TYPE_SPARC = 14,
+ CPU_TYPE_POWERPC = 18,
+ CPU_TYPE_POWERPC64 = CPU_TYPE_POWERPC | CPU_ARCH_ABI64
+ };
+
+ enum : uint32_t {
+ // Capability bits used in the definition of cpusubtype.
+    CPU_SUBTYPE_MASK  = 0xff000000,   // Mask for capability (feature) bits
+ CPU_SUBTYPE_LIB64 = 0x80000000, // 64 bit libraries
+
+ // Special CPU subtype constants.
+ CPU_SUBTYPE_MULTIPLE = ~0u
+ };
+
+ // Constants for the cpusubtype field.
+ enum CPUSubTypeX86 {
+ CPU_SUBTYPE_I386_ALL = 3,
+ CPU_SUBTYPE_386 = 3,
+ CPU_SUBTYPE_486 = 4,
+ CPU_SUBTYPE_486SX = 0x84,
+ CPU_SUBTYPE_586 = 5,
+ CPU_SUBTYPE_PENT = CPU_SUBTYPE_586,
+ CPU_SUBTYPE_PENTPRO = 0x16,
+ CPU_SUBTYPE_PENTII_M3 = 0x36,
+ CPU_SUBTYPE_PENTII_M5 = 0x56,
+ CPU_SUBTYPE_CELERON = 0x67,
+ CPU_SUBTYPE_CELERON_MOBILE = 0x77,
+ CPU_SUBTYPE_PENTIUM_3 = 0x08,
+ CPU_SUBTYPE_PENTIUM_3_M = 0x18,
+ CPU_SUBTYPE_PENTIUM_3_XEON = 0x28,
+ CPU_SUBTYPE_PENTIUM_M = 0x09,
+ CPU_SUBTYPE_PENTIUM_4 = 0x0a,
+ CPU_SUBTYPE_PENTIUM_4_M = 0x1a,
+ CPU_SUBTYPE_ITANIUM = 0x0b,
+ CPU_SUBTYPE_ITANIUM_2 = 0x1b,
+ CPU_SUBTYPE_XEON = 0x0c,
+ CPU_SUBTYPE_XEON_MP = 0x1c,
+
+ CPU_SUBTYPE_X86_ALL = 3,
+ CPU_SUBTYPE_X86_64_ALL = 3,
+ CPU_SUBTYPE_X86_ARCH1 = 4,
+ CPU_SUBTYPE_X86_64_H = 8
+ };
+ static inline int CPU_SUBTYPE_INTEL(int Family, int Model) {
+ return Family | (Model << 4);
+ }
+ static inline int CPU_SUBTYPE_INTEL_FAMILY(CPUSubTypeX86 ST) {
+ return ((int)ST) & 0x0f;
+ }
+ static inline int CPU_SUBTYPE_INTEL_MODEL(CPUSubTypeX86 ST) {
+ return ((int)ST) >> 4;
+ }
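+  // For example (editorial note), CPU_SUBTYPE_PENTPRO (0x16) decomposes as
+  // CPU_SUBTYPE_INTEL(6, 1): family 6 in the low nibble, model 1 in the
+  // nibble above it.
+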
+ enum {
+ CPU_SUBTYPE_INTEL_FAMILY_MAX = 15,
+ CPU_SUBTYPE_INTEL_MODEL_ALL = 0
+ };
+
+ enum CPUSubTypeARM {
+ CPU_SUBTYPE_ARM_ALL = 0,
+ CPU_SUBTYPE_ARM_V4T = 5,
+ CPU_SUBTYPE_ARM_V6 = 6,
+ CPU_SUBTYPE_ARM_V5 = 7,
+ CPU_SUBTYPE_ARM_V5TEJ = 7,
+ CPU_SUBTYPE_ARM_XSCALE = 8,
+ CPU_SUBTYPE_ARM_V7 = 9,
+ // unused ARM_V7F = 10,
+ CPU_SUBTYPE_ARM_V7S = 11,
+ CPU_SUBTYPE_ARM_V7K = 12,
+ CPU_SUBTYPE_ARM_V6M = 14,
+ CPU_SUBTYPE_ARM_V7M = 15,
+ CPU_SUBTYPE_ARM_V7EM = 16
+ };
+
+ enum CPUSubTypeARM64 {
+ CPU_SUBTYPE_ARM64_ALL = 0
+ };
+
+ enum CPUSubTypeSPARC {
+ CPU_SUBTYPE_SPARC_ALL = 0
+ };
+
+ enum CPUSubTypePowerPC {
+ CPU_SUBTYPE_POWERPC_ALL = 0,
+ CPU_SUBTYPE_POWERPC_601 = 1,
+ CPU_SUBTYPE_POWERPC_602 = 2,
+ CPU_SUBTYPE_POWERPC_603 = 3,
+ CPU_SUBTYPE_POWERPC_603e = 4,
+ CPU_SUBTYPE_POWERPC_603ev = 5,
+ CPU_SUBTYPE_POWERPC_604 = 6,
+ CPU_SUBTYPE_POWERPC_604e = 7,
+ CPU_SUBTYPE_POWERPC_620 = 8,
+ CPU_SUBTYPE_POWERPC_750 = 9,
+ CPU_SUBTYPE_POWERPC_7400 = 10,
+ CPU_SUBTYPE_POWERPC_7450 = 11,
+ CPU_SUBTYPE_POWERPC_970 = 100,
+
+ CPU_SUBTYPE_MC980000_ALL = CPU_SUBTYPE_POWERPC_ALL,
+ CPU_SUBTYPE_MC98601 = CPU_SUBTYPE_POWERPC_601
+ };
+
+ struct x86_thread_state64_t {
+ uint64_t rax;
+ uint64_t rbx;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rdi;
+ uint64_t rsi;
+ uint64_t rbp;
+ uint64_t rsp;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rip;
+ uint64_t rflags;
+ uint64_t cs;
+ uint64_t fs;
+ uint64_t gs;
+ };
+
+ enum x86_fp_control_precis {
+ x86_FP_PREC_24B = 0,
+ x86_FP_PREC_53B = 2,
+ x86_FP_PREC_64B = 3
+ };
+
+ enum x86_fp_control_rc {
+ x86_FP_RND_NEAR = 0,
+ x86_FP_RND_DOWN = 1,
+ x86_FP_RND_UP = 2,
+ x86_FP_CHOP = 3
+ };
+
+ struct fp_control_t {
+ unsigned short
+ invalid :1,
+ denorm :1,
+ zdiv :1,
+ ovrfl :1,
+ undfl :1,
+ precis :1,
+ :2,
+ pc :2,
+ rc :2,
+ :1,
+ :3;
+ };
+
+ struct fp_status_t {
+ unsigned short
+ invalid :1,
+ denorm :1,
+ zdiv :1,
+ ovrfl :1,
+ undfl :1,
+ precis :1,
+ stkflt :1,
+ errsumm :1,
+ c0 :1,
+ c1 :1,
+ c2 :1,
+ tos :3,
+ c3 :1,
+ busy :1;
+ };
+
+ struct mmst_reg_t {
+ char mmst_reg[10];
+ char mmst_rsrv[6];
+ };
+
+ struct xmm_reg_t {
+ char xmm_reg[16];
+ };
+
+ struct x86_float_state64_t {
+ int32_t fpu_reserved[2];
+ fp_control_t fpu_fcw;
+ fp_status_t fpu_fsw;
+ uint8_t fpu_ftw;
+ uint8_t fpu_rsrv1;
+ uint16_t fpu_fop;
+ uint32_t fpu_ip;
+ uint16_t fpu_cs;
+ uint16_t fpu_rsrv2;
+ uint32_t fpu_dp;
+ uint16_t fpu_ds;
+ uint16_t fpu_rsrv3;
+ uint32_t fpu_mxcsr;
+ uint32_t fpu_mxcsrmask;
+ mmst_reg_t fpu_stmm0;
+ mmst_reg_t fpu_stmm1;
+ mmst_reg_t fpu_stmm2;
+ mmst_reg_t fpu_stmm3;
+ mmst_reg_t fpu_stmm4;
+ mmst_reg_t fpu_stmm5;
+ mmst_reg_t fpu_stmm6;
+ mmst_reg_t fpu_stmm7;
+ xmm_reg_t fpu_xmm0;
+ xmm_reg_t fpu_xmm1;
+ xmm_reg_t fpu_xmm2;
+ xmm_reg_t fpu_xmm3;
+ xmm_reg_t fpu_xmm4;
+ xmm_reg_t fpu_xmm5;
+ xmm_reg_t fpu_xmm6;
+ xmm_reg_t fpu_xmm7;
+ xmm_reg_t fpu_xmm8;
+ xmm_reg_t fpu_xmm9;
+ xmm_reg_t fpu_xmm10;
+ xmm_reg_t fpu_xmm11;
+ xmm_reg_t fpu_xmm12;
+ xmm_reg_t fpu_xmm13;
+ xmm_reg_t fpu_xmm14;
+ xmm_reg_t fpu_xmm15;
+ char fpu_rsrv4[6*16];
+ uint32_t fpu_reserved1;
+ };
+
+ struct x86_exception_state64_t {
+ uint16_t trapno;
+ uint16_t cpu;
+ uint32_t err;
+ uint64_t faultvaddr;
+ };
+
+ inline void swapStruct(x86_thread_state64_t &x) {
+ sys::swapByteOrder(x.rax);
+ sys::swapByteOrder(x.rbx);
+ sys::swapByteOrder(x.rcx);
+ sys::swapByteOrder(x.rdx);
+ sys::swapByteOrder(x.rdi);
+ sys::swapByteOrder(x.rsi);
+ sys::swapByteOrder(x.rbp);
+ sys::swapByteOrder(x.rsp);
+ sys::swapByteOrder(x.r8);
+ sys::swapByteOrder(x.r9);
+ sys::swapByteOrder(x.r10);
+ sys::swapByteOrder(x.r11);
+ sys::swapByteOrder(x.r12);
+ sys::swapByteOrder(x.r13);
+ sys::swapByteOrder(x.r14);
+ sys::swapByteOrder(x.r15);
+ sys::swapByteOrder(x.rip);
+ sys::swapByteOrder(x.rflags);
+ sys::swapByteOrder(x.cs);
+ sys::swapByteOrder(x.fs);
+ sys::swapByteOrder(x.gs);
+ }
+
+ inline void swapStruct(x86_float_state64_t &x) {
+ sys::swapByteOrder(x.fpu_reserved[0]);
+ sys::swapByteOrder(x.fpu_reserved[1]);
+ // TODO swap: fp_control_t fpu_fcw;
+ // TODO swap: fp_status_t fpu_fsw;
+ sys::swapByteOrder(x.fpu_fop);
+ sys::swapByteOrder(x.fpu_ip);
+ sys::swapByteOrder(x.fpu_cs);
+ sys::swapByteOrder(x.fpu_rsrv2);
+ sys::swapByteOrder(x.fpu_dp);
+ sys::swapByteOrder(x.fpu_ds);
+ sys::swapByteOrder(x.fpu_rsrv3);
+ sys::swapByteOrder(x.fpu_mxcsr);
+ sys::swapByteOrder(x.fpu_mxcsrmask);
+ sys::swapByteOrder(x.fpu_reserved1);
+ }
+
+ inline void swapStruct(x86_exception_state64_t &x) {
+ sys::swapByteOrder(x.trapno);
+ sys::swapByteOrder(x.cpu);
+ sys::swapByteOrder(x.err);
+ sys::swapByteOrder(x.faultvaddr);
+ }
+
+ struct x86_state_hdr_t {
+ uint32_t flavor;
+ uint32_t count;
+ };
+
+ struct x86_thread_state_t {
+ x86_state_hdr_t tsh;
+ union {
+ x86_thread_state64_t ts64;
+ } uts;
+ };
+
+ struct x86_float_state_t {
+ x86_state_hdr_t fsh;
+ union {
+ x86_float_state64_t fs64;
+ } ufs;
+ };
+
+ struct x86_exception_state_t {
+ x86_state_hdr_t esh;
+ union {
+ x86_exception_state64_t es64;
+ } ues;
+ };
+
+ inline void swapStruct(x86_state_hdr_t &x) {
+ sys::swapByteOrder(x.flavor);
+ sys::swapByteOrder(x.count);
+ }
+
+ enum X86ThreadFlavors {
+ x86_THREAD_STATE32 = 1,
+ x86_FLOAT_STATE32 = 2,
+ x86_EXCEPTION_STATE32 = 3,
+ x86_THREAD_STATE64 = 4,
+ x86_FLOAT_STATE64 = 5,
+ x86_EXCEPTION_STATE64 = 6,
+ x86_THREAD_STATE = 7,
+ x86_FLOAT_STATE = 8,
+ x86_EXCEPTION_STATE = 9,
+ x86_DEBUG_STATE32 = 10,
+ x86_DEBUG_STATE64 = 11,
+ x86_DEBUG_STATE = 12
+ };
+
+ inline void swapStruct(x86_thread_state_t &x) {
+ swapStruct(x.tsh);
+ if (x.tsh.flavor == x86_THREAD_STATE64)
+ swapStruct(x.uts.ts64);
+ }
+
+ inline void swapStruct(x86_float_state_t &x) {
+ swapStruct(x.fsh);
+ if (x.fsh.flavor == x86_FLOAT_STATE64)
+ swapStruct(x.ufs.fs64);
+ }
+
+ inline void swapStruct(x86_exception_state_t &x) {
+ swapStruct(x.esh);
+ if (x.esh.flavor == x86_EXCEPTION_STATE64)
+ swapStruct(x.ues.es64);
+ }
+
+ const uint32_t x86_THREAD_STATE64_COUNT =
+ sizeof(x86_thread_state64_t) / sizeof(uint32_t);
+ const uint32_t x86_FLOAT_STATE64_COUNT =
+ sizeof(x86_float_state64_t) / sizeof(uint32_t);
+ const uint32_t x86_EXCEPTION_STATE64_COUNT =
+ sizeof(x86_exception_state64_t) / sizeof(uint32_t);
+
+ const uint32_t x86_THREAD_STATE_COUNT =
+ sizeof(x86_thread_state_t) / sizeof(uint32_t);
+ const uint32_t x86_FLOAT_STATE_COUNT =
+ sizeof(x86_float_state_t) / sizeof(uint32_t);
+ const uint32_t x86_EXCEPTION_STATE_COUNT =
+ sizeof(x86_exception_state_t) / sizeof(uint32_t);
+
+ } // end namespace MachO
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ManagedStatic.h b/contrib/llvm/include/llvm/Support/ManagedStatic.h
new file mode 100644
index 0000000..2e131e4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ManagedStatic.h
@@ -0,0 +1,111 @@
+//===-- llvm/Support/ManagedStatic.h - Static Global wrapper ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ManagedStatic class and the llvm_shutdown() function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MANAGEDSTATIC_H
+#define LLVM_SUPPORT_MANAGEDSTATIC_H
+
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Threading.h"
+
+namespace llvm {
+
+/// object_creator - Helper method for ManagedStatic.
+template<class C>
+void* object_creator() {
+ return new C();
+}
+
+/// object_deleter - Helper method for ManagedStatic.
+///
+template<typename T> struct object_deleter {
+ static void call(void * Ptr) { delete (T*)Ptr; }
+};
+template<typename T, size_t N> struct object_deleter<T[N]> {
+ static void call(void * Ptr) { delete[] (T*)Ptr; }
+};
+
+/// ManagedStaticBase - Common base class for ManagedStatic instances.
+class ManagedStaticBase {
+protected:
+ // This should only be used as a static variable, which guarantees that this
+ // will be zero initialized.
+ mutable void *Ptr;
+ mutable void (*DeleterFn)(void*);
+ mutable const ManagedStaticBase *Next;
+
+ void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const;
+public:
+  /// isConstructed - Return true if this object has already been constructed.
+ bool isConstructed() const { return Ptr != nullptr; }
+
+ void destroy() const;
+};
+
+/// ManagedStatic - This transparently changes the behavior of global statics to
+/// be lazily constructed on demand (good for reducing startup times of dynamic
+/// libraries that link in LLVM components) and makes destruction explicit
+/// through the llvm_shutdown() function call.
+///
+template<class C>
+class ManagedStatic : public ManagedStaticBase {
+public:
+
+ // Accessors.
+ C &operator*() {
+ void* tmp = Ptr;
+ if (llvm_is_multithreaded()) sys::MemoryFence();
+ if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
+
+ return *static_cast<C*>(Ptr);
+ }
+ C *operator->() {
+ void* tmp = Ptr;
+ if (llvm_is_multithreaded()) sys::MemoryFence();
+ if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
+
+ return static_cast<C*>(Ptr);
+ }
+ const C &operator*() const {
+ void* tmp = Ptr;
+ if (llvm_is_multithreaded()) sys::MemoryFence();
+ if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
+
+ return *static_cast<C*>(Ptr);
+ }
+ const C *operator->() const {
+ void* tmp = Ptr;
+ if (llvm_is_multithreaded()) sys::MemoryFence();
+ if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>::call);
+ TsanHappensAfter(this);
+
+ return static_cast<C*>(Ptr);
+ }
+};
+
+/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
+void llvm_shutdown();
+
+/// llvm_shutdown_obj - This is a simple helper class that calls
+/// llvm_shutdown() when it is destroyed.
+struct llvm_shutdown_obj {
+ llvm_shutdown_obj() { }
+ ~llvm_shutdown_obj() { llvm_shutdown(); }
+};
+
+}
+
+#endif
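A minimal usage sketch of the interface above (the global's name and payload type are illustrative; the program must link libLLVMSupport for RegisterManagedStatic and llvm_shutdown):

    #include "llvm/Support/ManagedStatic.h"
    #include <vector>

    // Lazily constructed global: no vector exists until the first dereference.
    static llvm::ManagedStatic<std::vector<int>> GlobalVec;

    int main() {
      llvm::llvm_shutdown_obj Shutdown; // runs llvm_shutdown() on scope exit
      GlobalVec->push_back(42);         // first access constructs the vector
      return (*GlobalVec)[0] == 42 ? 0 : 1;
    }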
diff --git a/contrib/llvm/include/llvm/Support/MathExtras.h b/contrib/llvm/include/llvm/Support/MathExtras.h
new file mode 100644
index 0000000..8111aee
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MathExtras.h
@@ -0,0 +1,723 @@
+//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains some functions that are useful for math stuff.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MATHEXTRAS_H
+#define LLVM_SUPPORT_MATHEXTRAS_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <cassert>
+#include <cstring>
+#include <limits>
+#include <type_traits>
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifdef __ANDROID_NDK__
+#include <android/api-level.h>
+#endif
+
+namespace llvm {
+/// \brief The behavior an operation has on an input of 0.
+enum ZeroBehavior {
+ /// \brief The returned value is undefined.
+ ZB_Undefined,
+  /// \brief The returned value is numeric_limits<T>::max().
+  ZB_Max,
+  /// \brief The returned value is numeric_limits<T>::digits.
+  ZB_Width
+};
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
+ static std::size_t count(T Val, ZeroBehavior) {
+ if (!Val)
+ return std::numeric_limits<T>::digits;
+ if (Val & 0x1)
+ return 0;
+
+ // Bisection method.
+ std::size_t ZeroBits = 0;
+ T Shift = std::numeric_limits<T>::digits >> 1;
+ T Mask = std::numeric_limits<T>::max() >> Shift;
+ while (Shift) {
+ if ((Val & Mask) == 0) {
+ Val >>= Shift;
+ ZeroBits |= Shift;
+ }
+ Shift >>= 1;
+ Mask >>= Shift;
+ }
+ return ZeroBits;
+ }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct TrailingZerosCounter<T, 4> {
+ static std::size_t count(T Val, ZeroBehavior ZB) {
+ if (ZB != ZB_Undefined && Val == 0)
+ return 32;
+
+#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
+ return __builtin_ctz(Val);
+#elif defined(_MSC_VER)
+ unsigned long Index;
+ _BitScanForward(&Index, Val);
+ return Index;
+#endif
+ }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct TrailingZerosCounter<T, 8> {
+ static std::size_t count(T Val, ZeroBehavior ZB) {
+ if (ZB != ZB_Undefined && Val == 0)
+ return 64;
+
+#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+ return __builtin_ctzll(Val);
+#elif defined(_MSC_VER)
+ unsigned long Index;
+ _BitScanForward64(&Index, Val);
+ return Index;
+#endif
+ }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count number of 0's from the least significant bit to the most
+/// stopping at the first 1.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+/// valid arguments.
+template <typename T>
+std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+ static_assert(std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ "Only unsigned integral types are allowed.");
+ return detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
+ static std::size_t count(T Val, ZeroBehavior) {
+ if (!Val)
+ return std::numeric_limits<T>::digits;
+
+ // Bisection method.
+ std::size_t ZeroBits = 0;
+ for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
+ T Tmp = Val >> Shift;
+ if (Tmp)
+ Val = Tmp;
+ else
+ ZeroBits |= Shift;
+ }
+ return ZeroBits;
+ }
+};
+
+#if __GNUC__ >= 4 || defined(_MSC_VER)
+template <typename T> struct LeadingZerosCounter<T, 4> {
+ static std::size_t count(T Val, ZeroBehavior ZB) {
+ if (ZB != ZB_Undefined && Val == 0)
+ return 32;
+
+#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
+ return __builtin_clz(Val);
+#elif defined(_MSC_VER)
+ unsigned long Index;
+ _BitScanReverse(&Index, Val);
+ return Index ^ 31;
+#endif
+ }
+};
+
+#if !defined(_MSC_VER) || defined(_M_X64)
+template <typename T> struct LeadingZerosCounter<T, 8> {
+ static std::size_t count(T Val, ZeroBehavior ZB) {
+ if (ZB != ZB_Undefined && Val == 0)
+ return 64;
+
+#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
+ return __builtin_clzll(Val);
+#elif defined(_MSC_VER)
+ unsigned long Index;
+ _BitScanReverse64(&Index, Val);
+ return Index ^ 63;
+#endif
+ }
+};
+#endif
+#endif
+} // namespace detail
+
+/// \brief Count number of 0's from the most significant bit to the least
+/// stopping at the first 1.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
+/// valid arguments.
+template <typename T>
+std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
+ static_assert(std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ "Only unsigned integral types are allowed.");
+ return detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB);
+}
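A few spot checks of the two counters; the values follow directly from the definitions above (the demo function name is illustrative):

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void zeroCountDemo() {
      // 0x80 = 0b10000000: seven trailing zeros, 24 leading zeros in 32 bits.
      assert(llvm::countTrailingZeros(0x80u) == 7);
      assert(llvm::countLeadingZeros(0x80u) == 24);
      // With the default ZB_Width, a zero input yields the full bit width.
      assert(llvm::countTrailingZeros(0u) == 32);
      assert(llvm::countLeadingZeros(0ull) == 64);
    }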
+
+/// \brief Get the index of the first set bit starting from the least
+/// significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+/// valid arguments.
+template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) {
+ if (ZB == ZB_Max && Val == 0)
+ return std::numeric_limits<T>::max();
+
+ return countTrailingZeros(Val, ZB_Undefined);
+}
+
+/// \brief Get the index of the last set bit starting from the least
+/// significant bit.
+///
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
+/// valid arguments.
+template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
+ if (ZB == ZB_Max && Val == 0)
+ return std::numeric_limits<T>::max();
+
+ // Use ^ instead of - because both gcc and llvm can remove the associated ^
+ // in the __builtin_clz intrinsic on x86.
+ return countLeadingZeros(Val, ZB_Undefined) ^
+ (std::numeric_limits<T>::digits - 1);
+}
+
+/// \brief Macro compressed bit reversal table for 256 bits.
+///
+/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+static const unsigned char BitReverseTable256[256] = {
+#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
+#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
+#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
+ R6(0), R6(2), R6(1), R6(3)
+#undef R2
+#undef R4
+#undef R6
+};
+
+/// \brief Reverse the bits in \p Val.
+template <typename T>
+T reverseBits(T Val) {
+ unsigned char in[sizeof(Val)];
+ unsigned char out[sizeof(Val)];
+ std::memcpy(in, &Val, sizeof(Val));
+ for (unsigned i = 0; i < sizeof(Val); ++i)
+ out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
+ std::memcpy(&Val, out, sizeof(Val));
+ return Val;
+}
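Each byte is reversed through the 256-entry table and the bytes are then emitted in reverse order, which reverses the whole value; a brief sketch:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    void reverseDemo() {
      assert(llvm::reverseBits<uint8_t>(0x01) == 0x80);          // bit 0 -> bit 7
      assert(llvm::reverseBits<uint32_t>(0x1u) == 0x80000000u);  // bit 0 -> bit 31
    }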
+
+// NOTE: The following support functions use the _32/_64 extensions instead of
+// type overloading so that signed and unsigned integers can be used without
+// ambiguity.
+
+/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
+inline uint32_t Hi_32(uint64_t Value) {
+ return static_cast<uint32_t>(Value >> 32);
+}
+
+/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
+inline uint32_t Lo_32(uint64_t Value) {
+ return static_cast<uint32_t>(Value);
+}
+
+/// Make_64 - This function makes a 64-bit integer from a high / low pair of
+/// 32-bit integers.
+inline uint64_t Make_64(uint32_t High, uint32_t Low) {
+ return ((uint64_t)High << 32) | (uint64_t)Low;
+}
+
+/// isInt - Checks if an integer fits into the given bit width.
+template<unsigned N>
+inline bool isInt(int64_t x) {
+ return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isInt<8>(int64_t x) {
+ return static_cast<int8_t>(x) == x;
+}
+template<>
+inline bool isInt<16>(int64_t x) {
+ return static_cast<int16_t>(x) == x;
+}
+template<>
+inline bool isInt<32>(int64_t x) {
+ return static_cast<int32_t>(x) == x;
+}
+
+/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedInt(int64_t x) {
+ return isInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
+/// isUInt - Checks if an unsigned integer fits into the given bit width.
+template<unsigned N>
+inline bool isUInt(uint64_t x) {
+ return N >= 64 || x < (UINT64_C(1)<<(N));
+}
+// Template specializations to get better code for common cases.
+template<>
+inline bool isUInt<8>(uint64_t x) {
+ return static_cast<uint8_t>(x) == x;
+}
+template<>
+inline bool isUInt<16>(uint64_t x) {
+ return static_cast<uint16_t>(x) == x;
+}
+template<>
+inline bool isUInt<32>(uint64_t x) {
+ return static_cast<uint32_t>(x) == x;
+}
+
+/// isShiftedUInt<N,S> - Checks if an unsigned integer is an N bit number shifted
+/// left by S.
+template<unsigned N, unsigned S>
+inline bool isShiftedUInt(uint64_t x) {
+ return isUInt<N+S>(x) && (x % (1<<S) == 0);
+}
+
+/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
+/// bit width.
+inline bool isUIntN(unsigned N, uint64_t x) {
+ return N >= 64 || x < (UINT64_C(1)<<(N));
+}
+
+/// isIntN - Checks if a signed integer fits into the given (dynamic)
+/// bit width.
+inline bool isIntN(unsigned N, int64_t x) {
+ return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
+}
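Spot checks of the fit predicates (a sketch; the extra parentheses are only needed because assert is a macro and the template argument list contains a comma):

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void fitsDemo() {
      assert(llvm::isInt<8>(-128) && !llvm::isInt<8>(128));   // [-128, 127]
      assert(llvm::isUInt<8>(255) && !llvm::isUInt<8>(256));  // [0, 255]
      assert((llvm::isShiftedUInt<3, 4>(0x70)));  // 0x70 = 7 << 4
      assert(llvm::isIntN(12, 2047) && !llvm::isIntN(12, 2048));
    }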
+
+/// isMask_32 - This function returns true if the argument is a non-empty
+/// sequence of ones starting at the least significant bit with the remainder
+/// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true.
+inline bool isMask_32(uint32_t Value) {
+ return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isMask_64 - This function returns true if the argument is a non-empty
+/// sequence of ones starting at the least significant bit with the remainder
+/// zero (64 bit version).
+inline bool isMask_64(uint64_t Value) {
+ return Value && ((Value + 1) & Value) == 0;
+}
+
+/// isShiftedMask_32 - This function returns true if the argument contains a
+/// non-empty sequence of ones with the remainder zero (32 bit version.)
+/// Ex. isShiftedMask_32(0x0000FF00U) == true.
+inline bool isShiftedMask_32(uint32_t Value) {
+ return Value && isMask_32((Value - 1) | Value);
+}
+
+/// isShiftedMask_64 - This function returns true if the argument contains a
+/// non-empty sequence of ones with the remainder zero (64 bit version.)
+inline bool isShiftedMask_64(uint64_t Value) {
+ return Value && isMask_64((Value - 1) | Value);
+}
+
+/// isPowerOf2_32 - This function returns true if the argument is a power of
+/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
+inline bool isPowerOf2_32(uint32_t Value) {
+ return Value && !(Value & (Value - 1));
+}
+
+/// isPowerOf2_64 - This function returns true if the argument is a power of two
+/// > 0 (64 bit edition.)
+inline bool isPowerOf2_64(uint64_t Value) {
+ return Value && !(Value & (Value - int64_t(1L)));
+}
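A sketch separating the three predicate families; note that all of them reject zero:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void maskDemo() {
      assert(llvm::isMask_32(0x0000FFFFu));          // ones anchored at bit 0
      assert(llvm::isShiftedMask_32(0x0000FF00u));   // contiguous ones anywhere
      assert(!llvm::isShiftedMask_32(0x0000FF0Fu));  // two separate runs: rejected
      assert(llvm::isPowerOf2_32(0x00100000u) && !llvm::isPowerOf2_32(0));
    }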
+
+/// ByteSwap_16 - This function returns a byte-swapped representation of the
+/// 16-bit argument, Value.
+inline uint16_t ByteSwap_16(uint16_t Value) {
+ return sys::SwapByteOrder_16(Value);
+}
+
+/// ByteSwap_32 - This function returns a byte-swapped representation of the
+/// 32-bit argument, Value.
+inline uint32_t ByteSwap_32(uint32_t Value) {
+ return sys::SwapByteOrder_32(Value);
+}
+
+/// ByteSwap_64 - This function returns a byte-swapped representation of the
+/// 64-bit argument, Value.
+inline uint64_t ByteSwap_64(uint64_t Value) {
+ return sys::SwapByteOrder_64(Value);
+}
+
+/// \brief Count the number of ones from the most significant bit to the first
+/// zero bit.
+///
+/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+ static_assert(std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ "Only unsigned integral types are allowed.");
+ return countLeadingZeros(~Value, ZB);
+}
+
+/// \brief Count the number of ones from the least significant bit to the first
+/// zero bit.
+///
+/// Ex. countTrailingOnes(0x00FF00FF) == 8.
+/// Only unsigned integral types are allowed.
+///
+/// \param ZB the behavior on an input of all ones. Only ZB_Width and
+/// ZB_Undefined are valid arguments.
+template <typename T>
+std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) {
+ static_assert(std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ "Only unsigned integral types are allowed.");
+ return countTrailingZeros(~Value, ZB);
+}
+
+namespace detail {
+template <typename T, std::size_t SizeOfT> struct PopulationCounter {
+ static unsigned count(T Value) {
+ // Generic version, forward to 32 bits.
+ static_assert(SizeOfT <= 4, "Not implemented!");
+#if __GNUC__ >= 4
+ return __builtin_popcount(Value);
+#else
+ uint32_t v = Value;
+ v = v - ((v >> 1) & 0x55555555);
+ v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+    return (((v + (v >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24;
+#endif
+ }
+};
+
+template <typename T> struct PopulationCounter<T, 8> {
+ static unsigned count(T Value) {
+#if __GNUC__ >= 4
+ return __builtin_popcountll(Value);
+#else
+ uint64_t v = Value;
+ v = v - ((v >> 1) & 0x5555555555555555ULL);
+ v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+ v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+ return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56);
+#endif
+ }
+};
+} // namespace detail
+
+/// \brief Count the number of set bits in a value.
+/// Ex. countPopulation(0xF000F000) == 8.
+/// Returns 0 if the word is zero.
+template <typename T>
+inline unsigned countPopulation(T Value) {
+ static_assert(std::numeric_limits<T>::is_integer &&
+ !std::numeric_limits<T>::is_signed,
+ "Only unsigned integral types are allowed.");
+ return detail::PopulationCounter<T, sizeof(T)>::count(Value);
+}
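The portable fallback is the classic SWAR reduction from Hacker's Delight: bits are summed in pairs, then nibbles, and a final multiply gathers the per-byte counts into the top byte. A usage sketch:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void popcountDemo() {
      assert(llvm::countPopulation(0xF000F000u) == 8); // two runs of four ones
      assert(llvm::countPopulation(0ull) == 0);
      assert(llvm::countPopulation(~0ull) == 64);
    }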
+
+/// Log2 - This function returns the log base 2 of the specified value
+inline double Log2(double Value) {
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
+ return __builtin_log(Value) / __builtin_log(2.0);
+#else
+ return log2(Value);
+#endif
+}
+
+/// Log2_32 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (32 bit edition.)
+/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
+inline unsigned Log2_32(uint32_t Value) {
+ return 31 - countLeadingZeros(Value);
+}
+
+/// Log2_64 - This function returns the floor log base 2 of the specified value,
+/// -1 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64(uint64_t Value) {
+ return 63 - countLeadingZeros(Value);
+}
+
+/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 32 if the value is zero. (32 bit edition).
+/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
+inline unsigned Log2_32_Ceil(uint32_t Value) {
+ return 32 - countLeadingZeros(Value - 1);
+}
+
+/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
+/// value, 64 if the value is zero. (64 bit edition.)
+inline unsigned Log2_64_Ceil(uint64_t Value) {
+ return 64 - countLeadingZeros(Value - 1);
+}
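A sketch of the floor/ceiling pair, including the zero sentinel:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void log2Demo() {
      assert(llvm::Log2_32(32) == 5 && llvm::Log2_32(6) == 2); // floor
      assert(llvm::Log2_32_Ceil(6) == 3);                      // ceiling
      // For zero, 31 - countLeadingZeros(0) wraps around to the
      // documented -1 sentinel.
      assert(llvm::Log2_32(0) == unsigned(-1));
    }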
+
+/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
+/// values using Euclid's algorithm.
+inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
+ while (B) {
+ uint64_t T = B;
+ B = A % B;
+ A = T;
+ }
+ return A;
+}
+
+/// BitsToDouble - This function takes a 64-bit integer and returns the bit
+/// equivalent double.
+inline double BitsToDouble(uint64_t Bits) {
+ union {
+ uint64_t L;
+ double D;
+ } T;
+ T.L = Bits;
+ return T.D;
+}
+
+/// BitsToFloat - This function takes a 32-bit integer and returns the bit
+/// equivalent float.
+inline float BitsToFloat(uint32_t Bits) {
+ union {
+ uint32_t I;
+ float F;
+ } T;
+ T.I = Bits;
+ return T.F;
+}
+
+/// DoubleToBits - This function takes a double and returns the bit
+/// equivalent 64-bit integer. Note that copying doubles around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint64_t DoubleToBits(double Double) {
+ union {
+ uint64_t L;
+ double D;
+ } T;
+ T.D = Double;
+ return T.L;
+}
+
+/// FloatToBits - This function takes a float and returns the bit
+/// equivalent 32-bit integer. Note that copying floats around
+/// changes the bits of NaNs on some hosts, notably x86, so this
+/// routine cannot be used if these bits are needed.
+inline uint32_t FloatToBits(float Float) {
+ union {
+ uint32_t I;
+ float F;
+ } T;
+ T.F = Float;
+ return T.I;
+}
+
+/// MinAlign - A and B are either alignments or offsets. Return the minimum
+/// alignment that may be assumed after adding the two together.
+inline uint64_t MinAlign(uint64_t A, uint64_t B) {
+ // The largest power of 2 that divides both A and B.
+ //
+ // Replace "-Value" by "1+~Value" in the following commented code to avoid
+ // MSVC warning C4146
+ // return (A | B) & -(A | B);
+ return (A | B) & (1 + ~(A | B));
+}
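The expression isolates the lowest set bit of A | B, which is the largest power of two dividing both inputs; a worked example:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void minAlignDemo() {
      // 16 | 24 = 0b11000, whose lowest set bit is 8: a 16-aligned base
      // plus an offset of 24 is only guaranteed to be 8-aligned.
      assert(llvm::MinAlign(16, 24) == 8);
      assert(llvm::MinAlign(4096, 0) == 4096); // offset 0 keeps full alignment
    }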
+
+/// \brief Aligns \c Addr to \c Alignment bytes, rounding up.
+///
+/// Alignment should be a power of two. This method rounds up, so
+/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
+inline uintptr_t alignAddr(const void *Addr, size_t Alignment) {
+ assert(Alignment && isPowerOf2_64((uint64_t)Alignment) &&
+ "Alignment is not a power of two!");
+
+ assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr);
+
+ return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1));
+}
+
+/// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment
+/// bytes, rounding up.
+inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
+ return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
+}
+
+/// NextPowerOf2 - Returns the next power of two (in 64-bits)
+/// that is strictly greater than A. Returns zero on overflow.
+inline uint64_t NextPowerOf2(uint64_t A) {
+ A |= (A >> 1);
+ A |= (A >> 2);
+ A |= (A >> 4);
+ A |= (A >> 8);
+ A |= (A >> 16);
+ A |= (A >> 32);
+ return A + 1;
+}
+
+/// Returns the power of two which is less than or equal to the given value.
+/// Essentially, it is a floor operation across the domain of powers of two.
+inline uint64_t PowerOf2Floor(uint64_t A) {
+ if (!A) return 0;
+ return 1ull << (63 - countLeadingZeros(A, ZB_Undefined));
+}
+
+/// Returns the next integer (mod 2**64) that is greater than or equal to
+/// \p Value and is a multiple of \p Align. \p Align must be non-zero.
+///
+/// If non-zero \p Skew is specified, the return value will be a minimal
+/// integer that is greater than or equal to \p Value and equal to
+/// \p Align * N + \p Skew for some integer N. If \p Skew is larger than
+/// \p Align, its value is adjusted to '\p Skew mod \p Align'.
+///
+/// Examples:
+/// \code
+/// RoundUpToAlignment(5, 8) = 8
+/// RoundUpToAlignment(17, 8) = 24
+/// RoundUpToAlignment(~0LL, 8) = 0
+/// RoundUpToAlignment(321, 255) = 510
+///
+/// RoundUpToAlignment(5, 8, 7) = 7
+/// RoundUpToAlignment(17, 8, 1) = 17
+/// RoundUpToAlignment(~0LL, 8, 3) = 3
+/// RoundUpToAlignment(321, 255, 42) = 552
+/// \endcode
+inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align,
+ uint64_t Skew = 0) {
+ Skew %= Align;
+ return (Value + Align - 1 - Skew) / Align * Align + Skew;
+}
+
+/// Returns the offset to the next integer (mod 2**64) that is greater than
+/// or equal to \p Value and is a multiple of \p Align. \p Align must be
+/// non-zero.
+inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) {
+ return RoundUpToAlignment(Value, Align) - Value;
+}
+
+/// SignExtend32 - Sign extend B-bit number x to 32-bit int.
+/// Usage int32_t r = SignExtend32<5>(x);
+template <unsigned B> inline int32_t SignExtend32(uint32_t x) {
+ return int32_t(x << (32 - B)) >> (32 - B);
+}
+
+/// \brief Sign extend number in the bottom B bits of X to a 32-bit int.
+/// Requires 0 < B <= 32.
+inline int32_t SignExtend32(uint32_t X, unsigned B) {
+ return int32_t(X << (32 - B)) >> (32 - B);
+}
+
+/// SignExtend64 - Sign extend B-bit number x to 64-bit int.
+/// Usage int64_t r = SignExtend64<5>(x);
+template <unsigned B> inline int64_t SignExtend64(uint64_t x) {
+ return int64_t(x << (64 - B)) >> (64 - B);
+}
+
+/// \brief Sign extend number in the bottom B bits of X to a 64-bit int.
+/// Requires 0 < B <= 64.
+inline int64_t SignExtend64(uint64_t X, unsigned B) {
+ return int64_t(X << (64 - B)) >> (64 - B);
+}
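A sketch of both forms; 0x1F is the 5-bit two's complement encoding of -1:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>

    void signExtendDemo() {
      assert(llvm::SignExtend32<5>(0x1F) == -1);  // sign bit set
      assert(llvm::SignExtend32<5>(0x0F) == 15);  // sign bit clear
      assert(llvm::SignExtend64(0x80ull, 8) == -128);
    }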
+
+/// \brief Add two unsigned integers, X and Y, of type T.
+/// Clamp the result to the maximum representable value of T on overflow.
+/// ResultOverflowed indicates if the result is larger than the maximum
+/// representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
+ bool Dummy;
+ bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+ // Hacker's Delight, p. 29
+ T Z = X + Y;
+ Overflowed = (Z < X || Z < Y);
+ if (Overflowed)
+ return std::numeric_limits<T>::max();
+ else
+ return Z;
+}
+
+/// \brief Multiply two unsigned integers, X and Y, of type T.
+/// Clamp the result to the maximum representable value of T on overflow.
+/// ResultOverflowed indicates if the result is larger than the maximum
+/// representable value of type T.
+template <typename T>
+typename std::enable_if<std::is_unsigned<T>::value, T>::type
+SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
+ bool Dummy;
+ bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
+
+ // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
+ // because it fails for uint16_t (where multiplication can have undefined
+ // behavior due to promotion to int), and requires a division in addition
+ // to the multiplication.
+
+ Overflowed = false;
+
+ // Log2(Z) would be either Log2Z or Log2Z + 1.
+ // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
+ // will necessarily be less than Log2Max as desired.
+ int Log2Z = Log2_64(X) + Log2_64(Y);
+ const T Max = std::numeric_limits<T>::max();
+ int Log2Max = Log2_64(Max);
+ if (Log2Z < Log2Max) {
+ return X * Y;
+ }
+ if (Log2Z > Log2Max) {
+ Overflowed = true;
+ return Max;
+ }
+
+ // We're going to use the top bit, and maybe overflow one
+ // bit past it. Multiply all but the bottom bit then add
+ // that on at the end.
+ T Z = (X >> 1) * Y;
+ if (Z & ~(Max >> 1)) {
+ Overflowed = true;
+ return Max;
+ }
+ Z <<= 1;
+ if (X & 1)
+ return SaturatingAdd(Z, Y, ResultOverflowed);
+
+ return Z;
+}
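A usage sketch with deliberately narrow types so the clamping is visible:

    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    void saturatingDemo() {
      bool Overflowed;
      assert(llvm::SaturatingAdd<uint8_t>(200, 100, &Overflowed) == 255);
      assert(Overflowed); // 300 does not fit in 8 bits, so the sum clamps
      assert(llvm::SaturatingMultiply<uint16_t>(300, 300, &Overflowed) == 65535);
      assert(Overflowed);
      assert(llvm::SaturatingMultiply<uint16_t>(250, 250, &Overflowed) == 62500);
      assert(!Overflowed);
    }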
+
+extern const float huge_valf;
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Memory.h b/contrib/llvm/include/llvm/Support/Memory.h
new file mode 100644
index 0000000..8103aea
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Memory.h
@@ -0,0 +1,186 @@
+//===- llvm/Support/Memory.h - Memory Support -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Memory class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MEMORY_H
+#define LLVM_SUPPORT_MEMORY_H
+
+#include "llvm/Support/DataTypes.h"
+#include <string>
+#include <system_error>
+
+namespace llvm {
+namespace sys {
+
+ /// This class encapsulates the notion of a memory block which has an address
+ /// and a size. It is used by the Memory class (a friend) as the result of
+ /// various memory allocation operations.
+ /// @see Memory
+ /// @brief Memory block abstraction.
+ class MemoryBlock {
+ public:
+ MemoryBlock() : Address(nullptr), Size(0) { }
+ MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
+ void *base() const { return Address; }
+ size_t size() const { return Size; }
+
+ private:
+ void *Address; ///< Address of first byte of memory area
+ size_t Size; ///< Size, in bytes of the memory area
+ friend class Memory;
+ };
+
+ /// This class provides various memory handling functions that manipulate
+ /// MemoryBlock instances.
+ /// @since 1.4
+ /// @brief An abstraction for memory operations.
+ class Memory {
+ public:
+ enum ProtectionFlags {
+ MF_READ = 0x1000000,
+ MF_WRITE = 0x2000000,
+ MF_EXEC = 0x4000000
+ };
+
+ /// This method allocates a block of memory that is suitable for loading
+ /// dynamically generated code (e.g. JIT). An attempt to allocate
+ /// \p NumBytes bytes of virtual memory is made.
+ /// \p NearBlock may point to an existing allocation in which case
+ /// an attempt is made to allocate more memory near the existing block.
+ /// The actual allocated address is not guaranteed to be near the requested
+ /// address.
+ /// \p Flags is used to set the initial protection flags for the block
+ /// of the memory.
+ /// \p EC [out] returns an object describing any error that occurs.
+ ///
+ /// This method may allocate more than the number of bytes requested. The
+ /// actual number of bytes allocated is indicated in the returned
+ /// MemoryBlock.
+ ///
+ /// The start of the allocated block must be aligned with the
+ /// system allocation granularity (64K on Windows, page size on Linux).
+ /// If the address following \p NearBlock is not so aligned, it will be
+ /// rounded up to the next allocation granularity boundary.
+ ///
+ /// \r a non-null MemoryBlock if the function was successful,
+  /// otherwise a null MemoryBlock, with \p EC describing the error.
+ ///
+ /// @brief Allocate mapped memory.
+ static MemoryBlock allocateMappedMemory(size_t NumBytes,
+ const MemoryBlock *const NearBlock,
+ unsigned Flags,
+ std::error_code &EC);
+
+ /// This method releases a block of memory that was allocated with the
+ /// allocateMappedMemory method. It should not be used to release any
+ /// memory block allocated any other way.
+ /// \p Block describes the memory to be released.
+ ///
+ /// \r error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Release mapped memory.
+ static std::error_code releaseMappedMemory(MemoryBlock &Block);
+
+ /// This method sets the protection flags for a block of memory to the
+ /// state specified by /p Flags. The behavior is not specified if the
+ /// memory was not allocated using the allocateMappedMemory method.
+ /// \p Block describes the memory block to be protected.
+ /// \p Flags specifies the new protection state to be assigned to the block.
+ /// \p ErrMsg [out] returns a string describing any error that occurred.
+ ///
+ /// If \p Flags is MF_WRITE, the actual behavior varies
+ /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the
+ /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386).
+ ///
+ /// \r error_success if the function was successful, or an error_code
+ /// describing the failure if an error occurred.
+ ///
+ /// @brief Set memory protection state.
+ static std::error_code protectMappedMemory(const MemoryBlock &Block,
+ unsigned Flags);
+
+ /// This method allocates a block of Read/Write/Execute memory that is
+ /// suitable for executing dynamically generated code (e.g. JIT). An
+ /// attempt to allocate \p NumBytes bytes of virtual memory is made.
+ /// \p NearBlock may point to an existing allocation in which case
+ /// an attempt is made to allocate more memory near the existing block.
+ ///
+ /// On success, this returns a non-null memory block, otherwise it returns
+ /// a null memory block and fills in *ErrMsg.
+ ///
+ /// @brief Allocate Read/Write/Execute memory.
+ static MemoryBlock AllocateRWX(size_t NumBytes,
+ const MemoryBlock *NearBlock,
+ std::string *ErrMsg = nullptr);
+
+ /// This method releases a block of Read/Write/Execute memory that was
+ /// allocated with the AllocateRWX method. It should not be used to
+ /// release any memory block allocated any other way.
+ ///
+ /// On success, this returns false, otherwise it returns true and fills
+ /// in *ErrMsg.
+ /// @brief Release Read/Write/Execute memory.
+ static bool ReleaseRWX(MemoryBlock &block, std::string *ErrMsg = nullptr);
+
+ /// InvalidateInstructionCache - Before the JIT can run a block of code
+ /// that has been emitted it must invalidate the instruction cache on some
+ /// platforms.
+ static void InvalidateInstructionCache(const void *Addr, size_t Len);
+
+ /// setExecutable - Before the JIT can run a block of code, it has to be
+ /// given read and executable privilege. Return true if it is already r-x
+    /// or the system is able to change its privilege.
+ static bool setExecutable(MemoryBlock &M, std::string *ErrMsg = nullptr);
+
+ /// setWritable - When adding to a block of code, the JIT may need
+ /// to mark a block of code as RW since the protections are on page
+ /// boundaries, and the JIT internal allocations are not page aligned.
+ static bool setWritable(MemoryBlock &M, std::string *ErrMsg = nullptr);
+
+ /// setRangeExecutable - Mark the page containing a range of addresses
+ /// as executable.
+ static bool setRangeExecutable(const void *Addr, size_t Size);
+
+ /// setRangeWritable - Mark the page containing a range of addresses
+ /// as writable.
+ static bool setRangeWritable(const void *Addr, size_t Size);
+ };
+
+ /// Owning version of MemoryBlock.
+ class OwningMemoryBlock {
+ public:
+ OwningMemoryBlock() = default;
+ explicit OwningMemoryBlock(MemoryBlock M) : M(M) {}
+ OwningMemoryBlock(OwningMemoryBlock &&Other) {
+ M = Other.M;
+ Other.M = MemoryBlock();
+ }
+ OwningMemoryBlock& operator=(OwningMemoryBlock &&Other) {
+ M = Other.M;
+ Other.M = MemoryBlock();
+ return *this;
+ }
+ ~OwningMemoryBlock() {
+ Memory::releaseMappedMemory(M);
+ }
+ void *base() const { return M.base(); }
+ size_t size() const { return M.size(); }
+ MemoryBlock getMemoryBlock() const { return M; }
+ private:
+ MemoryBlock M;
+ };
+
+}
+}
+
+#endif
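A sketch of the allocate/protect/release life cycle for JIT'd code under the usual W^X discipline (error handling abbreviated; the function name is illustrative):

    #include "llvm/Support/Memory.h"
    #include <system_error>

    bool emitAndRun() {
      using namespace llvm::sys;
      std::error_code EC;
      // Request one writable page; the block may be rounded up to the
      // system's allocation granularity.
      MemoryBlock MB = Memory::allocateMappedMemory(
          4096, /*NearBlock=*/nullptr, Memory::MF_READ | Memory::MF_WRITE, EC);
      if (EC)
        return false;
      // ... copy generated machine code into MB.base() ...
      // Flip the block to read+execute, then flush the instruction cache.
      if (Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC))
        return false;
      Memory::InvalidateInstructionCache(MB.base(), MB.size());
      // ... call into the code ...
      return !Memory::releaseMappedMemory(MB);
    }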
diff --git a/contrib/llvm/include/llvm/Support/MemoryBuffer.h b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
new file mode 100644
index 0000000..73d6435
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MemoryBuffer.h
@@ -0,0 +1,173 @@
+//===--- MemoryBuffer.h - Memory Buffer Interface ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MemoryBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MEMORYBUFFER_H
+#define LLVM_SUPPORT_MEMORYBUFFER_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorOr.h"
+#include <memory>
+
+namespace llvm {
+class MemoryBufferRef;
+
+/// This interface provides simple read-only access to a block of memory, and
+/// provides simple methods for reading files and standard input into a memory
+/// buffer. In addition to basic access to the characters in the file, this
+/// interface guarantees you can read one character past the end of the file,
+/// and that this character will read as '\0'.
+///
+/// The '\0' guarantee is needed to support an optimization -- it's intended to
+/// be more efficient for clients which are reading all the data to stop
+/// reading when they encounter a '\0' than to continually check the file
+/// position to see if it has reached the end of the file.
+class MemoryBuffer {
+ const char *BufferStart; // Start of the buffer.
+ const char *BufferEnd; // End of the buffer.
+
+ MemoryBuffer(const MemoryBuffer &) = delete;
+ MemoryBuffer &operator=(const MemoryBuffer &) = delete;
+protected:
+ MemoryBuffer() {}
+ void init(const char *BufStart, const char *BufEnd,
+ bool RequiresNullTerminator);
+public:
+ virtual ~MemoryBuffer();
+
+ const char *getBufferStart() const { return BufferStart; }
+ const char *getBufferEnd() const { return BufferEnd; }
+ size_t getBufferSize() const { return BufferEnd-BufferStart; }
+
+ StringRef getBuffer() const {
+ return StringRef(BufferStart, getBufferSize());
+ }
+
+ /// Return an identifier for this buffer, typically the filename it was read
+ /// from.
+ virtual const char *getBufferIdentifier() const {
+ return "Unknown buffer";
+ }
+
+ /// Open the specified file as a MemoryBuffer, returning a new MemoryBuffer
+  /// if successful, otherwise returning an error. If FileSize is specified, this
+ /// means that the client knows that the file exists and that it has the
+ /// specified size.
+ ///
+ /// \param IsVolatileSize Set to true to indicate that the file size may be
+ /// changing, e.g. when libclang tries to parse while the user is
+ /// editing/updating the file.
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getFile(const Twine &Filename, int64_t FileSize = -1,
+ bool RequiresNullTerminator = true, bool IsVolatileSize = false);
+
+ /// Given an already-open file descriptor, map some slice of it into a
+ /// MemoryBuffer. The slice is specified by an \p Offset and \p MapSize.
+ /// Since this is in the middle of a file, the buffer is not null terminated.
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getOpenFileSlice(int FD, const Twine &Filename, uint64_t MapSize,
+ int64_t Offset);
+
+ /// Given an already-open file descriptor, read the file and return a
+ /// MemoryBuffer.
+ ///
+ /// \param IsVolatileSize Set to true to indicate that the file size may be
+ /// changing, e.g. when libclang tries to parse while the user is
+ /// editing/updating the file.
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getOpenFile(int FD, const Twine &Filename, uint64_t FileSize,
+ bool RequiresNullTerminator = true, bool IsVolatileSize = false);
+
+ /// Open the specified memory range as a MemoryBuffer. Note that InputData
+ /// must be null terminated if RequiresNullTerminator is true.
+ static std::unique_ptr<MemoryBuffer>
+ getMemBuffer(StringRef InputData, StringRef BufferName = "",
+ bool RequiresNullTerminator = true);
+
+ static std::unique_ptr<MemoryBuffer>
+ getMemBuffer(MemoryBufferRef Ref, bool RequiresNullTerminator = true);
+
+ /// Open the specified memory range as a MemoryBuffer, copying the contents
+ /// and taking ownership of it. InputData does not have to be null terminated.
+ static std::unique_ptr<MemoryBuffer>
+ getMemBufferCopy(StringRef InputData, const Twine &BufferName = "");
+
+ /// Allocate a new zero-initialized MemoryBuffer of the specified size. Note
+ /// that the caller need not initialize the memory allocated by this method.
+ /// The memory is owned by the MemoryBuffer object.
+ static std::unique_ptr<MemoryBuffer>
+ getNewMemBuffer(size_t Size, StringRef BufferName = "");
+
+ /// Allocate a new MemoryBuffer of the specified size that is not initialized.
+ /// Note that the caller should initialize the memory allocated by this
+ /// method. The memory is owned by the MemoryBuffer object.
+ static std::unique_ptr<MemoryBuffer>
+ getNewUninitMemBuffer(size_t Size, const Twine &BufferName = "");
+
+ /// Read all of stdin into a file buffer, and return it.
+ static ErrorOr<std::unique_ptr<MemoryBuffer>> getSTDIN();
+
+ /// Open the specified file as a MemoryBuffer, or open stdin if the Filename
+ /// is "-".
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getFileOrSTDIN(const Twine &Filename, int64_t FileSize = -1,
+ bool RequiresNullTerminator = true);
+
+ /// Map a subrange of the specified file as a MemoryBuffer.
+ static ErrorOr<std::unique_ptr<MemoryBuffer>>
+ getFileSlice(const Twine &Filename, uint64_t MapSize, uint64_t Offset);
+
+ //===--------------------------------------------------------------------===//
+ // Provided for performance analysis.
+ //===--------------------------------------------------------------------===//
+
+ /// The kind of memory backing used to support the MemoryBuffer.
+ enum BufferKind {
+ MemoryBuffer_Malloc,
+ MemoryBuffer_MMap
+ };
+
+ /// Return information on the memory mechanism used to support the
+ /// MemoryBuffer.
+ virtual BufferKind getBufferKind() const = 0;
+
+ MemoryBufferRef getMemBufferRef() const;
+};
+
+class MemoryBufferRef {
+ StringRef Buffer;
+ StringRef Identifier;
+
+public:
+ MemoryBufferRef() {}
+ MemoryBufferRef(MemoryBuffer& Buffer)
+ : Buffer(Buffer.getBuffer()), Identifier(Buffer.getBufferIdentifier()) {}
+ MemoryBufferRef(StringRef Buffer, StringRef Identifier)
+ : Buffer(Buffer), Identifier(Identifier) {}
+
+ StringRef getBuffer() const { return Buffer; }
+
+ StringRef getBufferIdentifier() const { return Identifier; }
+
+ const char *getBufferStart() const { return Buffer.begin(); }
+ const char *getBufferEnd() const { return Buffer.end(); }
+ size_t getBufferSize() const { return Buffer.size(); }
+};
+
+// Create wrappers for C Binding types (see CBindingWrapping.h).
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MemoryBuffer, LLVMMemoryBufferRef)
+
+} // end namespace llvm
+
+#endif
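A sketch of the common read-a-file pattern; ErrorOr carries either the buffer or a std::error_code, and the trailing '\0' guarantee makes linear scans safe:

    #include "llvm/Support/MemoryBuffer.h"

    long countLines(const llvm::Twine &Path) {
      auto BufOrErr = llvm::MemoryBuffer::getFile(Path);
      if (!BufOrErr)
        return -1; // BufOrErr.getError() holds the error code
      llvm::StringRef Data = (*BufOrErr)->getBuffer();
      return static_cast<long>(Data.count('\n'));
    }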
diff --git a/contrib/llvm/include/llvm/Support/MemoryObject.h b/contrib/llvm/include/llvm/Support/MemoryObject.h
new file mode 100644
index 0000000..e0c8749
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MemoryObject.h
@@ -0,0 +1,68 @@
+//===- MemoryObject.h - Abstract memory interface ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MEMORYOBJECT_H
+#define LLVM_SUPPORT_MEMORYOBJECT_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// Interface to data which might be streamed. Streamability has two important
+/// implications/restrictions. First, the data might not yet exist in memory
+/// when the request is made. This just means that readBytes might have
+/// to block or do some work to get it. More significantly, the exact size of
+/// the object might not be known until it has all been fetched. This means that
+/// to return the right result, getExtent must also wait for all the data to
+/// arrive; therefore it should not be called on objects which are actually
+/// streamed (this would defeat the purpose of streaming). Instead,
+/// isValidAddress can be used to test addresses without knowing the exact size
+/// of the stream. Finally, getPointer can be used instead of readBytes to avoid
+/// extra copying.
+class MemoryObject {
+public:
+ virtual ~MemoryObject();
+
+ /// Returns the size of the region in bytes. (The region is contiguous, so
+ /// the highest valid address of the region is getExtent() - 1).
+ ///
+ /// @result - The size of the region.
+ virtual uint64_t getExtent() const = 0;
+
+ /// Tries to read a contiguous range of bytes from the region, up to the end
+ /// of the region.
+ ///
+ /// @param Buf - A pointer to a buffer to be filled in. Must be non-NULL
+ /// and large enough to hold size bytes.
+ /// @param Size - The number of bytes to copy.
+ /// @param Address - The address of the first byte, in the same space as
+ /// getBase().
+ /// @result - The number of bytes read.
+ virtual uint64_t readBytes(uint8_t *Buf, uint64_t Size,
+ uint64_t Address) const = 0;
+
+ /// Ensures that the requested data is in memory, and returns a pointer to it.
+ /// More efficient than using readBytes if the data is already in memory. May
+  /// block until (address - base + size) bytes have been read.
+ /// @param address - address of the byte, in the same space as getBase()
+ /// @param size - amount of data that must be available on return
+ /// @result - valid pointer to the requested data
+ virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const = 0;
+
+ /// Returns true if the address is within the object (i.e. between base and
+ /// base + extent - 1 inclusive). May block until (address - base) bytes have
+  /// been read.
+ /// @param address - address of the byte, in the same space as getBase()
+  /// @result - true if the address may be read with readBytes()
+ virtual bool isValidAddress(uint64_t address) const = 0;
+};
+
+}
+
+#endif
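A minimal concrete implementation over a flat buffer (a sketch; a genuinely streamed subclass would block in readBytes instead of answering immediately):

    #include "llvm/Support/MemoryObject.h"
    #include <algorithm>
    #include <cstring>

    class RawMemoryObject : public llvm::MemoryObject {
      const uint8_t *Base;
      uint64_t Extent;
    public:
      RawMemoryObject(const uint8_t *Base, uint64_t Extent)
          : Base(Base), Extent(Extent) {}
      uint64_t getExtent() const override { return Extent; }
      uint64_t readBytes(uint8_t *Buf, uint64_t Size,
                         uint64_t Address) const override {
        if (Address >= Extent)
          return 0;
        uint64_t N = std::min(Size, Extent - Address);
        std::memcpy(Buf, Base + Address, N);
        return N; // may be shorter than Size at the end of the region
      }
      const uint8_t *getPointer(uint64_t Address, uint64_t) const override {
        return Base + Address; // the data is already resident
      }
      bool isValidAddress(uint64_t Address) const override {
        return Address < Extent;
      }
    };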
diff --git a/contrib/llvm/include/llvm/Support/MipsABIFlags.h b/contrib/llvm/include/llvm/Support/MipsABIFlags.h
new file mode 100644
index 0000000..93f6b41
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MipsABIFlags.h
@@ -0,0 +1,102 @@
+//===--- MipsABIFlags.h - MIPS ABI flags ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the constants for the ABI flags structure contained
+// in the .MIPS.abiflags section.
+//
+// https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MIPSABIFLAGS_H
+#define LLVM_SUPPORT_MIPSABIFLAGS_H
+
+namespace llvm {
+namespace Mips {
+
+// Values for the xxx_size bytes of an ABI flags structure.
+enum AFL_REG {
+ AFL_REG_NONE = 0x00, // No registers
+ AFL_REG_32 = 0x01, // 32-bit registers
+ AFL_REG_64 = 0x02, // 64-bit registers
+ AFL_REG_128 = 0x03 // 128-bit registers
+};
+
+// Masks for the ases word of an ABI flags structure.
+enum AFL_ASE {
+ AFL_ASE_DSP = 0x00000001, // DSP ASE
+ AFL_ASE_DSPR2 = 0x00000002, // DSP R2 ASE
+ AFL_ASE_EVA = 0x00000004, // Enhanced VA Scheme
+ AFL_ASE_MCU = 0x00000008, // MCU (MicroController) ASE
+ AFL_ASE_MDMX = 0x00000010, // MDMX ASE
+ AFL_ASE_MIPS3D = 0x00000020, // MIPS-3D ASE
+ AFL_ASE_MT = 0x00000040, // MT ASE
+ AFL_ASE_SMARTMIPS = 0x00000080, // SmartMIPS ASE
+ AFL_ASE_VIRT = 0x00000100, // VZ ASE
+ AFL_ASE_MSA = 0x00000200, // MSA ASE
+ AFL_ASE_MIPS16 = 0x00000400, // MIPS16 ASE
+ AFL_ASE_MICROMIPS = 0x00000800, // MICROMIPS ASE
+ AFL_ASE_XPA = 0x00001000 // XPA ASE
+};
+
+// Values for the isa_ext word of an ABI flags structure.
+enum AFL_EXT {
+ AFL_EXT_NONE = 0, // None
+ AFL_EXT_XLR = 1, // RMI Xlr instruction
+ AFL_EXT_OCTEON2 = 2, // Cavium Networks Octeon2
+ AFL_EXT_OCTEONP = 3, // Cavium Networks OcteonP
+ AFL_EXT_LOONGSON_3A = 4, // Loongson 3A
+ AFL_EXT_OCTEON = 5, // Cavium Networks Octeon
+ AFL_EXT_5900 = 6, // MIPS R5900 instruction
+ AFL_EXT_4650 = 7, // MIPS R4650 instruction
+ AFL_EXT_4010 = 8, // LSI R4010 instruction
+ AFL_EXT_4100 = 9, // NEC VR4100 instruction
+ AFL_EXT_3900 = 10, // Toshiba R3900 instruction
+ AFL_EXT_10000 = 11, // MIPS R10000 instruction
+ AFL_EXT_SB1 = 12, // Broadcom SB-1 instruction
+ AFL_EXT_4111 = 13, // NEC VR4111/VR4181 instruction
+ AFL_EXT_4120 = 14, // NEC VR4120 instruction
+ AFL_EXT_5400 = 15, // NEC VR5400 instruction
+ AFL_EXT_5500 = 16, // NEC VR5500 instruction
+ AFL_EXT_LOONGSON_2E = 17, // ST Microelectronics Loongson 2E
+ AFL_EXT_LOONGSON_2F = 18, // ST Microelectronics Loongson 2F
+ AFL_EXT_OCTEON3 = 19 // Cavium Networks Octeon3
+};
+
+// Values for the flags1 word of an ABI flags structure.
+enum AFL_FLAGS1 { AFL_FLAGS1_ODDSPREG = 1 };
+
+// MIPS object attribute tags
+enum {
+ Tag_GNU_MIPS_ABI_FP = 4, // Floating-point ABI used by this object file
+ Tag_GNU_MIPS_ABI_MSA = 8, // MSA ABI used by this object file
+};
+
+// Values for the fp_abi word of an ABI flags structure
+// and for the Tag_GNU_MIPS_ABI_FP attribute tag.
+enum Val_GNU_MIPS_ABI_FP {
+ Val_GNU_MIPS_ABI_FP_ANY = 0, // not tagged
+ Val_GNU_MIPS_ABI_FP_DOUBLE = 1, // hard float / -mdouble-float
+ Val_GNU_MIPS_ABI_FP_SINGLE = 2, // hard float / -msingle-float
+ Val_GNU_MIPS_ABI_FP_SOFT = 3, // soft float
+ Val_GNU_MIPS_ABI_FP_OLD_64 = 4, // -mips32r2 -mfp64
+ Val_GNU_MIPS_ABI_FP_XX = 5, // -mfpxx
+ Val_GNU_MIPS_ABI_FP_64 = 6, // -mips32r2 -mfp64
+ Val_GNU_MIPS_ABI_FP_64A = 7 // -mips32r2 -mfp64 -mno-odd-spreg
+};
+
+// Values for the Tag_GNU_MIPS_ABI_MSA attribute tag.
+enum Val_GNU_MIPS_ABI_MSA {
+ Val_GNU_MIPS_ABI_MSA_ANY = 0, // not tagged
+ Val_GNU_MIPS_ABI_MSA_128 = 1 // 128-bit MSA
+};
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Mutex.h b/contrib/llvm/include/llvm/Support/Mutex.h
new file mode 100644
index 0000000..0f4e61a
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Mutex.h
@@ -0,0 +1,158 @@
+//===- llvm/Support/Mutex.h - Mutex Operating System Concept -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Mutex class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MUTEX_H
+#define LLVM_SUPPORT_MUTEX_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm
+{
+ namespace sys
+ {
+ /// @brief Platform agnostic Mutex class.
+ class MutexImpl
+ {
+ /// @name Constructors
+ /// @{
+ public:
+
+      /// Initializes the lock but doesn't acquire it. If \p recursive is set
+ /// to false, the lock will not be recursive which makes it cheaper but
+ /// also more likely to deadlock (same thread can't acquire more than
+ /// once).
+ /// @brief Default Constructor.
+ explicit MutexImpl(bool recursive = true);
+
+ /// Releases and removes the lock
+ /// @brief Destructor
+ ~MutexImpl();
+
+ /// @}
+ /// @name Methods
+ /// @{
+ public:
+
+ /// Attempts to unconditionally acquire the lock. If the lock is held by
+ /// another thread, this method will wait until it can acquire the lock.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally acquire the lock.
+ bool acquire();
+
+ /// Attempts to release the lock. If the lock is held by the current
+ /// thread, the lock is released allowing other threads to acquire the
+ /// lock.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally release the lock.
+ bool release();
+
+ /// Attempts to acquire the lock without blocking. If the lock is not
+ /// available, this function returns false quickly (without blocking). If
+ /// the lock is available, it is acquired.
+ /// @returns false if any kind of error occurs or the lock is not
+ /// available, true otherwise.
+ /// @brief Try to acquire the lock.
+ bool tryacquire();
+
+      /// @}
+ /// @name Platform Dependent Data
+ /// @{
+ private:
+#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
+ void* data_; ///< We don't know what the data will be
+#endif
+
+ /// @}
+ /// @name Do Not Implement
+ /// @{
+ private:
+ MutexImpl(const MutexImpl &) = delete;
+ void operator=(const MutexImpl &) = delete;
+ /// @}
+ };
+
+
+ /// SmartMutex - A mutex with a compile time constant parameter that
+ /// indicates whether this mutex should become a no-op when we're not
+ /// running in multithreaded mode.
+ template<bool mt_only>
+ class SmartMutex {
+ MutexImpl impl;
+ unsigned acquired;
+ bool recursive;
+ public:
+ explicit SmartMutex(bool rec = true) :
+ impl(rec), acquired(0), recursive(rec) { }
+
+ bool lock() {
+ if (!mt_only || llvm_is_multithreaded()) {
+ return impl.acquire();
+ } else {
+ // Single-threaded debugging code. This would be racy in
+          // multithreaded mode, but provides sanity checks in
+          // single-threaded mode.
+ assert((recursive || acquired == 0) && "Lock already acquired!!");
+ ++acquired;
+ return true;
+ }
+ }
+
+ bool unlock() {
+ if (!mt_only || llvm_is_multithreaded()) {
+ return impl.release();
+ } else {
+ // Single-threaded debugging code. This would be racy in
+          // multithreaded mode, but provides sanity checks in
+          // single-threaded mode.
+ assert(((recursive && acquired) || (acquired == 1)) &&
+ "Lock not acquired before release!");
+ --acquired;
+ return true;
+ }
+ }
+
+ bool try_lock() {
+ if (!mt_only || llvm_is_multithreaded())
+ return impl.tryacquire();
+ else return true;
+ }
+
+ private:
+ SmartMutex(const SmartMutex<mt_only> & original);
+ void operator=(const SmartMutex<mt_only> &);
+ };
+
+ /// Mutex - A standard, always enforced mutex.
+ typedef SmartMutex<false> Mutex;
+
+ template<bool mt_only>
+ class SmartScopedLock {
+ SmartMutex<mt_only>& mtx;
+
+ public:
+ SmartScopedLock(SmartMutex<mt_only>& m) : mtx(m) {
+ mtx.lock();
+ }
+
+ ~SmartScopedLock() {
+ mtx.unlock();
+ }
+ };
+
+ typedef SmartScopedLock<false> ScopedLock;
+ }
+}
+
+#endif
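A sketch of the RAII pattern the header is built around; Mutex (SmartMutex<false>) always takes the real lock, while SmartMutex<true> degrades to assertion-only checks when threading is disabled:

    #include "llvm/Support/Mutex.h"

    static llvm::sys::Mutex CounterLock;
    static unsigned Counter = 0;

    unsigned bumpCounter() {
      // Acquired here, released when Guard is destroyed, even on early return.
      llvm::sys::ScopedLock Guard(CounterLock);
      return ++Counter;
    }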
diff --git a/contrib/llvm/include/llvm/Support/MutexGuard.h b/contrib/llvm/include/llvm/Support/MutexGuard.h
new file mode 100644
index 0000000..07b64b6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/MutexGuard.h
@@ -0,0 +1,41 @@
+//===-- Support/MutexGuard.h - Acquire/Release Mutex In Scope ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a guard for a block of code that ensures a Mutex is locked
+// upon construction and released upon destruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_MUTEXGUARD_H
+#define LLVM_SUPPORT_MUTEXGUARD_H
+
+#include "llvm/Support/Mutex.h"
+
+namespace llvm {
+ /// Instances of this class acquire a given Mutex Lock when constructed and
+ /// hold that lock until destruction. The intention is to instantiate one of
+ /// these on the stack at the top of some scope to be assured that C++
+ /// destruction of the object will always release the Mutex and thus avoid
+ /// a host of nasty multi-threading problems in the face of exceptions, etc.
+ /// @brief Guard a section of code with a Mutex.
+ class MutexGuard {
+ sys::Mutex &M;
+ MutexGuard(const MutexGuard &) = delete;
+ void operator=(const MutexGuard &) = delete;
+ public:
+ MutexGuard(sys::Mutex &m) : M(m) { M.lock(); }
+ ~MutexGuard() { M.unlock(); }
+ /// holds - Returns true if this locker instance holds the specified lock.
+ /// This is mostly used in assertions to validate that the correct mutex
+ /// is held.
+ bool holds(const sys::Mutex& lock) const { return &M == &lock; }
+ };
+}
+
+#endif // LLVM_SUPPORT_MUTEXGUARD_H
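
A minimal sketch of the guard in use (touchState and StateLock are hypothetical names introduced for illustration):

    #include <cassert>
    #include "llvm/Support/Mutex.h"
    #include "llvm/Support/MutexGuard.h"

    static llvm::sys::Mutex StateLock;

    void touchState() {
      llvm::MutexGuard Locked(StateLock); // acquires StateLock
      assert(Locked.holds(StateLock));    // typical assertion-style check
      // ... mutate the shared state here ...
    }                                     // releases StateLock on scope exit
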
diff --git a/contrib/llvm/include/llvm/Support/OnDiskHashTable.h b/contrib/llvm/include/llvm/Support/OnDiskHashTable.h
new file mode 100644
index 0000000..ac978d4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/OnDiskHashTable.h
@@ -0,0 +1,600 @@
+//===--- OnDiskHashTable.h - On-Disk Hash Table Implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Defines facilities for reading and writing on-disk hash tables.
+///
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_ONDISKHASHTABLE_H
+#define LLVM_SUPPORT_ONDISKHASHTABLE_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdlib>
+
+namespace llvm {
+
+/// \brief Generates an on disk hash table.
+///
+/// This needs an \c Info that handles storing values into the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleInfo {
+/// public:
+/// typedef ExampleKey key_type; // Must be copy constructible
+/// typedef ExampleKey &key_type_ref;
+/// typedef ExampleData data_type; // Must be copy constructible
+/// typedef ExampleData &data_type_ref;
+/// typedef uint32_t hash_value_type; // The type the hash function returns.
+/// typedef uint32_t offset_type; // The type for offsets into the table.
+///
+/// /// Calculate the hash for Key
+/// static hash_value_type ComputeHash(key_type_ref Key);
+/// /// Return the lengths, in bytes, of the given Key/Data pair.
+/// static std::pair<offset_type, offset_type>
+/// EmitKeyDataLength(raw_ostream &Out, key_type_ref Key, data_type_ref Data);
+/// /// Write Key to Out. KeyLen is the length from EmitKeyDataLength.
+/// static void EmitKey(raw_ostream &Out, key_type_ref Key,
+/// offset_type KeyLen);
+/// /// Write Data to Out. DataLen is the length from EmitKeyDataLength.
+/// static void EmitData(raw_ostream &Out, key_type_ref Key,
+/// data_type_ref Data, offset_type DataLen);
+/// /// Determine if two keys are equal. Optional, only needed by contains.
+/// static bool EqualKey(key_type_ref Key1, key_type_ref Key2);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTableGenerator {
+ /// \brief A single item in the hash table.
+ class Item {
+ public:
+ typename Info::key_type Key;
+ typename Info::data_type Data;
+ Item *Next;
+ const typename Info::hash_value_type Hash;
+
+ Item(typename Info::key_type_ref Key, typename Info::data_type_ref Data,
+ Info &InfoObj)
+ : Key(Key), Data(Data), Next(nullptr), Hash(InfoObj.ComputeHash(Key)) {}
+ };
+
+ typedef typename Info::offset_type offset_type;
+ offset_type NumBuckets;
+ offset_type NumEntries;
+ llvm::SpecificBumpPtrAllocator<Item> BA;
+
+ /// \brief A linked list of values in a particular hash bucket.
+ struct Bucket {
+ offset_type Off;
+ unsigned Length;
+ Item *Head;
+ };
+
+ Bucket *Buckets;
+
+private:
+ /// \brief Insert an item into the appropriate hash bucket.
+ void insert(Bucket *Buckets, size_t Size, Item *E) {
+ Bucket &B = Buckets[E->Hash & (Size - 1)];
+ E->Next = B.Head;
+ ++B.Length;
+ B.Head = E;
+ }
+
+ /// \brief Resize the hash table, moving the old entries into the new buckets.
+ void resize(size_t NewSize) {
+ Bucket *NewBuckets = (Bucket *)std::calloc(NewSize, sizeof(Bucket));
+ // Populate NewBuckets with the old entries.
+ for (size_t I = 0; I < NumBuckets; ++I)
+ for (Item *E = Buckets[I].Head; E;) {
+ Item *N = E->Next;
+ E->Next = nullptr;
+ insert(NewBuckets, NewSize, E);
+ E = N;
+ }
+
+ free(Buckets);
+ NumBuckets = NewSize;
+ Buckets = NewBuckets;
+ }
+
+public:
+ /// \brief Insert an entry into the table.
+ void insert(typename Info::key_type_ref Key,
+ typename Info::data_type_ref Data) {
+ Info InfoObj;
+ insert(Key, Data, InfoObj);
+ }
+
+ /// \brief Insert an entry into the table.
+ ///
+ /// Uses the provided Info instead of a stack allocated one.
+ void insert(typename Info::key_type_ref Key,
+ typename Info::data_type_ref Data, Info &InfoObj) {
+ ++NumEntries;
+ if (4 * NumEntries >= 3 * NumBuckets)
+ resize(NumBuckets * 2);
+ insert(Buckets, NumBuckets, new (BA.Allocate()) Item(Key, Data, InfoObj));
+ }
+
+ /// \brief Determine whether an entry has been inserted.
+ bool contains(typename Info::key_type_ref Key, Info &InfoObj) {
+ unsigned Hash = InfoObj.ComputeHash(Key);
+ for (Item *I = Buckets[Hash & (NumBuckets - 1)].Head; I; I = I->Next)
+ if (I->Hash == Hash && InfoObj.EqualKey(I->Key, Key))
+ return true;
+ return false;
+ }
+
+ /// \brief Emit the table to Out, which must not be at offset 0.
+ offset_type Emit(raw_ostream &Out) {
+ Info InfoObj;
+ return Emit(Out, InfoObj);
+ }
+
+ /// \brief Emit the table to Out, which must not be at offset 0.
+ ///
+ /// Uses the provided Info instead of a stack allocated one.
+ offset_type Emit(raw_ostream &Out, Info &InfoObj) {
+ using namespace llvm::support;
+ endian::Writer<little> LE(Out);
+
+ // Emit the payload of the table.
+ for (offset_type I = 0; I < NumBuckets; ++I) {
+ Bucket &B = Buckets[I];
+ if (!B.Head)
+ continue;
+
+ // Store the offset for the data of this bucket.
+ B.Off = Out.tell();
+ assert(B.Off && "Cannot write a bucket at offset 0. Please add padding.");
+
+ // Write out the number of items in the bucket.
+ LE.write<uint16_t>(B.Length);
+ assert(B.Length != 0 && "Bucket has a head but zero length?");
+
+ // Write out the entries in the bucket.
+ for (Item *I = B.Head; I; I = I->Next) {
+ LE.write<typename Info::hash_value_type>(I->Hash);
+ const std::pair<offset_type, offset_type> &Len =
+ InfoObj.EmitKeyDataLength(Out, I->Key, I->Data);
+#ifdef NDEBUG
+ InfoObj.EmitKey(Out, I->Key, Len.first);
+ InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
+#else
+        // In asserts mode, check that the user's lengths match the data
+        // they wrote.
+ uint64_t KeyStart = Out.tell();
+ InfoObj.EmitKey(Out, I->Key, Len.first);
+ uint64_t DataStart = Out.tell();
+ InfoObj.EmitData(Out, I->Key, I->Data, Len.second);
+ uint64_t End = Out.tell();
+ assert(offset_type(DataStart - KeyStart) == Len.first &&
+ "key length does not match bytes written");
+ assert(offset_type(End - DataStart) == Len.second &&
+ "data length does not match bytes written");
+#endif
+ }
+ }
+
+ // Pad with zeros so that we can start the hashtable at an aligned address.
+ offset_type TableOff = Out.tell();
+ uint64_t N = llvm::OffsetToAlignment(TableOff, alignOf<offset_type>());
+ TableOff += N;
+ while (N--)
+ LE.write<uint8_t>(0);
+
+ // Emit the hashtable itself.
+ LE.write<offset_type>(NumBuckets);
+ LE.write<offset_type>(NumEntries);
+ for (offset_type I = 0; I < NumBuckets; ++I)
+ LE.write<offset_type>(Buckets[I].Off);
+
+ return TableOff;
+ }
+
+ OnDiskChainedHashTableGenerator() {
+ NumEntries = 0;
+ NumBuckets = 64;
+ // Note that we do not need to run the constructors of the individual
+ // Bucket objects since 'calloc' returns bytes that are all 0.
+ Buckets = (Bucket *)std::calloc(NumBuckets, sizeof(Bucket));
+ }
+
+ ~OnDiskChainedHashTableGenerator() { std::free(Buckets); }
+};
+
+/// \brief Provides lookup on an on disk hash table.
+///
+/// This needs an \c Info that handles reading values from the hash table's
+/// payload and computes the hash for a given key. This should provide the
+/// following interface:
+///
+/// \code
+/// class ExampleLookupInfo {
+/// public:
+/// typedef ExampleData data_type;
+/// typedef ExampleInternalKey internal_key_type; // The stored key type.
+/// typedef ExampleKey external_key_type; // The type to pass to find().
+/// typedef uint32_t hash_value_type; // The type the hash function returns.
+/// typedef uint32_t offset_type; // The type for offsets into the table.
+///
+/// /// Compare two keys for equality.
+/// static bool EqualKey(internal_key_type &Key1, internal_key_type &Key2);
+/// /// Calculate the hash for the given key.
+/// static hash_value_type ComputeHash(internal_key_type &IKey);
+/// /// Translate from the semantic type of a key in the hash table to the
+/// /// type that is actually stored and used for hashing and comparisons.
+/// /// The internal and external types are often the same, in which case this
+/// /// can simply return the passed in value.
+/// static const internal_key_type &GetInternalKey(external_key_type &EKey);
+/// /// Read the key and data length from Buffer, leaving it pointing at the
+/// /// following byte.
+/// static std::pair<offset_type, offset_type>
+/// ReadKeyDataLength(const unsigned char *&Buffer);
+/// /// Read the key from Buffer, given the KeyLen as reported from
+/// /// ReadKeyDataLength.
+/// const internal_key_type &ReadKey(const unsigned char *Buffer,
+/// offset_type KeyLen);
+/// /// Read the data for Key from Buffer, given the DataLen as reported from
+/// /// ReadKeyDataLength.
+/// data_type ReadData(StringRef Key, const unsigned char *Buffer,
+/// offset_type DataLen);
+/// };
+/// \endcode
+template <typename Info> class OnDiskChainedHashTable {
+ const typename Info::offset_type NumBuckets;
+ const typename Info::offset_type NumEntries;
+ const unsigned char *const Buckets;
+ const unsigned char *const Base;
+ Info InfoObj;
+
+public:
+ typedef Info InfoType;
+ typedef typename Info::internal_key_type internal_key_type;
+ typedef typename Info::external_key_type external_key_type;
+ typedef typename Info::data_type data_type;
+ typedef typename Info::hash_value_type hash_value_type;
+ typedef typename Info::offset_type offset_type;
+
+ OnDiskChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+ const unsigned char *Buckets,
+ const unsigned char *Base,
+ const Info &InfoObj = Info())
+ : NumBuckets(NumBuckets), NumEntries(NumEntries), Buckets(Buckets),
+ Base(Base), InfoObj(InfoObj) {
+ assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+ "'buckets' must have a 4-byte alignment");
+ }
+
+ /// Read the number of buckets and the number of entries from a hash table
+  /// produced by OnDiskChainedHashTableGenerator::Emit, and advance the
+  /// Buckets pointer past them.
+ static std::pair<offset_type, offset_type>
+ readNumBucketsAndEntries(const unsigned char *&Buckets) {
+ assert((reinterpret_cast<uintptr_t>(Buckets) & 0x3) == 0 &&
+ "buckets should be 4-byte aligned.");
+ using namespace llvm::support;
+ offset_type NumBuckets =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ offset_type NumEntries =
+ endian::readNext<offset_type, little, aligned>(Buckets);
+ return std::make_pair(NumBuckets, NumEntries);
+ }
+
+ offset_type getNumBuckets() const { return NumBuckets; }
+ offset_type getNumEntries() const { return NumEntries; }
+ const unsigned char *getBase() const { return Base; }
+ const unsigned char *getBuckets() const { return Buckets; }
+
+ bool isEmpty() const { return NumEntries == 0; }
+
+ class iterator {
+ internal_key_type Key;
+ const unsigned char *const Data;
+ const offset_type Len;
+ Info *InfoObj;
+
+ public:
+ iterator() : Data(nullptr), Len(0) {}
+ iterator(const internal_key_type K, const unsigned char *D, offset_type L,
+ Info *InfoObj)
+ : Key(K), Data(D), Len(L), InfoObj(InfoObj) {}
+
+ data_type operator*() const { return InfoObj->ReadData(Key, Data, Len); }
+
+ const unsigned char *getDataPtr() const { return Data; }
+ offset_type getDataLen() const { return Len; }
+
+ bool operator==(const iterator &X) const { return X.Data == Data; }
+ bool operator!=(const iterator &X) const { return X.Data != Data; }
+ };
+
+ /// \brief Look up the stored data for a particular key.
+ iterator find(const external_key_type &EKey, Info *InfoPtr = nullptr) {
+ const internal_key_type &IKey = InfoObj.GetInternalKey(EKey);
+ hash_value_type KeyHash = InfoObj.ComputeHash(IKey);
+ return find_hashed(IKey, KeyHash, InfoPtr);
+ }
+
+ /// \brief Look up the stored data for a particular key with a known hash.
+ iterator find_hashed(const internal_key_type &IKey, hash_value_type KeyHash,
+ Info *InfoPtr = nullptr) {
+ using namespace llvm::support;
+
+ if (!InfoPtr)
+ InfoPtr = &InfoObj;
+
+ // Each bucket is just an offset into the hash table file.
+ offset_type Idx = KeyHash & (NumBuckets - 1);
+ const unsigned char *Bucket = Buckets + sizeof(offset_type) * Idx;
+
+ offset_type Offset = endian::readNext<offset_type, little, aligned>(Bucket);
+ if (Offset == 0)
+ return iterator(); // Empty bucket.
+ const unsigned char *Items = Base + Offset;
+
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ unsigned Len = endian::readNext<uint16_t, little, unaligned>(Items);
+
+ for (unsigned i = 0; i < Len; ++i) {
+ // Read the hash.
+ hash_value_type ItemHash =
+ endian::readNext<hash_value_type, little, unaligned>(Items);
+
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(Items);
+ offset_type ItemLen = L.first + L.second;
+
+ // Compare the hashes. If they are not the same, skip the entry entirely.
+ if (ItemHash != KeyHash) {
+ Items += ItemLen;
+ continue;
+ }
+
+ // Read the key.
+ const internal_key_type &X =
+ InfoPtr->ReadKey((const unsigned char *const)Items, L.first);
+
+ // If the key doesn't match just skip reading the value.
+ if (!InfoPtr->EqualKey(X, IKey)) {
+ Items += ItemLen;
+ continue;
+ }
+
+ // The key matches!
+ return iterator(X, Items + L.first, L.second, InfoPtr);
+ }
+
+ return iterator();
+ }
+
+ iterator end() const { return iterator(); }
+
+ Info &getInfoObj() { return InfoObj; }
+
+ /// \brief Create the hash table.
+ ///
+ /// \param Buckets is the beginning of the hash table itself, which follows
+ /// the payload of entire structure. This is the value returned by
+ /// OnDiskHashTableGenerator::Emit.
+ ///
+ /// \param Base is the point from which all offsets into the structure are
+ /// based. This is offset 0 in the stream that was used when Emitting the
+ /// table.
+ static OnDiskChainedHashTable *Create(const unsigned char *Buckets,
+ const unsigned char *const Base,
+ const Info &InfoObj = Info()) {
+ assert(Buckets > Base);
+ auto NumBucketsAndEntries = readNumBucketsAndEntries(Buckets);
+ return new OnDiskChainedHashTable<Info>(NumBucketsAndEntries.first,
+ NumBucketsAndEntries.second,
+ Buckets, Base, InfoObj);
+ }
+};
+
+/// \brief Provides lookup and iteration over an on disk hash table.
+///
+/// \copydetails llvm::OnDiskChainedHashTable
+template <typename Info>
+class OnDiskIterableChainedHashTable : public OnDiskChainedHashTable<Info> {
+ const unsigned char *Payload;
+
+public:
+ typedef OnDiskChainedHashTable<Info> base_type;
+ typedef typename base_type::internal_key_type internal_key_type;
+ typedef typename base_type::external_key_type external_key_type;
+ typedef typename base_type::data_type data_type;
+ typedef typename base_type::hash_value_type hash_value_type;
+ typedef typename base_type::offset_type offset_type;
+
+private:
+ /// \brief Iterates over all of the keys in the table.
+ class iterator_base {
+ const unsigned char *Ptr;
+ offset_type NumItemsInBucketLeft;
+ offset_type NumEntriesLeft;
+
+ public:
+ typedef external_key_type value_type;
+
+ iterator_base(const unsigned char *const Ptr, offset_type NumEntries)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries) {}
+ iterator_base()
+ : Ptr(nullptr), NumItemsInBucketLeft(0), NumEntriesLeft(0) {}
+
+ friend bool operator==(const iterator_base &X, const iterator_base &Y) {
+ return X.NumEntriesLeft == Y.NumEntriesLeft;
+ }
+ friend bool operator!=(const iterator_base &X, const iterator_base &Y) {
+ return X.NumEntriesLeft != Y.NumEntriesLeft;
+ }
+
+ /// Move to the next item.
+ void advance() {
+ using namespace llvm::support;
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft =
+ endian::readNext<uint16_t, little, unaligned>(Ptr);
+ }
+ Ptr += sizeof(hash_value_type); // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<offset_type, offset_type> &L =
+ Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ }
+
+ /// Get the start of the item as written by the trait (after the hash and
+ /// immediately before the key and value length).
+ const unsigned char *getItem() const {
+ return Ptr + (NumItemsInBucketLeft ? 0 : 2) + sizeof(hash_value_type);
+ }
+ };
+
+public:
+ OnDiskIterableChainedHashTable(offset_type NumBuckets, offset_type NumEntries,
+ const unsigned char *Buckets,
+ const unsigned char *Payload,
+ const unsigned char *Base,
+ const Info &InfoObj = Info())
+ : base_type(NumBuckets, NumEntries, Buckets, Base, InfoObj),
+ Payload(Payload) {}
+
+ /// \brief Iterates over all of the keys in the table.
+ class key_iterator : public iterator_base {
+ Info *InfoObj;
+
+ public:
+ typedef external_key_type value_type;
+
+ key_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+ Info *InfoObj)
+ : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
+ key_iterator() : iterator_base(), InfoObj() {}
+
+ key_iterator &operator++() {
+ this->advance();
+ return *this;
+ }
+ key_iterator operator++(int) { // Postincrement
+ key_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ internal_key_type getInternalKey() const {
+ auto *LocalPtr = this->getItem();
+
+ // Determine the length of the key and the data.
+ auto L = Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ return InfoObj->ReadKey(LocalPtr, L.first);
+ }
+
+ value_type operator*() const {
+ return InfoObj->GetExternalKey(getInternalKey());
+ }
+ };
+
+ key_iterator key_begin() {
+ return key_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+ }
+ key_iterator key_end() { return key_iterator(); }
+
+ iterator_range<key_iterator> keys() {
+ return make_range(key_begin(), key_end());
+ }
+
+ /// \brief Iterates over all the entries in the table, returning the data.
+ class data_iterator : public iterator_base {
+ Info *InfoObj;
+
+ public:
+ typedef data_type value_type;
+
+ data_iterator(const unsigned char *const Ptr, offset_type NumEntries,
+ Info *InfoObj)
+ : iterator_base(Ptr, NumEntries), InfoObj(InfoObj) {}
+ data_iterator() : iterator_base(), InfoObj() {}
+
+ data_iterator &operator++() { // Preincrement
+ this->advance();
+ return *this;
+ }
+ data_iterator operator++(int) { // Postincrement
+ data_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ value_type operator*() const {
+ auto *LocalPtr = this->getItem();
+
+ // Determine the length of the key and the data.
+ auto L = Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type &Key = InfoObj->ReadKey(LocalPtr, L.first);
+ return InfoObj->ReadData(Key, LocalPtr + L.first, L.second);
+ }
+ };
+
+ data_iterator data_begin() {
+ return data_iterator(Payload, this->getNumEntries(), &this->getInfoObj());
+ }
+ data_iterator data_end() { return data_iterator(); }
+
+ iterator_range<data_iterator> data() {
+ return make_range(data_begin(), data_end());
+ }
+
+ /// \brief Create the hash table.
+ ///
+ /// \param Buckets is the beginning of the hash table itself, which follows
+  /// the payload of the entire structure. This is the value returned by
+  /// OnDiskChainedHashTableGenerator::Emit.
+ ///
+ /// \param Payload is the beginning of the data contained in the table. This
+  /// is Base plus any padding or header data that was stored, i.e., the offset
+ /// that the stream was at when calling Emit.
+ ///
+ /// \param Base is the point from which all offsets into the structure are
+ /// based. This is offset 0 in the stream that was used when Emitting the
+ /// table.
+ static OnDiskIterableChainedHashTable *
+ Create(const unsigned char *Buckets, const unsigned char *const Payload,
+ const unsigned char *const Base, const Info &InfoObj = Info()) {
+ assert(Buckets > Base);
+ auto NumBucketsAndEntries =
+ OnDiskIterableChainedHashTable<Info>::readNumBucketsAndEntries(Buckets);
+ return new OnDiskIterableChainedHashTable<Info>(
+ NumBucketsAndEntries.first, NumBucketsAndEntries.second,
+ Buckets, Payload, Base, InfoObj);
+ }
+};
+
+} // end namespace llvm
+
+#endif
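
To make the writer-side contract concrete, here is a minimal sketch under stated assumptions: the trait StringToU32Info and its on-disk value layout are hypothetical, and the reader side would supply the matching Read* half of the interface and call OnDiskChainedHashTable<Info>::Create with the returned offset:

    #include "llvm/ADT/Hashing.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/EndianStream.h"
    #include "llvm/Support/OnDiskHashTable.h"
    #include "llvm/Support/raw_ostream.h"

    // Maps StringRef keys to uint32_t values on disk.
    struct StringToU32Info {
      typedef llvm::StringRef key_type;
      typedef llvm::StringRef key_type_ref;
      typedef uint32_t data_type;
      typedef uint32_t data_type_ref;
      typedef uint32_t hash_value_type;
      typedef uint32_t offset_type;

      static hash_value_type ComputeHash(key_type_ref K) {
        return static_cast<hash_value_type>(llvm::hash_value(K));
      }

      static std::pair<offset_type, offset_type>
      EmitKeyDataLength(llvm::raw_ostream &Out, key_type_ref K, data_type_ref) {
        using namespace llvm::support;
        endian::Writer<little>(Out).write<uint16_t>(K.size()); // key length
        return std::make_pair(offset_type(K.size()),
                              offset_type(sizeof(uint32_t)));
      }

      static void EmitKey(llvm::raw_ostream &Out, key_type_ref K, offset_type) {
        Out << K; // raw key bytes
      }

      static void EmitData(llvm::raw_ostream &Out, key_type_ref,
                           data_type_ref V, offset_type) {
        using namespace llvm::support;
        endian::Writer<little>(Out).write<uint32_t>(V); // little-endian value
      }
    };

    uint32_t writeTable(llvm::raw_ostream &Out) {
      llvm::OnDiskChainedHashTableGenerator<StringToU32Info> Gen;
      Gen.insert("alpha", 1);
      Gen.insert("beta", 2);
      Out << ' ';           // one byte of padding: no bucket may sit at offset 0
      return Gen.Emit(Out); // offset of the bucket array, needed by the reader
    }
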
diff --git a/contrib/llvm/include/llvm/Support/Options.h b/contrib/llvm/include/llvm/Support/Options.h
new file mode 100644
index 0000000..7b61b23
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Options.h
@@ -0,0 +1,120 @@
+//===- llvm/Support/Options.h - Debug options support -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares helper objects for defining debug options that can be
+/// configured via the command line. The new API currently builds on the cl::opt
+/// API, but does not require the use of static globals.
+///
+/// With this API options are registered during initialization. For passes, this
+/// happens during pass initialization. Passes with options will call a static
+/// registerOptions method during initialization that registers options with the
+/// OptionRegistry. An example implementation of registerOptions is:
+///
+/// static void registerOptions() {
+/// OptionRegistry::registerOption<bool, Scalarizer,
+/// &Scalarizer::ScalarizeLoadStore>(
+/// "scalarize-load-store",
+/// "Allow the scalarizer pass to scalarize loads and store", false);
+/// }
+///
+/// When reading data for options the interface is via the LLVMContext. Option
+/// data for passes should be read from the context during doInitialization. An
+/// example of reading the above option would be:
+///
+/// ScalarizeLoadStore =
+/// M.getContext().getOption<bool,
+/// Scalarizer,
+/// &Scalarizer::ScalarizeLoadStore>();
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_OPTIONS_H
+#define LLVM_SUPPORT_OPTIONS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+namespace detail {
+
+// Options are keyed off the unique address of a static character synthesized
+// based on template arguments.
+template <typename ValT, typename Base, ValT(Base::*Mem)> class OptionKey {
+public:
+ static char ID;
+};
+
+template <typename ValT, typename Base, ValT(Base::*Mem)>
+char OptionKey<ValT, Base, Mem>::ID = 0;
+
+} // namespace detail
+
+/// \brief Singleton class used to register debug options.
+///
+/// The OptionRegistry is responsible for managing lifetimes of the options and
+/// provides interfaces for option registration and reading values from options.
+/// This object is a singleton; only one instance should ever exist, so that
+/// all options are registered in the same place.
+class OptionRegistry {
+private:
+ DenseMap<void *, cl::Option *> Options;
+
+ /// \brief Adds a cl::Option to the registry.
+ ///
+ /// \param Key unique key for option
+ /// \param O option to map to \p Key
+ ///
+ /// Allocated cl::Options are owned by the OptionRegistry and are deallocated
+  /// on destruction or removal.
+ void addOption(void *Key, cl::Option *O);
+
+public:
+ ~OptionRegistry();
+ OptionRegistry() {}
+
+ /// \brief Returns a reference to the singleton instance.
+ static OptionRegistry &instance();
+
+ /// \brief Registers an option with the OptionRegistry singleton.
+ ///
+ /// \tparam ValT type of the option's data
+ /// \tparam Base class used to key the option
+ /// \tparam Mem member of \p Base used for keying the option
+ ///
+  /// Options are keyed off the template parameters to generate unique static
+  /// characters. The template parameters are (1) the type of the data the
+  /// option stores (\p ValT), (2) the class that will read the option
+  /// (\p Base), and (3) the member that the class will store the data into
+  /// (\p Mem).
+ template <typename ValT, typename Base, ValT(Base::*Mem)>
+ static void registerOption(const char *ArgStr, const char *Desc,
+ const ValT &InitValue) {
+ cl::opt<ValT> *Option = new cl::opt<ValT>(ArgStr, cl::desc(Desc),
+ cl::Hidden, cl::init(InitValue));
+ instance().addOption(&detail::OptionKey<ValT, Base, Mem>::ID, Option);
+ }
+
+ /// \brief Returns the value of the option.
+ ///
+ /// \tparam ValT type of the option's data
+ /// \tparam Base class used to key the option
+ /// \tparam Mem member of \p Base used for keying the option
+ ///
+ /// Reads option values based on the key generated by the template parameters.
+ /// Keying for get() is the same as keying for registerOption.
+ template <typename ValT, typename Base, ValT(Base::*Mem)> ValT get() const {
+ auto It = Options.find(&detail::OptionKey<ValT, Base, Mem>::ID);
+ assert(It != Options.end() && "Option not in OptionRegistry");
+ return *(cl::opt<ValT> *)It->second;
+ }
+};
+
+} // namespace llvm
+
+#endif
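
A minimal sketch of the registration/read cycle (MyPass and its option are hypothetical; a real pass would register from its initialization hook and read via LLVMContext::getOption, as described above):

    #include "llvm/Support/Options.h"

    struct MyPass {
      bool UseFastPath = false;

      static void registerOptions() {
        llvm::OptionRegistry::registerOption<bool, MyPass,
                                             &MyPass::UseFastPath>(
            "mypass-fast-path", "Enable the fast path in MyPass", false);
      }
    };

    void configure() {
      MyPass::registerOptions();
      // After cl::ParseCommandLineOptions has run, read the value back:
      bool Fast = llvm::OptionRegistry::instance()
                      .get<bool, MyPass, &MyPass::UseFastPath>();
      (void)Fast;
    }
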
diff --git a/contrib/llvm/include/llvm/Support/Path.h b/contrib/llvm/include/llvm/Support/Path.h
new file mode 100644
index 0000000..955cc99
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Path.h
@@ -0,0 +1,437 @@
+//===- llvm/Support/Path.h - Path Operating System Concept ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::path namespace. It is designed after
+// TR2/boost filesystem (v3), but modified to remove exception handling and the
+// path class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PATH_H
+#define LLVM_SUPPORT_PATH_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/DataTypes.h"
+#include <iterator>
+
+namespace llvm {
+namespace sys {
+namespace path {
+
+/// @name Lexical Component Iterator
+/// @{
+
+/// @brief Path iterator.
+///
+/// This is an input iterator that iterates over the individual components in
+/// \a path. The traversal order is as follows:
+/// * The root-name element, if present.
+/// * The root-directory element, if present.
+/// * Each successive filename element, if present.
+/// * Dot, if one or more trailing non-root slash characters are present.
+/// Traversing backwards is possible with \a reverse_iterator.
+///
+/// Iteration examples. Each component is separated by ',':
+/// @code
+/// / => /
+/// /foo => /,foo
+/// foo/ => foo,.
+/// /foo/bar => /,foo,bar
+/// ../ => ..,.
+/// C:\foo\bar => C:,/,foo,bar
+/// @endcode
+class const_iterator
+ : public std::iterator<std::input_iterator_tag, const StringRef> {
+ StringRef Path; ///< The entire path.
+ StringRef Component; ///< The current component. Not necessarily in Path.
+  size_t    Position;  ///< The iterator's current position within Path.
+
+ // An end iterator has Position = Path.size() + 1.
+ friend const_iterator begin(StringRef path);
+ friend const_iterator end(StringRef path);
+
+public:
+ reference operator*() const { return Component; }
+ pointer operator->() const { return &Component; }
+ const_iterator &operator++(); // preincrement
+ bool operator==(const const_iterator &RHS) const;
+ bool operator!=(const const_iterator &RHS) const { return !(*this == RHS); }
+
+ /// @brief Difference in bytes between this and RHS.
+ ptrdiff_t operator-(const const_iterator &RHS) const;
+};
+
+/// @brief Reverse path iterator.
+///
+/// This is an input iterator that iterates over the individual components in
+/// \a path in reverse order. The traversal order is exactly reversed from that
+/// of \a const_iterator.
+class reverse_iterator
+ : public std::iterator<std::input_iterator_tag, const StringRef> {
+ StringRef Path; ///< The entire path.
+ StringRef Component; ///< The current component. Not necessarily in Path.
+  size_t    Position;  ///< The iterator's current position within Path.
+
+ friend reverse_iterator rbegin(StringRef path);
+ friend reverse_iterator rend(StringRef path);
+
+public:
+ reference operator*() const { return Component; }
+ pointer operator->() const { return &Component; }
+ reverse_iterator &operator++(); // preincrement
+ bool operator==(const reverse_iterator &RHS) const;
+ bool operator!=(const reverse_iterator &RHS) const { return !(*this == RHS); }
+};
+
+/// @brief Get begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first component of \a path.
+const_iterator begin(StringRef path);
+
+/// @brief Get end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the end of \a path.
+const_iterator end(StringRef path);
+
+/// @brief Get reverse begin iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized with the first reverse component of \a path.
+reverse_iterator rbegin(StringRef path);
+
+/// @brief Get reverse end iterator over \a path.
+/// @param path Input path.
+/// @returns Iterator initialized to the reverse end of \a path.
+reverse_iterator rend(StringRef path);
+
+/// @}
+/// @name Lexical Modifiers
+/// @{
+
+/// @brief Remove the last component from \a path unless it is the root dir.
+///
+/// @code
+/// directory/filename.cpp => directory/
+/// directory/ => directory
+/// filename.cpp => <empty>
+/// / => /
+/// @endcode
+///
+/// @param path A path that is modified to not have a file component.
+void remove_filename(SmallVectorImpl<char> &path);
+
+/// @brief Replace the file extension of \a path with \a extension.
+///
+/// @code
+/// ./filename.cpp => ./filename.extension
+/// ./filename => ./filename.extension
+/// ./ => ./.extension
+/// @endcode
+///
+/// @param path A path that has its extension replaced with \a extension.
+/// @param extension The extension to be added. It may be empty. It may also
+/// optionally start with a '.', if it does not, one will be
+/// prepended.
+void replace_extension(SmallVectorImpl<char> &path, const Twine &extension);
+
+/// @brief Append to path.
+///
+/// @code
+/// /foo + bar/f => /foo/bar/f
+/// /foo/ + bar/f => /foo/bar/f
+/// foo + bar/f => foo/bar/f
+/// @endcode
+///
+/// @param path Set to \a path + \a component.
+/// @param a The component to be appended to \a path.
+void append(SmallVectorImpl<char> &path, const Twine &a,
+ const Twine &b = "",
+ const Twine &c = "",
+ const Twine &d = "");
+
+/// @brief Append to path.
+///
+/// @code
+/// /foo + [bar,f] => /foo/bar/f
+/// /foo/ + [bar,f] => /foo/bar/f
+/// foo + [bar,f] => foo/bar/f
+/// @endcode
+///
+/// @param path Set to \a path + [\a begin, \a end).
+/// @param begin Start of components to append.
+/// @param end One past the end of components to append.
+void append(SmallVectorImpl<char> &path,
+ const_iterator begin, const_iterator end);
+
+/// @}
+/// @name Transforms
+/// @{
+
+/// Convert path to the native form. This is used to give paths to users and
+/// operating system calls in the platform's normal way. For example, on Windows
+/// all '/' are converted to '\'.
+///
+/// @param path A path that is transformed to native format.
+/// @param result Holds the result of the transformation.
+void native(const Twine &path, SmallVectorImpl<char> &result);
+
+/// Convert path to the native form in place. This is used to give paths to
+/// users and operating system calls in the platform's normal way. For example,
+/// on Windows all '/' are converted to '\'.
+///
+/// @param path A path that is transformed to native format.
+void native(SmallVectorImpl<char> &path);
+
+/// @}
+/// @name Lexical Observers
+/// @{
+
+/// @brief Get root name.
+///
+/// @code
+/// //net/hello => //net
+/// c:/hello => c: (on Windows, on other platforms nothing)
+/// /hello => <empty>
+/// @endcode
+///
+/// @param path Input path.
+/// @result The root name of \a path if it has one, otherwise "".
+StringRef root_name(StringRef path);
+
+/// @brief Get root directory.
+///
+/// @code
+/// /goo/hello => /
+/// c:/hello => /
+/// d/file.txt => <empty>
+/// @endcode
+///
+/// @param path Input path.
+/// @result The root directory of \a path if it has one, otherwise
+/// "".
+StringRef root_directory(StringRef path);
+
+/// @brief Get root path.
+///
+/// Equivalent to root_name + root_directory.
+///
+/// @param path Input path.
+/// @result The root path of \a path if it has one, otherwise "".
+StringRef root_path(StringRef path);
+
+/// @brief Get relative path.
+///
+/// @code
+/// C:\hello\world => hello\world
+/// foo/bar => foo/bar
+/// /foo/bar => foo/bar
+/// @endcode
+///
+/// @param path Input path.
+/// @result The path starting after root_path if one exists, otherwise "".
+StringRef relative_path(StringRef path);
+
+/// @brief Get parent path.
+///
+/// @code
+/// / => <empty>
+/// /foo => /
+/// foo/../bar => foo/..
+/// @endcode
+///
+/// @param path Input path.
+/// @result The parent path of \a path if one exists, otherwise "".
+StringRef parent_path(StringRef path);
+
+/// @brief Get filename.
+///
+/// @code
+/// /foo.txt => foo.txt
+/// . => .
+/// .. => ..
+/// / => /
+/// @endcode
+///
+/// @param path Input path.
+/// @result The filename part of \a path. This is defined as the last component
+/// of \a path.
+StringRef filename(StringRef path);
+
+/// @brief Get stem.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename ending at (but not including) the last dot. Otherwise
+/// it is filename.
+///
+/// @code
+/// /foo/bar.txt => bar
+/// /foo/bar => bar
+/// /foo/.txt => <empty>
+/// /foo/. => .
+/// /foo/.. => ..
+/// @endcode
+///
+/// @param path Input path.
+/// @result The stem of \a path.
+StringRef stem(StringRef path);
+
+/// @brief Get extension.
+///
+/// If filename contains a dot but not solely one or two dots, result is the
+/// substring of filename starting at (and including) the last dot, and ending
+/// at the end of \a path. Otherwise "".
+///
+/// @code
+/// /foo/bar.txt => .txt
+/// /foo/bar => <empty>
+/// /foo/.txt => .txt
+/// @endcode
+///
+/// @param path Input path.
+/// @result The extension of \a path.
+StringRef extension(StringRef path);
+
+/// @brief Check whether the given char is a path separator on the host OS.
+///
+/// @param value a character
+/// @result true if \a value is a path separator character on the host OS
+bool is_separator(char value);
+
+/// @brief Return the preferred separator for this platform.
+///
+/// @result StringRef of the preferred separator, null-terminated.
+StringRef get_separator();
+
+/// @brief Get the typical temporary directory for the system, e.g.,
+/// "/var/tmp" or "C:/TEMP"
+///
+/// @param erasedOnReboot Whether to favor a path that is erased on reboot
+/// rather than one that potentially persists longer. This parameter will be
+/// ignored if the user or system has set the typical environment variable
+/// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory.
+///
+/// @param result Holds the resulting path name.
+void system_temp_directory(bool erasedOnReboot, SmallVectorImpl<char> &result);
+
+/// @brief Get the user's home directory.
+///
+/// @param result Holds the resulting path name.
+/// @result True if a home directory is set, false otherwise.
+bool home_directory(SmallVectorImpl<char> &result);
+
+/// @brief Get the user's cache directory.
+///
+/// Expect the resulting path to be a directory shared with other
+/// applications/services used by the user. Params \p Path1 to \p Path3 can be
+/// used to append additional directory names to the resulting path. Recommended
+/// pattern is <user_cache_directory>/<vendor>/<application>.
+///
+/// @param Result Holds the resulting path.
+/// @param Path1 Additional path to be appended to the user's cache directory
+/// path. "" can be used to append nothing.
+/// @param Path2 Second additional path to be appended.
+/// @param Path3 Third additional path to be appended.
+/// @result True if a cache directory path is set, false otherwise.
+bool user_cache_directory(SmallVectorImpl<char> &Result, const Twine &Path1,
+ const Twine &Path2 = "", const Twine &Path3 = "");
+
+/// @brief Has root name?
+///
+/// root_name != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root name, false otherwise.
+bool has_root_name(const Twine &path);
+
+/// @brief Has root directory?
+///
+/// root_directory != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root directory, false otherwise.
+bool has_root_directory(const Twine &path);
+
+/// @brief Has root path?
+///
+/// root_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a root path, false otherwise.
+bool has_root_path(const Twine &path);
+
+/// @brief Has relative path?
+///
+/// relative_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a relative path, false otherwise.
+bool has_relative_path(const Twine &path);
+
+/// @brief Has parent path?
+///
+/// parent_path != ""
+///
+/// @param path Input path.
+/// @result True if the path has a parent path, false otherwise.
+bool has_parent_path(const Twine &path);
+
+/// @brief Has filename?
+///
+/// filename != ""
+///
+/// @param path Input path.
+/// @result True if the path has a filename, false otherwise.
+bool has_filename(const Twine &path);
+
+/// @brief Has stem?
+///
+/// stem != ""
+///
+/// @param path Input path.
+/// @result True if the path has a stem, false otherwise.
+bool has_stem(const Twine &path);
+
+/// @brief Has extension?
+///
+/// extension != ""
+///
+/// @param path Input path.
+/// @result True if the path has an extension, false otherwise.
+bool has_extension(const Twine &path);
+
+/// @brief Is path absolute?
+///
+/// @param path Input path.
+/// @result True if the path is absolute, false if it is not.
+bool is_absolute(const Twine &path);
+
+/// @brief Is path relative?
+///
+/// @param path Input path.
+/// @result True if the path is relative, false if it is not.
+bool is_relative(const Twine &path);
+
+/// @brief Remove redundant leading "./" pieces and consecutive separators.
+///
+/// @param path Input path.
+/// @result The cleaned-up \a path.
+StringRef remove_leading_dotslash(StringRef path);
+
+/// @brief In-place remove any './' and optionally '../' components from a path.
+///
+/// @param path processed path
+/// @param remove_dot_dot specify if '../' should be removed
+/// @result True if path was changed
+bool remove_dots(SmallVectorImpl<char> &path, bool remove_dot_dot = false);
+
+} // end namespace path
+} // end namespace sys
+} // end namespace llvm
+
+#endif
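
A small usage sketch of the lexical operations above (the paths are chosen arbitrarily for illustration):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/ADT/iterator_range.h"
    #include "llvm/Support/Path.h"
    #include "llvm/Support/raw_ostream.h"

    void pathDemo() {
      llvm::SmallString<128> P("/usr/lib");
      llvm::sys::path::append(P, "clang", "3.8.0"); // "/usr/lib/clang/3.8.0"

      llvm::outs() << llvm::sys::path::filename(P) << "\n";    // "3.8.0"
      llvm::outs() << llvm::sys::path::parent_path(P) << "\n"; // "/usr/lib/clang"

      // Walk the components: "/", "usr", "lib", "clang", "3.8.0".
      for (llvm::StringRef C : llvm::make_range(llvm::sys::path::begin(P),
                                                llvm::sys::path::end(P)))
        llvm::outs() << C << "\n";
    }
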
diff --git a/contrib/llvm/include/llvm/Support/PluginLoader.h b/contrib/llvm/include/llvm/Support/PluginLoader.h
new file mode 100644
index 0000000..bdbb134
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/PluginLoader.h
@@ -0,0 +1,37 @@
+//===-- llvm/Support/PluginLoader.h - Plugin Loader for Tools ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A tool can #include this file to get a -load option that allows the user to
+// load arbitrary shared objects into the tool's address space. Note that this
+// header can only be included by a program ONCE, so it should never be used by
+// library authors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PLUGINLOADER_H
+#define LLVM_SUPPORT_PLUGINLOADER_H
+
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+ struct PluginLoader {
+ void operator=(const std::string &Filename);
+ static unsigned getNumPlugins();
+ static std::string& getPlugin(unsigned num);
+ };
+
+#ifndef DONT_GET_PLUGIN_LOADER_OPTION
+ // This causes operator= above to be invoked for every -load option.
+ static cl::opt<PluginLoader, false, cl::parser<std::string> >
+ LoadOpt("load", cl::ZeroOrMore, cl::value_desc("pluginfilename"),
+ cl::desc("Load the specified plugin"));
+#endif
+}
+
+#endif
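
For example, a standalone tool picks up the -load option simply by including the header before parsing its command line (a sketch; the overview string is arbitrary):

    #include "llvm/Support/CommandLine.h"
    #include "llvm/Support/PluginLoader.h" // defines the -load option

    int main(int argc, char **argv) {
      llvm::cl::ParseCommandLineOptions(argc, argv, "my tool\n");
      // Every "-load <plugin>" on the command line has already been handed
      // to PluginLoader::operator= (which loads the shared object) by now.
      return 0;
    }
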
diff --git a/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h b/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h
new file mode 100644
index 0000000..c12d237
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/PointerLikeTypeTraits.h
@@ -0,0 +1,92 @@
+//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PointerLikeTypeTraits class. This allows data
+// structures to reason about pointers and other things that are pointer sized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+/// A traits type that is used to handle pointer types and things that are just
+/// wrappers for pointers as a uniform entity.
+template <typename T> class PointerLikeTypeTraits {
+ // getAsVoidPointer
+ // getFromVoidPointer
+ // getNumLowBitsAvailable
+};
+
+namespace detail {
+/// A tiny meta function to compute the log2 of a compile time constant.
+template <size_t N>
+struct ConstantLog2
+ : std::integral_constant<size_t, ConstantLog2<N / 2>::value + 1> {};
+template <> struct ConstantLog2<1> : std::integral_constant<size_t, 0> {};
+}
+
+// Provide PointerLikeTypeTraits for non-cvr pointers.
+template <typename T> struct PointerLikeTypeTraits<T *> {
+ static inline void *getAsVoidPointer(T *P) { return P; }
+ static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }
+
+ enum {
+ NumLowBitsAvailable = detail::ConstantLog2<AlignOf<T>::Alignment>::value
+ };
+};
+
+template <> struct PointerLikeTypeTraits<void *> {
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+
+ /// Note, we assume here that void* is related to raw malloc'ed memory and
+ /// that malloc returns objects at least 4-byte aligned. However, this may be
+ /// wrong, or pointers may be from something other than malloc. In this case,
+ /// you should specify a real typed pointer or avoid this template.
+ ///
+ /// All clients should use assertions to do a run-time check to ensure that
+ /// this is actually true.
+ enum { NumLowBitsAvailable = 2 };
+};
+
+// Provide PointerLikeTypeTraits for const pointers.
+template <typename T> class PointerLikeTypeTraits<const T *> {
+ typedef PointerLikeTypeTraits<T *> NonConst;
+
+public:
+ static inline const void *getAsVoidPointer(const T *P) {
+ return NonConst::getAsVoidPointer(const_cast<T *>(P));
+ }
+ static inline const T *getFromVoidPointer(const void *P) {
+ return NonConst::getFromVoidPointer(const_cast<void *>(P));
+ }
+ enum { NumLowBitsAvailable = NonConst::NumLowBitsAvailable };
+};
+
+// Provide PointerLikeTypeTraits for uintptr_t.
+template <> class PointerLikeTypeTraits<uintptr_t> {
+public:
+ static inline void *getAsVoidPointer(uintptr_t P) {
+ return reinterpret_cast<void *>(P);
+ }
+ static inline uintptr_t getFromVoidPointer(void *P) {
+ return reinterpret_cast<uintptr_t>(P);
+ }
+ // No bits are available!
+ enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm
+
+#endif
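
A sketch of specializing the traits for a pointer wrapper so it can be stored in PointerIntPair, PointerUnion, and friends (NodeRef and Node are hypothetical types):

    #include "llvm/Support/PointerLikeTypeTraits.h"

    struct Node; // some type with at least 4-byte alignment

    class NodeRef {
      Node *Ptr;
    public:
      explicit NodeRef(Node *P) : Ptr(P) {}
      Node *get() const { return Ptr; }
    };

    namespace llvm {
    template <> class PointerLikeTypeTraits<NodeRef> {
    public:
      static void *getAsVoidPointer(NodeRef R) { return R.get(); }
      static NodeRef getFromVoidPointer(void *P) {
        return NodeRef(static_cast<Node *>(P));
      }
      // Assumes Node objects are at least 4-byte aligned.
      enum { NumLowBitsAvailable = 2 };
    };
    } // end namespace llvm
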
diff --git a/contrib/llvm/include/llvm/Support/PrettyStackTrace.h b/contrib/llvm/include/llvm/Support/PrettyStackTrace.h
new file mode 100644
index 0000000..027f943
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/PrettyStackTrace.h
@@ -0,0 +1,83 @@
+//===- llvm/Support/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PrettyStackTraceEntry class, which is used to make
+// crashes give more contextual information about what the program was doing
+// when it crashed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H
+#define LLVM_SUPPORT_PRETTYSTACKTRACE_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class raw_ostream;
+
+ void EnablePrettyStackTrace();
+
+ /// PrettyStackTraceEntry - This class is used to represent a frame of the
+ /// "pretty" stack trace that is dumped when a program crashes. You can define
+ /// subclasses of this and declare them on the program stack: when they are
+ /// constructed and destructed, they will add their symbolic frames to a
+ /// virtual stack trace. This gets dumped out if the program crashes.
+ class PrettyStackTraceEntry {
+ const PrettyStackTraceEntry *NextEntry;
+ PrettyStackTraceEntry(const PrettyStackTraceEntry &) = delete;
+ void operator=(const PrettyStackTraceEntry&) = delete;
+ public:
+ PrettyStackTraceEntry();
+ virtual ~PrettyStackTraceEntry();
+
+ /// print - Emit information about this stack frame to OS.
+ virtual void print(raw_ostream &OS) const = 0;
+
+ /// getNextEntry - Return the next entry in the list of frames.
+ const PrettyStackTraceEntry *getNextEntry() const { return NextEntry; }
+ };
+
+ /// PrettyStackTraceString - This object prints a specified string (which
+ /// should not contain newlines) to the stream as the stack trace when a crash
+ /// occurs.
+ class PrettyStackTraceString : public PrettyStackTraceEntry {
+ const char *Str;
+ public:
+ PrettyStackTraceString(const char *str) : Str(str) {}
+ void print(raw_ostream &OS) const override;
+ };
+
+  /// PrettyStackTraceProgram - This object prints the specified program arguments
+ /// to the stream as the stack trace when a crash occurs.
+ class PrettyStackTraceProgram : public PrettyStackTraceEntry {
+ int ArgC;
+ const char *const *ArgV;
+ public:
+ PrettyStackTraceProgram(int argc, const char * const*argv)
+ : ArgC(argc), ArgV(argv) {
+ EnablePrettyStackTrace();
+ }
+ void print(raw_ostream &OS) const override;
+ };
+
+ /// Returns the topmost element of the "pretty" stack state.
+ const void* SavePrettyStackState();
+
+ /// Restores the topmost element of the "pretty" stack state to State, which
+ /// should come from a previous call to SavePrettyStackState(). This is
+ /// useful when using a CrashRecoveryContext in code that also uses
+ /// PrettyStackTraceEntries, to make sure the stack that's printed if a crash
+ /// happens after a crash that's been recovered by CrashRecoveryContext
+ /// doesn't have frames on it that were added in code unwound by the
+ /// CrashRecoveryContext.
+ void RestorePrettyStackState(const void* State);
+
+} // end namespace llvm
+
+#endif
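
A typical pattern in a tool's entry point (the phase string is arbitrary):

    #include "llvm/Support/PrettyStackTrace.h"

    int main(int argc, char **argv) {
      // Print the program's arguments in any crash report, and enable the
      // pretty stack trace machinery.
      llvm::PrettyStackTraceProgram X(argc, argv);

      {
        llvm::PrettyStackTraceString Phase("parsing input"); // frame pushed
        // ... work that might crash shows "parsing input" in the trace ...
      }                                                      // frame popped
      return 0;
    }
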
diff --git a/contrib/llvm/include/llvm/Support/Printable.h b/contrib/llvm/include/llvm/Support/Printable.h
new file mode 100644
index 0000000..5c1b8d5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Printable.h
@@ -0,0 +1,52 @@
+//===--- Printable.h - Print function helpers -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Printable struct.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PRINTABLE_H
+#define LLVM_SUPPORT_PRINTABLE_H
+
+#include <functional>
+
+namespace llvm {
+
+class raw_ostream;
+
+/// Simple wrapper around std::function<void(raw_ostream&)>.
+/// This class is useful for constructing print helpers for raw_ostream.
+///
+/// Example:
+/// Printable PrintRegister(unsigned Register) {
+/// return Printable([Register](raw_ostream &OS) {
+/// OS << getRegisterName(Register);
+///   });
+/// }
+/// ... OS << PrintRegister(Register); ...
+///
+/// Implementation note: Ideally this would just be a typedef, but doing so
+/// leads to operator << being ambiguous as function has matching constructors
+/// in some STL versions. I have seen the problem on gcc 4.6 libstdc++ and
+/// Microsoft STL.
+class Printable {
+public:
+ std::function<void(raw_ostream &OS)> Print;
+ Printable(const std::function<void(raw_ostream &OS)> Print)
+ : Print(Print) {}
+};
+
+static inline raw_ostream &operator<<(raw_ostream &OS, const Printable &P) {
+ P.Print(OS);
+ return OS;
+}
+
+}
+
+#endif
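
A compilable version of the example from the header comment (the name printReg and the output format are illustrative stand-ins for a real getRegisterName helper):

    #include "llvm/Support/Printable.h"
    #include "llvm/Support/raw_ostream.h"

    llvm::Printable printReg(unsigned Reg) {
      return llvm::Printable([Reg](llvm::raw_ostream &OS) {
        OS << "%reg" << Reg; // stand-in for getRegisterName(Reg)
      });
    }

    // Usage: llvm::errs() << printReg(42) << "\n";
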
diff --git a/contrib/llvm/include/llvm/Support/Process.h b/contrib/llvm/include/llvm/Support/Process.h
new file mode 100644
index 0000000..cfdd06c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Process.h
@@ -0,0 +1,190 @@
+//===- llvm/Support/Process.h -----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Provides a library for accessing information about this process and other
+/// processes on the operating system. Also provides means of spawning
+/// subprocesses for commands. The design of this library is modeled after the
+/// proposed design of the Boost.Process library, and is designed specifically to
+/// follow the style of standard libraries and potentially become a proposal
+/// for a standard library.
+///
+/// This file declares the llvm::sys::Process class which contains a collection
+/// of legacy static interfaces for extracting various information about the
+/// current process. The goal is to migrate users of this API over to the new
+/// interfaces.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PROCESS_H
+#define LLVM_SUPPORT_PROCESS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/TimeValue.h"
+#include <system_error>
+
+namespace llvm {
+class StringRef;
+
+namespace sys {
+
+
+/// \brief A collection of legacy interfaces for querying information about the
+/// current executing process.
+class Process {
+public:
+ static unsigned getPageSize();
+
+ /// \brief Return process memory usage.
+ /// This static function will return the total amount of memory allocated
+ /// by the process. This only counts the memory allocated via the malloc,
+ /// calloc and realloc functions and includes any "free" holes in the
+ /// allocated space.
+ static size_t GetMallocUsage();
+
+ /// This static function will set \p user_time to the amount of CPU time
+ /// spent in user (non-kernel) mode and \p sys_time to the amount of CPU
+ /// time spent in system (kernel) mode. If the operating system does not
+  /// support collection of these metrics, a zero TimeValue will be returned
+  /// for both values.
+ /// \param elapsed Returns the TimeValue::now() giving current time
+ /// \param user_time Returns the current amount of user time for the process
+ /// \param sys_time Returns the current amount of system time for the process
+ static void GetTimeUsage(TimeValue &elapsed, TimeValue &user_time,
+ TimeValue &sys_time);
+
+ /// This function makes the necessary calls to the operating system to
+ /// prevent core files or any other kind of large memory dumps that can
+ /// occur when a program fails.
+ /// @brief Prevent core file generation.
+ static void PreventCoreFiles();
+
+ // This function returns the environment variable \arg name's value as a UTF-8
+  // string. \arg name is assumed to be in UTF-8 encoding too.
+ static Optional<std::string> GetEnv(StringRef name);
+
+ /// This function searches for an existing file in the list of directories
+ /// in a PATH like environment variable, and returns the first file found,
+ /// according to the order of the entries in the PATH like environment
+ /// variable.
+ static Optional<std::string> FindInEnvPath(const std::string& EnvName,
+ const std::string& FileName);
+
+ /// This function returns a SmallVector containing the arguments passed from
+ /// the operating system to the program. This function expects to be handed
+ /// the vector passed in from main.
+ static std::error_code
+ GetArgumentVector(SmallVectorImpl<const char *> &Args,
+ ArrayRef<const char *> ArgsFromMain,
+ SpecificBumpPtrAllocator<char> &ArgAllocator);
+
+  // This function ensures that the standard file descriptors (input, output,
+ // and error) are properly mapped to a file descriptor before we use any of
+ // them. This should only be called by standalone programs, library
+ // components should not call this.
+ static std::error_code FixupStandardFileDescriptors();
+
+ // This function safely closes a file descriptor. It is not safe to retry
+ // close(2) when it returns with errno equivalent to EINTR; this is because
+ // *nixen cannot agree if the file descriptor is, in fact, closed when this
+ // occurs.
+ //
+ // N.B. Some operating systems, due to thread cancellation, cannot properly
+ // guarantee that it will or will not be closed one way or the other!
+ static std::error_code SafelyCloseFileDescriptor(int FD);
+
+ /// This function determines if the standard input is connected directly
+ /// to a user's input (keyboard probably), rather than coming from a file
+ /// or pipe.
+ static bool StandardInIsUserInput();
+
+ /// This function determines if the standard output is connected to a
+ /// "tty" or "console" window. That is, the output would be displayed to
+ /// the user rather than being put on a pipe or stored in a file.
+ static bool StandardOutIsDisplayed();
+
+ /// This function determines if the standard error is connected to a
+ /// "tty" or "console" window. That is, the output would be displayed to
+ /// the user rather than being put on a pipe or stored in a file.
+ static bool StandardErrIsDisplayed();
+
+ /// This function determines if the given file descriptor is connected to
+ /// a "tty" or "console" window. That is, the output would be displayed to
+ /// the user rather than being put on a pipe or stored in a file.
+ static bool FileDescriptorIsDisplayed(int fd);
+
+  /// This function determines if the given file descriptor is displayed and
+ /// supports colors.
+ static bool FileDescriptorHasColors(int fd);
+
+ /// This function determines the number of columns in the window
+ /// if standard output is connected to a "tty" or "console"
+ /// window. If standard output is not connected to a tty or
+ /// console, or if the number of columns cannot be determined,
+ /// this routine returns zero.
+ static unsigned StandardOutColumns();
+
+ /// This function determines the number of columns in the window
+ /// if standard error is connected to a "tty" or "console"
+ /// window. If standard error is not connected to a tty or
+ /// console, or if the number of columns cannot be determined,
+ /// this routine returns zero.
+ static unsigned StandardErrColumns();
+
+ /// This function determines whether the terminal connected to standard
+ /// output supports colors. If standard output is not connected to a
+ /// terminal, this function returns false.
+ static bool StandardOutHasColors();
+
+ /// This function determines whether the terminal connected to standard
+ /// error supports colors. If standard error is not connected to a
+ /// terminal, this function returns false.
+ static bool StandardErrHasColors();
+
+ /// Controls whether ANSI escape sequences are used to output colors.
+ /// This only has an effect on Windows.
+ /// Note: Setting this option is not thread-safe and should only be done
+ /// during initialization.
+ static void UseANSIEscapeCodes(bool enable);
+
+ /// Whether changing colors requires the output to be flushed.
+ /// This is needed on systems that don't support escape sequences for
+ /// changing colors.
+ static bool ColorNeedsFlush();
+
+ /// This function returns the color code escape sequences.
+ /// If ColorNeedsFlush() is true then this function will change the colors
+ /// and return an empty escape sequence. In that case it is the
+ /// responsibility of the client to flush the output stream prior to
+ /// calling this function.
+ static const char *OutputColor(char c, bool bold, bool bg);
+
+ /// Same as OutputColor, but only enables the bold attribute.
+ static const char *OutputBold(bool bg);
+
+ /// This function returns the escape sequence to reverse foreground and
+ /// background colors.
+ static const char *OutputReverse();
+
+ /// Resets the terminal's colors, or returns an escape sequence to do so.
+ static const char *ResetColor();
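+
+ // Taken together, a hedged sketch of how a client might emit colored text
+ // with these hooks (the stream name OS is an assumption):
+ //
+ //   if (sys::Process::ColorNeedsFlush())
+ //     OS.flush();
+ //   OS << sys::Process::OutputColor('0', /*bold=*/true, /*bg=*/false)
+ //      << "error" << sys::Process::ResetColor();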
+
+ /// Get the result of a process-wide random number generator. The
+ /// generator will be automatically seeded in a non-deterministic fashion.
+ static unsigned GetRandomNumber();
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Program.h b/contrib/llvm/include/llvm/Support/Program.h
new file mode 100644
index 0000000..4330210
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Program.h
@@ -0,0 +1,192 @@
+//===- llvm/Support/Program.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Program class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_PROGRAM_H
+#define LLVM_SUPPORT_PROGRAM_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorOr.h"
+#include <system_error>
+
+namespace llvm {
+class StringRef;
+
+namespace sys {
+
+ /// This is the OS-specific separator for PATH-like environment variables:
+ /// a colon on Unix or a semicolon on Windows.
+#if defined(LLVM_ON_UNIX)
+ const char EnvPathSeparator = ':';
+#elif defined (LLVM_ON_WIN32)
+ const char EnvPathSeparator = ';';
+#endif
+
+/// @brief This struct encapsulates information about a process.
+struct ProcessInfo {
+#if defined(LLVM_ON_UNIX)
+ typedef pid_t ProcessId;
+#elif defined(LLVM_ON_WIN32)
+ typedef unsigned long ProcessId; // Must match the type of DWORD on Windows.
+ typedef void * HANDLE; // Must match the type of HANDLE on Windows.
+ /// The handle to the process (available on Windows only).
+ HANDLE ProcessHandle;
+#else
+#error "ProcessInfo is not defined for this platform!"
+#endif
+
+ /// The process identifier.
+ ProcessId Pid;
+
+ /// The return code, set after execution.
+ int ReturnCode;
+
+ ProcessInfo();
+};
+
+ /// \brief Find the first executable file \p Name in \p Paths.
+ ///
+ /// This does not perform hashing as a shell would but instead stats each PATH
+ /// entry individually so should generally be avoided. Core LLVM library
+ /// functions and options should instead require fully specified paths.
+ ///
+ /// \param Name name of the executable to find. If it contains any system
+ /// slashes, it will be returned as is.
+ /// \param Paths optional list of paths to search for \p Name. If empty it
+ /// will use the system PATH environment instead.
+ ///
+ /// \returns the fully qualified path to the first \p Name in \p Paths if it
+ /// exists, \p Name itself if it contains slashes, or an error otherwise.
+ ErrorOr<std::string>
+ findProgramByName(StringRef Name, ArrayRef<StringRef> Paths = None);
+
+ // These functions change the specified standard stream (stdin or stdout) to
+ // binary mode. They return errc::success if the specified stream
+ // was changed. Otherwise a platform dependent error is returned.
+ std::error_code ChangeStdinToBinary();
+ std::error_code ChangeStdoutToBinary();
+
+ /// This function executes the program using the arguments provided. The
+ /// invoked program will inherit the stdin, stdout, and stderr file
+ /// descriptors, the environment and other configuration settings of the
+ /// invoking program.
+ /// This function waits for the program to finish, so should be avoided in
+ /// library functions that aren't expected to block. Consider using
+ /// ExecuteNoWait() instead.
+ /// @returns an integer result code indicating the status of the program.
+ /// A zero or positive value indicates the result code of the program.
+ /// -1 indicates failure to execute the program.
+ /// -2 indicates a crash or timeout during execution.
+ int ExecuteAndWait(
+ StringRef Program, ///< Path of the program to be executed. It is
+ /// presumed this is the result of the findProgramByName method.
+ const char **args, ///< A vector of strings that are passed to the
+ ///< program. The first element should be the name of the program.
+ ///< The list *must* be terminated by a null char* entry.
+ const char **env = nullptr, ///< An optional vector of strings to use for
+ ///< the program's environment. If not provided, the current program's
+ ///< environment will be used.
+ const StringRef **redirects = nullptr, ///< An optional array of pointers
+ ///< to paths. If the array is null, no redirection is done. The array
+ ///< should have a size of at least three. The inferior process's
+ ///< stdin(0), stdout(1), and stderr(2) will be redirected to the
+ ///< corresponding paths.
+ ///< When an empty path is passed in, the corresponding file
+ ///< descriptor will be disconnected (ie, /dev/null'd) in a portable
+ ///< way.
+ unsigned secondsToWait = 0, ///< If non-zero, this specifies the amount
+ ///< of time to wait for the child process to exit. If the time
+ ///< expires, the child is killed and this call returns. If zero,
+ ///< this function will wait until the child finishes or forever if
+ ///< it doesn't.
+ unsigned memoryLimit = 0, ///< If non-zero, this specifies the maximum
+ ///< amount of memory that can be allocated by the process. If memory
+ ///< usage exceeds this limit, the child is killed and this call returns.
+ ///< If zero, no memory limit is enforced.
+ std::string *ErrMsg = nullptr, ///< If non-null, provides a pointer to a
+ ///< string instance in which error messages will be returned. If the
+ ///< string is non-empty upon return an error occurred while invoking the
+ ///< program.
+ bool *ExecutionFailed = nullptr);
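+
+ // For illustration, a sketch that runs a program found on PATH with its
+ // stderr disconnected; the program name is an assumption:
+ //
+ //   ErrorOr<std::string> Clang = sys::findProgramByName("clang");
+ //   if (Clang) {
+ //     const char *Args[] = {"clang", "--version", nullptr};
+ //     StringRef Empty;
+ //     const StringRef *Redirects[] = {nullptr, nullptr, &Empty};
+ //     int RC = sys::ExecuteAndWait(*Clang, Args, nullptr, Redirects);
+ //   }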
+
+ /// Similar to ExecuteAndWait, but returns immediately.
+ /// @returns The \see ProcessInfo of the newly launched process.
+ /// \note On Microsoft Windows systems, users will need to either call \see
+ /// Wait until the process finishes execution or call the Win32 CloseHandle()
+ /// API on ProcessInfo.ProcessHandle to avoid memory leaks.
+ ProcessInfo
+ ExecuteNoWait(StringRef Program, const char **args, const char **env = nullptr,
+ const StringRef **redirects = nullptr, unsigned memoryLimit = 0,
+ std::string *ErrMsg = nullptr, bool *ExecutionFailed = nullptr);
+
+ /// Return true if the given arguments fit within system-specific
+ /// argument length limits.
+ bool argumentsFitWithinSystemLimits(ArrayRef<const char*> Args);
+
+ /// File encoding options when writing contents that a non-UTF8 tool will
+ /// read (on Windows systems). For UNIX, we always use UTF-8.
+ enum WindowsEncodingMethod {
+ /// UTF-8 is the LLVM native encoding, being the same as "do not perform
+ /// encoding conversion".
+ WEM_UTF8,
+ WEM_CurrentCodePage,
+ WEM_UTF16
+ };
+
+ /// Saves the UTF8-encoded \p contents string into the file \p FileName
+ /// using a specific encoding.
+ ///
+ /// This write file function adds the possibility to choose which encoding
+ /// to use when writing a text file. On Windows, this is important when
+ /// writing files with internationalization support with an encoding that is
+ /// different from the one used in LLVM (UTF-8). We use this when writing
+ /// response files, since GCC tools on MinGW only understand legacy code
+ /// pages, and VisualStudio tools only understand UTF-16.
+ /// For UNIX, using different encodings is silently ignored, since all tools
+ /// work well with UTF-8.
+ /// This function assumes that you only use UTF-8 *text* data and will convert
+ /// it to your desired encoding before writing to the file.
+ ///
+ /// FIXME: We use WEM_CurrentCodePage to write response files for GNU tools
+ /// in a MinGW/MinGW-w64 environment, which has serious flaws but currently
+ /// is our best shot to make gcc/ld understand international characters.
+ /// This should be changed as soon as binutils fixes this to support UTF-16
+ /// on MinGW.
+ ///
+ /// \returns a non-zero error_code on failure.
+ std::error_code
+ writeFileWithEncoding(StringRef FileName, StringRef Contents,
+ WindowsEncodingMethod Encoding = WEM_UTF8);
+
+ /// This function waits for the process specified by \p PI to finish.
+ /// \returns A \see ProcessInfo struct with Pid set to:
+ /// \li The process id of the child process if the child process has changed
+ /// state.
+ /// \li 0 if the child process has not changed state.
+ /// \note Users of this function should always check the ReturnCode member of
+ /// the \see ProcessInfo returned from this function.
+ ProcessInfo Wait(
+ const ProcessInfo &PI, ///< The child process that should be waited on.
+ unsigned SecondsToWait, ///< If non-zero, this specifies the amount of
+ ///< time to wait for the child process to exit. If the time expires, the
+ ///< child is killed and this function returns. If zero, this function
+ ///< will perform a non-blocking wait on the child process.
+ bool WaitUntilTerminates, ///< If true, ignores \p SecondsToWait and waits
+ ///< until child has terminated.
+ std::string *ErrMsg = nullptr ///< If non-null, provides a pointer to a
+ ///< string instance in which error messages will be returned. If the
+ ///< string is non-empty upon return an error occurred while invoking the
+ ///< program.
+ );
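+
+ // A sketch pairing ExecuteNoWait with Wait (Program and Args are assumed
+ // to be set up as in the ExecuteAndWait sketch above):
+ //
+ //   ProcessInfo PI = sys::ExecuteNoWait(Program, Args);
+ //   PI = sys::Wait(PI, /*SecondsToWait=*/0, /*WaitUntilTerminates=*/true);
+ //   if (PI.ReturnCode != 0)
+ //     ; // The child failed; always check ReturnCode.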
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/RWMutex.h b/contrib/llvm/include/llvm/Support/RWMutex.h
new file mode 100644
index 0000000..4be9313
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/RWMutex.h
@@ -0,0 +1,177 @@
+//===- RWMutex.h - Reader/Writer Mutual Exclusion Lock ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::RWMutex class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RWMUTEX_H
+#define LLVM_SUPPORT_RWMUTEX_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm
+{
+ namespace sys
+ {
+ /// @brief Platform agnostic RWMutex class.
+ class RWMutexImpl
+ {
+ /// @name Constructors
+ /// @{
+ public:
+
+ /// Initializes the lock but doesn't acquire it.
+ /// @brief Default Constructor.
+ explicit RWMutexImpl();
+
+ /// Releases and removes the lock
+ /// @brief Destructor
+ ~RWMutexImpl();
+
+ /// @}
+ /// @name Methods
+ /// @{
+ public:
+
+ /// Attempts to unconditionally acquire the lock in reader mode. If the
+ /// lock is held by a writer, this method will wait until it can acquire
+ /// the lock.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally acquire the lock in reader mode.
+ bool reader_acquire();
+
+ /// Attempts to release the lock in reader mode.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally release the lock in reader mode.
+ bool reader_release();
+
+ /// Attempts to unconditionally acquire the lock in writer mode. If the
+ /// lock is held by any readers or another writer, this method will wait
+ /// until it can acquire the lock.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally acquire the lock in writer mode.
+ bool writer_acquire();
+
+ /// Attempts to release the lock in writer mode.
+ /// @returns false if any kind of error occurs, true otherwise.
+ /// @brief Unconditionally release the lock in write mode.
+ bool writer_release();
+
+ /// @}
+ /// @name Platform Dependent Data
+ /// @{
+ private:
+#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
+ void* data_; ///< Opaque platform-specific lock data.
+#endif
+
+ /// @}
+ /// @name Do Not Implement
+ /// @{
+ private:
+ RWMutexImpl(const RWMutexImpl & original) = delete;
+ void operator=(const RWMutexImpl &) = delete;
+ /// @}
+ };
+
+ /// SmartRWMutex - An R/W mutex with a compile-time constant parameter that
+ /// indicates whether this mutex should become a no-op when we're not
+ /// running in multithreaded mode.
+ template<bool mt_only>
+ class SmartRWMutex {
+ RWMutexImpl impl;
+ unsigned readers, writers;
+ public:
+ explicit SmartRWMutex() : impl(), readers(0), writers(0) { }
+
+ bool lock_shared() {
+ if (!mt_only || llvm_is_multithreaded())
+ return impl.reader_acquire();
+
+ // Single-threaded debugging code. This would be racy in multithreaded
+ // mode, but provides sanity checks in single-threaded mode.
+ ++readers;
+ return true;
+ }
+
+ bool unlock_shared() {
+ if (!mt_only || llvm_is_multithreaded())
+ return impl.reader_release();
+
+ // Single-threaded debugging code. This would be racy in multithreaded
+ // mode, but provides sanity checks in single-threaded mode.
+ assert(readers > 0 && "Reader lock not acquired before release!");
+ --readers;
+ return true;
+ }
+
+ bool lock() {
+ if (!mt_only || llvm_is_multithreaded())
+ return impl.writer_acquire();
+
+ // Single-threaded debugging code. This would be racy in multithreaded
+ // mode, but provides sanity checks in single-threaded mode.
+ assert(writers == 0 && "Writer lock already acquired!");
+ ++writers;
+ return true;
+ }
+
+ bool unlock() {
+ if (!mt_only || llvm_is_multithreaded())
+ return impl.writer_release();
+
+ // Single-threaded debugging code. This would be racy in multithreaded
+ // mode, but provides sanity checks in single-threaded mode.
+ assert(writers == 1 && "Writer lock not acquired before release!");
+ --writers;
+ return true;
+ }
+
+ private:
+ SmartRWMutex(const SmartRWMutex<mt_only> & original);
+ void operator=(const SmartRWMutex<mt_only> &);
+ };
+ typedef SmartRWMutex<false> RWMutex;
+
+ /// ScopedReader - RAII acquisition of a reader lock
+ template<bool mt_only>
+ struct SmartScopedReader {
+ SmartRWMutex<mt_only>& mutex;
+
+ explicit SmartScopedReader(SmartRWMutex<mt_only>& m) : mutex(m) {
+ mutex.lock_shared();
+ }
+
+ ~SmartScopedReader() {
+ mutex.unlock_shared();
+ }
+ };
+ typedef SmartScopedReader<false> ScopedReader;
+
+ /// ScopedWriter - RAII acquisition of a writer lock
+ template<bool mt_only>
+ struct SmartScopedWriter {
+ SmartRWMutex<mt_only>& mutex;
+
+ explicit SmartScopedWriter(SmartRWMutex<mt_only>& m) : mutex(m) {
+ mutex.lock();
+ }
+
+ ~SmartScopedWriter() {
+ mutex.unlock();
+ }
+ };
+ typedef SmartScopedWriter<false> ScopedWriter;
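+
+ // A usage sketch: guard shared state with the RAII helpers (Lock is an
+ // illustrative name):
+ //
+ //   static SmartRWMutex<true> Lock;
+ //   { SmartScopedReader<true> R(Lock); /* ... read shared state ... */ }
+ //   { SmartScopedWriter<true> W(Lock); /* ... mutate shared state ... */ }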
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/RandomNumberGenerator.h b/contrib/llvm/include/llvm/Support/RandomNumberGenerator.h
new file mode 100644
index 0000000..7446558
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/RandomNumberGenerator.h
@@ -0,0 +1,58 @@
+//==- llvm/Support/RandomNumberGenerator.h - RNG for diversity ---*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an abstraction for deterministic random number
+// generation (RNG). Note that the current implementation is not
+// cryptographically secure as it uses the C++11 <random> facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_
+#define LLVM_SUPPORT_RANDOMNUMBERGENERATOR_H_
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h" // Needed for uint64_t on Windows.
+#include <random>
+
+namespace llvm {
+
+/// A random number generator.
+///
+/// Instances of this class should not be shared across threads. The
+/// seed should be set by passing the -rng-seed=<uint64> option. Use
+/// Module::createRNG to create a new RNG instance for use with that
+/// module.
+class RandomNumberGenerator {
+public:
+ /// Returns a random number in the range [0, UINT64_MAX].
+ uint_fast64_t operator()();
+
+private:
+ /// Seeds and salts the underlying RNG engine.
+ ///
+ /// This constructor should not be used directly. Instead use
+ /// Module::createRNG to create a new RNG salted with the Module ID.
+ RandomNumberGenerator(StringRef Salt);
+
+ // 64-bit Mersenne Twister by Matsumoto and Nishimura, 2000
+ // http://en.cppreference.com/w/cpp/numeric/random/mersenne_twister_engine
+ // This RNG is deterministically portable across C++11
+ // implementations.
+ std::mt19937_64 Generator;
+
+ // Noncopyable.
+ RandomNumberGenerator(const RandomNumberGenerator &other) = delete;
+ RandomNumberGenerator &operator=(const RandomNumberGenerator &other) = delete;
+
+ friend class Module;
+};
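+
+// A hedged usage sketch; Module::createRNG is declared elsewhere (in
+// llvm/IR/Module.h), and its exact signature here is an assumption:
+//
+//   auto RNG = M.createRNG(&ThisPass); // M is a Module, ThisPass a Pass.
+//   uint64_t Value = (*RNG)();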
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Recycler.h b/contrib/llvm/include/llvm/Support/Recycler.h
new file mode 100644
index 0000000..a38050d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Recycler.h
@@ -0,0 +1,114 @@
+//==- llvm/Support/Recycler.h - Recycling Allocator --------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Recycler class template. See the doxygen comment for
+// Recycler for more details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RECYCLER_H
+#define LLVM_SUPPORT_RECYCLER_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+
+namespace llvm {
+
+/// PrintRecyclingAllocatorStats - Helper for RecyclingAllocator for
+/// printing statistics.
+///
+void PrintRecyclerStats(size_t Size, size_t Align, size_t FreeListSize);
+
+/// Recycler - This class manages a linked-list of deallocated nodes
+/// and facilitates reusing deallocated memory in place of allocating
+/// new memory.
+///
+template<class T, size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
+class Recycler {
+ struct FreeNode {
+ FreeNode *Next;
+ };
+
+ /// List of nodes that have deleted contents and are not in active use.
+ FreeNode *FreeList = nullptr;
+
+ FreeNode *pop_val() {
+ auto *Val = FreeList;
+ FreeList = FreeList->Next;
+ return Val;
+ }
+
+ void push(FreeNode *N) {
+ N->Next = FreeList;
+ FreeList = N;
+ }
+
+public:
+ ~Recycler() {
+ // If this fails, either the caller has lost track of some allocation,
+ // or the caller isn't tracking allocations and should just call
+ // clear() before deleting the Recycler.
+ assert(!FreeList && "Non-empty recycler deleted!");
+ }
+
+ /// clear - Release all the tracked allocations to the allocator. The
+ /// recycler must be free of any tracked allocations before being
+ /// deleted; calling clear is one way to ensure this.
+ template<class AllocatorType>
+ void clear(AllocatorType &Allocator) {
+ while (FreeList) {
+ T *t = reinterpret_cast<T *>(pop_val());
+ Allocator.Deallocate(t);
+ }
+ }
+
+ /// Special case for BumpPtrAllocator which has an empty Deallocate()
+ /// function.
+ ///
+ /// There is no need to traverse the free list, pulling all the objects into
+ /// cache.
+ void clear(BumpPtrAllocator &) { FreeList = nullptr; }
+
+ template<class SubClass, class AllocatorType>
+ SubClass *Allocate(AllocatorType &Allocator) {
+ static_assert(AlignOf<SubClass>::Alignment <= Align,
+ "Recycler allocation alignment is less than object align!");
+ static_assert(sizeof(SubClass) <= Size,
+ "Recycler allocation size is less than object size!");
+ return FreeList ? reinterpret_cast<SubClass *>(pop_val())
+ : static_cast<SubClass *>(Allocator.Allocate(Size, Align));
+ }
+
+ template<class AllocatorType>
+ T *Allocate(AllocatorType &Allocator) {
+ return Allocate<T>(Allocator);
+ }
+
+ template<class SubClass, class AllocatorType>
+ void Deallocate(AllocatorType & /*Allocator*/, SubClass* Element) {
+ push(reinterpret_cast<FreeNode *>(Element));
+ }
+
+ void PrintStats();
+};
+
+template <class T, size_t Size, size_t Align>
+void Recycler<T, Size, Align>::PrintStats() {
+ size_t S = 0;
+ for (auto *I = FreeList; I; I = I->Next)
+ ++S;
+ PrintRecyclerStats(Size, Align, S);
+}
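+
+// A minimal usage sketch (MyNode is an illustrative type):
+//
+//   BumpPtrAllocator Alloc;
+//   Recycler<MyNode> Pool;
+//   MyNode *N = new (Pool.Allocate(Alloc)) MyNode();
+//   N->~MyNode();
+//   Pool.Deallocate(Alloc, N);   // Recycled by the next Allocate().
+//   Pool.clear(Alloc);           // Required before Pool is destroyed.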
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/RecyclingAllocator.h b/contrib/llvm/include/llvm/Support/RecyclingAllocator.h
new file mode 100644
index 0000000..001d1cf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/RecyclingAllocator.h
@@ -0,0 +1,77 @@
+//==- llvm/Support/RecyclingAllocator.h - Recycling Allocator ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecyclingAllocator class. See the doxygen comment for
+// RecyclingAllocator for more details on the implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RECYCLINGALLOCATOR_H
+#define LLVM_SUPPORT_RECYCLINGALLOCATOR_H
+
+#include "llvm/Support/Recycler.h"
+
+namespace llvm {
+
+/// RecyclingAllocator - This class wraps an Allocator, adding the
+/// functionality of recycling deleted objects.
+///
+template<class AllocatorType, class T,
+ size_t Size = sizeof(T), size_t Align = AlignOf<T>::Alignment>
+class RecyclingAllocator {
+private:
+ /// Base - Implementation details.
+ ///
+ Recycler<T, Size, Align> Base;
+
+ /// Allocator - The wrapped allocator.
+ ///
+ AllocatorType Allocator;
+
+public:
+ ~RecyclingAllocator() { Base.clear(Allocator); }
+
+ /// Allocate - Return a pointer to storage for an object of type
+ /// SubClass. The storage may be either newly allocated or recycled.
+ ///
+ template<class SubClass>
+ SubClass *Allocate() { return Base.template Allocate<SubClass>(Allocator); }
+
+ T *Allocate() { return Base.Allocate(Allocator); }
+
+ /// Deallocate - Release storage for the pointed-to object. The
+ /// storage will be kept track of and may be recycled.
+ ///
+ template<class SubClass>
+ void Deallocate(SubClass* E) { return Base.Deallocate(Allocator, E); }
+
+ void PrintStats() {
+ Allocator.PrintStats();
+ Base.PrintStats();
+ }
+};
+
+}
+
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void *operator new(size_t size,
+ llvm::RecyclingAllocator<AllocatorType,
+ T, Size, Align> &Allocator) {
+ assert(size <= Size && "allocation size exceeded");
+ return Allocator.Allocate();
+}
+
+template<class AllocatorType, class T, size_t Size, size_t Align>
+inline void operator delete(void *E,
+ llvm::RecyclingAllocator<AllocatorType,
+ T, Size, Align> &A) {
+ A.Deallocate(E);
+}
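+
+// Putting it together, a sketch with an assumed node type MyNode; the
+// placement operator new above routes allocation through the recycler:
+//
+//   llvm::RecyclingAllocator<llvm::BumpPtrAllocator, MyNode> Alloc;
+//   MyNode *N = new (Alloc) MyNode();
+//   N->~MyNode();
+//   Alloc.Deallocate(N);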
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Regex.h b/contrib/llvm/include/llvm/Support/Regex.h
new file mode 100644
index 0000000..31b35ed
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Regex.h
@@ -0,0 +1,105 @@
+//===-- Regex.h - Regular Expression matcher implementation -*- C++ -*-----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a POSIX regular expression matcher. Both Basic (BRE)
+// and Extended (ERE) POSIX regular expressions are supported. EREs were
+// extended to support backreferences in matches.
+// This implementation also supports matching strings with embedded NUL chars.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_REGEX_H
+#define LLVM_SUPPORT_REGEX_H
+
+#include <string>
+
+struct llvm_regex;
+
+namespace llvm {
+ class StringRef;
+ template<typename T> class SmallVectorImpl;
+
+ class Regex {
+ public:
+ enum {
+ NoFlags=0,
+ /// Compile for matching that ignores upper/lower case distinctions.
+ IgnoreCase=1,
+ /// Compile for newline-sensitive matching. With this flag, '[^...]' bracket
+ /// expressions and '.' never match a newline. A ^ anchor matches the
+ /// null string after any newline in the string in addition to its normal
+ /// function, and the $ anchor matches the null string before any
+ /// newline in the string in addition to its normal function.
+ Newline=2,
+ /// By default, the POSIX extended regular expression (ERE) syntax is
+ /// assumed. Pass this flag to turn on basic regular expressions (BRE)
+ /// instead.
+ BasicRegex=4
+ };
+
+ /// Compiles the given regular expression \p Regex.
+ Regex(StringRef Regex, unsigned Flags = NoFlags);
+ Regex(const Regex &) = delete;
+ Regex &operator=(Regex regex) {
+ std::swap(preg, regex.preg);
+ std::swap(error, regex.error);
+ return *this;
+ }
+ Regex(Regex &&regex) {
+ preg = regex.preg;
+ error = regex.error;
+ regex.preg = nullptr;
+ }
+ ~Regex();
+
+ /// isValid - returns true if the regex compiled successfully. Otherwise
+ /// returns false and fills in \p Error with the compilation error.
+ bool isValid(std::string &Error);
+
+ /// getNumMatches - In a valid regex, return the number of parenthesized
+ /// matches it contains. The number filled in by match will include this
+ /// many entries plus one for the whole regex (as element 0).
+ unsigned getNumMatches() const;
+
+ /// matches - Match the regex against a given \p String.
+ ///
+ /// \param Matches - If given, on a successful match this will be filled in
+ /// with references to the matched group expressions (inside \p String),
+ /// the first group is always the entire pattern.
+ ///
+ /// This returns true on a successful match.
+ bool match(StringRef String, SmallVectorImpl<StringRef> *Matches = nullptr);
+
+ /// sub - Return the result of replacing the first match of the regex in
+ /// \p String with the \p Repl string. Backreferences like "\0" in the
+ /// replacement string are replaced with the appropriate match substring.
+ ///
+ /// Note that the replacement string has backslash escaping performed on
+ /// it. Invalid backreferences are ignored (replaced by empty strings).
+ ///
+ /// \param Error If non-null, any errors in the substitution (invalid
+ /// backreferences, trailing backslashes) will be recorded as a non-empty
+ /// string.
+ std::string sub(StringRef Repl, StringRef String,
+ std::string *Error = nullptr);
+
+ /// \brief If this function returns true, ^Str$ is an extended regular
+ /// expression that matches Str and only Str.
+ static bool isLiteralERE(StringRef Str);
+
+ /// \brief Turn String into a regex by escaping its special characters.
+ static std::string escape(StringRef String);
+
+ private:
+ struct llvm_regex *preg;
+ int error;
+ };
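+
+ // A usage sketch (the pattern and input are illustrative):
+ //
+ //   Regex R("^([0-9]+)-([0-9]+)$");
+ //   SmallVector<StringRef, 3> Matches;
+ //   if (R.match("10-20", &Matches)) {
+ //     // Matches[0] == "10-20", Matches[1] == "10", Matches[2] == "20".
+ //   }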
+}
+
+#endif // LLVM_SUPPORT_REGEX_H
diff --git a/contrib/llvm/include/llvm/Support/Registry.h b/contrib/llvm/include/llvm/Support/Registry.h
new file mode 100644
index 0000000..bbea97b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Registry.h
@@ -0,0 +1,227 @@
+//=== Registry.h - Linker-supported plugin registries -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a registry template for discovering pluggable modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_REGISTRY_H
+#define LLVM_SUPPORT_REGISTRY_H
+
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <memory>
+
+namespace llvm {
+ /// A simple registry entry which provides only a name, description, and
+ /// no-argument constructor.
+ template <typename T>
+ class SimpleRegistryEntry {
+ const char *Name, *Desc;
+ std::unique_ptr<T> (*Ctor)();
+
+ public:
+ SimpleRegistryEntry(const char *N, const char *D, std::unique_ptr<T> (*C)())
+ : Name(N), Desc(D), Ctor(C)
+ {}
+
+ const char *getName() const { return Name; }
+ const char *getDesc() const { return Desc; }
+ std::unique_ptr<T> instantiate() const { return Ctor(); }
+ };
+
+ /// Traits for registry entries. If using other than SimpleRegistryEntry, it
+ /// is necessary to define an alternate traits class.
+ template <typename T>
+ class RegistryTraits {
+ RegistryTraits() = delete;
+
+ public:
+ typedef SimpleRegistryEntry<T> entry;
+
+ /// nameof/descof - Accessors for name and description of entries. These are
+ /// used to generate help for command-line options.
+ static const char *nameof(const entry &Entry) { return Entry.getName(); }
+ static const char *descof(const entry &Entry) { return Entry.getDesc(); }
+ };
+
+ /// A global registry used in conjunction with static constructors to make
+ /// pluggable components (like targets or garbage collectors) "just work" when
+ /// linked with an executable.
+ template <typename T, typename U = RegistryTraits<T> >
+ class Registry {
+ public:
+ typedef U traits;
+ typedef typename U::entry entry;
+
+ class node;
+ class listener;
+ class iterator;
+
+ private:
+ Registry() = delete;
+
+ static void Announce(const entry &E) {
+ for (listener *Cur = ListenerHead; Cur; Cur = Cur->Next)
+ Cur->registered(E);
+ }
+
+ friend class node;
+ static node *Head, *Tail;
+
+ friend class listener;
+ static listener *ListenerHead, *ListenerTail;
+
+ public:
+ /// Node in linked list of entries.
+ ///
+ class node {
+ friend class iterator;
+
+ node *Next;
+ const entry& Val;
+
+ public:
+ node(const entry& V) : Next(nullptr), Val(V) {
+ if (Tail)
+ Tail->Next = this;
+ else
+ Head = this;
+ Tail = this;
+
+ Announce(V);
+ }
+ };
+
+ /// Iterators for registry entries.
+ ///
+ class iterator {
+ const node *Cur;
+
+ public:
+ explicit iterator(const node *N) : Cur(N) {}
+
+ bool operator==(const iterator &That) const { return Cur == That.Cur; }
+ bool operator!=(const iterator &That) const { return Cur != That.Cur; }
+ iterator &operator++() { Cur = Cur->Next; return *this; }
+ const entry &operator*() const { return Cur->Val; }
+ const entry *operator->() const { return &Cur->Val; }
+ };
+
+ static iterator begin() { return iterator(Head); }
+ static iterator end() { return iterator(nullptr); }
+
+ static iterator_range<iterator> entries() {
+ return make_range(begin(), end());
+ }
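+
+ // For example, enumerating everything registered so far (a sketch;
+ // Collector is an assumed registry type):
+ //
+ //   for (const auto &E : Registry<Collector>::entries())
+ //     errs() << E.getName() << ": " << E.getDesc() << "\n";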
+
+ /// Abstract base class for registry listeners, which are informed when new
+ /// entries are added to the registry. Simply subclass and instantiate:
+ ///
+ /// \code
+ /// class CollectorPrinter : public Registry<Collector>::listener {
+ /// protected:
+ /// void registered(const Registry<Collector>::entry &e) {
+ /// cerr << "collector now available: " << e.getName() << "\n";
+ /// }
+ ///
+ /// public:
+ /// CollectorPrinter() { init(); } // Print those already registered.
+ /// };
+ ///
+ /// CollectorPrinter Printer;
+ /// \endcode
+ class listener {
+ listener *Prev, *Next;
+
+ friend void Registry::Announce(const entry &E);
+
+ protected:
+ /// Called when an entry is added to the registry.
+ ///
+ virtual void registered(const entry &) = 0;
+
+ /// Calls 'registered' for each pre-existing entry.
+ ///
+ void init() {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ registered(*I);
+ }
+
+ public:
+ listener() : Prev(ListenerTail), Next(nullptr) {
+ if (Prev)
+ Prev->Next = this;
+ else
+ ListenerHead = this;
+ ListenerTail = this;
+ }
+
+ virtual ~listener() {
+ if (Next)
+ Next->Prev = Prev;
+ else
+ ListenerTail = Prev;
+ if (Prev)
+ Prev->Next = Next;
+ else
+ ListenerHead = Next;
+ }
+ };
+
+ /// A static registration template. Use it like this:
+ ///
+ /// Registry<Collector>::Add<FancyGC>
+ /// X("fancy-gc", "Newfangled garbage collector.");
+ ///
+ /// Use of this template requires that:
+ ///
+ /// 1. The registered subclass has a default constructor.
+ ///
+ /// 2. The registry entry type has a constructor compatible with this
+ /// signature:
+ ///
+ /// entry(const char *Name, const char *ShortDesc, T *(*Ctor)());
+ ///
+ /// If you have more elaborate requirements, then copy and modify.
+ ///
+ template <typename V>
+ class Add {
+ entry Entry;
+ node Node;
+
+ static std::unique_ptr<T> CtorFn() { return make_unique<V>(); }
+
+ public:
+ Add(const char *Name, const char *Desc)
+ : Entry(Name, Desc, CtorFn), Node(Entry) {}
+ };
+
+ /// Registry::Parser now lives in llvm/Support/RegistryParser.h.
+ };
+
+ // Since these are defined in a header file, plugins must be sure to export
+ // these symbols.
+
+ template <typename T, typename U>
+ typename Registry<T,U>::node *Registry<T,U>::Head;
+
+ template <typename T, typename U>
+ typename Registry<T,U>::node *Registry<T,U>::Tail;
+
+ template <typename T, typename U>
+ typename Registry<T,U>::listener *Registry<T,U>::ListenerHead;
+
+ template <typename T, typename U>
+ typename Registry<T,U>::listener *Registry<T,U>::ListenerTail;
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_REGISTRY_H
diff --git a/contrib/llvm/include/llvm/Support/RegistryParser.h b/contrib/llvm/include/llvm/Support/RegistryParser.h
new file mode 100644
index 0000000..a6997b6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/RegistryParser.h
@@ -0,0 +1,55 @@
+//=== RegistryParser.h - Linker-supported plugin registries -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a command-line parser for a registry.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_REGISTRYPARSER_H
+#define LLVM_SUPPORT_REGISTRYPARSER_H
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Registry.h"
+
+namespace llvm {
+
+ /// A command-line parser for a registry. Use it like this:
+ ///
+ /// static cl::opt<Registry<Collector>::entry, false,
+ /// RegistryParser<Collector> >
+ /// GCOpt("gc", cl::desc("Garbage collector to use."),
+ /// cl::value_desc());
+ ///
+ /// To make use of the value:
+ ///
+ /// Collector *TheCollector = GCOpt->instantiate();
+ ///
+ template <typename T, typename U = RegistryTraits<T> >
+ class RegistryParser :
+ public cl::parser<const typename U::entry*>,
+ public Registry<T, U>::listener {
+ typedef U traits;
+ typedef typename U::entry entry;
+ typedef typename Registry<T, U>::listener listener;
+
+ protected:
+ void registered(const entry &E) {
+ addLiteralOption(traits::nameof(E), &E, traits::descof(E));
+ }
+
+ public:
+ void initialize(cl::Option &O) {
+ listener::init();
+ cl::parser<const typename U::entry*>::initialize(O);
+ }
+ };
+
+}
+
+#endif // LLVM_SUPPORT_REGISTRYPARSER_H
diff --git a/contrib/llvm/include/llvm/Support/SMLoc.h b/contrib/llvm/include/llvm/Support/SMLoc.h
new file mode 100644
index 0000000..c6e9a14
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SMLoc.h
@@ -0,0 +1,63 @@
+//===- SMLoc.h - Source location for use with diagnostics -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SMLoc class. This class encapsulates a location in
+// source code for use in diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SMLOC_H
+#define LLVM_SUPPORT_SMLOC_H
+
+#include <cassert>
+
+namespace llvm {
+
+/// Represents a location in source code.
+class SMLoc {
+ const char *Ptr;
+
+public:
+ SMLoc() : Ptr(nullptr) {}
+
+ bool isValid() const { return Ptr != nullptr; }
+
+ bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; }
+ bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; }
+
+ const char *getPointer() const { return Ptr; }
+
+ static SMLoc getFromPointer(const char *Ptr) {
+ SMLoc L;
+ L.Ptr = Ptr;
+ return L;
+ }
+};
+
+/// Represents a range in source code.
+///
+/// SMRange is implemented using a half-open range, as is the convention in C++.
+/// In the string "abc", the range [1,3) represents the substring "bc", and the
+/// range [2,2) represents an empty range between the characters "b" and "c".
+class SMRange {
+public:
+ SMLoc Start, End;
+
+ SMRange() {}
+ SMRange(SMLoc St, SMLoc En) : Start(St), End(En) {
+ assert(Start.isValid() == End.isValid() &&
+ "Start and end should either both be valid or both be invalid!");
+ }
+
+ bool isValid() const { return Start.isValid(); }
+};
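+
+// A sketch tying the two together (Buf is an illustrative buffer):
+//
+//   const char *Buf = "abc";
+//   SMRange BC(SMLoc::getFromPointer(Buf + 1), SMLoc::getFromPointer(Buf + 3));
+//   // BC covers the half-open range for "bc".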
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/SaveAndRestore.h b/contrib/llvm/include/llvm/Support/SaveAndRestore.h
new file mode 100644
index 0000000..ef154ac
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SaveAndRestore.h
@@ -0,0 +1,49 @@
+//===-- SaveAndRestore.h - Utility -------------------------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file provides utility classes that use RAII to save and restore
+/// values.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SAVEANDRESTORE_H
+#define LLVM_SUPPORT_SAVEANDRESTORE_H
+
+namespace llvm {
+
+/// A utility class that uses RAII to save and restore the value of a variable.
+template <typename T> struct SaveAndRestore {
+ SaveAndRestore(T &X) : X(X), OldValue(X) {}
+ SaveAndRestore(T &X, const T &NewValue) : X(X), OldValue(X) {
+ X = NewValue;
+ }
+ ~SaveAndRestore() { X = OldValue; }
+ T get() { return OldValue; }
+
+private:
+ T &X;
+ T OldValue;
+};
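+
+// A usage sketch: temporarily override a flag for the duration of a scope.
+//
+//   bool InLoop = false;
+//   {
+//     SaveAndRestore<bool> Guard(InLoop, true);
+//     // InLoop is true here.
+//   }
+//   // InLoop is false again here.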
+
+/// Similar to \c SaveAndRestore. Operates only on bools; the old value of
+/// the variable is saved, and during the destructor the old value is or'ed
+/// with the new value.
+struct SaveOr {
+ SaveOr(bool &X) : X(X), OldValue(X) { X = false; }
+ ~SaveOr() { X |= OldValue; }
+
+private:
+ bool &X;
+ const bool OldValue;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ScaledNumber.h b/contrib/llvm/include/llvm/Support/ScaledNumber.h
new file mode 100644
index 0000000..c6421ef
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ScaledNumber.h
@@ -0,0 +1,899 @@
+//===- llvm/Support/ScaledNumber.h - Support for scaled numbers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions (and a class) useful for working with scaled
+// numbers -- in particular, pairs of integers where one represents digits and
+// another represents a scale. The functions are helpers and live in the
+// namespace ScaledNumbers. The class ScaledNumber is useful for modelling
+// certain cost metrics that need simple, integer-like semantics that are easy
+// to reason about.
+//
+// These might remind you of soft-floats. If you want one of those, you're in
+// the wrong place. Look at include/llvm/ADT/APFloat.h instead.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SCALEDNUMBER_H
+#define LLVM_SUPPORT_SCALEDNUMBER_H
+
+#include "llvm/Support/MathExtras.h"
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <tuple>
+#include <utility>
+
+namespace llvm {
+namespace ScaledNumbers {
+
+/// \brief Maximum scale; same as APFloat for easy debug printing.
+const int32_t MaxScale = 16383;
+
+/// \brief Minimum scale; same as APFloat for easy debug printing.
+const int32_t MinScale = -16382;
+
+/// \brief Get the width of a number.
+template <class DigitsT> inline int getWidth() { return sizeof(DigitsT) * 8; }
+
+/// \brief Conditionally round up a scaled number.
+///
+/// Given \c Digits and \c Scale, round up iff \c ShouldRound is \c true.
+/// Always returns \c Scale unless there's an overflow, in which case it
+/// returns \c 1+Scale.
+///
+/// \pre adding 1 to \c Scale will not overflow INT16_MAX.
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getRounded(DigitsT Digits, int16_t Scale,
+ bool ShouldRound) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ if (ShouldRound)
+ if (!++Digits)
+ // Overflow.
+ return std::make_pair(DigitsT(1) << (getWidth<DigitsT>() - 1), Scale + 1);
+ return std::make_pair(Digits, Scale);
+}
+
+/// \brief Convenience helper for 32-bit rounding.
+inline std::pair<uint32_t, int16_t> getRounded32(uint32_t Digits, int16_t Scale,
+ bool ShouldRound) {
+ return getRounded(Digits, Scale, ShouldRound);
+}
+
+/// \brief Convenience helper for 64-bit rounding.
+inline std::pair<uint64_t, int16_t> getRounded64(uint64_t Digits, int16_t Scale,
+ bool ShouldRound) {
+ return getRounded(Digits, Scale, ShouldRound);
+}
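+
+// For example (a sketch): rounding saturated digits overflows into the scale:
+//
+//   getRounded32(UINT32_MAX, 0, /*ShouldRound=*/true);  // => {1u << 31, 1}
+//   getRounded32(UINT32_MAX, 0, /*ShouldRound=*/false); // => {UINT32_MAX, 0}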
+
+/// \brief Adjust a 64-bit scaled number down to the appropriate width.
+///
+/// \pre Adding 64 to \c Scale will not overflow INT16_MAX.
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getAdjusted(uint64_t Digits,
+ int16_t Scale = 0) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ const int Width = getWidth<DigitsT>();
+ if (Width == 64 || Digits <= std::numeric_limits<DigitsT>::max())
+ return std::make_pair(Digits, Scale);
+
+ // Shift right and round.
+ int Shift = 64 - Width - countLeadingZeros(Digits);
+ return getRounded<DigitsT>(Digits >> Shift, Scale + Shift,
+ Digits & (UINT64_C(1) << (Shift - 1)));
+}
+
+/// \brief Convenience helper for adjusting to 32 bits.
+inline std::pair<uint32_t, int16_t> getAdjusted32(uint64_t Digits,
+ int16_t Scale = 0) {
+ return getAdjusted<uint32_t>(Digits, Scale);
+}
+
+/// \brief Convenience helper for adjusting to 64 bits.
+inline std::pair<uint64_t, int16_t> getAdjusted64(uint64_t Digits,
+ int16_t Scale = 0) {
+ return getAdjusted<uint64_t>(Digits, Scale);
+}
+
+/// \brief Multiply two 64-bit integers to create a 64-bit scaled number.
+///
+/// Implemented with four 64-bit integer multiplies.
+std::pair<uint64_t, int16_t> multiply64(uint64_t LHS, uint64_t RHS);
+
+/// \brief Multiply two integers to create a scaled number.
+///
+/// Implemented with one 64-bit integer multiply for 32-bit digits; 64-bit
+/// digits fall back to \c multiply64().
+template <class DigitsT>
+inline std::pair<DigitsT, int16_t> getProduct(DigitsT LHS, DigitsT RHS) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ if (getWidth<DigitsT>() <= 32 || (LHS <= UINT32_MAX && RHS <= UINT32_MAX))
+ return getAdjusted<DigitsT>(uint64_t(LHS) * RHS);
+
+ return multiply64(LHS, RHS);
+}
+
+/// \brief Convenience helper for 32-bit product.
+inline std::pair<uint32_t, int16_t> getProduct32(uint32_t LHS, uint32_t RHS) {
+ return getProduct(LHS, RHS);
+}
+
+/// \brief Convenience helper for 64-bit product.
+inline std::pair<uint64_t, int16_t> getProduct64(uint64_t LHS, uint64_t RHS) {
+ return getProduct(LHS, RHS);
+}
+
+/// \brief Divide two 64-bit integers to create a 64-bit scaled number.
+///
+/// Implemented with long division.
+///
+/// \pre \c Dividend and \c Divisor are non-zero.
+std::pair<uint64_t, int16_t> divide64(uint64_t Dividend, uint64_t Divisor);
+
+/// \brief Divide two 32-bit integers to create a 32-bit scaled number.
+///
+/// Implemented with one 64-bit integer divide/remainder pair.
+///
+/// \pre \c Dividend and \c Divisor are non-zero.
+std::pair<uint32_t, int16_t> divide32(uint32_t Dividend, uint32_t Divisor);
+
+/// \brief Divide two numbers to create a scaled number.
+///
+/// Implemented with one 64-bit integer divide/remainder pair for 32-bit
+/// digits; 64-bit digits use long division via \c divide64().
+///
+/// Returns \c (DigitsT_MAX, MaxScale) for divide-by-zero (0 for 0/0).
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getQuotient(DigitsT Dividend, DigitsT Divisor) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+ static_assert(sizeof(DigitsT) == 4 || sizeof(DigitsT) == 8,
+ "expected 32-bit or 64-bit digits");
+
+ // Check for zero.
+ if (!Dividend)
+ return std::make_pair(0, 0);
+ if (!Divisor)
+ return std::make_pair(std::numeric_limits<DigitsT>::max(), MaxScale);
+
+ if (getWidth<DigitsT>() == 64)
+ return divide64(Dividend, Divisor);
+ return divide32(Dividend, Divisor);
+}
+
+/// \brief Convenience helper for 32-bit quotient.
+inline std::pair<uint32_t, int16_t> getQuotient32(uint32_t Dividend,
+ uint32_t Divisor) {
+ return getQuotient(Dividend, Divisor);
+}
+
+/// \brief Convenience helper for 64-bit quotient.
+inline std::pair<uint64_t, int16_t> getQuotient64(uint64_t Dividend,
+ uint64_t Divisor) {
+ return getQuotient(Dividend, Divisor);
+}
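+
+// Sketch of the divide-by-zero conventions noted above:
+//
+//   getQuotient32(0, 0); // => {0, 0}
+//   getQuotient32(1, 0); // => {UINT32_MAX, MaxScale}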
+
+/// \brief Implementation of getLg() and friends.
+///
+/// Returns the rounded lg of \c Digits*2^Scale and an int specifying whether
+/// this was rounded up (1), down (-1), or exact (0).
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT>
+inline std::pair<int32_t, int> getLgImpl(DigitsT Digits, int16_t Scale) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ if (!Digits)
+ return std::make_pair(INT32_MIN, 0);
+
+ // Get the floor of the lg of Digits.
+ int32_t LocalFloor = sizeof(Digits) * 8 - countLeadingZeros(Digits) - 1;
+
+ // Get the actual floor.
+ int32_t Floor = Scale + LocalFloor;
+ if (Digits == UINT64_C(1) << LocalFloor)
+ return std::make_pair(Floor, 0);
+
+ // Round based on the next digit.
+ assert(LocalFloor >= 1);
+ bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
+ return std::make_pair(Floor + Round, Round ? 1 : -1);
+}
+
+/// \brief Get the lg (rounded) of a scaled number.
+///
+/// Get the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT> int32_t getLg(DigitsT Digits, int16_t Scale) {
+ return getLgImpl(Digits, Scale).first;
+}
+
+/// \brief Get the lg floor of a scaled number.
+///
+/// Get the floor of the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT> int32_t getLgFloor(DigitsT Digits, int16_t Scale) {
+ auto Lg = getLgImpl(Digits, Scale);
+ return Lg.first - (Lg.second > 0);
+}
+
+/// \brief Get the lg ceiling of a scaled number.
+///
+/// Get the ceiling of the lg of \c Digits*2^Scale.
+///
+/// Returns \c INT32_MIN when \c Digits is zero.
+template <class DigitsT> int32_t getLgCeiling(DigitsT Digits, int16_t Scale) {
+ auto Lg = getLgImpl(Digits, Scale);
+ return Lg.first + (Lg.second < 0);
+}
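+
+// For example (a sketch), for the value 3*2^0:
+//
+//   getLg<uint32_t>(3, 0);        // => 2 (rounded up)
+//   getLgFloor<uint32_t>(3, 0);   // => 1
+//   getLgCeiling<uint32_t>(3, 0); // => 2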
+
+/// \brief Implementation for comparing scaled numbers.
+///
+/// Compare two 64-bit numbers with different scales. Given that the scale of
+/// \c L is higher than that of \c R by \c ScaleDiff, compare them. Return -1,
+/// 1, and 0 for less than, greater than, and equal, respectively.
+///
+/// \pre 0 <= ScaleDiff < 64.
+int compareImpl(uint64_t L, uint64_t R, int ScaleDiff);
+
+/// \brief Compare two scaled numbers.
+///
+/// Compare two scaled numbers. Returns 0 for equal, -1 for less than, and 1
+/// for greater than.
+template <class DigitsT>
+int compare(DigitsT LDigits, int16_t LScale, DigitsT RDigits, int16_t RScale) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ // Check for zero.
+ if (!LDigits)
+ return RDigits ? -1 : 0;
+ if (!RDigits)
+ return 1;
+
+ // Check for the scale. Use getLgFloor to be sure that the scale difference
+ // is always lower than 64.
+ int32_t lgL = getLgFloor(LDigits, LScale), lgR = getLgFloor(RDigits, RScale);
+ if (lgL != lgR)
+ return lgL < lgR ? -1 : 1;
+
+ // Compare digits.
+ if (LScale < RScale)
+ return compareImpl(LDigits, RDigits, RScale - LScale);
+
+ return -compareImpl(RDigits, LDigits, LScale - RScale);
+}
+
+/// \brief Match scales of two numbers.
+///
+/// Given two scaled numbers, match up their scales. Change the digits and
+/// scales in place. Shift the digits as necessary to form equivalent numbers,
+/// losing precision only when necessary.
+///
+/// If the output value of \c LDigits (\c RDigits) is \c 0, the output value of
+/// \c LScale (\c RScale) is unspecified.
+///
+/// As a convenience, returns the matching scale. If the output value of one
+/// number is zero, returns the scale of the other. If both are zero, which
+/// scale is returned is unspecified.
+template <class DigitsT>
+int16_t matchScales(DigitsT &LDigits, int16_t &LScale, DigitsT &RDigits,
+ int16_t &RScale) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ if (LScale < RScale)
+ // Swap arguments.
+ return matchScales(RDigits, RScale, LDigits, LScale);
+ if (!LDigits)
+ return RScale;
+ if (!RDigits || LScale == RScale)
+ return LScale;
+
+ // Now LScale > RScale. Get the difference.
+ int32_t ScaleDiff = int32_t(LScale) - RScale;
+ if (ScaleDiff >= 2 * getWidth<DigitsT>()) {
+ // Don't bother shifting. RDigits will get zero-ed out anyway.
+ RDigits = 0;
+ return LScale;
+ }
+
+ // Shift LDigits left as much as possible, then shift RDigits right.
+ int32_t ShiftL = std::min<int32_t>(countLeadingZeros(LDigits), ScaleDiff);
+ assert(ShiftL < getWidth<DigitsT>() && "can't shift more than width");
+
+ int32_t ShiftR = ScaleDiff - ShiftL;
+ if (ShiftR >= getWidth<DigitsT>()) {
+ // Don't bother shifting. RDigits will get zero-ed out anyway.
+ RDigits = 0;
+ return LScale;
+ }
+
+ LDigits <<= ShiftL;
+ RDigits >>= ShiftR;
+
+ LScale -= ShiftL;
+ RScale += ShiftR;
+ assert(LScale == RScale && "scales should match");
+ return LScale;
+}
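+
+// For example (a sketch): matching (1, 6) against (8, 0) shifts the first
+// number's digits left so both end up at scale 0:
+//
+//   uint32_t L = 1, R = 8;
+//   int16_t LS = 6, RS = 0;
+//   matchScales(L, LS, R, RS); // => L == 64, R == 8, both scales == 0.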
+
+/// \brief Get the sum of two scaled numbers.
+///
+/// Get the sum of two scaled numbers with as much precision as possible.
+///
+/// \pre Adding 1 to \c LScale (or \c RScale) will not overflow INT16_MAX.
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getSum(DigitsT LDigits, int16_t LScale,
+ DigitsT RDigits, int16_t RScale) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ // Check inputs up front. This is only relevant if addition overflows, but
+ // testing here should catch more bugs.
+ assert(LScale < INT16_MAX && "scale too large");
+ assert(RScale < INT16_MAX && "scale too large");
+
+ // Normalize digits to match scales.
+ int16_t Scale = matchScales(LDigits, LScale, RDigits, RScale);
+
+ // Compute sum.
+ DigitsT Sum = LDigits + RDigits;
+ if (Sum >= RDigits)
+ return std::make_pair(Sum, Scale);
+
+ // Adjust sum after arithmetic overflow.
+ DigitsT HighBit = DigitsT(1) << (getWidth<DigitsT>() - 1);
+ return std::make_pair(HighBit | Sum >> 1, Scale + 1);
+}
+
+/// \brief Convenience helper for 32-bit sum.
+inline std::pair<uint32_t, int16_t> getSum32(uint32_t LDigits, int16_t LScale,
+ uint32_t RDigits, int16_t RScale) {
+ return getSum(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Convenience helper for 64-bit sum.
+inline std::pair<uint64_t, int16_t> getSum64(uint64_t LDigits, int16_t LScale,
+ uint64_t RDigits, int16_t RScale) {
+ return getSum(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Get the difference of two scaled numbers.
+///
+/// Get LHS minus RHS with as much precision as possible.
+///
+/// Returns \c (0, 0) if the RHS is larger than the LHS.
+template <class DigitsT>
+std::pair<DigitsT, int16_t> getDifference(DigitsT LDigits, int16_t LScale,
+ DigitsT RDigits, int16_t RScale) {
+ static_assert(!std::numeric_limits<DigitsT>::is_signed, "expected unsigned");
+
+ // Normalize digits to match scales.
+ const DigitsT SavedRDigits = RDigits;
+ const int16_t SavedRScale = RScale;
+ matchScales(LDigits, LScale, RDigits, RScale);
+
+ // Compute difference.
+ if (LDigits <= RDigits)
+ return std::make_pair(0, 0);
+ if (RDigits || !SavedRDigits)
+ return std::make_pair(LDigits - RDigits, LScale);
+
+ // Check if RDigits just barely lost its last bit. E.g., for 32-bit:
+ //
+ // 1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
+ const auto RLgFloor = getLgFloor(SavedRDigits, SavedRScale);
+ if (!compare(LDigits, LScale, DigitsT(1), RLgFloor + getWidth<DigitsT>()))
+ return std::make_pair(std::numeric_limits<DigitsT>::max(), RLgFloor);
+
+ return std::make_pair(LDigits, LScale);
+}
+
+/// \brief Convenience helper for 32-bit difference.
+inline std::pair<uint32_t, int16_t> getDifference32(uint32_t LDigits,
+ int16_t LScale,
+ uint32_t RDigits,
+ int16_t RScale) {
+ return getDifference(LDigits, LScale, RDigits, RScale);
+}
+
+/// \brief Convenience helper for 64-bit difference.
+inline std::pair<uint64_t, int16_t> getDifference64(uint64_t LDigits,
+ int16_t LScale,
+ uint64_t RDigits,
+ int16_t RScale) {
+ return getDifference(LDigits, LScale, RDigits, RScale);
+}
+
+} // end namespace ScaledNumbers
+} // end namespace llvm
+
+namespace llvm {
+
+class raw_ostream;
+class ScaledNumberBase {
+public:
+ static const int DefaultPrecision = 10;
+
+ static void dump(uint64_t D, int16_t E, int Width);
+ static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
+ unsigned Precision);
+ static std::string toString(uint64_t D, int16_t E, int Width,
+ unsigned Precision);
+ static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
+ static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
+ static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
+
+ static std::pair<uint64_t, bool> splitSigned(int64_t N) {
+ if (N >= 0)
+ return std::make_pair(N, false);
+ uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
+ return std::make_pair(Unsigned, true);
+ }
+ static int64_t joinSigned(uint64_t U, bool IsNeg) {
+ if (U > uint64_t(INT64_MAX))
+ return IsNeg ? INT64_MIN : INT64_MAX;
+ return IsNeg ? -int64_t(U) : int64_t(U);
+ }
+};
+
+/// \brief Simple representation of a scaled number.
+///
+/// ScaledNumber is a number represented by digits and a scale. It uses simple
+/// saturation arithmetic and every operation is well-defined for every value.
+/// It's somewhat similar in behaviour to a soft-float, but is *not* a
+/// replacement for one. If you're doing numerics, look at \a APFloat instead.
+/// Nevertheless, we've found these semantics useful for modelling certain cost
+/// metrics.
+///
+/// The number is split into a signed scale and unsigned digits. The number
+/// represented is \c getDigits()*2^getScale(). In this way, the digits are
+/// much like the mantissa in the x87 long double, but there is no canonical
+/// form so the same number can be represented by many bit representations.
+///
+/// ScaledNumber is templated on the underlying integer type for digits, which
+/// is expected to be unsigned.
+///
+/// Unlike APFloat, ScaledNumber does not model architecture floating point
+/// behaviour -- while this might make it a little faster and easier to reason
+/// about, it certainly makes it more dangerous for general numerics.
+///
+/// ScaledNumber is totally ordered. However, there is no canonical form, so
+/// there are multiple representations of most scalars. E.g.:
+///
+/// ScaledNumber(8u, 0) == ScaledNumber(4u, 1)
+/// ScaledNumber(4u, 1) == ScaledNumber(2u, 2)
+/// ScaledNumber(2u, 2) == ScaledNumber(1u, 3)
+///
+/// ScaledNumber implements most arithmetic operations. Precision is kept
+/// where possible. Uses simple saturation arithmetic, so that operations
+/// saturate to 0.0 or getLargest() rather than under or overflowing. It has
+/// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
+/// Any other division by 0.0 is defined to be getLargest().
+///
+/// As a convenience for modifying the exponent, left and right shifting are
+/// both implemented, and both interpret negative shifts as positive shifts in
+/// the opposite direction.
+///
+/// Scales are limited to the range accepted by x87 long double. This makes
+/// it trivial to add functionality to convert to APFloat (this is already
+/// relied on for the implementation of printing).
+///
+/// Possible (and conflicting) future directions:
+///
+/// 1. Turn this into a wrapper around \a APFloat.
+/// 2. Share the algorithm implementations with \a APFloat.
+/// 3. Allow \a ScaledNumber to represent a signed number.
+template <class DigitsT> class ScaledNumber : ScaledNumberBase {
+public:
+ static_assert(!std::numeric_limits<DigitsT>::is_signed,
+ "only unsigned floats supported");
+
+ typedef DigitsT DigitsType;
+
+private:
+ typedef std::numeric_limits<DigitsType> DigitsLimits;
+
+ static const int Width = sizeof(DigitsType) * 8;
+ static_assert(Width <= 64, "invalid integer width for digits");
+
+private:
+ DigitsType Digits;
+ int16_t Scale;
+
+public:
+ ScaledNumber() : Digits(0), Scale(0) {}
+
+ ScaledNumber(DigitsType Digits, int16_t Scale)
+ : Digits(Digits), Scale(Scale) {}
+
+private:
+ ScaledNumber(const std::pair<DigitsT, int16_t> &X)
+ : Digits(X.first), Scale(X.second) {}
+
+public:
+ static ScaledNumber getZero() { return ScaledNumber(0, 0); }
+ static ScaledNumber getOne() { return ScaledNumber(1, 0); }
+ static ScaledNumber getLargest() {
+ return ScaledNumber(DigitsLimits::max(), ScaledNumbers::MaxScale);
+ }
+ static ScaledNumber get(uint64_t N) { return adjustToWidth(N, 0); }
+ static ScaledNumber getInverse(uint64_t N) {
+ return get(N).invert();
+ }
+ static ScaledNumber getFraction(DigitsType N, DigitsType D) {
+ return getQuotient(N, D);
+ }
+
+ int16_t getScale() const { return Scale; }
+ DigitsType getDigits() const { return Digits; }
+
+ /// \brief Convert to the given integer type.
+ ///
+ /// Convert to \c IntT using simple saturating arithmetic, truncating if
+ /// necessary.
+ template <class IntT> IntT toInt() const;
+
+ bool isZero() const { return !Digits; }
+ bool isLargest() const { return *this == getLargest(); }
+ bool isOne() const {
+ if (Scale > 0 || Scale <= -Width)
+ return false;
+ return Digits == DigitsType(1) << -Scale;
+ }
+
+ /// \brief The log base 2, rounded.
+ ///
+ /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
+ int32_t lg() const { return ScaledNumbers::getLg(Digits, Scale); }
+
+ /// \brief The log base 2, rounded towards INT32_MIN.
+ ///
+ /// Get the lg floor. lg 0 is defined to be INT32_MIN.
+ int32_t lgFloor() const { return ScaledNumbers::getLgFloor(Digits, Scale); }
+
+ /// \brief The log base 2, rounded towards INT32_MAX.
+ ///
+ /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
+ int32_t lgCeiling() const {
+ return ScaledNumbers::getLgCeiling(Digits, Scale);
+ }
+
+ bool operator==(const ScaledNumber &X) const { return compare(X) == 0; }
+ bool operator<(const ScaledNumber &X) const { return compare(X) < 0; }
+ bool operator!=(const ScaledNumber &X) const { return compare(X) != 0; }
+ bool operator>(const ScaledNumber &X) const { return compare(X) > 0; }
+ bool operator<=(const ScaledNumber &X) const { return compare(X) <= 0; }
+ bool operator>=(const ScaledNumber &X) const { return compare(X) >= 0; }
+
+ bool operator!() const { return isZero(); }
+
+ /// \brief Convert to a decimal representation in a string.
+ ///
+ /// Convert to a string. Uses scientific notation for very large/small
+ /// numbers. Scientific notation is used roughly for numbers outside of the
+ /// range 2^-64 through 2^64.
+ ///
+ /// \c Precision indicates the number of decimal digits of precision to use;
+ /// 0 requests the maximum available.
+ ///
+ /// As a special case to make debugging easier, if the number is small enough
+ /// to convert without scientific notation and has more than \c Precision
+ /// digits before the decimal place, it's printed accurately to the first
+ /// digit past zero. E.g., assuming 10 digits of precision:
+ ///
+ /// 98765432198.7654... => 98765432198.8
+ /// 8765432198.7654... => 8765432198.8
+ /// 765432198.7654... => 765432198.8
+ /// 65432198.7654... => 65432198.77
+ /// 5432198.7654... => 5432198.765
+ std::string toString(unsigned Precision = DefaultPrecision) {
+ return ScaledNumberBase::toString(Digits, Scale, Width, Precision);
+ }
+
+ /// \brief Print a decimal representation.
+ ///
+ /// Print a string. See toString for documentation.
+ raw_ostream &print(raw_ostream &OS,
+ unsigned Precision = DefaultPrecision) const {
+ return ScaledNumberBase::print(OS, Digits, Scale, Width, Precision);
+ }
+ void dump() const { return ScaledNumberBase::dump(Digits, Scale, Width); }
+
+ ScaledNumber &operator+=(const ScaledNumber &X) {
+ std::tie(Digits, Scale) =
+ ScaledNumbers::getSum(Digits, Scale, X.Digits, X.Scale);
+ // Check for exponent past MaxScale.
+ if (Scale > ScaledNumbers::MaxScale)
+ *this = getLargest();
+ return *this;
+ }
+ ScaledNumber &operator-=(const ScaledNumber &X) {
+ std::tie(Digits, Scale) =
+ ScaledNumbers::getDifference(Digits, Scale, X.Digits, X.Scale);
+ return *this;
+ }
+ ScaledNumber &operator*=(const ScaledNumber &X);
+ ScaledNumber &operator/=(const ScaledNumber &X);
+ ScaledNumber &operator<<=(int16_t Shift) {
+ shiftLeft(Shift);
+ return *this;
+ }
+ ScaledNumber &operator>>=(int16_t Shift) {
+ shiftRight(Shift);
+ return *this;
+ }
+
+private:
+ void shiftLeft(int32_t Shift);
+ void shiftRight(int32_t Shift);
+
+ /// \brief Adjust two floats to have matching exponents.
+ ///
+ /// Adjust \c this and \c X to have matching exponents. Returns the new \c X
+ /// by value. Does nothing if \a isZero() for either.
+ ///
+ /// The value that compares smaller will lose precision, and possibly become
+ /// \a isZero().
+ ScaledNumber matchScales(ScaledNumber X) {
+ ScaledNumbers::matchScales(Digits, Scale, X.Digits, X.Scale);
+ return X;
+ }
+
+public:
+ /// \brief Scale a large number accurately.
+ ///
+ /// Scale N (multiply it by this). Uses full precision multiplication, even
+ /// if Width is smaller than 64, so information is not lost.
+ uint64_t scale(uint64_t N) const;
+ uint64_t scaleByInverse(uint64_t N) const {
+ // TODO: implement directly, rather than relying on inverse. Inverse is
+ // expensive.
+ return inverse().scale(N);
+ }
+ int64_t scale(int64_t N) const {
+ std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+ return joinSigned(scale(Unsigned.first), Unsigned.second);
+ }
+ int64_t scaleByInverse(int64_t N) const {
+ std::pair<uint64_t, bool> Unsigned = splitSigned(N);
+ return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
+ }
+
+ int compare(const ScaledNumber &X) const {
+ return ScaledNumbers::compare(Digits, Scale, X.Digits, X.Scale);
+ }
+ int compareTo(uint64_t N) const {
+ return ScaledNumbers::compare<uint64_t>(Digits, Scale, N, 0);
+ }
+ int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }
+
+ ScaledNumber &invert() { return *this = ScaledNumber::get(1) / *this; }
+ ScaledNumber inverse() const { return ScaledNumber(*this).invert(); }
+
+private:
+ static ScaledNumber getProduct(DigitsType LHS, DigitsType RHS) {
+ return ScaledNumbers::getProduct(LHS, RHS);
+ }
+ static ScaledNumber getQuotient(DigitsType Dividend, DigitsType Divisor) {
+ return ScaledNumbers::getQuotient(Dividend, Divisor);
+ }
+
+ static int countLeadingZerosWidth(DigitsType Digits) {
+ if (Width == 64)
+ return countLeadingZeros64(Digits);
+ if (Width == 32)
+ return countLeadingZeros32(Digits);
+ return countLeadingZeros32(Digits) + Width - 32;
+ }
+
+ /// \brief Adjust a number to width, rounding up if necessary.
+ ///
+ /// Should only be called for \c Shift close to zero.
+ ///
+ /// \pre Shift >= MinScale && Shift + 64 <= MaxScale.
+ static ScaledNumber adjustToWidth(uint64_t N, int32_t Shift) {
+ assert(Shift >= ScaledNumbers::MinScale && "Shift should be close to 0");
+ assert(Shift <= ScaledNumbers::MaxScale - 64 &&
+ "Shift should be close to 0");
+ auto Adjusted = ScaledNumbers::getAdjusted<DigitsT>(N, Shift);
+ return Adjusted;
+ }
+
+ static ScaledNumber getRounded(ScaledNumber P, bool Round) {
+ // Saturate.
+ if (P.isLargest())
+ return P;
+
+ return ScaledNumbers::getRounded(P.Digits, P.Scale, Round);
+ }
+};
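+
+// Example (a minimal sketch of the semantics documented above): there is no
+// canonical form, and arithmetic saturates instead of overflowing:
+//
+//   ScaledNumber<uint32_t> Eight(8, 0), AlsoEight(1, 3);
+//   assert(Eight == AlsoEight);                  // 8 * 2^0 == 1 * 2^3.
+//   auto Max = ScaledNumber<uint32_t>::getLargest();
+//   assert(Max + Eight == Max);                  // Saturates at getLargest().
+//   assert(Eight / ScaledNumber<uint32_t>::getZero() == Max); // x/0 == max.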
+
+#define SCALED_NUMBER_BOP(op, base) \
+ template <class DigitsT> \
+ ScaledNumber<DigitsT> operator op(const ScaledNumber<DigitsT> &L, \
+ const ScaledNumber<DigitsT> &R) { \
+ return ScaledNumber<DigitsT>(L) base R; \
+ }
+SCALED_NUMBER_BOP(+, += )
+SCALED_NUMBER_BOP(-, -= )
+SCALED_NUMBER_BOP(*, *= )
+SCALED_NUMBER_BOP(/, /= )
+#undef SCALED_NUMBER_BOP
+
+template <class DigitsT>
+ScaledNumber<DigitsT> operator<<(const ScaledNumber<DigitsT> &L,
+ int16_t Shift) {
+ return ScaledNumber<DigitsT>(L) <<= Shift;
+}
+
+template <class DigitsT>
+ScaledNumber<DigitsT> operator>>(const ScaledNumber<DigitsT> &L,
+ int16_t Shift) {
+ return ScaledNumber<DigitsT>(L) >>= Shift;
+}
+
+template <class DigitsT>
+raw_ostream &operator<<(raw_ostream &OS, const ScaledNumber<DigitsT> &X) {
+ return X.print(OS, 10);
+}
+
+#define SCALED_NUMBER_COMPARE_TO_TYPE(op, T1, T2) \
+ template <class DigitsT> \
+ bool operator op(const ScaledNumber<DigitsT> &L, T1 R) { \
+ return L.compareTo(T2(R)) op 0; \
+ } \
+ template <class DigitsT> \
+ bool operator op(T1 L, const ScaledNumber<DigitsT> &R) { \
+ return 0 op R.compareTo(T2(L)); \
+ }
+#define SCALED_NUMBER_COMPARE_TO(op) \
+ SCALED_NUMBER_COMPARE_TO_TYPE(op, uint64_t, uint64_t) \
+ SCALED_NUMBER_COMPARE_TO_TYPE(op, uint32_t, uint64_t) \
+ SCALED_NUMBER_COMPARE_TO_TYPE(op, int64_t, int64_t) \
+ SCALED_NUMBER_COMPARE_TO_TYPE(op, int32_t, int64_t)
+SCALED_NUMBER_COMPARE_TO(< )
+SCALED_NUMBER_COMPARE_TO(> )
+SCALED_NUMBER_COMPARE_TO(== )
+SCALED_NUMBER_COMPARE_TO(!= )
+SCALED_NUMBER_COMPARE_TO(<= )
+SCALED_NUMBER_COMPARE_TO(>= )
+#undef SCALED_NUMBER_COMPARE_TO
+#undef SCALED_NUMBER_COMPARE_TO_TYPE
+
+template <class DigitsT>
+uint64_t ScaledNumber<DigitsT>::scale(uint64_t N) const {
+ if (Width == 64 || N <= DigitsLimits::max())
+ return (get(N) * *this).template toInt<uint64_t>();
+
+ // Defer to the 64-bit version.
+ return ScaledNumber<uint64_t>(Digits, Scale).scale(N);
+}
+
+template <class DigitsT>
+template <class IntT>
+IntT ScaledNumber<DigitsT>::toInt() const {
+ typedef std::numeric_limits<IntT> Limits;
+ if (*this < 1)
+ return 0;
+ if (*this >= Limits::max())
+ return Limits::max();
+
+ IntT N = Digits;
+ if (Scale > 0) {
+ assert(size_t(Scale) < sizeof(IntT) * 8);
+ return N << Scale;
+ }
+ if (Scale < 0) {
+ assert(size_t(-Scale) < sizeof(IntT) * 8);
+ return N >> -Scale;
+ }
+ return N;
+}
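+
+// Example (illustrative): toInt() truncates fractional values and saturates
+// at the target type's maximum:
+//
+//   ScaledNumber<uint32_t> X(3, -1);                 // 3 * 2^-1 == 1.5.
+//   assert(X.toInt<uint32_t>() == 1);                // Truncated.
+//   auto Max = ScaledNumber<uint32_t>::getLargest();
+//   assert(Max.toInt<uint8_t>() == UINT8_MAX);       // Saturated.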
+
+template <class DigitsT>
+ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
+operator*=(const ScaledNumber &X) {
+ if (isZero())
+ return *this;
+ if (X.isZero())
+ return *this = X;
+
+ // Save the exponents.
+ int32_t Scales = int32_t(Scale) + int32_t(X.Scale);
+
+ // Get the raw product.
+ *this = getProduct(Digits, X.Digits);
+
+ // Combine with exponents.
+ return *this <<= Scales;
+}
+template <class DigitsT>
+ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
+operator/=(const ScaledNumber &X) {
+ if (isZero())
+ return *this;
+ if (X.isZero())
+ return *this = getLargest();
+
+ // Save the exponents.
+ int32_t Scales = int32_t(Scale) - int32_t(X.Scale);
+
+ // Get the raw quotient.
+ *this = getQuotient(Digits, X.Digits);
+
+ // Combine with exponents.
+ return *this <<= Scales;
+}
+template <class DigitsT> void ScaledNumber<DigitsT>::shiftLeft(int32_t Shift) {
+ if (!Shift || isZero())
+ return;
+ assert(Shift != INT32_MIN);
+ if (Shift < 0) {
+ shiftRight(-Shift);
+ return;
+ }
+
+ // Shift as much as we can in the exponent.
+ int32_t ScaleShift = std::min(Shift, ScaledNumbers::MaxScale - Scale);
+ Scale += ScaleShift;
+ if (ScaleShift == Shift)
+ return;
+
+ // Check this late, since it's rare.
+ if (isLargest())
+ return;
+
+ // Shift the digits themselves.
+ Shift -= ScaleShift;
+ if (Shift > countLeadingZerosWidth(Digits)) {
+ // Saturate.
+ *this = getLargest();
+ return;
+ }
+
+ Digits <<= Shift;
+ return;
+}
+
+template <class DigitsT> void ScaledNumber<DigitsT>::shiftRight(int32_t Shift) {
+ if (!Shift || isZero())
+ return;
+ assert(Shift != INT32_MIN);
+ if (Shift < 0) {
+ shiftLeft(-Shift);
+ return;
+ }
+
+ // Shift as much as we can in the exponent.
+ int32_t ScaleShift = std::min(Shift, Scale - ScaledNumbers::MinScale);
+ Scale -= ScaleShift;
+ if (ScaleShift == Shift)
+ return;
+
+ // Shift the digits themselves.
+ Shift -= ScaleShift;
+ if (Shift >= Width) {
+ // Saturate.
+ *this = getZero();
+ return;
+ }
+
+ Digits >>= Shift;
+ return;
+}
+
+template <typename T> struct isPodLike;
+template <typename T> struct isPodLike<ScaledNumber<T>> {
+ static const bool value = true;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Signals.h b/contrib/llvm/include/llvm/Support/Signals.h
new file mode 100644
index 0000000..2a4d84b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Signals.h
@@ -0,0 +1,71 @@
+//===- llvm/Support/Signals.h - Signal Handling support ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some helpful functions for dealing with the possibility of
+// unix signals occurring while your program is running.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SIGNALS_H
+#define LLVM_SUPPORT_SIGNALS_H
+
+#include <string>
+
+namespace llvm {
+class StringRef;
+class raw_ostream;
+
+namespace sys {
+
+ /// This function runs all the registered interrupt handlers, including the
+ /// removal of files registered by RemoveFileOnSignal.
+ void RunInterruptHandlers();
+
+  /// This function registers signal handlers to ensure that if a signal gets
+  /// delivered, the named file is removed.
+ /// @brief Remove a file if a fatal signal occurs.
+ bool RemoveFileOnSignal(StringRef Filename, std::string* ErrMsg = nullptr);
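+
+  // Example (a sketch; "output.tmp" is illustrative, and the function
+  // returns true when registration fails in the existing implementations):
+  //
+  //   std::string ErrMsg;
+  //   if (llvm::sys::RemoveFileOnSignal("output.tmp", &ErrMsg))
+  //     llvm::errs() << ErrMsg << "\n";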
+
+ /// This function removes a file from the list of files to be removed on
+ /// signal delivery.
+ void DontRemoveFileOnSignal(StringRef Filename);
+
+  /// When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
+ /// process, print a stack trace and then exit.
+ /// @brief Print a stack trace if a fatal signal occurs.
+ void PrintStackTraceOnErrorSignal(bool DisableCrashReporting = false);
+
+ /// Disable all system dialog boxes that appear when the process crashes.
+ void DisableSystemDialogsOnCrash();
+
+ /// \brief Print the stack trace using the given \c raw_ostream object.
+ void PrintStackTrace(raw_ostream &OS);
+
+  /// Run all registered signal handlers.
+ void RunSignalHandlers();
+
+ /// AddSignalHandler - Add a function to be called when an abort/kill signal
+ /// is delivered to the process. The handler can have a cookie passed to it
+ /// to identify what instance of the handler it is.
+ void AddSignalHandler(void (*FnPtr)(void *), void *Cookie);
+
+ /// This function registers a function to be called when the user "interrupts"
+ /// the program (typically by pressing ctrl-c). When the user interrupts the
+ /// program, the specified interrupt function is called instead of the program
+  /// being killed, and the interrupt function is automatically disabled. Note
+  /// that interrupt functions are not allowed to call any non-reentrant
+  /// functions. A null interrupt function pointer disables the currently
+ /// installed function. Note also that the handler may be executed on a
+ /// different thread on some platforms.
+ /// @brief Register a function to be called when ctrl-c is pressed.
+ void SetInterruptFunction(void (*IF)());
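+
+  // Example (illustrative; the handler must be async-signal-safe, so it only
+  // sets a flag):
+  //
+  //   static volatile std::sig_atomic_t Interrupted = 0; // from <csignal>
+  //   llvm::sys::SetInterruptFunction([] { Interrupted = 1; });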
+} // End sys namespace
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/Solaris.h b/contrib/llvm/include/llvm/Support/Solaris.h
new file mode 100644
index 0000000..b082285
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Solaris.h
@@ -0,0 +1,49 @@
+/*===- llvm/Support/Solaris.h ------------------------------------*- C++ -*-===*
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===----------------------------------------------------------------------===*
+ *
+ * This file contains portability fixes for Solaris hosts.
+ *
+ *===----------------------------------------------------------------------===*/
+
+#ifndef LLVM_SUPPORT_SOLARIS_H
+#define LLVM_SUPPORT_SOLARIS_H
+
+#include <sys/types.h>
+#include <sys/regset.h>
+
+/* Solaris doesn't have endian.h. SPARC is the only supported big-endian ISA. */
+#define BIG_ENDIAN 4321
+#define LITTLE_ENDIAN 1234
+#if defined(__sparc) || defined(__sparc__)
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+
+#undef CS
+#undef DS
+#undef ES
+#undef FS
+#undef GS
+#undef SS
+#undef EAX
+#undef ECX
+#undef EDX
+#undef EBX
+#undef ESP
+#undef EBP
+#undef ESI
+#undef EDI
+#undef EIP
+#undef UESP
+#undef EFL
+#undef ERR
+#undef TRAPNO
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/SourceMgr.h b/contrib/llvm/include/llvm/Support/SourceMgr.h
new file mode 100644
index 0000000..1f8b1a018
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SourceMgr.h
@@ -0,0 +1,285 @@
+//===- SourceMgr.h - Manager for Source Buffers & Diagnostics ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SMDiagnostic and SourceMgr classes. This
+// provides a simple substrate for diagnostics, #include handling, and other low
+// level things for simple parsers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SOURCEMGR_H
+#define LLVM_SUPPORT_SOURCEMGR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SMLoc.h"
+#include <string>
+
+namespace llvm {
+ class SourceMgr;
+ class SMDiagnostic;
+ class SMFixIt;
+ class Twine;
+ class raw_ostream;
+
+/// This owns the files read by a parser, handles include stacks,
+/// and handles diagnostic wrangling.
+class SourceMgr {
+public:
+ enum DiagKind {
+ DK_Error,
+ DK_Warning,
+ DK_Note
+ };
+
+ /// Clients that want to handle their own diagnostics in a custom way can
+ /// register a function pointer+context as a diagnostic handler.
+ /// It gets called each time PrintMessage is invoked.
+ typedef void (*DiagHandlerTy)(const SMDiagnostic &, void *Context);
+private:
+ struct SrcBuffer {
+ /// The memory buffer for the file.
+ std::unique_ptr<MemoryBuffer> Buffer;
+
+ /// This is the location of the parent include, or null if at the top level.
+ SMLoc IncludeLoc;
+
+ SrcBuffer() {}
+
+ SrcBuffer(SrcBuffer &&O)
+ : Buffer(std::move(O.Buffer)), IncludeLoc(O.IncludeLoc) {}
+ };
+
+ /// This is all of the buffers that we are reading from.
+ std::vector<SrcBuffer> Buffers;
+
+ // This is the list of directories we should search for include files in.
+ std::vector<std::string> IncludeDirectories;
+
+  /// This is a cache for line number queries; its implementation is
+  /// private to SourceMgr.cpp.
+ mutable void *LineNoCache;
+
+ DiagHandlerTy DiagHandler;
+ void *DiagContext;
+
+ bool isValidBufferID(unsigned i) const { return i && i <= Buffers.size(); }
+
+ SourceMgr(const SourceMgr&) = delete;
+ void operator=(const SourceMgr&) = delete;
+public:
+ SourceMgr()
+ : LineNoCache(nullptr), DiagHandler(nullptr), DiagContext(nullptr) {}
+ ~SourceMgr();
+
+ void setIncludeDirs(const std::vector<std::string> &Dirs) {
+ IncludeDirectories = Dirs;
+ }
+
+ /// Specify a diagnostic handler to be invoked every time PrintMessage is
+ /// called. \p Ctx is passed into the handler when it is invoked.
+ void setDiagHandler(DiagHandlerTy DH, void *Ctx = nullptr) {
+ DiagHandler = DH;
+ DiagContext = Ctx;
+ }
+
+ DiagHandlerTy getDiagHandler() const { return DiagHandler; }
+ void *getDiagContext() const { return DiagContext; }
+
+ const SrcBuffer &getBufferInfo(unsigned i) const {
+ assert(isValidBufferID(i));
+ return Buffers[i - 1];
+ }
+
+ const MemoryBuffer *getMemoryBuffer(unsigned i) const {
+ assert(isValidBufferID(i));
+ return Buffers[i - 1].Buffer.get();
+ }
+
+ unsigned getNumBuffers() const {
+ return Buffers.size();
+ }
+
+ unsigned getMainFileID() const {
+ assert(getNumBuffers());
+ return 1;
+ }
+
+ SMLoc getParentIncludeLoc(unsigned i) const {
+ assert(isValidBufferID(i));
+ return Buffers[i - 1].IncludeLoc;
+ }
+
+ /// Add a new source buffer to this source manager. This takes ownership of
+ /// the memory buffer.
+ unsigned AddNewSourceBuffer(std::unique_ptr<MemoryBuffer> F,
+ SMLoc IncludeLoc) {
+ SrcBuffer NB;
+ NB.Buffer = std::move(F);
+ NB.IncludeLoc = IncludeLoc;
+ Buffers.push_back(std::move(NB));
+ return Buffers.size();
+ }
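+
+  // Example (a minimal sketch; the buffer contents and "test.src" name are
+  // illustrative):
+  //
+  //   SourceMgr SM;
+  //   auto Buf = MemoryBuffer::getMemBuffer("let x = ;", "test.src");
+  //   unsigned ID = SM.AddNewSourceBuffer(std::move(Buf), SMLoc());
+  //   SMLoc Loc = SMLoc::getFromPointer(
+  //       SM.getMemoryBuffer(ID)->getBufferStart() + 8);
+  //   SM.PrintMessage(Loc, SourceMgr::DK_Error, "expected expression");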
+
+ /// Search for a file with the specified name in the current directory or in
+ /// one of the IncludeDirs.
+ ///
+ /// If no file is found, this returns 0, otherwise it returns the buffer ID
+ /// of the stacked file. The full path to the included file can be found in
+ /// \p IncludedFile.
+ unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
+ std::string &IncludedFile);
+
+ /// Return the ID of the buffer containing the specified location.
+ ///
+ /// 0 is returned if the buffer is not found.
+ unsigned FindBufferContainingLoc(SMLoc Loc) const;
+
+ /// Find the line number for the specified location in the specified file.
+ /// This is not a fast method.
+ unsigned FindLineNumber(SMLoc Loc, unsigned BufferID = 0) const {
+ return getLineAndColumn(Loc, BufferID).first;
+ }
+
+ /// Find the line and column number for the specified location in the
+ /// specified file. This is not a fast method.
+ std::pair<unsigned, unsigned> getLineAndColumn(SMLoc Loc,
+ unsigned BufferID = 0) const;
+
+ /// Emit a message about the specified location with the specified string.
+ ///
+ /// \param ShowColors Display colored messages if output is a terminal and
+ /// the default error handler is used.
+ void PrintMessage(raw_ostream &OS, SMLoc Loc, DiagKind Kind,
+ const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None,
+ ArrayRef<SMFixIt> FixIts = None,
+ bool ShowColors = true) const;
+
+ /// Emits a diagnostic to llvm::errs().
+ void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None,
+ ArrayRef<SMFixIt> FixIts = None,
+ bool ShowColors = true) const;
+
+ /// Emits a manually-constructed diagnostic to the given output stream.
+ ///
+ /// \param ShowColors Display colored messages if output is a terminal and
+ /// the default error handler is used.
+ void PrintMessage(raw_ostream &OS, const SMDiagnostic &Diagnostic,
+ bool ShowColors = true) const;
+
+ /// Return an SMDiagnostic at the specified location with the specified
+ /// string.
+ ///
+  /// \param Kind The kind of message (e.g., "error"), which determines the
+  /// label prefixed to the message.
+ SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
+ ArrayRef<SMRange> Ranges = None,
+ ArrayRef<SMFixIt> FixIts = None) const;
+
+ /// Prints the names of included files and the line of the file they were
+ /// included from. A diagnostic handler can use this before printing its
+ /// custom formatted message.
+ ///
+ /// \param IncludeLoc The location of the include.
+ /// \param OS the raw_ostream to print on.
+ void PrintIncludeStack(SMLoc IncludeLoc, raw_ostream &OS) const;
+};
+
+
+/// Represents a single fixit, a replacement of one range of text with another.
+class SMFixIt {
+ SMRange Range;
+
+ std::string Text;
+
+public:
+ // FIXME: Twine.str() is not very efficient.
+ SMFixIt(SMLoc Loc, const Twine &Insertion)
+ : Range(Loc, Loc), Text(Insertion.str()) {
+ assert(Loc.isValid());
+ }
+
+ // FIXME: Twine.str() is not very efficient.
+ SMFixIt(SMRange R, const Twine &Replacement)
+ : Range(R), Text(Replacement.str()) {
+ assert(R.isValid());
+ }
+
+ StringRef getText() const { return Text; }
+ SMRange getRange() const { return Range; }
+
+ bool operator<(const SMFixIt &Other) const {
+ if (Range.Start.getPointer() != Other.Range.Start.getPointer())
+ return Range.Start.getPointer() < Other.Range.Start.getPointer();
+ if (Range.End.getPointer() != Other.Range.End.getPointer())
+ return Range.End.getPointer() < Other.Range.End.getPointer();
+ return Text < Other.Text;
+ }
+};
+
+
+/// Instances of this class encapsulate one diagnostic report, allowing
+/// printing to a raw_ostream as a caret diagnostic.
+class SMDiagnostic {
+ const SourceMgr *SM;
+ SMLoc Loc;
+ std::string Filename;
+ int LineNo, ColumnNo;
+ SourceMgr::DiagKind Kind;
+ std::string Message, LineContents;
+ std::vector<std::pair<unsigned, unsigned> > Ranges;
+ SmallVector<SMFixIt, 4> FixIts;
+
+public:
+ // Null diagnostic.
+ SMDiagnostic()
+ : SM(nullptr), LineNo(0), ColumnNo(0), Kind(SourceMgr::DK_Error) {}
+ // Diagnostic with no location (e.g. file not found, command line arg error).
+ SMDiagnostic(StringRef filename, SourceMgr::DiagKind Knd, StringRef Msg)
+ : SM(nullptr), Filename(filename), LineNo(-1), ColumnNo(-1), Kind(Knd),
+ Message(Msg) {}
+
+ // Diagnostic with a location.
+ SMDiagnostic(const SourceMgr &sm, SMLoc L, StringRef FN,
+ int Line, int Col, SourceMgr::DiagKind Kind,
+ StringRef Msg, StringRef LineStr,
+ ArrayRef<std::pair<unsigned,unsigned> > Ranges,
+ ArrayRef<SMFixIt> FixIts = None);
+
+ const SourceMgr *getSourceMgr() const { return SM; }
+ SMLoc getLoc() const { return Loc; }
+ StringRef getFilename() const { return Filename; }
+ int getLineNo() const { return LineNo; }
+ int getColumnNo() const { return ColumnNo; }
+ SourceMgr::DiagKind getKind() const { return Kind; }
+ StringRef getMessage() const { return Message; }
+ StringRef getLineContents() const { return LineContents; }
+ ArrayRef<std::pair<unsigned, unsigned> > getRanges() const {
+ return Ranges;
+ }
+
+ void addFixIt(const SMFixIt &Hint) {
+ FixIts.push_back(Hint);
+ }
+
+ ArrayRef<SMFixIt> getFixIts() const {
+ return FixIts;
+ }
+
+ void print(const char *ProgName, raw_ostream &S, bool ShowColors = true,
+ bool ShowKindLabel = true) const;
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/SpecialCaseList.h b/contrib/llvm/include/llvm/Support/SpecialCaseList.h
new file mode 100644
index 0000000..ce693c5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SpecialCaseList.h
@@ -0,0 +1,104 @@
+//===-- SpecialCaseList.h - special case list for sanitizers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class used to parse user-provided text files with
+// "special case lists" for code sanitizers. Such files are used to
+// define "ABI list" for DataFlowSanitizer and blacklists for another sanitizers
+// like AddressSanitizer or UndefinedBehaviorSanitizer.
+//
+// Empty lines and lines starting with "#" are ignored. All other lines
+// must have the form:
+// section:wildcard_expression[=category]
+// If category is not specified, it is assumed to be the empty string.
+// Definitions of "section" and "category" are sanitizer-specific. For example,
+// sanitizer blacklists support sections "src", "fun" and "global".
+// Wildcard expressions define, respectively, source files, functions or
+// globals which shouldn't be instrumented.
+// Examples of categories:
+// "functional": used in DFSan to list functions with pure functional
+// semantics.
+// "init": used in ASan blacklist to disable initialization-order bugs
+// detection for certain globals or source files.
+// Full special case list file example:
+// ---
+// # Blacklisted items:
+// fun:*_ZN4base6subtle*
+// global:*global_with_bad_access_or_initialization*
+// global:*global_with_initialization_issues*=init
+// type:*Namespace::ClassName*=init
+// src:file_with_tricky_code.cc
+// src:ignore-global-initializers-issues.cc=init
+//
+// # Functions with pure functional semantics:
+// fun:cos=functional
+// fun:sin=functional
+// ---
+// Note that the wild card is in fact an llvm::Regex, but * is automatically
+// replaced with .*
+// This is similar to the "ignore" feature of ThreadSanitizer.
+// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SPECIALCASELIST_H
+#define LLVM_SUPPORT_SPECIALCASELIST_H
+
+#include "llvm/ADT/StringMap.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class MemoryBuffer;
+class Regex;
+class StringRef;
+
+class SpecialCaseList {
+public:
+  /// Parses the special case list entries from files. On failure, returns
+  /// nullptr and writes an error message to \p Error.
+ static std::unique_ptr<SpecialCaseList>
+ create(const std::vector<std::string> &Paths, std::string &Error);
+  /// Parses the special case list from a memory buffer. On failure, returns
+  /// nullptr and writes an error message to \p Error.
+ static std::unique_ptr<SpecialCaseList> create(const MemoryBuffer *MB,
+ std::string &Error);
+ /// Parses the special case list entries from files. On failure, reports a
+ /// fatal error.
+ static std::unique_ptr<SpecialCaseList>
+ createOrDie(const std::vector<std::string> &Paths);
+
+ ~SpecialCaseList();
+
+ /// Returns true, if special case list contains a line
+ /// \code
+ /// @Section:<E>=@Category
+ /// \endcode
+ /// and @Query satisfies a wildcard expression <E>.
+ bool inSection(StringRef Section, StringRef Query,
+ StringRef Category = StringRef()) const;
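+
+  // Example (a sketch using the file syntax described above):
+  //
+  //   std::string Error;
+  //   auto SCL = SpecialCaseList::create(
+  //       MemoryBuffer::getMemBuffer("fun:sin=functional\n").get(), Error);
+  //   if (SCL && SCL->inSection("fun", "sin", "functional")) {
+  //     // "sin" is listed with category "functional".
+  //   }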
+
+private:
+ SpecialCaseList(SpecialCaseList const &) = delete;
+ SpecialCaseList &operator=(SpecialCaseList const &) = delete;
+
+ struct Entry;
+ StringMap<StringMap<Entry>> Entries;
+ StringMap<StringMap<std::string>> Regexps;
+ bool IsCompiled;
+
+ SpecialCaseList();
+ /// Parses just-constructed SpecialCaseList entries from a memory buffer.
+ bool parse(const MemoryBuffer *MB, std::string &Error);
+ /// compile() should be called once, after parsing all the memory buffers.
+ void compile();
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_SPECIALCASELIST_H
+
diff --git a/contrib/llvm/include/llvm/Support/StreamingMemoryObject.h b/contrib/llvm/include/llvm/Support/StreamingMemoryObject.h
new file mode 100644
index 0000000..a5980c2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/StreamingMemoryObject.h
@@ -0,0 +1,95 @@
+//===- StreamingMemoryObject.h - Streamable data interface -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_STREAMINGMEMORYOBJECT_H
+#define LLVM_SUPPORT_STREAMINGMEMORYOBJECT_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataStream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryObject.h"
+#include <memory>
+#include <vector>
+
+namespace llvm {
+
+/// Interface to data which is actually streamed from a DataStreamer. In
+/// addition to inherited members, it has the dropLeadingBytes and
+/// setKnownObjectSize methods which are not applicable to non-streamed objects.
+class StreamingMemoryObject : public MemoryObject {
+public:
+ StreamingMemoryObject(std::unique_ptr<DataStreamer> Streamer);
+ uint64_t getExtent() const override;
+ uint64_t readBytes(uint8_t *Buf, uint64_t Size,
+ uint64_t Address) const override;
+ const uint8_t *getPointer(uint64_t address, uint64_t size) const override {
+ // FIXME: This could be fixed by ensuring the bytes are fetched and
+ // making a copy, requiring that the bitcode size be known, or
+ // otherwise ensuring that the memory doesn't go away/get reallocated,
+ // but it's not currently necessary. Users that need the pointer (any
+ // that need Blobs) don't stream.
+ report_fatal_error("getPointer in streaming memory objects not allowed");
+ return nullptr;
+ }
+ bool isValidAddress(uint64_t address) const override;
+
+ /// Drop s bytes from the front of the stream, pushing the positions of the
+ /// remaining bytes down by s. This is used to skip past the bitcode header,
+ /// since we don't know a priori if it's present, and we can't put bytes
+ /// back into the stream once we've read them.
+ bool dropLeadingBytes(size_t s);
+
+ /// If the data object size is known in advance, many of the operations can
+ /// be made more efficient, so this method should be called before reading
+ /// starts (although it can be called anytime).
+ void setKnownObjectSize(size_t size);
+
+ /// The number of bytes read at a time from the data streamer.
+ static const uint32_t kChunkSize = 4096 * 4;
+
+private:
+ mutable std::vector<unsigned char> Bytes;
+ std::unique_ptr<DataStreamer> Streamer;
+ mutable size_t BytesRead; // Bytes read from stream
+  size_t BytesSkipped;      // Bytes skipped at start (e.g. wrapper/header)
+ mutable size_t ObjectSize; // 0 if unknown, set if wrapper seen or EOF reached
+ mutable bool EOFReached;
+
+ // Fetch enough bytes such that Pos can be read (i.e. BytesRead >
+ // Pos). Returns true if Pos can be read. Unlike most of the
+ // functions in BitcodeReader, returns true on success. Most of the
+ // requests will be small, but we fetch at kChunkSize bytes at a
+ // time to avoid making too many potentially expensive GetBytes
+ // calls.
+ bool fetchToPos(size_t Pos) const {
+ while (Pos >= BytesRead) {
+ if (EOFReached)
+ return false;
+ Bytes.resize(BytesRead + BytesSkipped + kChunkSize);
+ size_t bytes = Streamer->GetBytes(&Bytes[BytesRead + BytesSkipped],
+ kChunkSize);
+ BytesRead += bytes;
+ if (bytes == 0) { // reached EOF/ran out of bytes
+ if (ObjectSize == 0)
+ ObjectSize = BytesRead;
+ EOFReached = true;
+ }
+ }
+ return !ObjectSize || Pos < ObjectSize;
+ }
+
+ StreamingMemoryObject(const StreamingMemoryObject&) = delete;
+ void operator=(const StreamingMemoryObject&) = delete;
+};
+
+MemoryObject *getNonStreamedMemoryObject(
+ const unsigned char *Start, const unsigned char *End);
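+
+// Example (illustrative; Streamer and WrapperSize are placeholders supplied
+// by the caller):
+//
+//   StreamingMemoryObject Obj(std::move(Streamer));
+//   Obj.dropLeadingBytes(WrapperSize);     // Skip a known wrapper header.
+//   uint8_t Word[4];
+//   Obj.readBytes(Word, 4, /*Address=*/0); // Addresses exclude the header.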
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_STREAMINGMEMORYOBJECT_H
diff --git a/contrib/llvm/include/llvm/Support/StringPool.h b/contrib/llvm/include/llvm/Support/StringPool.h
new file mode 100644
index 0000000..2ec0c3b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/StringPool.h
@@ -0,0 +1,138 @@
+//===-- StringPool.h - Interned string pool ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares an interned string pool, which helps reduce the cost of
+// strings by using the same storage for identical strings.
+//
+// To intern a string:
+//
+// StringPool Pool;
+// PooledStringPtr Str = Pool.intern("wakka wakka");
+//
+// To use the value of an interned string, use operator bool and operator*:
+//
+// if (Str)
+//   cerr << "the string is " << *Str << "\n";
+//
+// Pooled strings are immutable, but you can change a PooledStringPtr to point
+// to another instance. So that interned strings can eventually be freed,
+// strings in the string pool are reference-counted (automatically).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_STRINGPOOL_H
+#define LLVM_SUPPORT_STRINGPOOL_H
+
+#include "llvm/ADT/StringMap.h"
+#include <cassert>
+
+namespace llvm {
+
+ class PooledStringPtr;
+
+ /// StringPool - An interned string pool. Use the intern method to add a
+ /// string. Strings are removed automatically as PooledStringPtrs are
+ /// destroyed.
+ class StringPool {
+ /// PooledString - This is the value of an entry in the pool's interning
+ /// table.
+ struct PooledString {
+ StringPool *Pool; ///< So the string can remove itself.
+ unsigned Refcount; ///< Number of referencing PooledStringPtrs.
+
+ public:
+ PooledString() : Pool(nullptr), Refcount(0) { }
+ };
+
+ friend class PooledStringPtr;
+
+ typedef StringMap<PooledString> table_t;
+ typedef StringMapEntry<PooledString> entry_t;
+ table_t InternTable;
+
+ public:
+ StringPool();
+ ~StringPool();
+
+ /// intern - Adds a string to the pool and returns a reference-counted
+ /// pointer to it. No additional memory is allocated if the string already
+ /// exists in the pool.
+ PooledStringPtr intern(StringRef Str);
+
+ /// empty - Checks whether the pool is empty. Returns true if so.
+ ///
+ inline bool empty() const { return InternTable.empty(); }
+ };
+
+ /// PooledStringPtr - A pointer to an interned string. Use operator bool to
+ /// test whether the pointer is valid, and operator * to get the string if so.
+ /// This is a lightweight value class with storage requirements equivalent to
+ /// a single pointer, but it does have reference-counting overhead when
+ /// copied.
+ class PooledStringPtr {
+ typedef StringPool::entry_t entry_t;
+ entry_t *S;
+
+ public:
+ PooledStringPtr() : S(nullptr) {}
+
+ explicit PooledStringPtr(entry_t *E) : S(E) {
+ if (S) ++S->getValue().Refcount;
+ }
+
+ PooledStringPtr(const PooledStringPtr &That) : S(That.S) {
+ if (S) ++S->getValue().Refcount;
+ }
+
+ PooledStringPtr &operator=(const PooledStringPtr &That) {
+ if (S != That.S) {
+ clear();
+ S = That.S;
+ if (S) ++S->getValue().Refcount;
+ }
+ return *this;
+ }
+
+ void clear() {
+ if (!S)
+ return;
+ if (--S->getValue().Refcount == 0) {
+ S->getValue().Pool->InternTable.remove(S);
+ S->Destroy();
+ }
+ S = nullptr;
+ }
+
+ ~PooledStringPtr() { clear(); }
+
+ inline const char *begin() const {
+ assert(*this && "Attempt to dereference empty PooledStringPtr!");
+ return S->getKeyData();
+ }
+
+ inline const char *end() const {
+ assert(*this && "Attempt to dereference empty PooledStringPtr!");
+ return S->getKeyData() + S->getKeyLength();
+ }
+
+ inline unsigned size() const {
+ assert(*this && "Attempt to dereference empty PooledStringPtr!");
+ return S->getKeyLength();
+ }
+
+ inline const char *operator*() const { return begin(); }
+ inline explicit operator bool() const { return S != nullptr; }
+
+ inline bool operator==(const PooledStringPtr &That) const { return S == That.S; }
+ inline bool operator!=(const PooledStringPtr &That) const { return S != That.S; }
+ };
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/StringSaver.h b/contrib/llvm/include/llvm/Support/StringSaver.h
new file mode 100644
index 0000000..38fb7bb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/StringSaver.h
@@ -0,0 +1,32 @@
+//===- llvm/Support/StringSaver.h -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_STRINGSAVER_H
+#define LLVM_SUPPORT_STRINGSAVER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Allocator.h"
+
+namespace llvm {
+
+/// \brief Saves strings in the inheritor's stable storage and returns a stable
+/// raw character pointer.
+class StringSaver final {
+ BumpPtrAllocator &Alloc;
+
+public:
+ StringSaver(BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
+ const char *save(const char *S) { return save(StringRef(S)); }
+ const char *save(StringRef S);
+ const char *save(const Twine &S) { return save(StringRef(S.str())); }
+ const char *save(std::string &S) { return save(StringRef(S)); }
+};
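+
+// Example (a minimal sketch): a saved string lives as long as the allocator:
+//
+//   BumpPtrAllocator Alloc;
+//   StringSaver Saver(Alloc);
+//   const char *P = Saver.save(Twine("arg") + "0");
+//   // P points to "arg0" and stays valid until Alloc is destroyed.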
+}
+#endif
diff --git a/contrib/llvm/include/llvm/Support/SwapByteOrder.h b/contrib/llvm/include/llvm/Support/SwapByteOrder.h
new file mode 100644
index 0000000..7761fa1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SwapByteOrder.h
@@ -0,0 +1,125 @@
+//===- SwapByteOrder.h - Generic and optimized byte swaps -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic and optimized functions to swap the byte order of
+// an integral type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SWAPBYTEORDER_H
+#define LLVM_SUPPORT_SWAPBYTEORDER_H
+
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DataTypes.h"
+#include <cstddef>
+#include <limits>
+
+namespace llvm {
+namespace sys {
+
+/// SwapByteOrder_16 - This function returns a byte-swapped representation of
+/// the 16-bit argument.
+inline uint16_t SwapByteOrder_16(uint16_t value) {
+#if defined(_MSC_VER) && !defined(_DEBUG)
+ // The DLL version of the runtime lacks these functions (bug!?), but in a
+ // release build they're replaced with BSWAP instructions anyway.
+ return _byteswap_ushort(value);
+#else
+ uint16_t Hi = value << 8;
+ uint16_t Lo = value >> 8;
+ return Hi | Lo;
+#endif
+}
+
+/// SwapByteOrder_32 - This function returns a byte-swapped representation of
+/// the 32-bit argument.
+inline uint32_t SwapByteOrder_32(uint32_t value) {
+#if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC))
+ return __builtin_bswap32(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_ulong(value);
+#else
+ uint32_t Byte0 = value & 0x000000FF;
+ uint32_t Byte1 = value & 0x0000FF00;
+ uint32_t Byte2 = value & 0x00FF0000;
+ uint32_t Byte3 = value & 0xFF000000;
+ return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
+#endif
+}
+
+/// SwapByteOrder_64 - This function returns a byte-swapped representation of
+/// the 64-bit argument.
+inline uint64_t SwapByteOrder_64(uint64_t value) {
+#if defined(__llvm__) || (LLVM_GNUC_PREREQ(4, 3, 0) && !defined(__ICC))
+ return __builtin_bswap64(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_uint64(value);
+#else
+ uint64_t Hi = SwapByteOrder_32(uint32_t(value));
+ uint32_t Lo = SwapByteOrder_32(uint32_t(value >> 32));
+ return (Hi << 32) | Lo;
+#endif
+}
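+
+// Example (illustrative): each function reverses the bytes of its argument:
+//
+//   assert(SwapByteOrder_16(0x1234) == 0x3412);
+//   assert(SwapByteOrder_32(0x01234567) == 0x67452301);
+//   assert(SwapByteOrder_64(0x0123456789ABCDEFULL) == 0xEFCDAB8967452301ULL);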
+
+inline unsigned char getSwappedBytes(unsigned char C) { return C; }
+inline signed char getSwappedBytes(signed char C) { return C; }
+inline char getSwappedBytes(char C) { return C; }
+
+inline unsigned short getSwappedBytes(unsigned short C) { return SwapByteOrder_16(C); }
+inline signed short getSwappedBytes( signed short C) { return SwapByteOrder_16(C); }
+
+inline unsigned int getSwappedBytes(unsigned int C) { return SwapByteOrder_32(C); }
+inline signed int getSwappedBytes( signed int C) { return SwapByteOrder_32(C); }
+
+#if __LONG_MAX__ == __INT_MAX__
+inline unsigned long getSwappedBytes(unsigned long C) { return SwapByteOrder_32(C); }
+inline signed long getSwappedBytes( signed long C) { return SwapByteOrder_32(C); }
+#elif __LONG_MAX__ == __LONG_LONG_MAX__
+inline unsigned long getSwappedBytes(unsigned long C) { return SwapByteOrder_64(C); }
+inline signed long getSwappedBytes( signed long C) { return SwapByteOrder_64(C); }
+#else
+#error "Unknown long size!"
+#endif
+
+inline unsigned long long getSwappedBytes(unsigned long long C) {
+ return SwapByteOrder_64(C);
+}
+inline signed long long getSwappedBytes(signed long long C) {
+ return SwapByteOrder_64(C);
+}
+
+inline float getSwappedBytes(float C) {
+ union {
+ uint32_t i;
+ float f;
+ } in, out;
+ in.f = C;
+ out.i = SwapByteOrder_32(in.i);
+ return out.f;
+}
+
+inline double getSwappedBytes(double C) {
+ union {
+ uint64_t i;
+ double d;
+ } in, out;
+ in.d = C;
+ out.i = SwapByteOrder_64(in.i);
+ return out.d;
+}
+
+template<typename T>
+inline void swapByteOrder(T &Value) {
+ Value = getSwappedBytes(Value);
+}
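+
+// Example (illustrative): swapping twice restores the original value, for
+// floating-point types as well via the unions above:
+//
+//   float F = 1.0f;
+//   sys::swapByteOrder(F); // Byte-reversed representation.
+//   sys::swapByteOrder(F);
+//   assert(F == 1.0f);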
+
+} // end namespace sys
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/SystemUtils.h b/contrib/llvm/include/llvm/Support/SystemUtils.h
new file mode 100644
index 0000000..2997b1b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/SystemUtils.h
@@ -0,0 +1,32 @@
+//===- SystemUtils.h - Utilities to do low-level system stuff ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains functions used to do a variety of low-level, often
+// system-specific, tasks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_SYSTEMUTILS_H
+#define LLVM_SUPPORT_SYSTEMUTILS_H
+
+namespace llvm {
+ class raw_ostream;
+
+/// Determine if the raw_ostream provided is connected to a terminal. If so,
+/// generate a warning message to errs() advising against display of bitcode
+/// and return true. Otherwise just return false.
+/// @brief Check for output written to a console
+bool CheckBitcodeOutputToConsole(
+ raw_ostream &stream_to_check, ///< The stream to be checked
+ bool print_warning = true ///< Control whether warnings are printed
+);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/TargetParser.h b/contrib/llvm/include/llvm/Support/TargetParser.h
new file mode 100644
index 0000000..c21019d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/TargetParser.h
@@ -0,0 +1,145 @@
+//===-- TargetParser - Parser for target features ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a target parser to recognise hardware features such as
+// FPU/CPU/ARCH names as well as specific support such as HDIV, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETPARSER_H
+#define LLVM_SUPPORT_TARGETPARSER_H
+
+// FIXME: vector is used because that's what clang uses for subtarget feature
+// lists, but SmallVector would probably be better
+#include <vector>
+
+namespace llvm {
+class StringRef;
+
+// Target-specific information is kept in its own namespace. These tables
+// should be generated from TableGen because the information is already there,
+// and that is where new information about targets will be added.
+// FIXME: To TableGen this we need to make some table generated files available
+// even if the back-end is not compiled with LLVM, plus we need to create a new
+// back-end to TableGen to create these clean tables.
+namespace ARM {
+
+// FPU names.
+enum FPUKind {
+#define ARM_FPU(NAME, KIND, VERSION, NEON_SUPPORT, RESTRICTION) KIND,
+#include "ARMTargetParser.def"
+ FK_LAST
+};
+
+// FPU Version
+enum FPUVersion {
+ FV_NONE = 0,
+ FV_VFPV2,
+ FV_VFPV3,
+ FV_VFPV3_FP16,
+ FV_VFPV4,
+ FV_VFPV5
+};
+
+// An FPU name implies one of three levels of Neon support:
+enum NeonSupportLevel {
+ NS_None = 0, ///< No Neon
+ NS_Neon, ///< Neon
+ NS_Crypto ///< Neon with Crypto
+};
+
+// An FPU name restricts the FPU in one of three ways:
+enum FPURestriction {
+ FR_None = 0, ///< No restriction
+ FR_D16, ///< Only 16 D registers
+ FR_SP_D16 ///< Only single-precision instructions, with 16 D registers
+};
+
+// Arch names.
+enum ArchKind {
+#define ARM_ARCH(NAME, ID, CPU_ATTR, SUB_ARCH, ARCH_ATTR, ARCH_FPU, ARCH_BASE_EXT) ID,
+#include "ARMTargetParser.def"
+ AK_LAST
+};
+
+// Arch extension modifiers for CPUs.
+enum ArchExtKind : unsigned {
+ AEK_INVALID = 0x0,
+ AEK_NONE = 0x1,
+ AEK_CRC = 0x2,
+ AEK_CRYPTO = 0x4,
+ AEK_FP = 0x8,
+ AEK_HWDIV = 0x10,
+ AEK_HWDIVARM = 0x20,
+ AEK_MP = 0x40,
+ AEK_SIMD = 0x80,
+ AEK_SEC = 0x100,
+ AEK_VIRT = 0x200,
+ AEK_DSP = 0x400,
+ AEK_FP16 = 0x800,
+ // Unsupported extensions.
+ AEK_OS = 0x8000000,
+ AEK_IWMMXT = 0x10000000,
+ AEK_IWMMXT2 = 0x20000000,
+ AEK_MAVERICK = 0x40000000,
+ AEK_XSCALE = 0x80000000,
+};
+
+// ISA kinds.
+enum ISAKind { IK_INVALID = 0, IK_ARM, IK_THUMB, IK_AARCH64 };
+
+// Endianness
+// FIXME: BE8 vs. BE32?
+enum EndianKind { EK_INVALID = 0, EK_LITTLE, EK_BIG };
+
+// v6/v7/v8 Profile
+enum ProfileKind { PK_INVALID = 0, PK_A, PK_R, PK_M };
+
+StringRef getCanonicalArchName(StringRef Arch);
+
+// Information by ID
+StringRef getFPUName(unsigned FPUKind);
+unsigned getFPUVersion(unsigned FPUKind);
+unsigned getFPUNeonSupportLevel(unsigned FPUKind);
+unsigned getFPURestriction(unsigned FPUKind);
+
+// FIXME: These should be moved to TargetTuple once it exists
+bool getFPUFeatures(unsigned FPUKind, std::vector<const char *> &Features);
+bool getHWDivFeatures(unsigned HWDivKind, std::vector<const char *> &Features);
+bool getExtensionFeatures(unsigned Extensions,
+ std::vector<const char*> &Features);
+
+StringRef getArchName(unsigned ArchKind);
+unsigned getArchAttr(unsigned ArchKind);
+StringRef getCPUAttr(unsigned ArchKind);
+StringRef getSubArch(unsigned ArchKind);
+StringRef getArchExtName(unsigned ArchExtKind);
+const char *getArchExtFeature(StringRef ArchExt);
+StringRef getHWDivName(unsigned HWDivKind);
+
+// Information by Name
+unsigned getDefaultFPU(StringRef CPU, unsigned ArchKind);
+unsigned getDefaultExtensions(StringRef CPU, unsigned ArchKind);
+StringRef getDefaultCPU(StringRef Arch);
+
+// Parser
+unsigned parseHWDiv(StringRef HWDiv);
+unsigned parseFPU(StringRef FPU);
+unsigned parseArch(StringRef Arch);
+unsigned parseArchExt(StringRef ArchExt);
+unsigned parseCPUArch(StringRef CPU);
+unsigned parseArchISA(StringRef Arch);
+unsigned parseArchEndian(StringRef Arch);
+unsigned parseArchProfile(StringRef Arch);
+unsigned parseArchVersion(StringRef Arch);
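+
+// Example (a sketch; concrete kind values and names come from
+// ARMTargetParser.def, including the FK_INVALID sentinel assumed here):
+//
+//   unsigned FPU = ARM::parseFPU("neon-vfpv4");
+//   if (FPU != ARM::FK_INVALID) {
+//     StringRef Name = ARM::getFPUName(FPU);             // "neon-vfpv4"
+//     unsigned Level = ARM::getFPUNeonSupportLevel(FPU); // NS_Neon
+//   }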
+
+} // namespace ARM
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/TargetRegistry.h b/contrib/llvm/include/llvm/Support/TargetRegistry.h
new file mode 100644
index 0000000..aec181b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/TargetRegistry.h
@@ -0,0 +1,1188 @@
+//===-- Support/TargetRegistry.h - Target Registration ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes the TargetRegistry interface, which tools can use to access
+// the appropriate target specific classes (TargetMachine, AsmPrinter, etc.)
+// which have been registered.
+//
+// Target specific class implementations should register themselves using the
+// appropriate TargetRegistry interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETREGISTRY_H
+#define LLVM_SUPPORT_TARGETREGISTRY_H
+
+#include "llvm-c/Disassembler.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/FormattedStream.h"
+#include <cassert>
+#include <memory>
+#include <string>
+
+namespace llvm {
+class AsmPrinter;
+class MCAsmBackend;
+class MCAsmInfo;
+class MCAsmParser;
+class MCCodeEmitter;
+class MCCodeGenInfo;
+class MCContext;
+class MCDisassembler;
+class MCInstrAnalysis;
+class MCInstPrinter;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCStreamer;
+class MCSubtargetInfo;
+class MCSymbolizer;
+class MCRelocationInfo;
+class MCTargetAsmParser;
+class MCTargetOptions;
+class MCTargetStreamer;
+class TargetMachine;
+class TargetOptions;
+class raw_ostream;
+class raw_pwrite_stream;
+class formatted_raw_ostream;
+
+MCStreamer *createNullStreamer(MCContext &Ctx);
+MCStreamer *createAsmStreamer(MCContext &Ctx,
+ std::unique_ptr<formatted_raw_ostream> OS,
+ bool isVerboseAsm, bool useDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst);
+
+/// Takes ownership of \p TAB and \p CE.
+MCStreamer *createELFStreamer(MCContext &Ctx, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE,
+ bool RelaxAll);
+MCStreamer *createMachOStreamer(MCContext &Ctx, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE,
+ bool RelaxAll, bool DWARFMustBeAtTheEnd,
+ bool LabelSections = false);
+
+MCRelocationInfo *createMCRelocationInfo(const Triple &TT, MCContext &Ctx);
+
+MCSymbolizer *createMCSymbolizer(const Triple &TT, LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp,
+ void *DisInfo, MCContext *Ctx,
+ std::unique_ptr<MCRelocationInfo> &&RelInfo);
+
+/// Target - Wrapper for Target specific information.
+///
+/// For registration purposes, this is a POD type so that targets can be
+/// registered without the use of static constructors.
+///
+/// Targets should implement a single global instance of this class (which
+/// will be zero initialized), and pass that instance to the TargetRegistry as
+/// part of their initialization.
+class Target {
+public:
+ friend struct TargetRegistry;
+
+ typedef bool (*ArchMatchFnTy)(Triple::ArchType Arch);
+
+ typedef MCAsmInfo *(*MCAsmInfoCtorFnTy)(const MCRegisterInfo &MRI,
+ const Triple &TT);
+ typedef MCCodeGenInfo *(*MCCodeGenInfoCtorFnTy)(const Triple &TT,
+ Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+ typedef MCInstrInfo *(*MCInstrInfoCtorFnTy)(void);
+ typedef MCInstrAnalysis *(*MCInstrAnalysisCtorFnTy)(const MCInstrInfo *Info);
+ typedef MCRegisterInfo *(*MCRegInfoCtorFnTy)(const Triple &TT);
+ typedef MCSubtargetInfo *(*MCSubtargetInfoCtorFnTy)(const Triple &TT,
+ StringRef CPU,
+ StringRef Features);
+ typedef TargetMachine *(*TargetMachineCtorTy)(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef Features,
+ const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+ // If it weren't for layering issues (this header is in llvm/Support, but
+ // depends on MC?) this should take the Streamer by value rather than rvalue
+ // reference.
+ typedef AsmPrinter *(*AsmPrinterCtorTy)(
+ TargetMachine &TM, std::unique_ptr<MCStreamer> &&Streamer);
+ typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T,
+ const MCRegisterInfo &MRI,
+ const Triple &TT, StringRef CPU);
+ typedef MCTargetAsmParser *(*MCAsmParserCtorTy)(
+ const MCSubtargetInfo &STI, MCAsmParser &P, const MCInstrInfo &MII,
+ const MCTargetOptions &Options);
+ typedef MCDisassembler *(*MCDisassemblerCtorTy)(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx);
+ typedef MCInstPrinter *(*MCInstPrinterCtorTy)(const Triple &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI);
+ typedef MCCodeEmitter *(*MCCodeEmitterCtorTy)(const MCInstrInfo &II,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+ typedef MCStreamer *(*ELFStreamerCtorTy)(const Triple &T, MCContext &Ctx,
+ MCAsmBackend &TAB,
+ raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll);
+ typedef MCStreamer *(*MachOStreamerCtorTy)(MCContext &Ctx, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll,
+ bool DWARFMustBeAtTheEnd);
+ typedef MCStreamer *(*COFFStreamerCtorTy)(MCContext &Ctx, MCAsmBackend &TAB,
+ raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter,
+ bool RelaxAll,
+ bool IncrementalLinkerCompatible);
+ typedef MCTargetStreamer *(*NullTargetStreamerCtorTy)(MCStreamer &S);
+ typedef MCTargetStreamer *(*AsmTargetStreamerCtorTy)(
+ MCStreamer &S, formatted_raw_ostream &OS, MCInstPrinter *InstPrint,
+ bool IsVerboseAsm);
+ typedef MCTargetStreamer *(*ObjectTargetStreamerCtorTy)(
+ MCStreamer &S, const MCSubtargetInfo &STI);
+ typedef MCRelocationInfo *(*MCRelocationInfoCtorTy)(const Triple &TT,
+ MCContext &Ctx);
+ typedef MCSymbolizer *(*MCSymbolizerCtorTy)(
+ const Triple &TT, LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo, MCContext *Ctx,
+ std::unique_ptr<MCRelocationInfo> &&RelInfo);
+
+private:
+ /// Next - The next registered target in the linked list, maintained by the
+ /// TargetRegistry.
+ Target *Next;
+
+ /// The target function for checking if an architecture is supported.
+ ArchMatchFnTy ArchMatchFn;
+
+ /// Name - The target name.
+ const char *Name;
+
+ /// ShortDesc - A short description of the target.
+ const char *ShortDesc;
+
+ /// HasJIT - Whether this target supports the JIT.
+ bool HasJIT;
+
+ /// MCAsmInfoCtorFn - Constructor function for this target's MCAsmInfo, if
+ /// registered.
+ MCAsmInfoCtorFnTy MCAsmInfoCtorFn;
+
+ /// MCCodeGenInfoCtorFn - Constructor function for this target's
+ /// MCCodeGenInfo, if registered.
+ MCCodeGenInfoCtorFnTy MCCodeGenInfoCtorFn;
+
+ /// MCInstrInfoCtorFn - Constructor function for this target's MCInstrInfo,
+ /// if registered.
+ MCInstrInfoCtorFnTy MCInstrInfoCtorFn;
+
+ /// MCInstrAnalysisCtorFn - Constructor function for this target's
+ /// MCInstrAnalysis, if registered.
+ MCInstrAnalysisCtorFnTy MCInstrAnalysisCtorFn;
+
+ /// MCRegInfoCtorFn - Constructor function for this target's MCRegisterInfo,
+ /// if registered.
+ MCRegInfoCtorFnTy MCRegInfoCtorFn;
+
+ /// MCSubtargetInfoCtorFn - Constructor function for this target's
+ /// MCSubtargetInfo, if registered.
+ MCSubtargetInfoCtorFnTy MCSubtargetInfoCtorFn;
+
+ /// TargetMachineCtorFn - Construction function for this target's
+ /// TargetMachine, if registered.
+ TargetMachineCtorTy TargetMachineCtorFn;
+
+ /// MCAsmBackendCtorFn - Construction function for this target's
+ /// MCAsmBackend, if registered.
+ MCAsmBackendCtorTy MCAsmBackendCtorFn;
+
+ /// MCAsmParserCtorFn - Construction function for this target's
+ /// MCTargetAsmParser, if registered.
+ MCAsmParserCtorTy MCAsmParserCtorFn;
+
+ /// AsmPrinterCtorFn - Construction function for this target's AsmPrinter,
+ /// if registered.
+ AsmPrinterCtorTy AsmPrinterCtorFn;
+
+ /// MCDisassemblerCtorFn - Construction function for this target's
+ /// MCDisassembler, if registered.
+ MCDisassemblerCtorTy MCDisassemblerCtorFn;
+
+ /// MCInstPrinterCtorFn - Construction function for this target's
+ /// MCInstPrinter, if registered.
+ MCInstPrinterCtorTy MCInstPrinterCtorFn;
+
+ /// MCCodeEmitterCtorFn - Construction function for this target's
+ /// CodeEmitter, if registered.
+ MCCodeEmitterCtorTy MCCodeEmitterCtorFn;
+
+ // Construction functions for the various object formats, if registered.
+ COFFStreamerCtorTy COFFStreamerCtorFn;
+ MachOStreamerCtorTy MachOStreamerCtorFn;
+ ELFStreamerCtorTy ELFStreamerCtorFn;
+
+ /// Construction function for this target's null TargetStreamer, if
+ /// registered (default = nullptr).
+ NullTargetStreamerCtorTy NullTargetStreamerCtorFn;
+
+ /// Construction function for this target's asm TargetStreamer, if
+ /// registered (default = nullptr).
+ AsmTargetStreamerCtorTy AsmTargetStreamerCtorFn;
+
+ /// Construction function for this target's obj TargetStreamer, if
+ /// registered (default = nullptr).
+ ObjectTargetStreamerCtorTy ObjectTargetStreamerCtorFn;
+
+ /// MCRelocationInfoCtorFn - Construction function for this target's
+ /// MCRelocationInfo, if registered (default = llvm::createMCRelocationInfo)
+ MCRelocationInfoCtorTy MCRelocationInfoCtorFn;
+
+ /// MCSymbolizerCtorFn - Construction function for this target's
+ /// MCSymbolizer, if registered (default = llvm::createMCSymbolizer)
+ MCSymbolizerCtorTy MCSymbolizerCtorFn;
+
+public:
+ Target()
+ : COFFStreamerCtorFn(nullptr), MachOStreamerCtorFn(nullptr),
+ ELFStreamerCtorFn(nullptr), NullTargetStreamerCtorFn(nullptr),
+ AsmTargetStreamerCtorFn(nullptr), ObjectTargetStreamerCtorFn(nullptr),
+ MCRelocationInfoCtorFn(nullptr), MCSymbolizerCtorFn(nullptr) {}
+
+ /// @name Target Information
+ /// @{
+
+ /// getNext - Return the next registered target.
+ const Target *getNext() const { return Next; }
+
+ /// getName - Get the target name.
+ const char *getName() const { return Name; }
+
+ /// getShortDescription - Get a short description of the target.
+ const char *getShortDescription() const { return ShortDesc; }
+
+ /// @}
+ /// @name Feature Predicates
+ /// @{
+
+ /// hasJIT - Check if this target supports just-in-time compilation.
+ bool hasJIT() const { return HasJIT; }
+
+ /// hasTargetMachine - Check if this target supports code generation.
+ bool hasTargetMachine() const { return TargetMachineCtorFn != nullptr; }
+
+ /// hasMCAsmBackend - Check if this target supports .o generation.
+ bool hasMCAsmBackend() const { return MCAsmBackendCtorFn != nullptr; }
+
+ /// @}
+ /// @name Feature Constructors
+ /// @{
+
+ /// createMCAsmInfo - Create a MCAsmInfo implementation for the specified
+ /// target triple.
+ ///
+ /// \param TheTriple This argument is used to determine the target machine
+ /// feature set; it should always be provided. Generally this should be
+ /// either the target triple from the module, or the target triple of the
+ /// host if that does not exist.
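+ ///
+ /// For example (a minimal sketch; TheTarget and MRI are assumed to have
+ /// been obtained earlier, e.g. via lookupTarget and createMCRegInfo):
+ ///
+ ///   MCAsmInfo *MAI = TheTarget->createMCAsmInfo(*MRI, TripleName);
+ ///   if (!MAI) { /* no MCAsmInfo registered for this target */ }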
+ MCAsmInfo *createMCAsmInfo(const MCRegisterInfo &MRI,
+ StringRef TheTriple) const {
+ if (!MCAsmInfoCtorFn)
+ return nullptr;
+ return MCAsmInfoCtorFn(MRI, Triple(TheTriple));
+ }
+
+ /// createMCCodeGenInfo - Create a MCCodeGenInfo implementation.
+ ///
+ MCCodeGenInfo *createMCCodeGenInfo(StringRef TT, Reloc::Model RM,
+ CodeModel::Model CM,
+ CodeGenOpt::Level OL) const {
+ if (!MCCodeGenInfoCtorFn)
+ return nullptr;
+ return MCCodeGenInfoCtorFn(Triple(TT), RM, CM, OL);
+ }
+
+ /// createMCInstrInfo - Create a MCInstrInfo implementation.
+ ///
+ MCInstrInfo *createMCInstrInfo() const {
+ if (!MCInstrInfoCtorFn)
+ return nullptr;
+ return MCInstrInfoCtorFn();
+ }
+
+ /// createMCInstrAnalysis - Create a MCInstrAnalysis implementation.
+ ///
+ MCInstrAnalysis *createMCInstrAnalysis(const MCInstrInfo *Info) const {
+ if (!MCInstrAnalysisCtorFn)
+ return nullptr;
+ return MCInstrAnalysisCtorFn(Info);
+ }
+
+ /// createMCRegInfo - Create a MCRegisterInfo implementation.
+ ///
+ MCRegisterInfo *createMCRegInfo(StringRef TT) const {
+ if (!MCRegInfoCtorFn)
+ return nullptr;
+ return MCRegInfoCtorFn(Triple(TT));
+ }
+
+ /// createMCSubtargetInfo - Create a MCSubtargetInfo implementation.
+ ///
+ /// \param TheTriple This argument is used to determine the target machine
+ /// feature set; it should always be provided. Generally this should be
+ /// either the target triple from the module, or the target triple of the
+ /// host if that does not exist.
+ /// \param CPU This specifies the name of the target CPU.
+ /// \param Features This specifies the string representation of the
+ /// additional target features.
+ MCSubtargetInfo *createMCSubtargetInfo(StringRef TheTriple, StringRef CPU,
+ StringRef Features) const {
+ if (!MCSubtargetInfoCtorFn)
+ return nullptr;
+ return MCSubtargetInfoCtorFn(Triple(TheTriple), CPU, Features);
+ }
+
+ /// createTargetMachine - Create a target specific machine implementation
+ /// for the specified \p Triple.
+ ///
+ /// \param TT This argument is used to determine the target machine
+ /// feature set; it should always be provided. Generally this should be
+ /// either the target triple from the module, or the target triple of the
+ /// host if that does not exist.
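+ ///
+ /// A minimal sketch (the triple string is only an example; TheTarget would
+ /// come from TargetRegistry::lookupTarget):
+ ///
+ ///   TargetMachine *TM = TheTarget->createTargetMachine(
+ ///       "x86_64-unknown-linux-gnu", "generic", "", TargetOptions());
+ ///   if (!TM) { /* this target does not support code generation */ }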
+ TargetMachine *
+ createTargetMachine(StringRef TT, StringRef CPU, StringRef Features,
+ const TargetOptions &Options,
+ Reloc::Model RM = Reloc::Default,
+ CodeModel::Model CM = CodeModel::Default,
+ CodeGenOpt::Level OL = CodeGenOpt::Default) const {
+ if (!TargetMachineCtorFn)
+ return nullptr;
+ return TargetMachineCtorFn(*this, Triple(TT), CPU, Features, Options, RM,
+ CM, OL);
+ }
+
+ /// createMCAsmBackend - Create a target specific assembler backend.
+ ///
+ /// \param TheTriple The target triple string.
+ MCAsmBackend *createMCAsmBackend(const MCRegisterInfo &MRI,
+ StringRef TheTriple, StringRef CPU) const {
+ if (!MCAsmBackendCtorFn)
+ return nullptr;
+ return MCAsmBackendCtorFn(*this, MRI, Triple(TheTriple), CPU);
+ }
+
+ /// createMCAsmParser - Create a target specific assembly parser.
+ ///
+ /// \param Parser The target independent parser implementation to use for
+ /// parsing and lexing.
+ MCTargetAsmParser *createMCAsmParser(const MCSubtargetInfo &STI,
+ MCAsmParser &Parser,
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options) const {
+ if (!MCAsmParserCtorFn)
+ return nullptr;
+ return MCAsmParserCtorFn(STI, Parser, MII, Options);
+ }
+
+ /// createAsmPrinter - Create a target specific assembly printer pass. This
+ /// takes ownership of the MCStreamer object.
+ AsmPrinter *createAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> &&Streamer) const {
+ if (!AsmPrinterCtorFn)
+ return nullptr;
+ return AsmPrinterCtorFn(TM, std::move(Streamer));
+ }
+
+ MCDisassembler *createMCDisassembler(const MCSubtargetInfo &STI,
+ MCContext &Ctx) const {
+ if (!MCDisassemblerCtorFn)
+ return nullptr;
+ return MCDisassemblerCtorFn(*this, STI, Ctx);
+ }
+
+ MCInstPrinter *createMCInstPrinter(const Triple &T, unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) const {
+ if (!MCInstPrinterCtorFn)
+ return nullptr;
+ return MCInstPrinterCtorFn(T, SyntaxVariant, MAI, MII, MRI);
+ }
+
+ /// createMCCodeEmitter - Create a target specific code emitter.
+ MCCodeEmitter *createMCCodeEmitter(const MCInstrInfo &II,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx) const {
+ if (!MCCodeEmitterCtorFn)
+ return nullptr;
+ return MCCodeEmitterCtorFn(II, MRI, Ctx);
+ }
+
+ /// Create a target specific MCStreamer.
+ ///
+ /// \param T The target triple.
+ /// \param Ctx The target context.
+ /// \param TAB The target assembler backend object. Takes ownership.
+ /// \param OS The stream object.
+ /// \param Emitter The target independent assembler object. Takes ownership.
+ /// \param RelaxAll Relax all fixups?
+ MCStreamer *createMCObjectStreamer(const Triple &T, MCContext &Ctx,
+ MCAsmBackend &TAB, raw_pwrite_stream &OS,
+ MCCodeEmitter *Emitter,
+ const MCSubtargetInfo &STI, bool RelaxAll,
+ bool IncrementalLinkerCompatible,
+ bool DWARFMustBeAtTheEnd) const {
+ MCStreamer *S;
+ switch (T.getObjectFormat()) {
+ default:
+ llvm_unreachable("Unknown object format");
+ case Triple::COFF:
+ assert(T.isOSWindows() && "only Windows COFF is supported");
+ S = COFFStreamerCtorFn(Ctx, TAB, OS, Emitter, RelaxAll,
+ IncrementalLinkerCompatible);
+ break;
+ case Triple::MachO:
+ if (MachOStreamerCtorFn)
+ S = MachOStreamerCtorFn(Ctx, TAB, OS, Emitter, RelaxAll,
+ DWARFMustBeAtTheEnd);
+ else
+ S = createMachOStreamer(Ctx, TAB, OS, Emitter, RelaxAll,
+ DWARFMustBeAtTheEnd);
+ break;
+ case Triple::ELF:
+ if (ELFStreamerCtorFn)
+ S = ELFStreamerCtorFn(T, Ctx, TAB, OS, Emitter, RelaxAll);
+ else
+ S = createELFStreamer(Ctx, TAB, OS, Emitter, RelaxAll);
+ break;
+ }
+ if (ObjectTargetStreamerCtorFn)
+ ObjectTargetStreamerCtorFn(*S, STI);
+ return S;
+ }
+
+ MCStreamer *createAsmStreamer(MCContext &Ctx,
+ std::unique_ptr<formatted_raw_ostream> OS,
+ bool IsVerboseAsm, bool UseDwarfDirectory,
+ MCInstPrinter *InstPrint, MCCodeEmitter *CE,
+ MCAsmBackend *TAB, bool ShowInst) const {
+ formatted_raw_ostream &OSRef = *OS;
+ MCStreamer *S = llvm::createAsmStreamer(Ctx, std::move(OS), IsVerboseAsm,
+ UseDwarfDirectory, InstPrint, CE,
+ TAB, ShowInst);
+ createAsmTargetStreamer(*S, OSRef, InstPrint, IsVerboseAsm);
+ return S;
+ }
+
+ MCTargetStreamer *createAsmTargetStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter *InstPrint,
+ bool IsVerboseAsm) const {
+ if (AsmTargetStreamerCtorFn)
+ return AsmTargetStreamerCtorFn(S, OS, InstPrint, IsVerboseAsm);
+ return nullptr;
+ }
+
+ MCStreamer *createNullStreamer(MCContext &Ctx) const {
+ MCStreamer *S = llvm::createNullStreamer(Ctx);
+ createNullTargetStreamer(*S);
+ return S;
+ }
+
+ MCTargetStreamer *createNullTargetStreamer(MCStreamer &S) const {
+ if (NullTargetStreamerCtorFn)
+ return NullTargetStreamerCtorFn(S);
+ return nullptr;
+ }
+
+ /// createMCRelocationInfo - Create a target specific MCRelocationInfo.
+ ///
+ /// \param TT The target triple.
+ /// \param Ctx The target context.
+ MCRelocationInfo *createMCRelocationInfo(StringRef TT, MCContext &Ctx) const {
+ MCRelocationInfoCtorTy Fn = MCRelocationInfoCtorFn
+ ? MCRelocationInfoCtorFn
+ : llvm::createMCRelocationInfo;
+ return Fn(Triple(TT), Ctx);
+ }
+
+ /// createMCSymbolizer - Create a target specific MCSymbolizer.
+ ///
+ /// \param TT The target triple.
+ /// \param GetOpInfo The function to get the symbolic information for
+ /// operands.
+ /// \param SymbolLookUp The function to lookup a symbol name.
+ /// \param DisInfo The pointer to the block of symbolic information for the
+ /// above callback.
+ /// \param Ctx The target context.
+ /// \param RelInfo The relocation information for this target. Takes
+ /// ownership.
+ MCSymbolizer *
+ createMCSymbolizer(StringRef TT, LLVMOpInfoCallback GetOpInfo,
+ LLVMSymbolLookupCallback SymbolLookUp, void *DisInfo,
+ MCContext *Ctx,
+ std::unique_ptr<MCRelocationInfo> &&RelInfo) const {
+ MCSymbolizerCtorTy Fn =
+ MCSymbolizerCtorFn ? MCSymbolizerCtorFn : llvm::createMCSymbolizer;
+ return Fn(Triple(TT), GetOpInfo, SymbolLookUp, DisInfo, Ctx,
+ std::move(RelInfo));
+ }
+
+ /// @}
+};
+
+/// TargetRegistry - Generic interface to target specific features.
+struct TargetRegistry {
+ // FIXME: Make this a namespace, probably just move all the Register*
+ // functions into Target (currently they all just set members on the Target
+ // anyway, and Target friends this class so those functions can...
+ // function).
+ TargetRegistry() = delete;
+
+ class iterator
+ : public std::iterator<std::forward_iterator_tag, Target, ptrdiff_t> {
+ const Target *Current;
+ explicit iterator(Target *T) : Current(T) {}
+ friend struct TargetRegistry;
+
+ public:
+ iterator() : Current(nullptr) {}
+
+ bool operator==(const iterator &x) const { return Current == x.Current; }
+ bool operator!=(const iterator &x) const { return !operator==(x); }
+
+ // Iterator traversal: forward iteration only
+ iterator &operator++() { // Preincrement
+ assert(Current && "Cannot increment end iterator!");
+ Current = Current->getNext();
+ return *this;
+ }
+ iterator operator++(int) { // Postincrement
+ iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ const Target &operator*() const {
+ assert(Current && "Cannot dereference end iterator!");
+ return *Current;
+ }
+
+ const Target *operator->() const { return &operator*(); }
+ };
+
+ /// printRegisteredTargetsForVersion - Print the registered targets
+ /// appropriately for inclusion in a tool's version output.
+ static void printRegisteredTargetsForVersion();
+
+ /// @name Registry Access
+ /// @{
+
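+ /// targets - Iterate over all registered targets, for example (a minimal
+ /// sketch):
+ ///
+ ///   for (const Target &T : TargetRegistry::targets())
+ ///     outs() << T.getName() << "\n";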
+ static iterator_range<iterator> targets();
+
+ /// lookupTarget - Lookup a target based on a target triple.
+ ///
+ /// \param Triple - The triple to use for finding a target.
+ /// \param Error - On failure, an error string describing why no target was
+ /// found.
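+ ///
+ /// For example (a minimal sketch; the triple string is only an example):
+ ///
+ ///   std::string Error;
+ ///   const Target *T =
+ ///       TargetRegistry::lookupTarget("x86_64-unknown-linux-gnu", Error);
+ ///   if (!T) errs() << Error << "\n";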
+ static const Target *lookupTarget(const std::string &Triple,
+ std::string &Error);
+
+ /// lookupTarget - Lookup a target based on an architecture name
+ /// and a target triple. If the architecture name is non-empty,
+ /// then the lookup is done by architecture. Otherwise, the target
+ /// triple is used.
+ ///
+ /// \param ArchName - The architecture to use for finding a target.
+ /// \param TheTriple - The triple to use for finding a target. The
+ /// triple is updated with canonical architecture name if a lookup
+ /// by architecture is done.
+ /// \param Error - On failure, an error string describing why no target was
+ /// found.
+ static const Target *lookupTarget(const std::string &ArchName,
+ Triple &TheTriple, std::string &Error);
+
+ /// @}
+ /// @name Target Registration
+ /// @{
+
+ /// RegisterTarget - Register the given target. Attempts to register a
+ /// target which has already been registered will be ignored.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Name - The target name. This should be a static string.
+ /// @param ShortDesc - A short target description. This should be a static
+ /// string.
+ /// @param ArchMatchFn - The arch match checking function for this target.
+ /// @param HasJIT - Whether the target supports JIT code
+ /// generation.
+ static void RegisterTarget(Target &T, const char *Name, const char *ShortDesc,
+ Target::ArchMatchFnTy ArchMatchFn,
+ bool HasJIT = false);
+
+ /// RegisterMCAsmInfo - Register a MCAsmInfo implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a MCAsmInfo for the target.
+ static void RegisterMCAsmInfo(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
+ T.MCAsmInfoCtorFn = Fn;
+ }
+
+ /// RegisterMCCodeGenInfo - Register a MCCodeGenInfo implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a MCCodeGenInfo for the target.
+ static void RegisterMCCodeGenInfo(Target &T,
+ Target::MCCodeGenInfoCtorFnTy Fn) {
+ T.MCCodeGenInfoCtorFn = Fn;
+ }
+
+ /// RegisterMCInstrInfo - Register a MCInstrInfo implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a MCInstrInfo for the target.
+ static void RegisterMCInstrInfo(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+ T.MCInstrInfoCtorFn = Fn;
+ }
+
+ /// RegisterMCInstrAnalysis - Register a MCInstrAnalysis implementation for
+ /// the given target.
+ static void RegisterMCInstrAnalysis(Target &T,
+ Target::MCInstrAnalysisCtorFnTy Fn) {
+ T.MCInstrAnalysisCtorFn = Fn;
+ }
+
+ /// RegisterMCRegInfo - Register a MCRegisterInfo implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a MCRegisterInfo for the target.
+ static void RegisterMCRegInfo(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+ T.MCRegInfoCtorFn = Fn;
+ }
+
+ /// RegisterMCSubtargetInfo - Register a MCSubtargetInfo implementation for
+ /// the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a MCSubtargetInfo for the target.
+ static void RegisterMCSubtargetInfo(Target &T,
+ Target::MCSubtargetInfoCtorFnTy Fn) {
+ T.MCSubtargetInfoCtorFn = Fn;
+ }
+
+ /// RegisterTargetMachine - Register a TargetMachine implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct a TargetMachine for the target.
+ static void RegisterTargetMachine(Target &T, Target::TargetMachineCtorTy Fn) {
+ T.TargetMachineCtorFn = Fn;
+ }
+
+ /// RegisterMCAsmBackend - Register a MCAsmBackend implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an AsmBackend for the target.
+ static void RegisterMCAsmBackend(Target &T, Target::MCAsmBackendCtorTy Fn) {
+ T.MCAsmBackendCtorFn = Fn;
+ }
+
+ /// RegisterMCAsmParser - Register a MCTargetAsmParser implementation for
+ /// the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCTargetAsmParser for the target.
+ static void RegisterMCAsmParser(Target &T, Target::MCAsmParserCtorTy Fn) {
+ T.MCAsmParserCtorFn = Fn;
+ }
+
+ /// RegisterAsmPrinter - Register an AsmPrinter implementation for the given
+ /// target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an AsmPrinter for the target.
+ static void RegisterAsmPrinter(Target &T, Target::AsmPrinterCtorTy Fn) {
+ T.AsmPrinterCtorFn = Fn;
+ }
+
+ /// RegisterMCDisassembler - Register a MCDisassembler implementation for
+ /// the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCDisassembler for the target.
+ static void RegisterMCDisassembler(Target &T,
+ Target::MCDisassemblerCtorTy Fn) {
+ T.MCDisassemblerCtorFn = Fn;
+ }
+
+ /// RegisterMCInstPrinter - Register a MCInstPrinter implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCInstPrinter for the target.
+ static void RegisterMCInstPrinter(Target &T, Target::MCInstPrinterCtorTy Fn) {
+ T.MCInstPrinterCtorFn = Fn;
+ }
+
+ /// RegisterMCCodeEmitter - Register a MCCodeEmitter implementation for the
+ /// given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCCodeEmitter for the target.
+ static void RegisterMCCodeEmitter(Target &T, Target::MCCodeEmitterCtorTy Fn) {
+ T.MCCodeEmitterCtorFn = Fn;
+ }
+
+ static void RegisterCOFFStreamer(Target &T, Target::COFFStreamerCtorTy Fn) {
+ T.COFFStreamerCtorFn = Fn;
+ }
+
+ static void RegisterMachOStreamer(Target &T, Target::MachOStreamerCtorTy Fn) {
+ T.MachOStreamerCtorFn = Fn;
+ }
+
+ static void RegisterELFStreamer(Target &T, Target::ELFStreamerCtorTy Fn) {
+ T.ELFStreamerCtorFn = Fn;
+ }
+
+ static void RegisterNullTargetStreamer(Target &T,
+ Target::NullTargetStreamerCtorTy Fn) {
+ T.NullTargetStreamerCtorFn = Fn;
+ }
+
+ static void RegisterAsmTargetStreamer(Target &T,
+ Target::AsmTargetStreamerCtorTy Fn) {
+ T.AsmTargetStreamerCtorFn = Fn;
+ }
+
+ static void
+ RegisterObjectTargetStreamer(Target &T,
+ Target::ObjectTargetStreamerCtorTy Fn) {
+ T.ObjectTargetStreamerCtorFn = Fn;
+ }
+
+ /// RegisterMCRelocationInfo - Register an MCRelocationInfo
+ /// implementation for the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCRelocationInfo for the target.
+ static void RegisterMCRelocationInfo(Target &T,
+ Target::MCRelocationInfoCtorTy Fn) {
+ T.MCRelocationInfoCtorFn = Fn;
+ }
+
+ /// RegisterMCSymbolizer - Register an MCSymbolizer
+ /// implementation for the given target.
+ ///
+ /// Clients are responsible for ensuring that registration doesn't occur
+ /// while another thread is attempting to access the registry. Typically
+ /// this is done by initializing all targets at program startup.
+ ///
+ /// @param T - The target being registered.
+ /// @param Fn - A function to construct an MCSymbolizer for the target.
+ static void RegisterMCSymbolizer(Target &T, Target::MCSymbolizerCtorTy Fn) {
+ T.MCSymbolizerCtorFn = Fn;
+ }
+
+ /// @}
+};
+
+//===--------------------------------------------------------------------===//
+
+/// RegisterTarget - Helper template for registering a target, for use in the
+/// target's initialization function. Usage:
+///
+/// Target TheFooTarget; // The global target instance.
+///
+/// extern "C" void LLVMInitializeFooTargetInfo() {
+/// RegisterTarget<Triple::foo> X(TheFooTarget, "foo", "Foo description");
+/// }
+template <Triple::ArchType TargetArchType = Triple::UnknownArch,
+ bool HasJIT = false>
+struct RegisterTarget {
+ RegisterTarget(Target &T, const char *Name, const char *Desc) {
+ TargetRegistry::RegisterTarget(T, Name, Desc, &getArchMatch, HasJIT);
+ }
+
+ static bool getArchMatch(Triple::ArchType Arch) {
+ return Arch == TargetArchType;
+ }
+};
+
+/// RegisterMCAsmInfo - Helper template for registering a target assembly info
+/// implementation. This invokes the static "Create" method on the class to
+/// actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCAsmInfo<FooMCAsmInfo> X(TheFooTarget);
+/// }
+template <class MCAsmInfoImpl> struct RegisterMCAsmInfo {
+ RegisterMCAsmInfo(Target &T) {
+ TargetRegistry::RegisterMCAsmInfo(T, &Allocator);
+ }
+
+private:
+ static MCAsmInfo *Allocator(const MCRegisterInfo & /*MRI*/,
+ const Triple &TT) {
+ return new MCAsmInfoImpl(TT);
+ }
+};
+
+/// RegisterMCAsmInfoFn - Helper template for registering a target assembly info
+/// implementation. This invokes the specified function to do the
+/// construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCAsmInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCAsmInfoFn {
+ RegisterMCAsmInfoFn(Target &T, Target::MCAsmInfoCtorFnTy Fn) {
+ TargetRegistry::RegisterMCAsmInfo(T, Fn);
+ }
+};
+
+/// RegisterMCCodeGenInfo - Helper template for registering a target codegen
+/// info implementation. This invokes the static "Create" method on the class
+/// to actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCCodeGenInfo<FooMCCodeGenInfo> X(TheFooTarget);
+/// }
+template <class MCCodeGenInfoImpl> struct RegisterMCCodeGenInfo {
+ RegisterMCCodeGenInfo(Target &T) {
+ TargetRegistry::RegisterMCCodeGenInfo(T, &Allocator);
+ }
+
+private:
+ static MCCodeGenInfo *Allocator(const Triple & /*TT*/, Reloc::Model /*RM*/,
+ CodeModel::Model /*CM*/,
+ CodeGenOpt::Level /*OL*/) {
+ return new MCCodeGenInfoImpl();
+ }
+};
+
+/// RegisterMCCodeGenInfoFn - Helper template for registering a target codegen
+/// info implementation. This invokes the specified function to do the
+/// construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCCodeGenInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCCodeGenInfoFn {
+ RegisterMCCodeGenInfoFn(Target &T, Target::MCCodeGenInfoCtorFnTy Fn) {
+ TargetRegistry::RegisterMCCodeGenInfo(T, Fn);
+ }
+};
+
+/// RegisterMCInstrInfo - Helper template for registering a target instruction
+/// info implementation. This invokes the static "Create" method on the class
+/// to actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCInstrInfo<FooMCInstrInfo> X(TheFooTarget);
+/// }
+template <class MCInstrInfoImpl> struct RegisterMCInstrInfo {
+ RegisterMCInstrInfo(Target &T) {
+ TargetRegistry::RegisterMCInstrInfo(T, &Allocator);
+ }
+
+private:
+ static MCInstrInfo *Allocator() { return new MCInstrInfoImpl(); }
+};
+
+/// RegisterMCInstrInfoFn - Helper template for registering a target
+/// instruction info implementation. This invokes the specified function to
+/// do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCInstrInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCInstrInfoFn {
+ RegisterMCInstrInfoFn(Target &T, Target::MCInstrInfoCtorFnTy Fn) {
+ TargetRegistry::RegisterMCInstrInfo(T, Fn);
+ }
+};
+
+/// RegisterMCInstrAnalysis - Helper template for registering a target
+/// instruction analyzer implementation. This invokes the static "Create"
+/// method on the class to actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCInstrAnalysis<FooMCInstrAnalysis> X(TheFooTarget);
+/// }
+template <class MCInstrAnalysisImpl> struct RegisterMCInstrAnalysis {
+ RegisterMCInstrAnalysis(Target &T) {
+ TargetRegistry::RegisterMCInstrAnalysis(T, &Allocator);
+ }
+
+private:
+ static MCInstrAnalysis *Allocator(const MCInstrInfo *Info) {
+ return new MCInstrAnalysisImpl(Info);
+ }
+};
+
+/// RegisterMCInstrAnalysisFn - Helper template for registering a target
+/// instruction analyzer implementation. This invokes the specified function
+/// to do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCInstrAnalysisFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCInstrAnalysisFn {
+ RegisterMCInstrAnalysisFn(Target &T, Target::MCInstrAnalysisCtorFnTy Fn) {
+ TargetRegistry::RegisterMCInstrAnalysis(T, Fn);
+ }
+};
+
+/// RegisterMCRegInfo - Helper template for registering a target register info
+/// implementation. This invokes the static "Create" method on the class to
+/// actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCRegInfo<FooMCRegInfo> X(TheFooTarget);
+/// }
+template <class MCRegisterInfoImpl> struct RegisterMCRegInfo {
+ RegisterMCRegInfo(Target &T) {
+ TargetRegistry::RegisterMCRegInfo(T, &Allocator);
+ }
+
+private:
+ static MCRegisterInfo *Allocator(const Triple & /*TT*/) {
+ return new MCRegisterInfoImpl();
+ }
+};
+
+/// RegisterMCRegInfoFn - Helper template for registering a target register
+/// info implementation. This invokes the specified function to do the
+/// construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCRegInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCRegInfoFn {
+ RegisterMCRegInfoFn(Target &T, Target::MCRegInfoCtorFnTy Fn) {
+ TargetRegistry::RegisterMCRegInfo(T, Fn);
+ }
+};
+
+/// RegisterMCSubtargetInfo - Helper template for registering a target
+/// subtarget info implementation. This invokes the static "Create" method
+/// on the class to actually do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCSubtargetInfo<FooMCSubtargetInfo> X(TheFooTarget);
+/// }
+template <class MCSubtargetInfoImpl> struct RegisterMCSubtargetInfo {
+ RegisterMCSubtargetInfo(Target &T) {
+ TargetRegistry::RegisterMCSubtargetInfo(T, &Allocator);
+ }
+
+private:
+ static MCSubtargetInfo *Allocator(const Triple & /*TT*/, StringRef /*CPU*/,
+ StringRef /*FS*/) {
+ return new MCSubtargetInfoImpl();
+ }
+};
+
+/// RegisterMCSubtargetInfoFn - Helper template for registering a target
+/// subtarget info implementation. This invokes the specified function to
+/// do the construction. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterMCSubtargetInfoFn X(TheFooTarget, TheFunction);
+/// }
+struct RegisterMCSubtargetInfoFn {
+ RegisterMCSubtargetInfoFn(Target &T, Target::MCSubtargetInfoCtorFnTy Fn) {
+ TargetRegistry::RegisterMCSubtargetInfo(T, Fn);
+ }
+};
+
+/// RegisterTargetMachine - Helper template for registering a target machine
+/// implementation, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooTarget() {
+/// extern Target TheFooTarget;
+/// RegisterTargetMachine<FooTargetMachine> X(TheFooTarget);
+/// }
+template <class TargetMachineImpl> struct RegisterTargetMachine {
+ RegisterTargetMachine(Target &T) {
+ TargetRegistry::RegisterTargetMachine(T, &Allocator);
+ }
+
+private:
+ static TargetMachine *Allocator(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Reloc::Model RM,
+ CodeModel::Model CM, CodeGenOpt::Level OL) {
+ return new TargetMachineImpl(T, TT, CPU, FS, Options, RM, CM, OL);
+ }
+};
+
+/// RegisterMCAsmBackend - Helper template for registering a target specific
+/// assembler backend. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCAsmBackend() {
+/// extern Target TheFooTarget;
+/// RegisterMCAsmBackend<FooAsmBackend> X(TheFooTarget);
+/// }
+template <class MCAsmBackendImpl> struct RegisterMCAsmBackend {
+ RegisterMCAsmBackend(Target &T) {
+ TargetRegistry::RegisterMCAsmBackend(T, &Allocator);
+ }
+
+private:
+ static MCAsmBackend *Allocator(const Target &T, const MCRegisterInfo &MRI,
+ const Triple &TheTriple, StringRef CPU) {
+ return new MCAsmBackendImpl(T, MRI, TheTriple, CPU);
+ }
+};
+
+/// RegisterMCAsmParser - Helper template for registering a target specific
+/// assembly parser, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCAsmParser() {
+/// extern Target TheFooTarget;
+/// RegisterMCAsmParser<FooAsmParser> X(TheFooTarget);
+/// }
+template <class MCAsmParserImpl> struct RegisterMCAsmParser {
+ RegisterMCAsmParser(Target &T) {
+ TargetRegistry::RegisterMCAsmParser(T, &Allocator);
+ }
+
+private:
+ static MCTargetAsmParser *Allocator(const MCSubtargetInfo &STI,
+ MCAsmParser &P, const MCInstrInfo &MII,
+ const MCTargetOptions &Options) {
+ return new MCAsmParserImpl(STI, P, MII, Options);
+ }
+};
+
+/// RegisterAsmPrinter - Helper template for registering a target specific
+/// assembly printer, for use in the target machine initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooAsmPrinter() {
+/// extern Target TheFooTarget;
+/// RegisterAsmPrinter<FooAsmPrinter> X(TheFooTarget);
+/// }
+template <class AsmPrinterImpl> struct RegisterAsmPrinter {
+ RegisterAsmPrinter(Target &T) {
+ TargetRegistry::RegisterAsmPrinter(T, &Allocator);
+ }
+
+private:
+ static AsmPrinter *Allocator(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> &&Streamer) {
+ return new AsmPrinterImpl(TM, std::move(Streamer));
+ }
+};
+
+/// RegisterMCCodeEmitter - Helper template for registering a target specific
+/// machine code emitter, for use in the target initialization
+/// function. Usage:
+///
+/// extern "C" void LLVMInitializeFooMCCodeEmitter() {
+/// extern Target TheFooTarget;
+/// RegisterMCCodeEmitter<FooCodeEmitter> X(TheFooTarget);
+/// }
+template <class MCCodeEmitterImpl> struct RegisterMCCodeEmitter {
+ RegisterMCCodeEmitter(Target &T) {
+ TargetRegistry::RegisterMCCodeEmitter(T, &Allocator);
+ }
+
+private:
+ static MCCodeEmitter *Allocator(const MCInstrInfo & /*II*/,
+ const MCRegisterInfo & /*MRI*/,
+ MCContext & /*Ctx*/) {
+ return new MCCodeEmitterImpl();
+ }
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/TargetSelect.h b/contrib/llvm/include/llvm/Support/TargetSelect.h
new file mode 100644
index 0000000..582785c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/TargetSelect.h
@@ -0,0 +1,165 @@
+//===- TargetSelect.h - Target Selection & Registration ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides utilities to make sure that certain classes of targets are
+// linked into the main application executable, and initialize them as
+// appropriate.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TARGETSELECT_H
+#define LLVM_SUPPORT_TARGETSELECT_H
+
+#include "llvm/Config/llvm-config.h"
+
+extern "C" {
+ // Declare all of the target-initialization functions that are available.
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetInfo();
+#include "llvm/Config/Targets.def"
+
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##Target();
+#include "llvm/Config/Targets.def"
+
+ // Declare all of the target-MC-initialization functions that are available.
+#define LLVM_TARGET(TargetName) void LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+
+ // Declare all of the available assembly printer initialization functions.
+#define LLVM_ASM_PRINTER(TargetName) void LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+
+ // Declare all of the available assembly parser initialization functions.
+#define LLVM_ASM_PARSER(TargetName) void LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+
+ // Declare all of the available disassembler initialization functions.
+#define LLVM_DISASSEMBLER(TargetName) \
+ void LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+}
+
+namespace llvm {
+ /// InitializeAllTargetInfos - The main program should call this function if
+ /// it wants access to all available targets that LLVM is configured to
+ /// support, to make them available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
+ inline void InitializeAllTargetInfos() {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetInfo();
+#include "llvm/Config/Targets.def"
+ }
+
+ /// InitializeAllTargets - The main program should call this function if it
+ /// wants access to all available target machines that LLVM is configured to
+ /// support, to make them available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
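+ ///
+ /// Together with the other InitializeAll* functions below, a typical tool
+ /// startup looks like this (a minimal sketch):
+ ///
+ ///   InitializeAllTargets();
+ ///   InitializeAllTargetMCs();
+ ///   InitializeAllAsmPrinters();
+ ///   InitializeAllAsmParsers();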
+ inline void InitializeAllTargets() {
+ // FIXME: Remove this, clients should do it.
+ InitializeAllTargetInfos();
+
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##Target();
+#include "llvm/Config/Targets.def"
+ }
+
+ /// InitializeAllTargetMCs - The main program should call this function if it
+ /// wants access to the MC (machine code) components of all targets that LLVM
+ /// is configured to support, to make them available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
+ inline void InitializeAllTargetMCs() {
+#define LLVM_TARGET(TargetName) LLVMInitialize##TargetName##TargetMC();
+#include "llvm/Config/Targets.def"
+ }
+
+ /// InitializeAllAsmPrinters - The main program should call this function if
+ /// it wants all asm printers that LLVM is configured to support, to make them
+ /// available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
+ inline void InitializeAllAsmPrinters() {
+#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
+#include "llvm/Config/AsmPrinters.def"
+ }
+
+ /// InitializeAllAsmParsers - The main program should call this function if it
+ /// wants all asm parsers that LLVM is configured to support, to make them
+ /// available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
+ inline void InitializeAllAsmParsers() {
+#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
+#include "llvm/Config/AsmParsers.def"
+ }
+
+ /// InitializeAllDisassemblers - The main program should call this function if
+ /// it wants all disassemblers that LLVM is configured to support, to make
+ /// them available via the TargetRegistry.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
+ inline void InitializeAllDisassemblers() {
+#define LLVM_DISASSEMBLER(TargetName) LLVMInitialize##TargetName##Disassembler();
+#include "llvm/Config/Disassemblers.def"
+ }
+
+ /// InitializeNativeTarget - The main program should call this function to
+ /// initialize the native target corresponding to the host. This is useful
+ /// for JIT applications to ensure that the target gets linked in correctly.
+ ///
+ /// It is legal for a client to make multiple calls to this function.
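+ ///
+ /// A typical JIT setup (a minimal sketch; note that these functions return
+ /// true when the corresponding component is not configured):
+ ///
+ ///   bool NoNative = InitializeNativeTarget();   // true if unavailable
+ ///   InitializeNativeTargetAsmPrinter();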
+ inline bool InitializeNativeTarget() {
+ // If we have a native target, initialize it to ensure it is linked in.
+#ifdef LLVM_NATIVE_TARGET
+ LLVM_NATIVE_TARGETINFO();
+ LLVM_NATIVE_TARGET();
+ LLVM_NATIVE_TARGETMC();
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ /// InitializeNativeTargetAsmPrinter - The main program should call
+ /// this function to initialize the native target asm printer.
+ inline bool InitializeNativeTargetAsmPrinter() {
+ // If we have a native target, initialize the corresponding asm printer.
+#ifdef LLVM_NATIVE_ASMPRINTER
+ LLVM_NATIVE_ASMPRINTER();
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ /// InitializeNativeTargetAsmParser - The main program should call
+ /// this function to initialize the native target asm parser.
+ inline bool InitializeNativeTargetAsmParser() {
+ // If we have a native target, initialize the corresponding asm parser.
+#ifdef LLVM_NATIVE_ASMPARSER
+ LLVM_NATIVE_ASMPARSER();
+ return false;
+#else
+ return true;
+#endif
+ }
+
+ /// InitializeNativeTargetDisassembler - The main program should call
+ /// this function to initialize the native target disassembler.
+ inline bool InitializeNativeTargetDisassembler() {
+ // If we have a native target, initialize the corresponding disassembler.
+#ifdef LLVM_NATIVE_DISASSEMBLER
+ LLVM_NATIVE_DISASSEMBLER();
+ return false;
+#else
+ return true;
+#endif
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ThreadLocal.h b/contrib/llvm/include/llvm/Support/ThreadLocal.h
new file mode 100644
index 0000000..427a67e
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ThreadLocal.h
@@ -0,0 +1,63 @@
+//===- llvm/Support/ThreadLocal.h - Thread Local Data ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::ThreadLocal class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADLOCAL_H
+#define LLVM_SUPPORT_THREADLOCAL_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Threading.h"
+#include <cassert>
+
+namespace llvm {
+ namespace sys {
+ // ThreadLocalImpl - Common base class of all ThreadLocal instantiations.
+ // YOU SHOULD NEVER USE THIS DIRECTLY.
+ class ThreadLocalImpl {
+ typedef uint64_t ThreadLocalDataTy;
+ /// \brief Platform-specific thread local data.
+ ///
+ /// This is embedded in the class and we avoid malloc'ing/free'ing it,
+ /// to make this class safer to use along with CrashRecoveryContext.
+ union {
+ char data[sizeof(ThreadLocalDataTy)];
+ ThreadLocalDataTy align_data;
+ };
+ public:
+ ThreadLocalImpl();
+ virtual ~ThreadLocalImpl();
+ void setInstance(const void* d);
+ void *getInstance();
+ void removeInstance();
+ };
+
+ /// ThreadLocal - A class used to abstract thread-local storage. It holds,
+ /// for each thread, a pointer to a single object of type T.
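+ ///
+ /// For example (a minimal sketch; MyState is a hypothetical type, and the
+ /// pointed-to object remains owned by the caller):
+ ///
+ ///   static sys::ThreadLocal<MyState> CurState;
+ ///   CurState.set(&State);          // associate with the current thread
+ ///   MyState *S = CurState.get();   // NULL if never set on this thread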
+ template<class T>
+ class ThreadLocal : public ThreadLocalImpl {
+ public:
+ ThreadLocal() : ThreadLocalImpl() { }
+
+ /// get - Fetches a pointer to the object associated with the current
+ /// thread. If no object has yet been associated, it returns NULL.
+ T* get() { return static_cast<T*>(getInstance()); }
+
+ /// set - Associates a pointer to an object with the current thread.
+ void set(T* d) { setInstance(d); }
+
+ /// erase - Removes the pointer associated with the current thread.
+ void erase() { removeInstance(); }
+ };
+ }
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/ThreadPool.h b/contrib/llvm/include/llvm/Support/ThreadPool.h
new file mode 100644
index 0000000..745334d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ThreadPool.h
@@ -0,0 +1,136 @@
+//===-- llvm/Support/ThreadPool.h - A ThreadPool implementation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a crude C++11 based thread pool.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREAD_POOL_H
+#define LLVM_SUPPORT_THREAD_POOL_H
+
+#include "llvm/Support/thread.h"
+
+#ifdef _MSC_VER
+// concrt.h depends on eh.h for __uncaught_exception declaration
+// even if we disable exceptions.
+#include <eh.h>
+
+// Disable warnings from ppltasks.h transitively included by <future>.
+#pragma warning(push)
+#pragma warning(disable:4530)
+#pragma warning(disable:4062)
+#endif
+
+#include <future>
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#include <condition_variable>
+#include <functional>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <utility>
+
+namespace llvm {
+
+/// A ThreadPool for asynchronous parallel execution on a defined number of
+/// threads.
+///
+/// The pool keeps a vector of threads alive, waiting on a condition variable
+/// for some work to become available.
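+///
+/// Example usage (a minimal sketch):
+///
+///   ThreadPool Pool(4);                          // four worker threads
+///   auto Future = Pool.async([] { /* work */ }); // returns a shared_future
+///   Future.wait();                               // wait for this one task
+///   Pool.wait();                                 // or drain the whole queue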
+class ThreadPool {
+public:
+#ifndef _MSC_VER
+ using VoidTy = void;
+ using TaskTy = std::function<void()>;
+ using PackagedTaskTy = std::packaged_task<void()>;
+#else
+ // MSVC 2013 has a bug and can't use std::packaged_task<void()>;
+ // We force it to use bool(bool) instead.
+ using VoidTy = bool;
+ using TaskTy = std::function<bool(bool)>;
+ using PackagedTaskTy = std::packaged_task<bool(bool)>;
+#endif
+
+ /// Construct a pool with the number of cores available on the system (as
+ /// reported by std::thread::hardware_concurrency()).
+ ThreadPool();
+
+ /// Construct a pool of \p ThreadCount threads
+ ThreadPool(unsigned ThreadCount);
+
+ /// Blocking destructor: the pool will wait for all the threads to complete.
+ ~ThreadPool();
+
+ /// Asynchronous submission of a task to the pool. The returned future can be
+ /// used to wait for the task to finish and is *non-blocking* on destruction.
+ template <typename Function, typename... Args>
+ inline std::shared_future<VoidTy> async(Function &&F, Args &&... ArgList) {
+ auto Task =
+ std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
+#ifndef _MSC_VER
+ return asyncImpl(std::move(Task));
+#else
+ // This lambda has to be marked mutable because MSVC 2013's std::bind call
+ // operator isn't const qualified.
+ return asyncImpl([Task](VoidTy) mutable -> VoidTy {
+ Task();
+ return VoidTy();
+ });
+#endif
+ }
+
+ /// Asynchronous submission of a task to the pool. The returned future can be
+ /// used to wait for the task to finish and is *non-blocking* on destruction.
+ template <typename Function>
+ inline std::shared_future<VoidTy> async(Function &&F) {
+#ifndef _MSC_VER
+ return asyncImpl(std::forward<Function>(F));
+#else
+ return asyncImpl([F] (VoidTy) -> VoidTy { F(); return VoidTy(); });
+#endif
+ }
+
+ /// Blocking wait for all the threads to complete and the queue to be empty.
+ /// It is an error to try to add new tasks while blocking on this call.
+ void wait();
+
+private:
+ /// Asynchronous submission of a task to the pool. The returned future can be
+ /// used to wait for the task to finish and is *non-blocking* on destruction.
+ std::shared_future<VoidTy> asyncImpl(TaskTy F);
+
+ /// Threads in flight
+ std::vector<llvm::thread> Threads;
+
+ /// Tasks waiting for execution in the pool.
+ std::queue<PackagedTaskTy> Tasks;
+
+ /// Locking and signaling for accessing the Tasks queue.
+ std::mutex QueueLock;
+ std::condition_variable QueueCondition;
+
+ /// Locking and signaling for job completion
+ std::mutex CompletionLock;
+ std::condition_variable CompletionCondition;
+
+ /// Keep track of the number of threads actually busy.
+ std::atomic<unsigned> ActiveThreads;
+
+#if LLVM_ENABLE_THREADS // avoids warning for unused variable
+ /// Signal for the destruction of the pool, asking threads to exit.
+ bool EnableFlag;
+#endif
+};
+}
+
+#endif // LLVM_SUPPORT_THREAD_POOL_H
diff --git a/contrib/llvm/include/llvm/Support/Threading.h b/contrib/llvm/include/llvm/Support/Threading.h
new file mode 100644
index 0000000..9007c13
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Threading.h
@@ -0,0 +1,39 @@
+//===-- llvm/Support/Threading.h - Control multithreading mode --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares helper functions for running LLVM in a multi-threaded
+// environment.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADING_H
+#define LLVM_SUPPORT_THREADING_H
+
+namespace llvm {
+ /// Returns true if LLVM is compiled with support for multi-threading, and
+ /// false otherwise.
+ bool llvm_is_multithreaded();
+
+ /// llvm_execute_on_thread - Execute the given \p UserFn on a separate
+ /// thread, passing it the provided \p UserData, and wait for the thread to
+ /// complete.
+ ///
+ /// This function does not guarantee that the code will actually be executed
+ /// on a separate thread or that the requested stack size will be honored,
+ /// but it tries to do so where system support is available.
+ ///
+ /// \param UserFn - The callback to execute.
+ /// \param UserData - An argument to pass to the callback function.
+ /// \param RequestedStackSize - If non-zero, a requested size (in bytes) for
+ /// the thread stack.
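+ ///
+ /// For example (a minimal sketch; Work and Payload are hypothetical):
+ ///
+ ///   static void Work(void *P) { /* use P */ }
+ ///   llvm_execute_on_thread(Work, &Payload);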
+ void llvm_execute_on_thread(void (*UserFn)(void*), void *UserData,
+ unsigned RequestedStackSize = 0);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/TimeValue.h b/contrib/llvm/include/llvm/Support/TimeValue.h
new file mode 100644
index 0000000..6bca58b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/TimeValue.h
@@ -0,0 +1,386 @@
+//===-- TimeValue.h - Declare OS TimeValue Concept --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file declares the operating system TimeValue concept.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TIMEVALUE_H
+#define LLVM_SUPPORT_TIMEVALUE_H
+
+#include "llvm/Support/DataTypes.h"
+#include <string>
+
+namespace llvm {
+namespace sys {
+ /// This class is used where a precise fixed point in time is required. The
+ /// range of TimeValue spans many hundreds of billions of years both past and
+ /// future. The precision of TimeValue is to the nanosecond. However, the
+ /// actual precision of its values will be determined by the resolution of
+ /// the system clock. The TimeValue class is used in conjunction with several
+ /// other lib/System interfaces to specify the time at which a call should
+ /// timeout, etc.
+ /// @since 1.4
+ /// @brief Provides an abstraction for a fixed point in time.
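+ ///
+ /// Example, measuring elapsed wall time (a minimal sketch):
+ ///
+ ///   sys::TimeValue Start = sys::TimeValue::now();
+ ///   // ... do some work ...
+ ///   sys::TimeValue Elapsed = sys::TimeValue::now() - Start;
+ ///   uint64_t USecs = Elapsed.usec();   // may overflow for huge spans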
+ class TimeValue {
+
+ /// @name Constants
+ /// @{
+ public:
+
+ /// A constant TimeValue representing the smallest time
+ /// value permissible by the class. MinTime is some point
+ /// in the distant past, about 300 billion years BCE.
+ /// @brief The smallest possible time value.
+ static TimeValue MinTime() {
+ return TimeValue ( INT64_MIN,0 );
+ }
+
+ /// A constant TimeValue representing the largest time
+ /// value permissible by the class. MaxTime is some point
+ /// in the distant future, about 300 billion years AD.
+ /// @brief The largest possible time value.
+ static TimeValue MaxTime() {
+ return TimeValue ( INT64_MAX,0 );
+ }
+
+ /// A constant TimeValue representing the base time,
+ /// or zero time of 00:00:00 (midnight) January 1st, 2000.
+ /// @brief 00:00:00 Jan 1, 2000 UTC.
+ static TimeValue ZeroTime() {
+ return TimeValue ( 0,0 );
+ }
+
+ /// A constant TimeValue for the Posix base time which is
+ /// 00:00:00 (midnight) January 1st, 1970.
+ /// @brief 00:00:00 Jan 1, 1970 UTC.
+ static TimeValue PosixZeroTime() {
+ return TimeValue ( PosixZeroTimeSeconds,0 );
+ }
+
+ /// A constant TimeValue for the Win32 base time which is
+ /// 00:00:00 (midnight) January 1st, 1601.
+ /// @brief 00:00:00 Jan 1, 1601 UTC.
+ static TimeValue Win32ZeroTime() {
+ return TimeValue ( Win32ZeroTimeSeconds,0 );
+ }
+
+ /// @}
+ /// @name Types
+ /// @{
+ public:
+ typedef int64_t SecondsType; ///< Type used for representing seconds.
+ typedef int32_t NanoSecondsType;///< Type used for representing nanoseconds.
+
+ enum TimeConversions {
+ NANOSECONDS_PER_SECOND = 1000000000, ///< One Billion
+ MICROSECONDS_PER_SECOND = 1000000, ///< One Million
+ MILLISECONDS_PER_SECOND = 1000, ///< One Thousand
+ NANOSECONDS_PER_MICROSECOND = 1000, ///< One Thousand
+ NANOSECONDS_PER_MILLISECOND = 1000000,///< One Million
+ NANOSECONDS_PER_WIN32_TICK = 100 ///< Win32 tick is 10^7 Hz (100ns)
+ };
+
+ /// @}
+ /// @name Constructors
+ /// @{
+ public:
+ /// \brief Default construct a time value, initializing to ZeroTime.
+ TimeValue() : seconds_(0), nanos_(0) {}
+
+ /// Caller provides the exact value in seconds and nanoseconds. The
+ /// \p nanos argument defaults to zero for convenience.
+ /// @brief Explicit constructor
+ explicit TimeValue (SecondsType seconds, NanoSecondsType nanos = 0)
+ : seconds_( seconds ), nanos_( nanos ) { this->normalize(); }
+
+ /// Caller provides the exact value as a double in seconds with the
+ /// fractional part representing nanoseconds.
+ /// @brief Double Constructor.
+ explicit TimeValue( double new_time )
+ : seconds_( 0 ) , nanos_ ( 0 ) {
+ SecondsType integer_part = static_cast<SecondsType>( new_time );
+ seconds_ = integer_part;
+ nanos_ = static_cast<NanoSecondsType>( (new_time -
+ static_cast<double>(integer_part)) * NANOSECONDS_PER_SECOND );
+ this->normalize();
+ }
+
+ /// This is a static constructor that returns a TimeValue that represents
+ /// the current time.
+ /// @brief Creates a TimeValue with the current time (UTC).
+ static TimeValue now();
+
+ /// @}
+ /// @name Operators
+ /// @{
+ public:
+ /// Add \p that to \p this.
+ /// @returns this
+ /// @brief Incrementing assignment operator.
+ TimeValue& operator += (const TimeValue& that ) {
+ this->seconds_ += that.seconds_ ;
+ this->nanos_ += that.nanos_ ;
+ this->normalize();
+ return *this;
+ }
+
+ /// Subtract \p that from \p this.
+ /// @returns this
+ /// @brief Decrementing assignment operator.
+ TimeValue& operator -= (const TimeValue &that ) {
+ this->seconds_ -= that.seconds_ ;
+ this->nanos_ -= that.nanos_ ;
+ this->normalize();
+ return *this;
+ }
+
+ /// Determine if \p this is less than \p that.
+ /// @returns True iff *this < that.
+ /// @brief True if this < that.
+ int operator < (const TimeValue &that) const { return that > *this; }
+
+ /// Determine if \p this is greater than \p that.
+ /// @returns True iff *this > that.
+ /// @brief True if this > that.
+ int operator > (const TimeValue &that) const {
+ if ( this->seconds_ > that.seconds_ ) {
+ return 1;
+ } else if ( this->seconds_ == that.seconds_ ) {
+ if ( this->nanos_ > that.nanos_ ) return 1;
+ }
+ return 0;
+ }
+
+ /// Determine if \p this is less than or equal to \p that.
+ /// @returns True iff *this <= that.
+ /// @brief True if this <= that.
+ int operator <= (const TimeValue &that) const { return that >= *this; }
+
+ /// Determine if \p this is greater than or equal to \p that.
+ /// @returns True iff *this >= that.
+ int operator >= (const TimeValue &that) const {
+ if ( this->seconds_ > that.seconds_ ) {
+ return 1;
+ } else if ( this->seconds_ == that.seconds_ ) {
+ if ( this->nanos_ >= that.nanos_ ) return 1;
+ }
+ return 0;
+ }
+
+ /// Determines if two TimeValue objects represent the same moment in time.
+ /// @returns True iff *this == that.
+ int operator == (const TimeValue &that) const {
+ return (this->seconds_ == that.seconds_) &&
+ (this->nanos_ == that.nanos_);
+ }
+
+ /// Determines if two TimeValue objects represent times that are not the
+ /// same.
+ /// @returns True iff *this != that.
+ int operator != (const TimeValue &that) const { return !(*this == that); }
+
+ /// Adds two TimeValue objects together.
+ /// @returns The sum of the two operands as a new TimeValue
+ /// @brief Addition operator.
+ friend TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2);
+
+ /// Subtracts two TimeValue objects.
+ /// @returns The difference of the two operands as a new TimeValue
+ /// @brief Subtraction operator.
+ friend TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2);
+
+ /// @}
+ /// @name Accessors
+ /// @{
+ public:
+
+ /// Returns only the seconds component of the TimeValue. The nanoseconds
+ /// portion is ignored. No rounding is performed.
+ /// @brief Retrieve the seconds component
+ SecondsType seconds() const { return seconds_; }
+
+ /// Returns only the nanoseconds component of the TimeValue. The seconds
+ /// portion is ignored.
+ /// @brief Retrieve the nanoseconds component.
+ NanoSecondsType nanoseconds() const { return nanos_; }
+
+ /// Returns only the fractional portion of the TimeValue rounded down to the
+ /// nearest microsecond (divide by one thousand).
+    /// @brief Retrieve the fractional part as microseconds.
+ uint32_t microseconds() const {
+ return nanos_ / NANOSECONDS_PER_MICROSECOND;
+ }
+
+ /// Returns only the fractional portion of the TimeValue rounded down to the
+ /// nearest millisecond (divide by one million).
+    /// @brief Retrieve the fractional part as milliseconds.
+ uint32_t milliseconds() const {
+ return nanos_ / NANOSECONDS_PER_MILLISECOND;
+ }
+
+ /// Returns the TimeValue as a number of microseconds. Note that the value
+ /// returned can overflow because the range of a uint64_t is smaller than
+ /// the range of a TimeValue. Nevertheless, this is useful on some operating
+ /// systems and is therefore provided.
+ /// @brief Convert to a number of microseconds (can overflow)
+ uint64_t usec() const {
+ return seconds_ * MICROSECONDS_PER_SECOND +
+ ( nanos_ / NANOSECONDS_PER_MICROSECOND );
+ }
+
+ /// Returns the TimeValue as a number of milliseconds. Note that the value
+ /// returned can overflow because the range of a uint64_t is smaller than
+ /// the range of a TimeValue. Nevertheless, this is useful on some operating
+ /// systems and is therefore provided.
+ /// @brief Convert to a number of milliseconds (can overflow)
+ uint64_t msec() const {
+ return seconds_ * MILLISECONDS_PER_SECOND +
+ ( nanos_ / NANOSECONDS_PER_MILLISECOND );
+ }
+
+ /// Converts the TimeValue into the corresponding number of seconds
+    /// since the epoch (00:00:00 Jan 1, 1970).
+ uint64_t toEpochTime() const {
+ return seconds_ - PosixZeroTimeSeconds;
+ }
+
+ /// Converts the TimeValue into the corresponding number of "ticks" for
+ /// Win32 platforms, correcting for the difference in Win32 zero time.
+ /// @brief Convert to Win32's FILETIME
+ /// (100ns intervals since 00:00:00 Jan 1, 1601 UTC)
+ uint64_t toWin32Time() const {
+ uint64_t result = (uint64_t)10000000 * (seconds_ - Win32ZeroTimeSeconds);
+ result += nanos_ / NANOSECONDS_PER_WIN32_TICK;
+ return result;
+ }
+
+ /// Provides the seconds and nanoseconds as results in its arguments after
+ /// correction for the Posix zero time.
+ /// @brief Convert to timespec time (ala POSIX.1b)
+ void getTimespecTime( uint64_t& seconds, uint32_t& nanos ) const {
+ seconds = seconds_ - PosixZeroTimeSeconds;
+ nanos = nanos_;
+ }
+
+ /// Provides conversion of the TimeValue into a readable time & date.
+ /// @returns std::string containing the readable time value
+ /// @brief Convert time to a string.
+ std::string str() const;
+
+ /// @}
+ /// @name Mutators
+ /// @{
+ public:
+ /// The seconds component of the TimeValue is set to \p sec without
+ /// modifying the nanoseconds part. This is useful for whole second
+ /// arithmetic.
+ /// @brief Set the seconds component.
+ void seconds (SecondsType sec ) {
+ this->seconds_ = sec;
+ this->normalize();
+ }
+
+ /// The nanoseconds component of the TimeValue is set to \p nanos without
+ /// modifying the seconds part. This is useful for basic computations
+ /// involving just the nanoseconds portion. Note that the TimeValue will be
+ /// normalized after this call so that the fractional (nanoseconds) portion
+ /// will have the smallest equivalent value.
+ /// @brief Set the nanoseconds component using a number of nanoseconds.
+ void nanoseconds ( NanoSecondsType nanos ) {
+ this->nanos_ = nanos;
+ this->normalize();
+ }
+
+ /// The seconds component remains unchanged.
+ /// @brief Set the nanoseconds component using a number of microseconds.
+ void microseconds ( int32_t micros ) {
+ this->nanos_ = micros * NANOSECONDS_PER_MICROSECOND;
+ this->normalize();
+ }
+
+ /// The seconds component remains unchanged.
+ /// @brief Set the nanoseconds component using a number of milliseconds.
+ void milliseconds ( int32_t millis ) {
+ this->nanos_ = millis * NANOSECONDS_PER_MILLISECOND;
+ this->normalize();
+ }
+
+ /// @brief Converts from microsecond format to TimeValue format
+ void usec( int64_t microseconds ) {
+ this->seconds_ = microseconds / MICROSECONDS_PER_SECOND;
+ this->nanos_ = NanoSecondsType(microseconds % MICROSECONDS_PER_SECOND) *
+ NANOSECONDS_PER_MICROSECOND;
+ this->normalize();
+ }
+
+ /// @brief Converts from millisecond format to TimeValue format
+ void msec( int64_t milliseconds ) {
+ this->seconds_ = milliseconds / MILLISECONDS_PER_SECOND;
+ this->nanos_ = NanoSecondsType(milliseconds % MILLISECONDS_PER_SECOND) *
+ NANOSECONDS_PER_MILLISECOND;
+ this->normalize();
+ }
+
+ /// Converts the \p seconds argument from PosixTime to the corresponding
+ /// TimeValue and assigns that value to \p this.
+    /// @brief Convert seconds from PosixTime to TimeValue
+ void fromEpochTime( SecondsType seconds ) {
+ seconds_ = seconds + PosixZeroTimeSeconds;
+ nanos_ = 0;
+ this->normalize();
+ }
+
+ /// Converts the \p win32Time argument from Windows FILETIME to the
+ /// corresponding TimeValue and assigns that value to \p this.
+    /// @brief Convert seconds from Windows FILETIME to TimeValue
+ void fromWin32Time( uint64_t win32Time ) {
+ this->seconds_ = win32Time / 10000000 + Win32ZeroTimeSeconds;
+ this->nanos_ = NanoSecondsType(win32Time % 10000000) * 100;
+ }
+
+ /// @}
+ /// @name Implementation
+ /// @{
+ private:
+ /// This causes the values to be represented so that the fractional
+ /// part is minimized, possibly incrementing the seconds part.
+ /// @brief Normalize to canonical form.
+ void normalize();
+
+ /// @}
+ /// @name Data
+ /// @{
+ private:
+    /// Store the values as a timeval-style (seconds, nanoseconds) pair.
+ SecondsType seconds_;///< Stores the seconds part of the TimeVal
+ NanoSecondsType nanos_; ///< Stores the nanoseconds part of the TimeVal
+
+ static const SecondsType PosixZeroTimeSeconds;
+ static const SecondsType Win32ZeroTimeSeconds;
+ /// @}
+
+ };
+
+inline TimeValue operator + (const TimeValue &tv1, const TimeValue &tv2) {
+ TimeValue sum (tv1.seconds_ + tv2.seconds_, tv1.nanos_ + tv2.nanos_);
+ sum.normalize ();
+ return sum;
+}
+
+inline TimeValue operator - (const TimeValue &tv1, const TimeValue &tv2) {
+ TimeValue difference (tv1.seconds_ - tv2.seconds_, tv1.nanos_ - tv2.nanos_ );
+ difference.normalize ();
+ return difference;
+}
+
+}
+}
+
+#endif
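
The arithmetic operators and converters above compose naturally for interval
measurement. A minimal usage sketch, assuming the header is included as
llvm/Support/TimeValue.h and the program links LLVMSupport; the workload being
timed is hypothetical:

    #include "llvm/Support/TimeValue.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    void timeSomething() {
      sys::TimeValue Start = sys::TimeValue::now();            // current UTC time
      // ... hypothetical workload ...
      sys::TimeValue Elapsed = sys::TimeValue::now() - Start;  // normalized

      // Whole milliseconds via the (potentially overflowing) converter.
      outs() << "elapsed: " << Elapsed.msec() << " ms\n";

      // Round-trip through the POSIX epoch: 0 maps back to PosixZeroTime.
      sys::TimeValue T;
      T.fromEpochTime(0);
      outs() << (T == sys::TimeValue::PosixZeroTime()) << "\n"; // prints 1
    }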
diff --git a/contrib/llvm/include/llvm/Support/Timer.h b/contrib/llvm/include/llvm/Support/Timer.h
new file mode 100644
index 0000000..499fe7b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Timer.h
@@ -0,0 +1,196 @@
+//===-- llvm/Support/Timer.h - Interval Timing Support ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TIMER_H
+#define LLVM_SUPPORT_TIMER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Timer;
+class TimerGroup;
+class raw_ostream;
+
+class TimeRecord {
+ double WallTime; // Wall clock time elapsed in seconds
+ double UserTime; // User time elapsed
+ double SystemTime; // System time elapsed
+ ssize_t MemUsed; // Memory allocated (in bytes)
+public:
+ TimeRecord() : WallTime(0), UserTime(0), SystemTime(0), MemUsed(0) {}
+
+ /// getCurrentTime - Get the current time and memory usage. If Start is true
+ /// we get the memory usage before the time, otherwise we get time before
+ /// memory usage. This matters if the time to get the memory usage is
+ /// significant and shouldn't be counted as part of a duration.
+ static TimeRecord getCurrentTime(bool Start = true);
+
+ double getProcessTime() const { return UserTime + SystemTime; }
+ double getUserTime() const { return UserTime; }
+ double getSystemTime() const { return SystemTime; }
+ double getWallTime() const { return WallTime; }
+ ssize_t getMemUsed() const { return MemUsed; }
+
+ // operator< - Allow sorting.
+ bool operator<(const TimeRecord &T) const {
+ // Sort by Wall Time elapsed, as it is the only thing really accurate
+ return WallTime < T.WallTime;
+ }
+
+ void operator+=(const TimeRecord &RHS) {
+ WallTime += RHS.WallTime;
+ UserTime += RHS.UserTime;
+ SystemTime += RHS.SystemTime;
+ MemUsed += RHS.MemUsed;
+ }
+ void operator-=(const TimeRecord &RHS) {
+ WallTime -= RHS.WallTime;
+ UserTime -= RHS.UserTime;
+ SystemTime -= RHS.SystemTime;
+ MemUsed -= RHS.MemUsed;
+ }
+
+ /// Print the current time record to \p OS, with a breakdown showing
+ /// contributions to the \p Total time record.
+ void print(const TimeRecord &Total, raw_ostream &OS) const;
+};
+
+/// Timer - This class is used to track the amount of time spent between
+/// invocations of its startTimer()/stopTimer() methods. Given appropriate OS
+/// support it can also keep track of the RSS of the program at various points.
+/// By default, the Timer will print the amount of time it has captured to
+/// standard error when the last timer is destroyed, otherwise it is printed
+/// when its TimerGroup is destroyed. Timers do not print their information
+/// if they are never started.
+///
+class Timer {
+ TimeRecord Time; // The total time captured
+ TimeRecord StartTime; // The time startTimer() was last called
+ std::string Name; // The name of this time variable.
+ bool Running; // Is the timer currently running?
+ bool Triggered; // Has the timer ever been triggered?
+ TimerGroup *TG; // The TimerGroup this Timer is in.
+
+ Timer **Prev, *Next; // Doubly linked list of timers in the group.
+public:
+ explicit Timer(StringRef N) : TG(nullptr) { init(N); }
+ Timer(StringRef N, TimerGroup &tg) : TG(nullptr) { init(N, tg); }
+ Timer(const Timer &RHS) : TG(nullptr) {
+ assert(!RHS.TG && "Can only copy uninitialized timers");
+ }
+ const Timer &operator=(const Timer &T) {
+ assert(!TG && !T.TG && "Can only assign uninit timers");
+ return *this;
+ }
+ ~Timer();
+
+ // Create an uninitialized timer, client must use 'init'.
+ explicit Timer() : TG(nullptr) {}
+ void init(StringRef N);
+ void init(StringRef N, TimerGroup &tg);
+
+ const std::string &getName() const { return Name; }
+ bool isInitialized() const { return TG != nullptr; }
+
+ /// Check if startTimer() has ever been called on this timer.
+ bool hasTriggered() const { return Triggered; }
+
+ /// Start the timer running. Time between calls to startTimer/stopTimer is
+ /// counted by the Timer class. Note that these calls must be correctly
+ /// paired.
+ void startTimer();
+
+ /// Stop the timer.
+ void stopTimer();
+
+ /// Clear the timer state.
+ void clear();
+
+ /// Return the duration for which this timer has been running.
+ TimeRecord getTotalTime() const { return Time; }
+
+private:
+ friend class TimerGroup;
+};
+
+/// The TimeRegion class is used as a helper class to call the startTimer() and
+/// stopTimer() methods of the Timer class. When the object is constructed, it
+/// starts the timer specified as its argument. When it is destroyed, it stops
+/// the relevant timer. This makes it easy to time a region of code.
+///
+class TimeRegion {
+ Timer *T;
+ TimeRegion(const TimeRegion &) = delete;
+
+public:
+ explicit TimeRegion(Timer &t) : T(&t) {
+ T->startTimer();
+ }
+ explicit TimeRegion(Timer *t) : T(t) {
+ if (T) T->startTimer();
+ }
+ ~TimeRegion() {
+ if (T) T->stopTimer();
+ }
+};
+
+/// NamedRegionTimer - This class is basically a combination of TimeRegion and
+/// Timer. It allows you to declare a new timer, AND specify the region to
+/// time, all in one statement. All timers with the same name are merged. This
+/// is primarily used for debugging and for hunting performance problems.
+///
+struct NamedRegionTimer : public TimeRegion {
+ explicit NamedRegionTimer(StringRef Name,
+ bool Enabled = true);
+ explicit NamedRegionTimer(StringRef Name, StringRef GroupName,
+ bool Enabled = true);
+};
+
+/// The TimerGroup class is used to group together related timers into a single
+/// report that is printed when the TimerGroup is destroyed. It is illegal to
+/// destroy a TimerGroup object before all of the Timers in it are gone. A
+/// TimerGroup can be specified for a newly created timer in its constructor.
+///
+class TimerGroup {
+ std::string Name;
+ Timer *FirstTimer; // First timer in the group.
+ std::vector<std::pair<TimeRecord, std::string>> TimersToPrint;
+
+ TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's.
+ TimerGroup(const TimerGroup &TG) = delete;
+ void operator=(const TimerGroup &TG) = delete;
+
+public:
+ explicit TimerGroup(StringRef name);
+ ~TimerGroup();
+
+ void setName(StringRef name) { Name.assign(name.begin(), name.end()); }
+
+ /// print - Print any started timers in this group and zero them.
+ void print(raw_ostream &OS);
+
+ /// printAll - This static method prints all timers and clears them all out.
+ static void printAll(raw_ostream &OS);
+
+private:
+ friend class Timer;
+ void addTimer(Timer &T);
+ void removeTimer(Timer &T);
+ void PrintQueuedTimers(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
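
A sketch of the intended pairing of Timer, TimerGroup, and the RAII helpers;
the report is printed when the group is destroyed, as described above. The
function in the timed region is hypothetical:

    #include "llvm/Support/Timer.h"

    using namespace llvm;

    static void expensiveWork() { /* hypothetical workload */ }

    void timedDriver() {
      TimerGroup Group("my-phases");        // report prints when Group dies
      Timer T("expensive-work", Group);

      {
        TimeRegion R(T);                    // startTimer() on entry ...
        expensiveWork();
      }                                     // ... stopTimer() on exit

      // Alternatively, merge all timers sharing a name across call sites:
      {
        NamedRegionTimer NRT("expensive-work", "my-phases");
        expensiveWork();
      }
    }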
diff --git a/contrib/llvm/include/llvm/Support/ToolOutputFile.h b/contrib/llvm/include/llvm/Support/ToolOutputFile.h
new file mode 100644
index 0000000..1be26c2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/ToolOutputFile.h
@@ -0,0 +1,63 @@
+//===- ToolOutputFile.h - Output files for compiler-like tools -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the tool_output_file class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TOOLOUTPUTFILE_H
+#define LLVM_SUPPORT_TOOLOUTPUTFILE_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// This class contains a raw_fd_ostream and adds a few extra features commonly
+/// needed for compiler-like tool output files:
+/// - The file is automatically deleted if the process is killed.
+/// - The file is automatically deleted when the tool_output_file
+/// object is destroyed unless the client calls keep().
+class tool_output_file {
+ /// This class is declared before the raw_fd_ostream so that it is constructed
+ /// before the raw_fd_ostream is constructed and destructed after the
+ /// raw_fd_ostream is destructed. It installs cleanups in its constructor and
+ /// uninstalls them in its destructor.
+ class CleanupInstaller {
+ /// The name of the file.
+ std::string Filename;
+ public:
+ /// The flag which indicates whether we should not delete the file.
+ bool Keep;
+
+    explicit CleanupInstaller(StringRef Filename);
+ ~CleanupInstaller();
+ } Installer;
+
+ /// The contained stream. This is intentionally declared after Installer.
+ raw_fd_ostream OS;
+
+public:
+  /// This constructor's arguments are passed to raw_fd_ostream's
+ /// constructor.
+ tool_output_file(StringRef Filename, std::error_code &EC,
+ sys::fs::OpenFlags Flags);
+
+ tool_output_file(StringRef Filename, int FD);
+
+ /// Return the contained raw_fd_ostream.
+ raw_fd_ostream &os() { return OS; }
+
+ /// Indicate that the tool's job wrt this output file has been successful and
+ /// the file should not be deleted.
+ void keep() { Installer.Keep = true; }
+};
+
+} // end llvm namespace
+
+#endif
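
A minimal sketch of the intended pattern: write through os(), and call keep()
only on the success path so a failed run leaves no partial output behind. It
assumes llvm/Support/FileSystem.h supplies sys::fs::F_None for the OpenFlags
argument:

    #include "llvm/Support/FileSystem.h"   // assumed home of sys::fs::F_None
    #include "llvm/Support/ToolOutputFile.h"

    using namespace llvm;

    bool emitResult(StringRef Path) {
      std::error_code EC;
      tool_output_file Out(Path, EC, sys::fs::F_None);
      if (EC)
        return false;               // nothing kept; file is cleaned up

      Out.os() << "result\n";       // write via the contained raw_fd_ostream

      Out.keep();                   // success: suppress automatic deletion
      return true;
    }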
diff --git a/contrib/llvm/include/llvm/Support/TrailingObjects.h b/contrib/llvm/include/llvm/Support/TrailingObjects.h
new file mode 100644
index 0000000..8529746
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/TrailingObjects.h
@@ -0,0 +1,349 @@
+//===--- TrailingObjects.h - Variable-length classes ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This header defines support for implementing classes that have
+/// some trailing object (or arrays of objects) appended to them. The
+/// main purpose is to make it obvious where this idiom is being used,
+/// and to make the usage more idiomatic and more difficult to get
+/// wrong.
+///
+/// The TrailingObject template abstracts away the reinterpret_cast,
+/// pointer arithmetic, and size calculations used for the allocation
+/// and access of appended arrays of objects, and takes care that they
+/// are all allocated at their required alignment. Additionally, it
+/// ensures that the base type is final -- deriving from a class that
+/// expects data appended immediately after it is typically not safe.
+///
+/// Users are expected to derive from this template, and provide
+/// numTrailingObjects implementations for each trailing type except
+/// the last, e.g. like this sample:
+///
+/// \code
+/// class VarLengthObj : private TrailingObjects<VarLengthObj, int, double> {
+/// friend TrailingObjects;
+///
+/// unsigned NumInts, NumDoubles;
+/// size_t numTrailingObjects(OverloadToken<int>) const { return NumInts; }
+/// };
+/// \endcode
+///
+/// You can access the appended arrays via 'getTrailingObjects', and
+/// determine the size needed for allocation via
+/// 'additionalSizeToAlloc' and 'totalSizeToAlloc'.
+///
+/// All the methods implemented by this class are intended for use
+/// by the implementation of the class, not as part of its interface
+/// (thus, private inheritance is suggested).
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TRAILINGOBJECTS_H
+#define LLVM_SUPPORT_TRAILINGOBJECTS_H
+
+#include "llvm/Support/AlignOf.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/type_traits.h"
+#include <new>
+#include <type_traits>
+
+namespace llvm {
+
+namespace trailing_objects_internal {
+/// Helper template to calculate the max alignment requirement for a set of
+/// objects.
+template <typename First, typename... Rest> class AlignmentCalcHelper {
+private:
+ enum {
+ FirstAlignment = AlignOf<First>::Alignment,
+ RestAlignment = AlignmentCalcHelper<Rest...>::Alignment,
+ };
+
+public:
+ enum {
+ Alignment = FirstAlignment > RestAlignment ? FirstAlignment : RestAlignment
+ };
+};
+
+template <typename First> class AlignmentCalcHelper<First> {
+public:
+ enum { Alignment = AlignOf<First>::Alignment };
+};
+
+/// The base class for TrailingObjects* classes.
+class TrailingObjectsBase {
+protected:
+ /// OverloadToken's purpose is to allow specifying function overloads
+ /// for different types, without actually taking the types as
+ /// parameters. (Necessary because member function templates cannot
+ /// be specialized, so overloads must be used instead of
+ /// specialization.)
+ template <typename T> struct OverloadToken {};
+};
+
+/// This helper template works-around MSVC 2013's lack of useful
+/// alignas() support. The argument to LLVM_ALIGNAS(), in MSVC, is
+/// required to be a literal integer. But, you *can* use template
+/// specialization to select between a bunch of different LLVM_ALIGNAS
+/// expressions...
+template <int Align>
+class TrailingObjectsAligner : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(1) TrailingObjectsAligner<1> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(2) TrailingObjectsAligner<2> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(4) TrailingObjectsAligner<4> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(8) TrailingObjectsAligner<8> : public TrailingObjectsBase {};
+template <>
+class LLVM_ALIGNAS(16) TrailingObjectsAligner<16> : public TrailingObjectsBase {
+};
+template <>
+class LLVM_ALIGNAS(32) TrailingObjectsAligner<32> : public TrailingObjectsBase {
+};
+
+// Just a little helper for transforming a type pack into the same
+// number of a different type. e.g.:
+// ExtractSecondType<Foo..., int>::type
+template <typename Ty1, typename Ty2> struct ExtractSecondType {
+ typedef Ty2 type;
+};
+
+// TrailingObjectsImpl is somewhat complicated, because it is a
+// recursively inheriting template, in order to handle the template
+// varargs. Each level of inheritance picks off a single trailing type
+// then recurses on the rest. The "Align", "BaseTy", and
+// "TopTrailingObj" arguments are passed through unchanged through the
+// recursion. "PrevTy" is, at each level, the type handled by the
+// level right above it.
+
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
+ typename... MoreTys>
+struct TrailingObjectsImpl {
+ // The main template definition is never used -- the two
+ // specializations cover all possibilities.
+};
+
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy,
+ typename NextTy, typename... MoreTys>
+struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy, NextTy,
+ MoreTys...>
+ : public TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy,
+ MoreTys...> {
+
+ typedef TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, NextTy, MoreTys...>
+ ParentType;
+
+ // Ensure the methods we inherit are not hidden.
+ using ParentType::getTrailingObjectsImpl;
+ using ParentType::additionalSizeToAllocImpl;
+
+ static LLVM_CONSTEXPR bool requiresRealignment() {
+ return llvm::AlignOf<PrevTy>::Alignment < llvm::AlignOf<NextTy>::Alignment;
+ }
+
+ // These two functions are helper functions for
+ // TrailingObjects::getTrailingObjects. They recurse to the left --
+ // the result for each type in the list of trailing types depends on
+ // the result of calling the function on the type to the
+ // left. However, the function for the type to the left is
+ // implemented by a *subclass* of this class, so we invoke it via
+ // the TopTrailingObj, which is, via the
+ // curiously-recurring-template-pattern, the most-derived type in
+ // this recursion, and thus, contains all the overloads.
+ static const NextTy *
+ getTrailingObjectsImpl(const BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<NextTy>) {
+ auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
+ Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
+ TopTrailingObj::callNumTrailingObjects(
+ Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
+
+ if (requiresRealignment())
+ return reinterpret_cast<const NextTy *>(
+ llvm::alignAddr(Ptr, llvm::alignOf<NextTy>()));
+ else
+ return reinterpret_cast<const NextTy *>(Ptr);
+ }
+
+ static NextTy *
+ getTrailingObjectsImpl(BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<NextTy>) {
+ auto *Ptr = TopTrailingObj::getTrailingObjectsImpl(
+ Obj, TrailingObjectsBase::OverloadToken<PrevTy>()) +
+ TopTrailingObj::callNumTrailingObjects(
+ Obj, TrailingObjectsBase::OverloadToken<PrevTy>());
+
+ if (requiresRealignment())
+ return reinterpret_cast<NextTy *>(
+ llvm::alignAddr(Ptr, llvm::alignOf<NextTy>()));
+ else
+ return reinterpret_cast<NextTy *>(Ptr);
+ }
+
+ // Helper function for TrailingObjects::additionalSizeToAlloc: this
+ // function recurses to superclasses, each of which requires one
+ // fewer size_t argument, and adds its own size.
+ static LLVM_CONSTEXPR size_t additionalSizeToAllocImpl(
+ size_t SizeSoFar, size_t Count1,
+ typename ExtractSecondType<MoreTys, size_t>::type... MoreCounts) {
+ return additionalSizeToAllocImpl(
+ (requiresRealignment()
+ ? llvm::RoundUpToAlignment(SizeSoFar, llvm::alignOf<NextTy>())
+ : SizeSoFar) +
+ sizeof(NextTy) * Count1,
+ MoreCounts...);
+ }
+};
+
+// The base case of the TrailingObjectsImpl inheritance recursion,
+// when there's no more trailing types.
+template <int Align, typename BaseTy, typename TopTrailingObj, typename PrevTy>
+struct TrailingObjectsImpl<Align, BaseTy, TopTrailingObj, PrevTy>
+ : public TrailingObjectsAligner<Align> {
+ // This is a dummy method, only here so the "using" doesn't fail --
+ // it will never be called, because this function recurses backwards
+ // up the inheritance chain to subclasses.
+ static void getTrailingObjectsImpl();
+
+ static LLVM_CONSTEXPR size_t additionalSizeToAllocImpl(size_t SizeSoFar) {
+ return SizeSoFar;
+ }
+
+ template <bool CheckAlignment> static void verifyTrailingObjectsAlignment() {}
+};
+
+} // end namespace trailing_objects_internal
+
+// Finally, the main type defined in this file, the one intended for users...
+
+/// See the file comment for details on the usage of the
+/// TrailingObjects type.
+template <typename BaseTy, typename... TrailingTys>
+class TrailingObjects : private trailing_objects_internal::TrailingObjectsImpl<
+ trailing_objects_internal::AlignmentCalcHelper<
+ TrailingTys...>::Alignment,
+ BaseTy, TrailingObjects<BaseTy, TrailingTys...>,
+ BaseTy, TrailingTys...> {
+
+ template <int A, typename B, typename T, typename P, typename... M>
+ friend struct trailing_objects_internal::TrailingObjectsImpl;
+
+ template <typename... Tys> class Foo {};
+
+ typedef trailing_objects_internal::TrailingObjectsImpl<
+ trailing_objects_internal::AlignmentCalcHelper<TrailingTys...>::Alignment,
+ BaseTy, TrailingObjects<BaseTy, TrailingTys...>, BaseTy, TrailingTys...>
+ ParentType;
+ using TrailingObjectsBase = trailing_objects_internal::TrailingObjectsBase;
+
+ using ParentType::getTrailingObjectsImpl;
+
+ // This function contains only a static_assert BaseTy is final. The
+ // static_assert must be in a function, and not at class-level
+ // because BaseTy isn't complete at class instantiation time, but
+ // will be by the time this function is instantiated.
+ static void verifyTrailingObjectsAssertions() {
+#ifdef LLVM_IS_FINAL
+ static_assert(LLVM_IS_FINAL(BaseTy), "BaseTy must be final.");
+#endif
+ }
+
+ // These two methods are the base of the recursion for this method.
+ static const BaseTy *
+ getTrailingObjectsImpl(const BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<BaseTy>) {
+ return Obj;
+ }
+
+ static BaseTy *
+ getTrailingObjectsImpl(BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<BaseTy>) {
+ return Obj;
+ }
+
+ // callNumTrailingObjects simply calls numTrailingObjects on the
+ // provided Obj -- except when the type being queried is BaseTy
+ // itself. There is always only one of the base object, so that case
+ // is handled here. (An additional benefit of indirecting through
+ // this function is that consumers only say "friend
+ // TrailingObjects", and thus, only this class itself can call the
+ // numTrailingObjects function.)
+ static size_t
+ callNumTrailingObjects(const BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<BaseTy>) {
+ return 1;
+ }
+
+ template <typename T>
+ static size_t callNumTrailingObjects(const BaseTy *Obj,
+ TrailingObjectsBase::OverloadToken<T>) {
+ return Obj->numTrailingObjects(TrailingObjectsBase::OverloadToken<T>());
+ }
+
+public:
+  // Make this (privately inherited) member public.
+ using ParentType::OverloadToken;
+
+ /// Returns a pointer to the trailing object array of the given type
+ /// (which must be one of those specified in the class template). The
+ /// array may have zero or more elements in it.
+ template <typename T> const T *getTrailingObjects() const {
+ verifyTrailingObjectsAssertions();
+ // Forwards to an impl function with overloads, since member
+ // function templates can't be specialized.
+ return this->getTrailingObjectsImpl(
+ static_cast<const BaseTy *>(this),
+ TrailingObjectsBase::OverloadToken<T>());
+ }
+
+ /// Returns a pointer to the trailing object array of the given type
+ /// (which must be one of those specified in the class template). The
+ /// array may have zero or more elements in it.
+ template <typename T> T *getTrailingObjects() {
+ verifyTrailingObjectsAssertions();
+ // Forwards to an impl function with overloads, since member
+ // function templates can't be specialized.
+ return this->getTrailingObjectsImpl(
+ static_cast<BaseTy *>(this), TrailingObjectsBase::OverloadToken<T>());
+ }
+
+ /// Returns the size of the trailing data, if an object were
+ /// allocated with the given counts (The counts are in the same order
+ /// as the template arguments). This does not include the size of the
+ /// base object. The template arguments must be the same as those
+ /// used in the class; they are supplied here redundantly only so
+ /// that it's clear what the counts are counting in callers.
+ template <typename... Tys>
+ static LLVM_CONSTEXPR typename std::enable_if<
+ std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
+ additionalSizeToAlloc(
+ typename trailing_objects_internal::ExtractSecondType<
+ TrailingTys, size_t>::type... Counts) {
+ return ParentType::additionalSizeToAllocImpl(0, Counts...);
+ }
+
+ /// Returns the total size of an object if it were allocated with the
+ /// given trailing object counts. This is the same as
+ /// additionalSizeToAlloc, except it *does* include the size of the base
+ /// object.
+ template <typename... Tys>
+ static LLVM_CONSTEXPR typename std::enable_if<
+ std::is_same<Foo<TrailingTys...>, Foo<Tys...>>::value, size_t>::type
+ totalSizeToAlloc(typename trailing_objects_internal::ExtractSecondType<
+ TrailingTys, size_t>::type... Counts) {
+ return sizeof(BaseTy) + ParentType::additionalSizeToAllocImpl(0, Counts...);
+ }
+};
+
+} // end namespace llvm
+
+#endif
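
The file comment's sample only declares the class; the sketch below
(hypothetical names) adds the allocation side, using totalSizeToAlloc to size
the single buffer and getTrailingObjects to reach the appended arrays:

    #include "llvm/Support/TrailingObjects.h"
    #include <new>

    using namespace llvm;

    class VarLengthObj final
        : private TrailingObjects<VarLengthObj, int, double> {
      friend TrailingObjects;

      unsigned NumInts, NumDoubles;
      // One numTrailingObjects overload per trailing type except the last.
      size_t numTrailingObjects(OverloadToken<int>) const { return NumInts; }

      VarLengthObj(unsigned NI, unsigned ND) : NumInts(NI), NumDoubles(ND) {}

    public:
      static VarLengthObj *create(unsigned NI, unsigned ND) {
        // Size the allocation to cover the object plus both trailing arrays.
        void *Mem = ::operator new(totalSizeToAlloc<int, double>(NI, ND));
        return new (Mem) VarLengthObj(NI, ND);
      }
      int *ints() { return getTrailingObjects<int>(); }
      double *doubles() { return getTrailingObjects<double>(); }
    };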
diff --git a/contrib/llvm/include/llvm/Support/Unicode.h b/contrib/llvm/include/llvm/Support/Unicode.h
new file mode 100644
index 0000000..f668a5b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Unicode.h
@@ -0,0 +1,67 @@
+//===- llvm/Support/Unicode.h - Unicode character properties -*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that allow querying certain properties of Unicode
+// characters.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_UNICODE_H
+#define LLVM_SUPPORT_UNICODE_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+namespace sys {
+namespace unicode {
+
+enum ColumnWidthErrors {
+ ErrorInvalidUTF8 = -2,
+ ErrorNonPrintableCharacter = -1
+};
+
+/// Determines if a character is likely to be displayed correctly on the
+/// terminal. The exact behavior would have to depend on the specific
+/// terminal, so we define semantics that should be suitable for the generic
+/// case of a terminal capable of outputting Unicode characters.
+///
+/// All characters from the Unicode code point range are considered printable
+/// except for:
+/// * C0 and C1 control character ranges;
+/// * default ignorable code points as per 5.21 of
+/// http://www.unicode.org/versions/Unicode6.2.0/UnicodeStandard-6.2.pdf
+/// except for U+00AD SOFT HYPHEN, as it's actually displayed on most
+/// terminals;
+/// * format characters (category = Cf);
+/// * surrogates (category = Cs);
+/// * unassigned characters (category = Cn).
+/// \return true if the character is considered printable.
+bool isPrintable(int UCS);
+
+/// Gets the number of positions the UTF8-encoded \p Text is likely to occupy
+/// when output on a terminal ("character width"). This depends on the
+/// implementation of the terminal, and there's no standard definition of
+/// character width.
+///
+/// The implementation defines it in a way that is expected to be compatible
+/// with a generic Unicode-capable terminal.
+///
+/// \return Character width:
+/// * ErrorNonPrintableCharacter (-1) if \p Text contains non-printable
+/// characters (as identified by isPrintable);
+/// * 0 for each non-spacing and enclosing combining mark;
+/// * 2 for each CJK character excluding halfwidth forms;
+/// * 1 for each of the remaining characters.
+int columnWidthUTF8(StringRef Text);
+
+} // namespace unicode
+} // namespace sys
+} // namespace llvm
+
+#endif
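
A small sketch of how the two error values and the width query compose; the
reporting function is hypothetical:

    #include "llvm/Support/Unicode.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::sys;

    void reportWidth(StringRef Text) {
      int W = unicode::columnWidthUTF8(Text);
      if (W == unicode::ErrorInvalidUTF8)
        errs() << "input is not valid UTF-8\n";
      else if (W == unicode::ErrorNonPrintableCharacter)
        errs() << "input contains a non-printable character\n";
      else
        outs() << "input occupies " << W << " columns\n";
    }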
diff --git a/contrib/llvm/include/llvm/Support/UnicodeCharRanges.h b/contrib/llvm/include/llvm/Support/UnicodeCharRanges.h
new file mode 100644
index 0000000..134698c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/UnicodeCharRanges.h
@@ -0,0 +1,108 @@
+//===--- UnicodeCharRanges.h - Types and functions for character ranges ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_SUPPORT_UNICODECHARRANGES_H
+#define LLVM_SUPPORT_UNICODECHARRANGES_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+
+namespace llvm {
+namespace sys {
+
+#define DEBUG_TYPE "unicode"
+
+/// \brief Represents a closed range of Unicode code points [Lower, Upper].
+struct UnicodeCharRange {
+ uint32_t Lower;
+ uint32_t Upper;
+};
+
+inline bool operator<(uint32_t Value, UnicodeCharRange Range) {
+ return Value < Range.Lower;
+}
+inline bool operator<(UnicodeCharRange Range, uint32_t Value) {
+ return Range.Upper < Value;
+}
+
+/// \brief Holds a reference to an ordered array of UnicodeCharRange and allows
+/// to quickly check if a code point is contained in the set represented by this
+/// array.
+class UnicodeCharSet {
+public:
+ typedef ArrayRef<UnicodeCharRange> CharRanges;
+
+ /// \brief Constructs a UnicodeCharSet instance from an array of
+ /// UnicodeCharRanges.
+ ///
+  /// The array pointed to by \p Ranges must outlive the UnicodeCharSet
+  /// instance and must not change. The array is validated by the constructor,
+  /// so it makes sense to create as few UnicodeCharSet instances per array of
+  /// ranges as possible.
+#ifdef NDEBUG
+
+ // FIXME: This could use constexpr + static_assert. This way we
+ // may get rid of NDEBUG in this header. Unfortunately there are some
+ // problems to get this working with MSVC 2013. Change this when
+ // the support for MSVC 2013 is dropped.
+ LLVM_CONSTEXPR UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {}
+#else
+ UnicodeCharSet(CharRanges Ranges) : Ranges(Ranges) {
+ assert(rangesAreValid());
+ }
+#endif
+
+ /// \brief Returns true if the character set contains the Unicode code point
+ /// \p C.
+ bool contains(uint32_t C) const {
+ return std::binary_search(Ranges.begin(), Ranges.end(), C);
+ }
+
+private:
+ /// \brief Returns true if each of the ranges is a proper closed range
+ /// [min, max], and if the ranges themselves are ordered and non-overlapping.
+ bool rangesAreValid() const {
+ uint32_t Prev = 0;
+ for (CharRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
+ I != E; ++I) {
+ if (I != Ranges.begin() && Prev >= I->Lower) {
+ DEBUG(dbgs() << "Upper bound 0x");
+ DEBUG(dbgs().write_hex(Prev));
+ DEBUG(dbgs() << " should be less than succeeding lower bound 0x");
+ DEBUG(dbgs().write_hex(I->Lower) << "\n");
+ return false;
+ }
+ if (I->Upper < I->Lower) {
+ DEBUG(dbgs() << "Upper bound 0x");
+        DEBUG(dbgs().write_hex(I->Upper));
+        DEBUG(dbgs() << " should not be less than lower bound 0x");
+        DEBUG(dbgs().write_hex(I->Lower) << "\n");
+ return false;
+ }
+ Prev = I->Upper;
+ }
+
+ return true;
+ }
+
+ const CharRanges Ranges;
+};
+
+#undef DEBUG_TYPE // "unicode"
+
+} // namespace sys
+} // namespace llvm
+
+
+#endif // LLVM_SUPPORT_UNICODECHARRANGES_H
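
A sketch of the intended setup: a static, ordered, non-overlapping range
table, wrapped once in a UnicodeCharSet (the constructor validates it in
asserts builds). The range contents here are illustrative:

    #include "llvm/Support/UnicodeCharRanges.h"

    using namespace llvm;

    // Closed ranges [Lower, Upper], ordered and non-overlapping.
    static const sys::UnicodeCharRange DigitRanges[] = {
        {0x0030, 0x0039},   // ASCII digits
        {0x0660, 0x0669},   // Arabic-Indic digits
    };

    bool isDigitLike(uint32_t CodePoint) {
      // One long-lived set per range array, as the comment above advises.
      static const sys::UnicodeCharSet Digits(DigitRanges);
      return Digits.contains(CodePoint);
    }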
diff --git a/contrib/llvm/include/llvm/Support/UniqueLock.h b/contrib/llvm/include/llvm/Support/UniqueLock.h
new file mode 100644
index 0000000..529284d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/UniqueLock.h
@@ -0,0 +1,67 @@
+//===-- Support/UniqueLock.h - Acquire/Release Mutex In Scope ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a guard for a block of code that ensures a Mutex is locked
+// upon construction and released upon destruction.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_UNIQUE_LOCK_H
+#define LLVM_SUPPORT_UNIQUE_LOCK_H
+
+#include "llvm/Support/Mutex.h"
+
+namespace llvm {
+ /// A pared-down imitation of std::unique_lock from C++11. Contrary to the
+ /// name, it's really more of a wrapper for a lock. It may or may not have
+ /// an associated mutex, which is guaranteed to be locked upon creation
+ /// and unlocked after destruction. unique_lock can also unlock the mutex
+ /// and re-lock it freely during its lifetime.
+ /// @brief Guard a section of code with a mutex.
+ template<typename MutexT>
+ class unique_lock {
+ MutexT *M;
+ bool locked;
+
+ unique_lock(const unique_lock &) = delete;
+ void operator=(const unique_lock &) = delete;
+ public:
+ unique_lock() : M(nullptr), locked(false) {}
+ explicit unique_lock(MutexT &m) : M(&m), locked(true) { M->lock(); }
+
+ void operator=(unique_lock &&o) {
+ if (owns_lock())
+ M->unlock();
+ M = o.M;
+ locked = o.locked;
+ o.M = nullptr;
+ o.locked = false;
+ }
+
+ ~unique_lock() { if (owns_lock()) M->unlock(); }
+
+ void lock() {
+ assert(!locked && "mutex already locked!");
+ assert(M && "no associated mutex!");
+ M->lock();
+ locked = true;
+ }
+
+ void unlock() {
+ assert(locked && "unlocking a mutex that isn't locked!");
+ assert(M && "no associated mutex!");
+ M->unlock();
+ locked = false;
+ }
+
+ bool owns_lock() { return locked; }
+ };
+}
+
+#endif // LLVM_SUPPORT_UNIQUE_LOCK_H
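
A sketch of the unlock/re-lock flexibility that distinguishes unique_lock
from a plain scope guard, assuming sys::Mutex exposes the lock()/unlock()
interface this template calls:

    #include "llvm/Support/Mutex.h"
    #include "llvm/Support/UniqueLock.h"

    using namespace llvm;

    static sys::Mutex StateLock;

    void updateState() {
      unique_lock<sys::Mutex> Guard(StateLock);  // locked on construction
      // ... read shared state ...
      Guard.unlock();                 // drop the lock for the slow part
      // ... expensive work needing no lock ...
      Guard.lock();                   // re-acquire before writing back
      // ... write shared state ...
    }                                 // destructor unlocks if still held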
diff --git a/contrib/llvm/include/llvm/Support/Valgrind.h b/contrib/llvm/include/llvm/Support/Valgrind.h
new file mode 100644
index 0000000..12b0dc9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Valgrind.h
@@ -0,0 +1,34 @@
+//===- llvm/Support/Valgrind.h - Communication with Valgrind -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Methods for communicating with a valgrind instance this program is running
+// under. These are all no-ops unless LLVM was configured on a system with the
+// valgrind headers installed and valgrind is controlling this process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_VALGRIND_H
+#define LLVM_SUPPORT_VALGRIND_H
+
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Compiler.h"
+#include <stddef.h>
+
+namespace llvm {
+namespace sys {
+ // True if Valgrind is controlling this process.
+ bool RunningOnValgrind();
+
+ // Discard valgrind's translation of code in the range [Addr .. Addr + Len).
+ // Otherwise valgrind may continue to execute the old version of the code.
+ void ValgrindDiscardTranslations(const void *Addr, size_t Len);
+}
+}
+
+#endif
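
A sketch of the typical JIT-style call site; both calls degrade to no-ops
outside Valgrind, so no conditional compilation is needed. The notification
function is hypothetical:

    #include "llvm/Support/Valgrind.h"

    // After rewriting already-executed code in place, ask Valgrind to drop
    // its cached translation so the new bytes actually run.
    void notifyCodeChanged(const void *Addr, size_t Len) {
      if (llvm::sys::RunningOnValgrind())
        llvm::sys::ValgrindDiscardTranslations(Addr, Len);
    }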
diff --git a/contrib/llvm/include/llvm/Support/Watchdog.h b/contrib/llvm/include/llvm/Support/Watchdog.h
new file mode 100644
index 0000000..01e1d92
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Watchdog.h
@@ -0,0 +1,38 @@
+//===--- Watchdog.h - Watchdog timer ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the llvm::sys::Watchdog class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WATCHDOG_H
+#define LLVM_SUPPORT_WATCHDOG_H
+
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ namespace sys {
+
+ /// This class provides an abstraction for a timeout around an operation
+ /// that must complete in a given amount of time. Failure to complete before
+ /// the timeout is an unrecoverable situation and no mechanisms to attempt
+ /// to handle it are provided.
+ class Watchdog {
+ public:
+ Watchdog(unsigned int seconds);
+ ~Watchdog();
+ private:
+ // Noncopyable.
+ Watchdog(const Watchdog &other) = delete;
+ Watchdog &operator=(const Watchdog &other) = delete;
+ };
+ }
+}
+
+#endif
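
A sketch of the RAII pattern the class implies: arm the watchdog for the
duration of one operation. The timeout value and the guarded operation are
hypothetical:

    #include "llvm/Support/Watchdog.h"

    void runBoundedOperation() {
      llvm::sys::Watchdog Guard(300);  // armed: fatal if we wedge for 5 min
      // ... hypothetical operation that must finish in time ...
    }                                  // destructor disarms the watchdog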
diff --git a/contrib/llvm/include/llvm/Support/Win64EH.h b/contrib/llvm/include/llvm/Support/Win64EH.h
new file mode 100644
index 0000000..f6c4927
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/Win64EH.h
@@ -0,0 +1,147 @@
+//===-- llvm/Support/Win64EH.h ---Win64 EH Constants-------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains constants and structures used for implementing
+// exception handling on Win64 platforms. For more information, see
+// http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WIN64EH_H
+#define LLVM_SUPPORT_WIN64EH_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace Win64EH {
+
+/// UnwindOpcodes - Enumeration whose values specify a single operation in
+/// the prolog of a function.
+enum UnwindOpcodes {
+ UOP_PushNonVol = 0,
+ UOP_AllocLarge,
+ UOP_AllocSmall,
+ UOP_SetFPReg,
+ UOP_SaveNonVol,
+ UOP_SaveNonVolBig,
+ UOP_SaveXMM128 = 8,
+ UOP_SaveXMM128Big,
+ UOP_PushMachFrame
+};
+
+/// UnwindCode - This union describes a single operation in a function prolog,
+/// or part thereof.
+union UnwindCode {
+ struct {
+ uint8_t CodeOffset;
+ uint8_t UnwindOpAndOpInfo;
+ } u;
+ support::ulittle16_t FrameOffset;
+
+ uint8_t getUnwindOp() const {
+ return u.UnwindOpAndOpInfo & 0x0F;
+ }
+ uint8_t getOpInfo() const {
+ return (u.UnwindOpAndOpInfo >> 4) & 0x0F;
+ }
+};
+
+enum {
+ /// UNW_ExceptionHandler - Specifies that this function has an exception
+ /// handler.
+ UNW_ExceptionHandler = 0x01,
+ /// UNW_TerminateHandler - Specifies that this function has a termination
+ /// handler.
+ UNW_TerminateHandler = 0x02,
+ /// UNW_ChainInfo - Specifies that this UnwindInfo structure is chained to
+ /// another one.
+ UNW_ChainInfo = 0x04
+};
+
+/// RuntimeFunction - An entry in the table of functions with unwind info.
+struct RuntimeFunction {
+ support::ulittle32_t StartAddress;
+ support::ulittle32_t EndAddress;
+ support::ulittle32_t UnwindInfoOffset;
+};
+
+/// UnwindInfo - An entry in the exception table.
+struct UnwindInfo {
+ uint8_t VersionAndFlags;
+ uint8_t PrologSize;
+ uint8_t NumCodes;
+ uint8_t FrameRegisterAndOffset;
+ UnwindCode UnwindCodes[1];
+
+ uint8_t getVersion() const {
+ return VersionAndFlags & 0x07;
+ }
+ uint8_t getFlags() const {
+ return (VersionAndFlags >> 3) & 0x1f;
+ }
+ uint8_t getFrameRegister() const {
+ return FrameRegisterAndOffset & 0x0f;
+ }
+ uint8_t getFrameOffset() const {
+ return (FrameRegisterAndOffset >> 4) & 0x0f;
+ }
+
+ // The data after unwindCodes depends on flags.
+ // If UNW_ExceptionHandler or UNW_TerminateHandler is set then follows
+ // the address of the language-specific exception handler.
+ // If UNW_ChainInfo is set then follows a RuntimeFunction which defines
+ // the chained unwind info.
+ // For more information please see MSDN at:
+ // http://msdn.microsoft.com/en-us/library/ddssxxy8.aspx
+
+ /// \brief Return pointer to language specific data part of UnwindInfo.
+ void *getLanguageSpecificData() {
+ return reinterpret_cast<void *>(&UnwindCodes[(NumCodes+1) & ~1]);
+ }
+
+ /// \brief Return pointer to language specific data part of UnwindInfo.
+ const void *getLanguageSpecificData() const {
+ return reinterpret_cast<const void *>(&UnwindCodes[(NumCodes + 1) & ~1]);
+ }
+
+ /// \brief Return image-relative offset of language-specific exception handler.
+ uint32_t getLanguageSpecificHandlerOffset() const {
+ return *reinterpret_cast<const support::ulittle32_t *>(
+ getLanguageSpecificData());
+ }
+
+ /// \brief Set image-relative offset of language-specific exception handler.
+ void setLanguageSpecificHandlerOffset(uint32_t offset) {
+ *reinterpret_cast<support::ulittle32_t *>(getLanguageSpecificData()) =
+ offset;
+ }
+
+ /// \brief Return pointer to exception-specific data.
+ void *getExceptionData() {
+ return reinterpret_cast<void *>(reinterpret_cast<uint32_t *>(
+ getLanguageSpecificData())+1);
+ }
+
+ /// \brief Return pointer to chained unwind info.
+ RuntimeFunction *getChainedFunctionEntry() {
+ return reinterpret_cast<RuntimeFunction *>(getLanguageSpecificData());
+ }
+
+ /// \brief Return pointer to chained unwind info.
+ const RuntimeFunction *getChainedFunctionEntry() const {
+ return reinterpret_cast<const RuntimeFunction *>(getLanguageSpecificData());
+ }
+};
+
+
+} // End of namespace Win64EH
+} // End of namespace llvm
+
+#endif
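
A sketch of walking an in-memory UnwindInfo with the accessors above. Note
that several opcodes occupy more than one UnwindCode slot, so a real decoder
must advance by each opcode's slot count; this dump treats every slot
uniformly for brevity:

    #include "llvm/Support/Win64EH.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::Win64EH;

    void dumpUnwindInfo(const UnwindInfo &UI) {
      outs() << "version " << unsigned(UI.getVersion()) << ", prolog "
             << unsigned(UI.PrologSize) << " bytes\n";

      for (unsigned I = 0; I != UI.NumCodes; ++I) {
        const UnwindCode &UC = UI.UnwindCodes[I];
        outs() << "  op " << unsigned(UC.getUnwindOp()) << " info "
               << unsigned(UC.getOpInfo()) << " at prolog offset "
               << unsigned(UC.u.CodeOffset) << "\n";
      }

      if (UI.getFlags() & UNW_ChainInfo) {
        const RuntimeFunction *Chain = UI.getChainedFunctionEntry();
        outs() << "  chained, start address 0x";
        outs().write_hex(Chain->StartAddress) << "\n";
      } else if (UI.getFlags() & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
        outs() << "  handler offset 0x";
        outs().write_hex(UI.getLanguageSpecificHandlerOffset()) << "\n";
      }
    }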
diff --git a/contrib/llvm/include/llvm/Support/WindowsError.h b/contrib/llvm/include/llvm/Support/WindowsError.h
new file mode 100644
index 0000000..63bfe59
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/WindowsError.h
@@ -0,0 +1,19 @@
+//===-- WindowsError.h - Support for mapping windows errors to posix-------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_WINDOWSERROR_H
+#define LLVM_SUPPORT_WINDOWSERROR_H
+
+#include <system_error>
+
+namespace llvm {
+std::error_code mapWindowsError(unsigned EV);
+}
+
+#endif
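
A sketch of the intended use: normalize a raw Win32 error number into a
portable std::error_code so callers can compare against std::errc. The
specific mapping shown (ERROR_ACCESS_DENIED, numerically 5, to
permission_denied) is an assumption about the implementation, not guaranteed
by this header:

    #include "llvm/Support/WindowsError.h"

    // Win32's ERROR_ACCESS_DENIED is numerically 5.
    bool isPermissionError(unsigned Win32Err) {
      std::error_code EC = llvm::mapWindowsError(Win32Err);
      return EC == std::errc::permission_denied;
    }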
diff --git a/contrib/llvm/include/llvm/Support/YAMLParser.h b/contrib/llvm/include/llvm/Support/YAMLParser.h
new file mode 100644
index 0000000..b056ab6
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/YAMLParser.h
@@ -0,0 +1,588 @@
+//===--- YAMLParser.h - Simple YAML parser --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a YAML 1.2 parser.
+//
+// See http://www.yaml.org/spec/1.2/spec.html for the full standard.
+//
+// This currently does not implement the following:
+// * Multi-line literal folding.
+// * Tag resolution.
+// * UTF-16.
+// * BOMs anywhere other than the first Unicode scalar value in the file.
+//
+// The most important class here is Stream. This represents a YAML stream with
+// 0, 1, or many documents.
+//
+// SourceMgr sm;
+// StringRef input = getInput();
+// yaml::Stream stream(input, sm);
+//
+// for (yaml::document_iterator di = stream.begin(), de = stream.end();
+// di != de; ++di) {
+// yaml::Node *n = di->getRoot();
+// if (n) {
+// // Do something with n...
+// } else
+// break;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_YAMLPARSER_H
+#define LLVM_SUPPORT_YAMLPARSER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/SMLoc.h"
+#include <limits>
+#include <map>
+#include <utility>
+
+namespace llvm {
+class MemoryBufferRef;
+class SourceMgr;
+class Twine;
+class raw_ostream;
+
+namespace yaml {
+
+class document_iterator;
+class Document;
+class Node;
+class Scanner;
+struct Token;
+
+/// \brief Dump all the tokens in this stream to OS.
+/// \returns true if there was an error, false otherwise.
+bool dumpTokens(StringRef Input, raw_ostream &);
+
+/// \brief Scans all tokens in input without outputting anything. This is used
+/// for benchmarking the tokenizer.
+/// \returns true if there was an error, false otherwise.
+bool scanTokens(StringRef Input);
+
+/// \brief Escape \a Input for a double quoted scalar.
+std::string escape(StringRef Input);
+
+/// \brief This class represents a YAML stream potentially containing multiple
+/// documents.
+class Stream {
+public:
+ /// \brief This keeps a reference to the string referenced by \p Input.
+ Stream(StringRef Input, SourceMgr &, bool ShowColors = true);
+
+ Stream(MemoryBufferRef InputBuffer, SourceMgr &, bool ShowColors = true);
+ ~Stream();
+
+ document_iterator begin();
+ document_iterator end();
+ void skip();
+ bool failed();
+ bool validate() {
+ skip();
+ return !failed();
+ }
+
+ void printError(Node *N, const Twine &Msg);
+
+private:
+ std::unique_ptr<Scanner> scanner;
+ std::unique_ptr<Document> CurrentDoc;
+
+ friend class Document;
+};
+
+/// \brief Abstract base class for all Nodes.
+class Node {
+ virtual void anchor();
+
+public:
+ enum NodeKind {
+ NK_Null,
+ NK_Scalar,
+ NK_BlockScalar,
+ NK_KeyValue,
+ NK_Mapping,
+ NK_Sequence,
+ NK_Alias
+ };
+
+ Node(unsigned int Type, std::unique_ptr<Document> &, StringRef Anchor,
+ StringRef Tag);
+
+ /// \brief Get the value of the anchor attached to this node. If it does not
+ /// have one, getAnchor().size() will be 0.
+ StringRef getAnchor() const { return Anchor; }
+
+ /// \brief Get the tag as it was written in the document. This does not
+ /// perform tag resolution.
+ StringRef getRawTag() const { return Tag; }
+
+  /// \brief Get the verbatim tag for a given Node. This performs tag
+  /// resolution and substitution.
+ std::string getVerbatimTag() const;
+
+ SMRange getSourceRange() const { return SourceRange; }
+ void setSourceRange(SMRange SR) { SourceRange = SR; }
+
+ // These functions forward to Document and Scanner.
+ Token &peekNext();
+ Token getNext();
+ Node *parseBlockNode();
+ BumpPtrAllocator &getAllocator();
+ void setError(const Twine &Message, Token &Location) const;
+ bool failed() const;
+
+ virtual void skip() {}
+
+ unsigned int getType() const { return TypeID; }
+
+ void *operator new(size_t Size, BumpPtrAllocator &Alloc,
+ size_t Alignment = 16) LLVM_NOEXCEPT {
+ return Alloc.Allocate(Size, Alignment);
+ }
+
+ void operator delete(void *Ptr, BumpPtrAllocator &Alloc,
+ size_t Size) LLVM_NOEXCEPT {
+ Alloc.Deallocate(Ptr, Size);
+ }
+
+protected:
+ std::unique_ptr<Document> &Doc;
+ SMRange SourceRange;
+
+ void operator delete(void *) LLVM_NOEXCEPT = delete;
+
+ ~Node() = default;
+
+private:
+ unsigned int TypeID;
+ StringRef Anchor;
+ /// \brief The tag as typed in the document.
+ StringRef Tag;
+};
+
+/// \brief A null value.
+///
+/// Example:
+/// !!null null
+class NullNode final : public Node {
+ void anchor() override;
+
+public:
+ NullNode(std::unique_ptr<Document> &D)
+ : Node(NK_Null, D, StringRef(), StringRef()) {}
+
+ static inline bool classof(const Node *N) { return N->getType() == NK_Null; }
+};
+
+/// \brief A scalar node is an opaque datum that can be presented as a
+/// series of zero or more Unicode scalar values.
+///
+/// Example:
+/// Adena
+class ScalarNode final : public Node {
+ void anchor() override;
+
+public:
+ ScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+ StringRef Val)
+ : Node(NK_Scalar, D, Anchor, Tag), Value(Val) {
+ SMLoc Start = SMLoc::getFromPointer(Val.begin());
+ SMLoc End = SMLoc::getFromPointer(Val.end());
+ SourceRange = SMRange(Start, End);
+ }
+
+  // Return Value without any escaping or folding or other fun YAML stuff.
+  // These are the exact bytes contained in the file (after conversion to
+  // UTF-8).
+ StringRef getRawValue() const { return Value; }
+
+ /// \brief Gets the value of this node as a StringRef.
+ ///
+ /// \param Storage is used to store the content of the returned StringRef iff
+ /// it requires any modification from how it appeared in the source.
+ /// This happens with escaped characters and multi-line literals.
+ StringRef getValue(SmallVectorImpl<char> &Storage) const;
+
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Scalar;
+ }
+
+private:
+ StringRef Value;
+
+ StringRef unescapeDoubleQuoted(StringRef UnquotedValue,
+ StringRef::size_type Start,
+ SmallVectorImpl<char> &Storage) const;
+};
+
+/// \brief A block scalar node is an opaque datum that can be presented as a
+/// series of zero or more Unicode scalar values.
+///
+/// Example:
+/// |
+/// Hello
+/// World
+class BlockScalarNode final : public Node {
+ void anchor() override;
+
+public:
+ BlockScalarNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+ StringRef Value, StringRef RawVal)
+ : Node(NK_BlockScalar, D, Anchor, Tag), Value(Value) {
+ SMLoc Start = SMLoc::getFromPointer(RawVal.begin());
+ SMLoc End = SMLoc::getFromPointer(RawVal.end());
+ SourceRange = SMRange(Start, End);
+ }
+
+ /// \brief Gets the value of this node as a StringRef.
+ StringRef getValue() const { return Value; }
+
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_BlockScalar;
+ }
+
+private:
+ StringRef Value;
+};
+
+/// \brief A key and value pair. While not technically a Node under the YAML
+/// representation graph, it is easier to treat them this way.
+///
+/// TODO: Consider making this not a child of Node.
+///
+/// Example:
+/// Section: .text
+class KeyValueNode final : public Node {
+ void anchor() override;
+
+public:
+ KeyValueNode(std::unique_ptr<Document> &D)
+ : Node(NK_KeyValue, D, StringRef(), StringRef()), Key(nullptr),
+ Value(nullptr) {}
+
+ /// \brief Parse and return the key.
+ ///
+ /// This may be called multiple times.
+ ///
+ /// \returns The key, or nullptr if failed() == true.
+ Node *getKey();
+
+ /// \brief Parse and return the value.
+ ///
+ /// This may be called multiple times.
+ ///
+ /// \returns The value, or nullptr if failed() == true.
+ Node *getValue();
+
+ void skip() override {
+ getKey()->skip();
+ if (Node *Val = getValue())
+ Val->skip();
+ }
+
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_KeyValue;
+ }
+
+private:
+ Node *Key;
+ Node *Value;
+};
+
+/// \brief This is an iterator abstraction over YAML collections shared by both
+/// sequences and maps.
+///
+/// BaseT must have a ValueT* member named CurrentEntry and a member function
+/// increment() which must set CurrentEntry to 0 to create an end iterator.
+template <class BaseT, class ValueT>
+class basic_collection_iterator
+ : public std::iterator<std::forward_iterator_tag, ValueT> {
+public:
+ basic_collection_iterator() : Base(nullptr) {}
+ basic_collection_iterator(BaseT *B) : Base(B) {}
+
+ ValueT *operator->() const {
+ assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+ return Base->CurrentEntry;
+ }
+
+ ValueT &operator*() const {
+ assert(Base && Base->CurrentEntry &&
+ "Attempted to dereference end iterator!");
+ return *Base->CurrentEntry;
+ }
+
+ operator ValueT *() const {
+ assert(Base && Base->CurrentEntry && "Attempted to access end iterator!");
+ return Base->CurrentEntry;
+ }
+
+ bool operator!=(const basic_collection_iterator &Other) const {
+ if (Base != Other.Base)
+ return true;
+ return (Base && Other.Base) &&
+ Base->CurrentEntry != Other.Base->CurrentEntry;
+ }
+
+ basic_collection_iterator &operator++() {
+ assert(Base && "Attempted to advance iterator past end!");
+ Base->increment();
+ // Create an end iterator.
+ if (!Base->CurrentEntry)
+ Base = nullptr;
+ return *this;
+ }
+
+private:
+ BaseT *Base;
+};
+
+// The following two templates are used for both MappingNode and SequenceNode.
+template <class CollectionType>
+typename CollectionType::iterator begin(CollectionType &C) {
+ assert(C.IsAtBeginning && "You may only iterate over a collection once!");
+ C.IsAtBeginning = false;
+ typename CollectionType::iterator ret(&C);
+ ++ret;
+ return ret;
+}
+
+template <class CollectionType> void skip(CollectionType &C) {
+ // TODO: support skipping from the middle of a parsed collection ;/
+ assert((C.IsAtBeginning || C.IsAtEnd) && "Cannot skip mid parse!");
+ if (C.IsAtBeginning)
+ for (typename CollectionType::iterator i = begin(C), e = C.end(); i != e;
+ ++i)
+ i->skip();
+}
+
+/// \brief Represents a YAML map created from either a block map or a flow map.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+/// Name: _main
+/// Scope: Global
+class MappingNode final : public Node {
+ void anchor() override;
+
+public:
+ enum MappingType {
+ MT_Block,
+ MT_Flow,
+ MT_Inline ///< An inline mapping node is used for "[key: value]".
+ };
+
+ MappingNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+ MappingType MT)
+ : Node(NK_Mapping, D, Anchor, Tag), Type(MT), IsAtBeginning(true),
+ IsAtEnd(false), CurrentEntry(nullptr) {}
+
+ friend class basic_collection_iterator<MappingNode, KeyValueNode>;
+ typedef basic_collection_iterator<MappingNode, KeyValueNode> iterator;
+ template <class T> friend typename T::iterator yaml::begin(T &);
+ template <class T> friend void yaml::skip(T &);
+
+ iterator begin() { return yaml::begin(*this); }
+
+ iterator end() { return iterator(); }
+
+ void skip() override { yaml::skip(*this); }
+
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Mapping;
+ }
+
+private:
+ MappingType Type;
+ bool IsAtBeginning;
+ bool IsAtEnd;
+ KeyValueNode *CurrentEntry;
+
+ void increment();
+};
+
+/// \brief Represents a YAML sequence created from either a block sequence or
+/// a flow sequence.
+///
+/// This parses the YAML stream as increment() is called.
+///
+/// Example:
+/// - Hello
+/// - World
+class SequenceNode final : public Node {
+ void anchor() override;
+
+public:
+ enum SequenceType {
+ ST_Block,
+ ST_Flow,
+ // Used for:
+ //
+ // key:
+ // - val1
+ // - val2
+ //
+ // In this case a BlockMappingEntry and BlockEnd are not created.
+ ST_Indentless
+ };
+
+ SequenceNode(std::unique_ptr<Document> &D, StringRef Anchor, StringRef Tag,
+ SequenceType ST)
+ : Node(NK_Sequence, D, Anchor, Tag), SeqType(ST), IsAtBeginning(true),
+ IsAtEnd(false),
+ WasPreviousTokenFlowEntry(true), // Start with an imaginary ','.
+ CurrentEntry(nullptr) {}
+
+ friend class basic_collection_iterator<SequenceNode, Node>;
+ typedef basic_collection_iterator<SequenceNode, Node> iterator;
+ template <class T> friend typename T::iterator yaml::begin(T &);
+ template <class T> friend void yaml::skip(T &);
+
+ void increment();
+
+ iterator begin() { return yaml::begin(*this); }
+
+ iterator end() { return iterator(); }
+
+ void skip() override { yaml::skip(*this); }
+
+ static inline bool classof(const Node *N) {
+ return N->getType() == NK_Sequence;
+ }
+
+private:
+ SequenceType SeqType;
+ bool IsAtBeginning;
+ bool IsAtEnd;
+ bool WasPreviousTokenFlowEntry;
+ Node *CurrentEntry;
+};
+
+/// \brief Represents an alias to a Node with an anchor.
+///
+/// Example:
+/// *AnchorName
+class AliasNode final : public Node {
+ void anchor() override;
+
+public:
+ AliasNode(std::unique_ptr<Document> &D, StringRef Val)
+ : Node(NK_Alias, D, StringRef(), StringRef()), Name(Val) {}
+
+ StringRef getName() const { return Name; }
+ Node *getTarget();
+
+ static inline bool classof(const Node *N) { return N->getType() == NK_Alias; }
+
+private:
+ StringRef Name;
+};
+
+/// \brief A YAML Stream is a sequence of Documents. A document contains a root
+/// node.
+class Document {
+public:
+ /// \brief Root for parsing a node. Returns a single node.
+ Node *parseBlockNode();
+
+ Document(Stream &ParentStream);
+
+ /// \brief Finish parsing the current document and return true if there are
+ /// more. Return false otherwise.
+ bool skip();
+
+ /// \brief Parse and return the root level node.
+ Node *getRoot() {
+ if (Root)
+ return Root;
+ return Root = parseBlockNode();
+ }
+
+ const std::map<StringRef, StringRef> &getTagMap() const { return TagMap; }
+
+private:
+ friend class Node;
+ friend class document_iterator;
+
+ /// \brief Stream to read tokens from.
+ Stream &stream;
+
+ /// \brief Used to allocate nodes from. All are destroyed without calling
+ /// their destructor when the document is destroyed.
+ BumpPtrAllocator NodeAllocator;
+
+ /// \brief The root node. Used to support skipping a partially parsed
+ /// document.
+ Node *Root;
+
+ /// \brief Maps tag prefixes to their expansion.
+ std::map<StringRef, StringRef> TagMap;
+
+ Token &peekNext();
+ Token getNext();
+ void setError(const Twine &Message, Token &Location) const;
+ bool failed() const;
+
+ /// \brief Parse %BLAH directives and return true if any were encountered.
+ bool parseDirectives();
+
+ /// \brief Parse %YAML
+ void parseYAMLDirective();
+
+ /// \brief Parse %TAG
+ void parseTAGDirective();
+
+ /// \brief Consume the next token and error if it is not \a TK.
+ bool expectToken(int TK);
+};
+
+/// \brief Iterator abstraction for Documents over a Stream.
+class document_iterator {
+public:
+ document_iterator() : Doc(nullptr) {}
+ document_iterator(std::unique_ptr<Document> &D) : Doc(&D) {}
+
+ bool operator==(const document_iterator &Other) {
+ if (isAtEnd() || Other.isAtEnd())
+ return isAtEnd() && Other.isAtEnd();
+
+ return Doc == Other.Doc;
+ }
+ bool operator!=(const document_iterator &Other) { return !(*this == Other); }
+
+ document_iterator operator++() {
+ assert(Doc && "incrementing iterator past the end.");
+ if (!(*Doc)->skip()) {
+ Doc->reset(nullptr);
+ } else {
+ Stream &S = (*Doc)->stream;
+ Doc->reset(new Document(S));
+ }
+ return *this;
+ }
+
+ Document &operator*() { return *Doc->get(); }
+
+ std::unique_ptr<Document> &operator->() { return *Doc; }
+
+private:
+ bool isAtEnd() const { return !Doc || !*Doc; }
+
+ std::unique_ptr<Document> *Doc;
+};
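+
+// A typical way to walk all documents in a stream (a sketch; Stream is
+// declared earlier in this header, and the names below are hypothetical):
+//
+//   SourceMgr SM;
+//   yaml::Stream YS(InputBuffer, SM);
+//   for (yaml::Document &D : YS)
+//     if (Node *Root = D.getRoot())
+//       Root->skip();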
+
+} // End namespace yaml.
+
+} // End namespace llvm.
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/YAMLTraits.h b/contrib/llvm/include/llvm/Support/YAMLTraits.h
new file mode 100644
index 0000000..fb2badf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/YAMLTraits.h
@@ -0,0 +1,1394 @@
+//===- llvm/Support/YAMLTraits.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Linker
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_YAMLTRAITS_H
+#define LLVM_SUPPORT_YAMLTRAITS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/raw_ostream.h"
+#include <system_error>
+
+namespace llvm {
+namespace yaml {
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a YAML mapping. For example:
+///
+/// struct MappingTraits<MyStruct> {
+/// static void mapping(IO &io, MyStruct &s) {
+/// io.mapRequired("name", s.name);
+/// io.mapRequired("size", s.size);
+/// io.mapOptional("age", s.age);
+/// }
+/// };
+template<class T>
+struct MappingTraits {
+ // Must provide:
+ // static void mapping(IO &io, T &fields);
+ // Optionally may provide:
+ // static StringRef validate(IO &io, T &fields);
+ //
+ // The optional flow flag will cause generated YAML to use a flow mapping
+ // (e.g. { a: 0, b: 1 }):
+ // static const bool flow = true;
+};
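+
+// With a MappingTraits<MyStruct> specialization like the sketch above
+// (MyStruct is hypothetical and its fields are assumed to have scalar
+// traits), the Input and Output classes declared later in this header can
+// stream the type:
+//
+//   MyStruct S;
+//   Input Yin("{name: x, size: 1}"); // flow-mapping document
+//   Yin >> S;
+//   if (Yin.error()) { /* handle the parse failure */ }
+//   Output Yout(llvm::outs());
+//   Yout << S;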
+
+/// This class should be specialized by any integral type that converts
+/// to/from a YAML scalar where there is a one-to-one mapping between
+/// in-memory values and a string in YAML. For example:
+///
+/// struct ScalarEnumerationTraits<Colors> {
+/// static void enumeration(IO &io, Colors &value) {
+/// io.enumCase(value, "red", cRed);
+/// io.enumCase(value, "blue", cBlue);
+/// io.enumCase(value, "green", cGreen);
+/// }
+/// };
+template<typename T>
+struct ScalarEnumerationTraits {
+ // Must provide:
+ // static void enumeration(IO &io, T &value);
+};
+
+/// This class should be specialized by any integer type that is a union
+/// of bit values and the YAML representation is a flow sequence of
+/// strings. For example:
+///
+/// struct ScalarBitSetTraits<MyFlags> {
+/// static void bitset(IO &io, MyFlags &value) {
+/// io.bitSetCase(value, "big", flagBig);
+/// io.bitSetCase(value, "flat", flagFlat);
+/// io.bitSetCase(value, "round", flagRound);
+/// }
+/// };
+template<typename T>
+struct ScalarBitSetTraits {
+ // Must provide:
+ // static void bitset(IO &io, T &value);
+};
+
+/// This class should be specialized by any type that requires custom
+/// conversion to/from a YAML scalar. For example:
+///
+/// template<>
+/// struct ScalarTraits<MyType> {
+/// static void output(const MyType &val, void*, llvm::raw_ostream &out) {
+/// // stream out custom formatting
+/// out << llvm::format("%x", val);
+/// }
+/// static StringRef input(StringRef scalar, void*, MyType &value) {
+/// // parse scalar and set `value`
+/// // return empty string on success, or error string
+/// return StringRef();
+/// }
+/// static bool mustQuote(StringRef) { return true; }
+/// };
+template<typename T>
+struct ScalarTraits {
+ // Must provide:
+ //
+ // Function to write the value as a string:
+ //static void output(const T &value, void *ctxt, llvm::raw_ostream &out);
+ //
+ // Function to convert a string to a value. Returns the empty
+ // StringRef on success or an error string if string is malformed:
+ //static StringRef input(StringRef scalar, void *ctxt, T &value);
+ //
+ // Function to determine if the value should be quoted.
+ //static bool mustQuote(StringRef);
+};
+
+
+/// This class should be specialized by any type that requires custom
+/// conversion to/from a YAML literal block scalar. For example:
+///
+/// template <>
+/// struct BlockScalarTraits<MyType> {
+/// static void output(const MyType &Value, void*, llvm::raw_ostream &Out)
+/// {
+/// // stream out custom formatting
+/// Out << Value;
+/// }
+/// static StringRef input(StringRef Scalar, void*, MyType &Value) {
+/// // parse Scalar and set `Value`
+/// // return empty string on success, or error string
+/// return StringRef();
+/// }
+/// };
+template <typename T>
+struct BlockScalarTraits {
+ // Must provide:
+ //
+ // Function to write the value as a string:
+ // static void output(const T &Value, void *ctx, llvm::raw_ostream &Out);
+ //
+ // Function to convert a string to a value. Returns the empty
+ // StringRef on success or an error string if string is malformed:
+ // static StringRef input(StringRef Scalar, void *ctxt, T &Value);
+};
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a YAML sequence. For example:
+///
+/// template<>
+/// struct SequenceTraits< std::vector<MyType> > {
+/// static size_t size(IO &io, std::vector<MyType> &seq) {
+/// return seq.size();
+/// }
+/// static MyType& element(IO &, std::vector<MyType> &seq, size_t index) {
+/// if ( index >= seq.size() )
+/// seq.resize(index+1);
+/// return seq[index];
+/// }
+/// };
+template<typename T>
+struct SequenceTraits {
+ // Must provide:
+ // static size_t size(IO &io, T &seq);
+ // static T::value_type& element(IO &io, T &seq, size_t index);
+ //
+ // The following is optional and will cause generated YAML to use
+ // a flow sequence (e.g. [a,b,c]).
+ // static const bool flow = true;
+};
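+
+// For std::vector, the LLVM_YAML_IS_SEQUENCE_VECTOR and
+// LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR macros at the end of this header
+// generate this specialization automatically.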
+
+/// This class should be specialized by any type that needs to be converted
+/// to/from a list of YAML documents.
+template<typename T>
+struct DocumentListTraits {
+ // Must provide:
+ // static size_t size(IO &io, T &seq);
+ // static T::value_type& element(IO &io, T &seq, size_t index);
+};
+
+// Only used by the compiler if both template types are the same.
+template <typename T, T>
+struct SameType;
+
+// Only used for better diagnostics of missing traits
+template <typename T>
+struct MissingTrait;
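+
+// Each has_* detector below uses the same sizeof-based member-detection
+// pattern. Roughly (Signature, member, and Trait are placeholders):
+//
+//   template <typename U>
+//   static char test(SameType<Signature, &U::member>*); // viable only if
+//                                                       // U::member matches
+//   template <typename U>
+//   static double test(...);                            // fallback otherwise
+//
+//   static bool const value = (sizeof(test<Trait<T>>(nullptr)) == 1);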
+
+// Test if ScalarEnumerationTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarEnumerationTraits
+{
+ typedef void (*Signature_enumeration)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_enumeration, &U::enumeration>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value =
+ (sizeof(test<ScalarEnumerationTraits<T> >(nullptr)) == 1);
+};
+
+// Test if ScalarBitSetTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarBitSetTraits
+{
+ typedef void (*Signature_bitset)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_bitset, &U::bitset>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value = (sizeof(test<ScalarBitSetTraits<T> >(nullptr)) == 1);
+};
+
+// Test if ScalarTraits<T> is defined on type T.
+template <class T>
+struct has_ScalarTraits
+{
+ typedef StringRef (*Signature_input)(StringRef, void*, T&);
+ typedef void (*Signature_output)(const T&, void*, llvm::raw_ostream&);
+ typedef bool (*Signature_mustQuote)(StringRef);
+
+ template <typename U>
+ static char test(SameType<Signature_input, &U::input> *,
+ SameType<Signature_output, &U::output> *,
+ SameType<Signature_mustQuote, &U::mustQuote> *);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value =
+ (sizeof(test<ScalarTraits<T>>(nullptr, nullptr, nullptr)) == 1);
+};
+
+// Test if BlockScalarTraits<T> is defined on type T.
+template <class T>
+struct has_BlockScalarTraits
+{
+ typedef StringRef (*Signature_input)(StringRef, void *, T &);
+ typedef void (*Signature_output)(const T &, void *, llvm::raw_ostream &);
+
+ template <typename U>
+ static char test(SameType<Signature_input, &U::input> *,
+ SameType<Signature_output, &U::output> *);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value =
+ (sizeof(test<BlockScalarTraits<T>>(nullptr, nullptr)) == 1);
+};
+
+// Test if MappingTraits<T> is defined on type T.
+template <class T>
+struct has_MappingTraits
+{
+ typedef void (*Signature_mapping)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_mapping, &U::mapping>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
+};
+
+// Test if MappingTraits<T>::validate() is defined on type T.
+template <class T>
+struct has_MappingValidateTraits
+{
+ typedef StringRef (*Signature_validate)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_validate, &U::validate>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value = (sizeof(test<MappingTraits<T> >(nullptr)) == 1);
+};
+
+// Test if SequenceTraits<T> is defined on type T.
+template <class T>
+struct has_SequenceMethodTraits
+{
+ typedef size_t (*Signature_size)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_size, &U::size>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value = (sizeof(test<SequenceTraits<T> >(nullptr)) == 1);
+};
+
+// has_FlowTraits<int> will cause an error with some compilers because
+// it subclasses int. This wrapper instantiates the real has_FlowTraits
+// only if the template type is a class.
+template <typename T, bool Enabled = std::is_class<T>::value>
+class has_FlowTraits
+{
+public:
+ static const bool value = false;
+};
+
+// Some older gcc compilers don't support straightforward tests for members,
+// so test for the ambiguity caused by the base and derived classes both
+// defining the member: if T also declares `flow`, &C::flow is ambiguous, the
+// first overload of f() is rejected, and the size-2 fallback is chosen, so
+// `value` becomes true.
+template <class T>
+struct has_FlowTraits<T, true>
+{
+ struct Fallback { bool flow; };
+ struct Derived : T, Fallback { };
+
+ template<typename C>
+ static char (&f(SameType<bool Fallback::*, &C::flow>*))[1];
+
+ template<typename C>
+ static char (&f(...))[2];
+
+public:
+ static bool const value = sizeof(f<Derived>(nullptr)) == 2;
+};
+
+// Test if SequenceTraits<T> is defined on type T
+template<typename T>
+struct has_SequenceTraits : public std::integral_constant<bool,
+ has_SequenceMethodTraits<T>::value > { };
+
+// Test if DocumentListTraits<T> is defined on type T
+template <class T>
+struct has_DocumentListTraits
+{
+ typedef size_t (*Signature_size)(class IO&, T&);
+
+ template <typename U>
+ static char test(SameType<Signature_size, &U::size>*);
+
+ template <typename U>
+ static double test(...);
+
+public:
+ static bool const value = (sizeof(test<DocumentListTraits<T> >(nullptr))==1);
+};
+
+inline bool isNumber(StringRef S) {
+ static const char OctalChars[] = "01234567";
+ if (S.startswith("0") &&
+ S.drop_front().find_first_not_of(OctalChars) == StringRef::npos)
+ return true;
+
+ if (S.startswith("0o") &&
+ S.drop_front(2).find_first_not_of(OctalChars) == StringRef::npos)
+ return true;
+
+ static const char HexChars[] = "0123456789abcdefABCDEF";
+ if (S.startswith("0x") &&
+ S.drop_front(2).find_first_not_of(HexChars) == StringRef::npos)
+ return true;
+
+ static const char DecChars[] = "0123456789";
+ if (S.find_first_not_of(DecChars) == StringRef::npos)
+ return true;
+
+ if (S.equals(".inf") || S.equals(".Inf") || S.equals(".INF"))
+ return true;
+
+ Regex FloatMatcher("^(\\.[0-9]+|[0-9]+(\\.[0-9]*)?)([eE][-+]?[0-9]+)?$");
+ if (FloatMatcher.match(S))
+ return true;
+
+ return false;
+}
+
+inline bool isNumeric(StringRef S) {
+ // Guard the front() calls below against the empty string.
+ if (S.empty())
+   return false;
+
+ if ((S.front() == '-' || S.front() == '+') && isNumber(S.drop_front()))
+ return true;
+
+ if (isNumber(S))
+ return true;
+
+ if (S.equals(".nan") || S.equals(".NaN") || S.equals(".NAN"))
+ return true;
+
+ return false;
+}
+
+inline bool isNull(StringRef S) {
+ return S.equals("null") || S.equals("Null") || S.equals("NULL") ||
+ S.equals("~");
+}
+
+inline bool isBool(StringRef S) {
+ return S.equals("true") || S.equals("True") || S.equals("TRUE") ||
+ S.equals("false") || S.equals("False") || S.equals("FALSE");
+}
+
+inline bool needsQuotes(StringRef S) {
+ if (S.empty())
+ return true;
+ if (isspace(S.front()) || isspace(S.back()))
+ return true;
+ if (S.front() == ',')
+ return true;
+
+ static const char ScalarSafeChars[] =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-/^., \t";
+ if (S.find_first_not_of(ScalarSafeChars) != StringRef::npos)
+ return true;
+
+ if (isNull(S))
+ return true;
+ if (isBool(S))
+ return true;
+ if (isNumeric(S))
+ return true;
+
+ return false;
+}
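+
+// For reference, given the checks above: "hello" needs no quotes, while "",
+// " x", "true", "0x1F", and "3.14" all do.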
+
+template<typename T>
+struct missingTraits : public std::integral_constant<bool,
+ !has_ScalarEnumerationTraits<T>::value
+ && !has_ScalarBitSetTraits<T>::value
+ && !has_ScalarTraits<T>::value
+ && !has_BlockScalarTraits<T>::value
+ && !has_MappingTraits<T>::value
+ && !has_SequenceTraits<T>::value
+ && !has_DocumentListTraits<T>::value > {};
+
+template<typename T>
+struct validatedMappingTraits : public std::integral_constant<bool,
+ has_MappingTraits<T>::value
+ && has_MappingValidateTraits<T>::value> {};
+
+template<typename T>
+struct unvalidatedMappingTraits : public std::integral_constant<bool,
+ has_MappingTraits<T>::value
+ && !has_MappingValidateTraits<T>::value> {};
+
+// Base class for Input and Output.
+class IO {
+public:
+
+ IO(void *Ctxt=nullptr);
+ virtual ~IO();
+
+ virtual bool outputting() = 0;
+
+ virtual unsigned beginSequence() = 0;
+ virtual bool preflightElement(unsigned, void *&) = 0;
+ virtual void postflightElement(void*) = 0;
+ virtual void endSequence() = 0;
+ virtual bool canElideEmptySequence() = 0;
+
+ virtual unsigned beginFlowSequence() = 0;
+ virtual bool preflightFlowElement(unsigned, void *&) = 0;
+ virtual void postflightFlowElement(void*) = 0;
+ virtual void endFlowSequence() = 0;
+
+ virtual bool mapTag(StringRef Tag, bool Default=false) = 0;
+ virtual void beginMapping() = 0;
+ virtual void endMapping() = 0;
+ virtual bool preflightKey(const char*, bool, bool, bool &, void *&) = 0;
+ virtual void postflightKey(void*) = 0;
+
+ virtual void beginFlowMapping() = 0;
+ virtual void endFlowMapping() = 0;
+
+ virtual void beginEnumScalar() = 0;
+ virtual bool matchEnumScalar(const char*, bool) = 0;
+ virtual bool matchEnumFallback() = 0;
+ virtual void endEnumScalar() = 0;
+
+ virtual bool beginBitSetScalar(bool &) = 0;
+ virtual bool bitSetMatch(const char*, bool) = 0;
+ virtual void endBitSetScalar() = 0;
+
+ virtual void scalarString(StringRef &, bool) = 0;
+ virtual void blockScalarString(StringRef &) = 0;
+
+ virtual void setError(const Twine &) = 0;
+
+ template <typename T>
+ void enumCase(T &Val, const char* Str, const T ConstVal) {
+ if ( matchEnumScalar(Str, outputting() && Val == ConstVal) ) {
+ Val = ConstVal;
+ }
+ }
+
+ // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
+ template <typename T>
+ void enumCase(T &Val, const char* Str, const uint32_t ConstVal) {
+ if ( matchEnumScalar(Str, outputting() && Val == static_cast<T>(ConstVal)) ) {
+ Val = ConstVal;
+ }
+ }
+
+ template <typename FBT, typename T>
+ void enumFallback(T &Val) {
+ if ( matchEnumFallback() ) {
+ // FIXME: Force integral conversion to allow strong typedefs to convert.
+ FBT Res = (uint64_t)Val;
+ yamlize(*this, Res, true);
+ Val = (uint64_t)Res;
+ }
+ }
+
+ template <typename T>
+ void bitSetCase(T &Val, const char* Str, const T ConstVal) {
+ if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
+ Val = Val | ConstVal;
+ }
+ }
+
+ // allow anonymous enum values to be used with LLVM_YAML_STRONG_TYPEDEF
+ template <typename T>
+ void bitSetCase(T &Val, const char* Str, const uint32_t ConstVal) {
+ if ( bitSetMatch(Str, outputting() && (Val & ConstVal) == ConstVal) ) {
+ Val = Val | ConstVal;
+ }
+ }
+
+ template <typename T>
+ void maskedBitSetCase(T &Val, const char *Str, T ConstVal, T Mask) {
+ if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+ Val = Val | ConstVal;
+ }
+
+ template <typename T>
+ void maskedBitSetCase(T &Val, const char *Str, uint32_t ConstVal,
+ uint32_t Mask) {
+ if (bitSetMatch(Str, outputting() && (Val & Mask) == ConstVal))
+ Val = Val | ConstVal;
+ }
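+
+ // The maskedBitSetCase overloads above handle multi-bit fields that share
+ // a mask; a sketch with hypothetical constants:
+ //
+ //   io.maskedBitSetCase(Val, "low",  kPriorityLow,  kPriorityMask);
+ //   io.maskedBitSetCase(Val, "high", kPriorityHigh, kPriorityMask);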
+
+ void *getContext();
+ void setContext(void *);
+
+ template <typename T>
+ void mapRequired(const char* Key, T& Val) {
+ this->processKey(Key, Val, true);
+ }
+
+ template <typename T>
+ typename std::enable_if<has_SequenceTraits<T>::value,void>::type
+ mapOptional(const char* Key, T& Val) {
+ // omit key/value instead of outputting empty sequence
+ if ( this->canElideEmptySequence() && !(Val.begin() != Val.end()) )
+ return;
+ this->processKey(Key, Val, false);
+ }
+
+ template <typename T>
+ void mapOptional(const char* Key, Optional<T> &Val) {
+ processKeyWithDefault(Key, Val, Optional<T>(), /*Required=*/false);
+ }
+
+ template <typename T>
+ typename std::enable_if<!has_SequenceTraits<T>::value,void>::type
+ mapOptional(const char* Key, T& Val) {
+ this->processKey(Key, Val, false);
+ }
+
+ template <typename T>
+ void mapOptional(const char* Key, T& Val, const T& Default) {
+ this->processKeyWithDefault(Key, Val, Default, false);
+ }
+
+private:
+ template <typename T>
+ void processKeyWithDefault(const char *Key, Optional<T> &Val,
+ const Optional<T> &DefaultValue, bool Required) {
+ assert(DefaultValue.hasValue() == false &&
+ "Optional<T> shouldn't have a value!");
+ void *SaveInfo;
+ bool UseDefault;
+ const bool sameAsDefault = outputting() && !Val.hasValue();
+ if (!outputting() && !Val.hasValue())
+ Val = T();
+ if (this->preflightKey(Key, Required, sameAsDefault, UseDefault,
+ SaveInfo)) {
+ yamlize(*this, Val.getValue(), Required);
+ this->postflightKey(SaveInfo);
+ } else {
+ if (UseDefault)
+ Val = DefaultValue;
+ }
+ }
+
+ template <typename T>
+ void processKeyWithDefault(const char *Key, T &Val, const T& DefaultValue,
+ bool Required) {
+ void *SaveInfo;
+ bool UseDefault;
+ const bool sameAsDefault = outputting() && Val == DefaultValue;
+ if ( this->preflightKey(Key, Required, sameAsDefault, UseDefault,
+ SaveInfo) ) {
+ yamlize(*this, Val, Required);
+ this->postflightKey(SaveInfo);
+ }
+ else {
+ if ( UseDefault )
+ Val = DefaultValue;
+ }
+ }
+
+ template <typename T>
+ void processKey(const char *Key, T &Val, bool Required) {
+ void *SaveInfo;
+ bool UseDefault;
+ if ( this->preflightKey(Key, Required, false, UseDefault, SaveInfo) ) {
+ yamlize(*this, Val, Required);
+ this->postflightKey(SaveInfo);
+ }
+ }
+
+private:
+ void *Ctxt;
+};
+
+template<typename T>
+typename std::enable_if<has_ScalarEnumerationTraits<T>::value,void>::type
+yamlize(IO &io, T &Val, bool) {
+ io.beginEnumScalar();
+ ScalarEnumerationTraits<T>::enumeration(io, Val);
+ io.endEnumScalar();
+}
+
+template<typename T>
+typename std::enable_if<has_ScalarBitSetTraits<T>::value,void>::type
+yamlize(IO &io, T &Val, bool) {
+ bool DoClear;
+ if ( io.beginBitSetScalar(DoClear) ) {
+ if ( DoClear )
+ Val = static_cast<T>(0);
+ ScalarBitSetTraits<T>::bitset(io, Val);
+ io.endBitSetScalar();
+ }
+}
+
+template<typename T>
+typename std::enable_if<has_ScalarTraits<T>::value,void>::type
+yamlize(IO &io, T &Val, bool) {
+ if ( io.outputting() ) {
+ std::string Storage;
+ llvm::raw_string_ostream Buffer(Storage);
+ ScalarTraits<T>::output(Val, io.getContext(), Buffer);
+ StringRef Str = Buffer.str();
+ io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
+ }
+ else {
+ StringRef Str;
+ io.scalarString(Str, ScalarTraits<T>::mustQuote(Str));
+ StringRef Result = ScalarTraits<T>::input(Str, io.getContext(), Val);
+ if ( !Result.empty() ) {
+ io.setError(llvm::Twine(Result));
+ }
+ }
+}
+
+template <typename T>
+typename std::enable_if<has_BlockScalarTraits<T>::value, void>::type
+yamlize(IO &YamlIO, T &Val, bool) {
+ if (YamlIO.outputting()) {
+ std::string Storage;
+ llvm::raw_string_ostream Buffer(Storage);
+ BlockScalarTraits<T>::output(Val, YamlIO.getContext(), Buffer);
+ StringRef Str = Buffer.str();
+ YamlIO.blockScalarString(Str);
+ } else {
+ StringRef Str;
+ YamlIO.blockScalarString(Str);
+ StringRef Result =
+ BlockScalarTraits<T>::input(Str, YamlIO.getContext(), Val);
+ if (!Result.empty())
+ YamlIO.setError(llvm::Twine(Result));
+ }
+}
+
+template<typename T>
+typename std::enable_if<validatedMappingTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool) {
+ if (has_FlowTraits<MappingTraits<T>>::value)
+ io.beginFlowMapping();
+ else
+ io.beginMapping();
+ if (io.outputting()) {
+ StringRef Err = MappingTraits<T>::validate(io, Val);
+ if (!Err.empty()) {
+ llvm::errs() << Err << "\n";
+ assert(Err.empty() && "invalid struct trying to be written as yaml");
+ }
+ }
+ MappingTraits<T>::mapping(io, Val);
+ if (!io.outputting()) {
+ StringRef Err = MappingTraits<T>::validate(io, Val);
+ if (!Err.empty())
+ io.setError(Err);
+ }
+ if (has_FlowTraits<MappingTraits<T>>::value)
+ io.endFlowMapping();
+ else
+ io.endMapping();
+}
+
+template<typename T>
+typename std::enable_if<unvalidatedMappingTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool) {
+ if (has_FlowTraits<MappingTraits<T>>::value) {
+ io.beginFlowMapping();
+ MappingTraits<T>::mapping(io, Val);
+ io.endFlowMapping();
+ } else {
+ io.beginMapping();
+ MappingTraits<T>::mapping(io, Val);
+ io.endMapping();
+ }
+}
+
+template<typename T>
+typename std::enable_if<missingTraits<T>::value, void>::type
+yamlize(IO &io, T &Val, bool) {
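+ // MissingTrait<T> is declared but never defined, so taking sizeof of it
+ // forces a compile-time error whose message names the offending type T.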
+ char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+}
+
+template<typename T>
+typename std::enable_if<has_SequenceTraits<T>::value,void>::type
+yamlize(IO &io, T &Seq, bool) {
+ if ( has_FlowTraits< SequenceTraits<T> >::value ) {
+ unsigned incnt = io.beginFlowSequence();
+ unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
+ for(unsigned i=0; i < count; ++i) {
+ void *SaveInfo;
+ if ( io.preflightFlowElement(i, SaveInfo) ) {
+ yamlize(io, SequenceTraits<T>::element(io, Seq, i), true);
+ io.postflightFlowElement(SaveInfo);
+ }
+ }
+ io.endFlowSequence();
+ }
+ else {
+ unsigned incnt = io.beginSequence();
+ unsigned count = io.outputting() ? SequenceTraits<T>::size(io, Seq) : incnt;
+ for(unsigned i=0; i < count; ++i) {
+ void *SaveInfo;
+ if ( io.preflightElement(i, SaveInfo) ) {
+ yamlize(io, SequenceTraits<T>::element(io, Seq, i), true);
+ io.postflightElement(SaveInfo);
+ }
+ }
+ io.endSequence();
+ }
+}
+
+template<>
+struct ScalarTraits<bool> {
+ static void output(const bool &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, bool &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<StringRef> {
+ static void output(const StringRef &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, StringRef &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+template<>
+struct ScalarTraits<std::string> {
+ static void output(const std::string &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, std::string &);
+ static bool mustQuote(StringRef S) { return needsQuotes(S); }
+};
+
+template<>
+struct ScalarTraits<uint8_t> {
+ static void output(const uint8_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, uint8_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<uint16_t> {
+ static void output(const uint16_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, uint16_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<uint32_t> {
+ static void output(const uint32_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, uint32_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<uint64_t> {
+ static void output(const uint64_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, uint64_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<int8_t> {
+ static void output(const int8_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, int8_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<int16_t> {
+ static void output(const int16_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, int16_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<int32_t> {
+ static void output(const int32_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, int32_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<int64_t> {
+ static void output(const int64_t &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, int64_t &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<float> {
+ static void output(const float &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, float &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<double> {
+ static void output(const double &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, double &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+// Utility for use within MappingTraits<>::mapping() method
+// to [de]normalize an object for use with YAML conversion.
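+//
+// A typical use inside mapping() looks like this sketch (NormalizedFoo is a
+// hypothetical helper type providing NormalizedFoo(IO&),
+// NormalizedFoo(IO&, Foo&), and Foo denormalize(IO&)):
+//
+//   template <> struct MappingTraits<Foo> {
+//     static void mapping(IO &io, Foo &F) {
+//       MappingNormalization<NormalizedFoo, Foo> Keys(io, F);
+//       io.mapRequired("value", Keys->value);
+//     }
+//   };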
+template <typename TNorm, typename TFinal>
+struct MappingNormalization {
+ MappingNormalization(IO &i_o, TFinal &Obj)
+ : io(i_o), BufPtr(nullptr), Result(Obj) {
+ if ( io.outputting() ) {
+ BufPtr = new (&Buffer) TNorm(io, Obj);
+ }
+ else {
+ BufPtr = new (&Buffer) TNorm(io);
+ }
+ }
+
+ ~MappingNormalization() {
+ if ( ! io.outputting() ) {
+ Result = BufPtr->denormalize(io);
+ }
+ BufPtr->~TNorm();
+ }
+
+ TNorm* operator->() { return BufPtr; }
+
+private:
+ typedef llvm::AlignedCharArrayUnion<TNorm> Storage;
+
+ Storage Buffer;
+ IO &io;
+ TNorm *BufPtr;
+ TFinal &Result;
+};
+
+// Utility for use within MappingTraits<>::mapping() method
+// to [de]normalize an object for use with YAML conversion.
+template <typename TNorm, typename TFinal>
+struct MappingNormalizationHeap {
+ MappingNormalizationHeap(IO &i_o, TFinal &Obj)
+ : io(i_o), BufPtr(nullptr), Result(Obj) {
+ if ( io.outputting() ) {
+ BufPtr = new (&Buffer) TNorm(io, Obj);
+ }
+ else {
+ BufPtr = new TNorm(io);
+ }
+ }
+
+ ~MappingNormalizationHeap() {
+ if ( io.outputting() ) {
+ BufPtr->~TNorm();
+ }
+ else {
+ Result = BufPtr->denormalize(io);
+ }
+ }
+
+ TNorm* operator->() { return BufPtr; }
+
+private:
+ typedef llvm::AlignedCharArrayUnion<TNorm> Storage;
+
+ Storage Buffer;
+ IO &io;
+ TNorm *BufPtr;
+ TFinal &Result;
+};
+
+///
+/// The Input class is used to parse a yaml document into in-memory structs
+/// and vectors.
+///
+/// It works by using YAMLParser to do a syntax parse of the entire yaml
+/// document, then the Input class builds a graph of HNodes which wrap
+/// each yaml Node. The extra layer is buffering: the low-level yaml
+/// parser only lets you look at each node once, while the buffering layer
+/// lets you search and iterate multiple times. This is necessary because
+/// the mapRequired() method calls may not be in the same order
+/// as the keys in the document.
+///
+class Input : public IO {
+public:
+ // Construct a yaml Input object from a StringRef and optional
+ // user-data. The DiagHandler can be specified to provide
+ // alternative error reporting.
+ Input(StringRef InputContent,
+ void *Ctxt = nullptr,
+ SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+ void *DiagHandlerCtxt = nullptr);
+ ~Input() override;
+
+ // Check if there was a syntax or semantic error during parsing.
+ std::error_code error();
+
+private:
+ bool outputting() override;
+ bool mapTag(StringRef, bool) override;
+ void beginMapping() override;
+ void endMapping() override;
+ bool preflightKey(const char *, bool, bool, bool &, void *&) override;
+ void postflightKey(void *) override;
+ void beginFlowMapping() override;
+ void endFlowMapping() override;
+ unsigned beginSequence() override;
+ void endSequence() override;
+ bool preflightElement(unsigned index, void *&) override;
+ void postflightElement(void *) override;
+ unsigned beginFlowSequence() override;
+ bool preflightFlowElement(unsigned , void *&) override;
+ void postflightFlowElement(void *) override;
+ void endFlowSequence() override;
+ void beginEnumScalar() override;
+ bool matchEnumScalar(const char*, bool) override;
+ bool matchEnumFallback() override;
+ void endEnumScalar() override;
+ bool beginBitSetScalar(bool &) override;
+ bool bitSetMatch(const char *, bool ) override;
+ void endBitSetScalar() override;
+ void scalarString(StringRef &, bool) override;
+ void blockScalarString(StringRef &) override;
+ void setError(const Twine &message) override;
+ bool canElideEmptySequence() override;
+
+ class HNode {
+ virtual void anchor();
+ public:
+ HNode(Node *n) : _node(n) { }
+ virtual ~HNode() { }
+ static inline bool classof(const HNode *) { return true; }
+
+ Node *_node;
+ };
+
+ class EmptyHNode : public HNode {
+ void anchor() override;
+ public:
+ EmptyHNode(Node *n) : HNode(n) { }
+ static inline bool classof(const HNode *n) {
+ return NullNode::classof(n->_node);
+ }
+ static inline bool classof(const EmptyHNode *) { return true; }
+ };
+
+ class ScalarHNode : public HNode {
+ void anchor() override;
+ public:
+ ScalarHNode(Node *n, StringRef s) : HNode(n), _value(s) { }
+
+ StringRef value() const { return _value; }
+
+ static inline bool classof(const HNode *n) {
+ return ScalarNode::classof(n->_node) ||
+ BlockScalarNode::classof(n->_node);
+ }
+ static inline bool classof(const ScalarHNode *) { return true; }
+ protected:
+ StringRef _value;
+ };
+
+ class MapHNode : public HNode {
+ void anchor() override;
+
+ public:
+ MapHNode(Node *n) : HNode(n) { }
+
+ static inline bool classof(const HNode *n) {
+ return MappingNode::classof(n->_node);
+ }
+ static inline bool classof(const MapHNode *) { return true; }
+
+ typedef llvm::StringMap<std::unique_ptr<HNode>> NameToNode;
+
+ bool isValidKey(StringRef key);
+
+ NameToNode Mapping;
+ llvm::SmallVector<const char*, 6> ValidKeys;
+ };
+
+ class SequenceHNode : public HNode {
+ void anchor() override;
+
+ public:
+ SequenceHNode(Node *n) : HNode(n) { }
+
+ static inline bool classof(const HNode *n) {
+ return SequenceNode::classof(n->_node);
+ }
+ static inline bool classof(const SequenceHNode *) { return true; }
+
+ std::vector<std::unique_ptr<HNode>> Entries;
+ };
+
+ std::unique_ptr<Input::HNode> createHNodes(Node *node);
+ void setError(HNode *hnode, const Twine &message);
+ void setError(Node *node, const Twine &message);
+
+public:
+ // These are only used by operator>>. They could be private
+ // if those templated things could be made friends.
+ bool setCurrentDocument();
+ bool nextDocument();
+
+ /// Returns the current node that's being parsed by the YAML Parser.
+ const Node *getCurrentNode() const;
+
+private:
+ llvm::SourceMgr SrcMgr; // must be before Strm
+ std::unique_ptr<llvm::yaml::Stream> Strm;
+ std::unique_ptr<HNode> TopNode;
+ std::error_code EC;
+ llvm::BumpPtrAllocator StringAllocator;
+ llvm::yaml::document_iterator DocIterator;
+ std::vector<bool> BitValuesUsed;
+ HNode *CurrentNode;
+ bool ScalarMatchFound;
+};
+
+///
+/// The Output class is used to generate a yaml document from in-memory structs
+/// and vectors.
+///
+class Output : public IO {
+public:
+ Output(llvm::raw_ostream &, void *Ctxt = nullptr, int WrapColumn = 70);
+ ~Output() override;
+
+ bool outputting() override;
+ bool mapTag(StringRef, bool) override;
+ void beginMapping() override;
+ void endMapping() override;
+ bool preflightKey(const char *key, bool, bool, bool &, void *&) override;
+ void postflightKey(void *) override;
+ void beginFlowMapping() override;
+ void endFlowMapping() override;
+ unsigned beginSequence() override;
+ void endSequence() override;
+ bool preflightElement(unsigned, void *&) override;
+ void postflightElement(void *) override;
+ unsigned beginFlowSequence() override;
+ bool preflightFlowElement(unsigned, void *&) override;
+ void postflightFlowElement(void *) override;
+ void endFlowSequence() override;
+ void beginEnumScalar() override;
+ bool matchEnumScalar(const char*, bool) override;
+ bool matchEnumFallback() override;
+ void endEnumScalar() override;
+ bool beginBitSetScalar(bool &) override;
+ bool bitSetMatch(const char *, bool ) override;
+ void endBitSetScalar() override;
+ void scalarString(StringRef &, bool) override;
+ void blockScalarString(StringRef &) override;
+ void setError(const Twine &message) override;
+ bool canElideEmptySequence() override;
+public:
+ // These are only used by operator<<. They could be private
+ // if that templated operator could be made a friend.
+ void beginDocuments();
+ bool preflightDocument(unsigned);
+ void postflightDocument();
+ void endDocuments();
+
+private:
+ void output(StringRef s);
+ void outputUpToEndOfLine(StringRef s);
+ void newLineCheck();
+ void outputNewLine();
+ void paddedKey(StringRef key);
+ void flowKey(StringRef Key);
+
+ enum InState {
+ inSeq,
+ inFlowSeq,
+ inMapFirstKey,
+ inMapOtherKey,
+ inFlowMapFirstKey,
+ inFlowMapOtherKey
+ };
+
+ llvm::raw_ostream &Out;
+ int WrapColumn;
+ SmallVector<InState, 8> StateStack;
+ int Column;
+ int ColumnAtFlowStart;
+ int ColumnAtMapFlowStart;
+ bool NeedBitValueComma;
+ bool NeedFlowSequenceComma;
+ bool EnumerationMatchFound;
+ bool NeedsNewLine;
+};
+
+/// YAML I/O does conversion based on types. But often native data types
+/// are just a typedef of built-in integral types (e.g. int), and the C++
+/// type matching system sees through the typedef, so all the typedefed types
+/// look like a built-in type. This will cause the generic YAML I/O conversion
+/// to be used. To provide better control over the YAML conversion, you can
+/// use this macro instead of typedef. It will create a class with one field
+/// and automatic conversion operators to and from the base type.
+/// Based on BOOST_STRONG_TYPEDEF.
+#define LLVM_YAML_STRONG_TYPEDEF(_base, _type) \
+ struct _type { \
+ _type() { } \
+ _type(const _base v) : value(v) { } \
+ _type(const _type &v) : value(v.value) {} \
+ _type &operator=(const _type &rhs) { value = rhs.value; return *this; }\
+ _type &operator=(const _base &rhs) { value = rhs; return *this; } \
+ operator const _base & () const { return value; } \
+ bool operator==(const _type &rhs) const { return value == rhs.value; } \
+ bool operator==(const _base &rhs) const { return value == rhs; } \
+ bool operator<(const _type &rhs) const { return value < rhs.value; } \
+ _base value; \
+ };
+
+///
+/// Use these types instead of uintXX_t in any mapping to have
+/// its yaml output formatted as hexadecimal.
+///
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, Hex8)
+LLVM_YAML_STRONG_TYPEDEF(uint16_t, Hex16)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, Hex32)
+LLVM_YAML_STRONG_TYPEDEF(uint64_t, Hex64)
+
+template<>
+struct ScalarTraits<Hex8> {
+ static void output(const Hex8 &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, Hex8 &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<Hex16> {
+ static void output(const Hex16 &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, Hex16 &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<Hex32> {
+ static void output(const Hex32 &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, Hex32 &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+template<>
+struct ScalarTraits<Hex64> {
+ static void output(const Hex64 &, void*, llvm::raw_ostream &);
+ static StringRef input(StringRef, void*, Hex64 &);
+ static bool mustQuote(StringRef) { return false; }
+};
+
+// Define non-member operator>> so that Input can stream in a document list.
+template <typename T>
+inline
+typename std::enable_if<has_DocumentListTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docList) {
+ int i = 0;
+ while ( yin.setCurrentDocument() ) {
+ yamlize(yin, DocumentListTraits<T>::element(yin, docList, i), true);
+ if ( yin.error() )
+ return yin;
+ yin.nextDocument();
+ ++i;
+ }
+ return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a map as a document.
+template <typename T>
+inline
+typename std::enable_if<has_MappingTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docMap) {
+ yin.setCurrentDocument();
+ yamlize(yin, docMap, true);
+ return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a sequence as
+// a document.
+template <typename T>
+inline
+typename std::enable_if<has_SequenceTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docSeq) {
+ if (yin.setCurrentDocument())
+ yamlize(yin, docSeq, true);
+ return yin;
+}
+
+// Define non-member operator>> so that Input can stream in a block scalar.
+template <typename T>
+inline
+typename std::enable_if<has_BlockScalarTraits<T>::value, Input &>::type
+operator>>(Input &In, T &Val) {
+ if (In.setCurrentDocument())
+ yamlize(In, Val, true);
+ return In;
+}
+
+// Provide better error message about types missing a trait specialization
+template <typename T>
+inline
+typename std::enable_if<missingTraits<T>::value, Input &>::type
+operator>>(Input &yin, T &docSeq) {
+ char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+ return yin;
+}
+
+// Define non-member operator<< so that Output can stream out document list.
+template <typename T>
+inline
+typename std::enable_if<has_DocumentListTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &docList) {
+ yout.beginDocuments();
+ const size_t count = DocumentListTraits<T>::size(yout, docList);
+ for(size_t i=0; i < count; ++i) {
+ if ( yout.preflightDocument(i) ) {
+ yamlize(yout, DocumentListTraits<T>::element(yout, docList, i), true);
+ yout.postflightDocument();
+ }
+ }
+ yout.endDocuments();
+ return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a map.
+template <typename T>
+inline
+typename std::enable_if<has_MappingTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &map) {
+ yout.beginDocuments();
+ if ( yout.preflightDocument(0) ) {
+ yamlize(yout, map, true);
+ yout.postflightDocument();
+ }
+ yout.endDocuments();
+ return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a sequence.
+template <typename T>
+inline
+typename std::enable_if<has_SequenceTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &seq) {
+ yout.beginDocuments();
+ if ( yout.preflightDocument(0) ) {
+ yamlize(yout, seq, true);
+ yout.postflightDocument();
+ }
+ yout.endDocuments();
+ return yout;
+}
+
+// Define non-member operator<< so that Output can stream out a block scalar.
+template <typename T>
+inline
+typename std::enable_if<has_BlockScalarTraits<T>::value, Output &>::type
+operator<<(Output &Out, T &Val) {
+ Out.beginDocuments();
+ if (Out.preflightDocument(0)) {
+ yamlize(Out, Val, true);
+ Out.postflightDocument();
+ }
+ Out.endDocuments();
+ return Out;
+}
+
+// Provide better error message about types missing a trait specialization
+template <typename T>
+inline
+typename std::enable_if<missingTraits<T>::value, Output &>::type
+operator<<(Output &yout, T &seq) {
+ char missing_yaml_trait_for_type[sizeof(MissingTrait<T>)];
+ return yout;
+}
+
+} // namespace yaml
+} // namespace llvm
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML sequence.
+#define LLVM_YAML_IS_SEQUENCE_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template<> \
+ struct SequenceTraits< std::vector<_type> > { \
+ static size_t size(IO &io, std::vector<_type> &seq) { \
+ return seq.size(); \
+ } \
+ static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
+ if ( index >= seq.size() ) \
+ seq.resize(index+1); \
+ return seq[index]; \
+ } \
+ }; \
+ } \
+ }
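+
+// A sketch of intended use (MyType hypothetical):
+//
+//   LLVM_YAML_IS_SEQUENCE_VECTOR(MyType)
+//
+// after which a std::vector<MyType> can be used with io.mapRequired(...) or
+// streamed as a whole document via operator>> / operator<<.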
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML flow sequence.
+#define LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template<> \
+ struct SequenceTraits< std::vector<_type> > { \
+ static size_t size(IO &io, std::vector<_type> &seq) { \
+ return seq.size(); \
+ } \
+ static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
+ (void)flow; /* Remove this workaround after PR17897 is fixed */ \
+ if ( index >= seq.size() ) \
+ seq.resize(index+1); \
+ return seq[index]; \
+ } \
+ static const bool flow = true; \
+ }; \
+ } \
+ }
+
+/// Utility for declaring that a std::vector of a particular type
+/// should be considered a YAML document list.
+#define LLVM_YAML_IS_DOCUMENT_LIST_VECTOR(_type) \
+ namespace llvm { \
+ namespace yaml { \
+ template<> \
+ struct DocumentListTraits< std::vector<_type> > { \
+ static size_t size(IO &io, std::vector<_type> &seq) { \
+ return seq.size(); \
+ } \
+ static _type& element(IO &io, std::vector<_type> &seq, size_t index) {\
+ if ( index >= seq.size() ) \
+ seq.resize(index+1); \
+ return seq[index]; \
+ } \
+ }; \
+ } \
+ }
+
+#endif // LLVM_SUPPORT_YAMLTRAITS_H
diff --git a/contrib/llvm/include/llvm/Support/circular_raw_ostream.h b/contrib/llvm/include/llvm/Support/circular_raw_ostream.h
new file mode 100644
index 0000000..b46fd7f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/circular_raw_ostream.h
@@ -0,0 +1,156 @@
+//===-- llvm/Support/circular_raw_ostream.h - Buffered streams --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains raw_ostream implementations for streams to do circular
+// buffering of their output.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H
+#define LLVM_SUPPORT_CIRCULAR_RAW_OSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+ /// circular_raw_ostream - A raw_ostream which *can* save its data
+ /// to a circular buffer, or can pass it through directly to an
+ /// underlying stream if specified with a buffer of zero.
+ ///
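+ /// A usage sketch (banner text and buffer size hypothetical):
+ /// \code
+ ///   circular_raw_ostream CS(llvm::errs(), "Recent output:\n", 4096);
+ ///   CS << "...";    // only the most recent 4096 bytes are retained
+ ///   CS.flushBufferWithBanner();
+ /// \endcode
+ ///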
+ class circular_raw_ostream : public raw_ostream {
+ public:
+ /// TAKE_OWNERSHIP - Tell this stream that it owns the underlying
+ /// stream and is responsible for cleanup, memory management
+ /// issues, etc.
+ ///
+ static const bool TAKE_OWNERSHIP = true;
+
+ /// REFERENCE_ONLY - Tell this stream it should not manage the
+ /// held stream.
+ ///
+ static const bool REFERENCE_ONLY = false;
+
+ private:
+ /// TheStream - The real stream we output to. We set it to be
+ /// unbuffered, since we're already doing our own buffering.
+ ///
+ raw_ostream *TheStream;
+
+ /// OwnsStream - Are we responsible for managing the underlying
+ /// stream?
+ ///
+ bool OwnsStream;
+
+ /// BufferSize - The size of the buffer in bytes.
+ ///
+ size_t BufferSize;
+
+ /// BufferArray - The actual buffer storage.
+ ///
+ char *BufferArray;
+
+ /// Cur - Pointer to the current output point in BufferArray.
+ ///
+ char *Cur;
+
+ /// Filled - Indicate whether the buffer has been completely
+ /// filled. This helps avoid garbage output.
+ ///
+ bool Filled;
+
+ /// Banner - A pointer to a banner to print before dumping the
+ /// log.
+ ///
+ const char *Banner;
+
+ /// flushBuffer - Dump the contents of the buffer to Stream.
+ ///
+ void flushBuffer() {
+ if (Filled)
+ // Write the older portion of the buffer.
+ TheStream->write(Cur, BufferArray + BufferSize - Cur);
+ // Write the newer portion of the buffer.
+ TheStream->write(BufferArray, Cur - BufferArray);
+ Cur = BufferArray;
+ Filled = false;
+ }
+
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ /// current_pos - Return the current position within the stream,
+ /// not counting the bytes currently in the buffer.
+ ///
+ uint64_t current_pos() const override {
+ // This has the same effect as calling TheStream->current_pos(),
+ // but that interface is private.
+ return TheStream->tell() - TheStream->GetNumBytesInBuffer();
+ }
+
+ public:
+ /// circular_raw_ostream - Construct an optionally
+ /// circular-buffered stream, handing it an underlying stream to
+ /// do the "real" output.
+ ///
+ /// As a side effect, if BuffSize is nonzero, the given Stream is
+ /// set to be Unbuffered. This is because circular_raw_ostream
+ /// does its own buffering, so it doesn't want another layer of
+ /// buffering to be happening underneath it.
+ ///
+ /// "Owns" tells the circular_raw_ostream whether it is
+ /// responsible for managing the held stream, doing memory
+ /// management of it, etc.
+ ///
+ circular_raw_ostream(raw_ostream &Stream, const char *Header,
+ size_t BuffSize = 0, bool Owns = REFERENCE_ONLY)
+ : raw_ostream(/*unbuffered*/ true), TheStream(nullptr),
+ OwnsStream(Owns), BufferSize(BuffSize), BufferArray(nullptr),
+ Filled(false), Banner(Header) {
+ if (BufferSize != 0)
+ BufferArray = new char[BufferSize];
+ Cur = BufferArray;
+ setStream(Stream, Owns);
+ }
+
+ ~circular_raw_ostream() override {
+ flush();
+ flushBufferWithBanner();
+ releaseStream();
+ delete[] BufferArray;
+ }
+
+ /// setStream - Tell the circular_raw_ostream to output a
+ /// different stream. "Owns" tells circular_raw_ostream whether
+ /// it should take responsibility for managing the underlying
+ /// stream.
+ ///
+ void setStream(raw_ostream &Stream, bool Owns = REFERENCE_ONLY) {
+ releaseStream();
+ TheStream = &Stream;
+ OwnsStream = Owns;
+ }
+
+ /// flushBufferWithBanner - Force output of the buffer along with
+ /// a small header.
+ ///
+ void flushBufferWithBanner();
+
+ private:
+ /// releaseStream - Delete the held stream if needed. Otherwise,
+ /// transfer the buffer settings from this circular_raw_ostream
+ /// back to the underlying stream.
+ ///
+ void releaseStream() {
+ if (!TheStream)
+ return;
+ if (OwnsStream)
+ delete TheStream;
+ }
+ };
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/raw_os_ostream.h b/contrib/llvm/include/llvm/Support/raw_os_ostream.h
new file mode 100644
index 0000000..a983aeb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/raw_os_ostream.h
@@ -0,0 +1,42 @@
+//===- raw_os_ostream.h - std::ostream adaptor for raw_ostream --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the raw_os_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_OS_OSTREAM_H
+#define LLVM_SUPPORT_RAW_OS_OSTREAM_H
+
+#include "llvm/Support/raw_ostream.h"
+#include <iosfwd>
+
+namespace llvm {
+
+/// raw_os_ostream - A raw_ostream that writes to an std::ostream. This is a
+/// simple adaptor class. It does not check for output errors; clients should
+/// use the underlying stream to detect errors.
+class raw_os_ostream : public raw_ostream {
+ std::ostream &OS;
+
+ /// write_impl - See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ /// current_pos - Return the current position within the stream, not
+ /// counting the bytes currently in the buffer.
+ uint64_t current_pos() const override;
+
+public:
+ raw_os_ostream(std::ostream &O) : OS(O) {}
+ ~raw_os_ostream() override;
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/raw_ostream.h b/contrib/llvm/include/llvm/Support/raw_ostream.h
new file mode 100644
index 0000000..d1e96f8
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/raw_ostream.h
@@ -0,0 +1,530 @@
+//===--- raw_ostream.h - Raw output stream ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the raw_ostream class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_RAW_OSTREAM_H
+#define LLVM_SUPPORT_RAW_OSTREAM_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/DataTypes.h"
+#include <system_error>
+
+namespace llvm {
+class format_object_base;
+class FormattedString;
+class FormattedNumber;
+template <typename T> class SmallVectorImpl;
+
+namespace sys {
+namespace fs {
+enum OpenFlags : unsigned;
+}
+}
+
+/// This class implements an extremely fast bulk output stream that can *only*
+/// output to a stream. It does not support seeking, reopening, rewinding, line
+/// buffered disciplines etc. It is a simple buffer that outputs
+/// a chunk at a time.
+class raw_ostream {
+private:
+ void operator=(const raw_ostream &) = delete;
+ raw_ostream(const raw_ostream &) = delete;
+
+ /// The buffer is handled in such a way that the buffer is
+ /// uninitialized, unbuffered, or out of space when OutBufCur >=
+ /// OutBufEnd. Thus a single comparison suffices to determine if we
+ /// need to take the slow path to write a single character.
+ ///
+ /// The buffer is in one of three states:
+ /// 1. Unbuffered (BufferMode == Unbuffered).
+ /// 2. Uninitialized (BufferMode != Unbuffered && OutBufStart == 0).
+ /// 3. Buffered (BufferMode != Unbuffered && OutBufStart != 0 &&
+ /// OutBufEnd - OutBufStart >= 1).
+ ///
+ /// If buffered, then the raw_ostream owns the buffer if (BufferMode ==
+ /// InternalBuffer); otherwise the buffer has been set via SetBuffer and is
+ /// managed by the subclass.
+ ///
+ /// If a subclass installs an external buffer using SetBuffer then it can wait
+ /// for a \see write_impl() call to handle the data which has been put into
+ /// this buffer.
+ char *OutBufStart, *OutBufEnd, *OutBufCur;
+
+ enum BufferKind {
+ Unbuffered = 0,
+ InternalBuffer,
+ ExternalBuffer
+ } BufferMode;
+
+public:
+ // color order matches ANSI escape sequence, don't change
+ enum Colors {
+ BLACK=0,
+ RED,
+ GREEN,
+ YELLOW,
+ BLUE,
+ MAGENTA,
+ CYAN,
+ WHITE,
+ SAVEDCOLOR
+ };
+
+ explicit raw_ostream(bool unbuffered = false)
+ : BufferMode(unbuffered ? Unbuffered : InternalBuffer) {
+ // Start out ready to flush.
+ OutBufStart = OutBufEnd = OutBufCur = nullptr;
+ }
+
+ virtual ~raw_ostream();
+
+ /// tell - Return the current offset within the file.
+ uint64_t tell() const { return current_pos() + GetNumBytesInBuffer(); }
+
+ //===--------------------------------------------------------------------===//
+ // Configuration Interface
+ //===--------------------------------------------------------------------===//
+
+ /// Set the stream to be buffered, with an automatically determined buffer
+ /// size.
+ void SetBuffered();
+
+ /// Set the stream to be buffered, using the specified buffer size.
+ void SetBufferSize(size_t Size) {
+ flush();
+ SetBufferAndMode(new char[Size], Size, InternalBuffer);
+ }
+
+ size_t GetBufferSize() const {
+ // If we're supposed to be buffered but haven't actually gotten around
+ // to allocating the buffer yet, return the value that would be used.
+ if (BufferMode != Unbuffered && OutBufStart == nullptr)
+ return preferred_buffer_size();
+
+ // Otherwise just return the size of the allocated buffer.
+ return OutBufEnd - OutBufStart;
+ }
+
+ /// Set the stream to be unbuffered. When unbuffered, the stream will flush
+ /// after every write. This routine will also flush the buffer immediately
+ /// when the stream is being set to unbuffered.
+ void SetUnbuffered() {
+ flush();
+ SetBufferAndMode(nullptr, 0, Unbuffered);
+ }
+
+ size_t GetNumBytesInBuffer() const {
+ return OutBufCur - OutBufStart;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Data Output Interface
+ //===--------------------------------------------------------------------===//
+
+ void flush() {
+ if (OutBufCur != OutBufStart)
+ flush_nonempty();
+ }
+
+ raw_ostream &operator<<(char C) {
+ if (OutBufCur >= OutBufEnd)
+ return write(C);
+ *OutBufCur++ = C;
+ return *this;
+ }
+
+ raw_ostream &operator<<(unsigned char C) {
+ if (OutBufCur >= OutBufEnd)
+ return write(C);
+ *OutBufCur++ = C;
+ return *this;
+ }
+
+ raw_ostream &operator<<(signed char C) {
+ if (OutBufCur >= OutBufEnd)
+ return write(C);
+ *OutBufCur++ = C;
+ return *this;
+ }
+
+ raw_ostream &operator<<(StringRef Str) {
+ // Inline fast path, particularly for strings with a known length.
+ size_t Size = Str.size();
+
+ // Make sure we can use the fast path.
+ if (Size > (size_t)(OutBufEnd - OutBufCur))
+ return write(Str.data(), Size);
+
+ if (Size) {
+ memcpy(OutBufCur, Str.data(), Size);
+ OutBufCur += Size;
+ }
+ return *this;
+ }
+
+ raw_ostream &operator<<(const char *Str) {
+ // Inline fast path, particularly for constant strings where a sufficiently
+ // smart compiler will simplify strlen.
+
+ return this->operator<<(StringRef(Str));
+ }
+
+ raw_ostream &operator<<(const std::string &Str) {
+ // Avoid the fast path; it would only increase code size for a marginal win.
+ return write(Str.data(), Str.length());
+ }
+
+ raw_ostream &operator<<(const llvm::SmallVectorImpl<char> &Str) {
+ return write(Str.data(), Str.size());
+ }
+
+ raw_ostream &operator<<(unsigned long N);
+ raw_ostream &operator<<(long N);
+ raw_ostream &operator<<(unsigned long long N);
+ raw_ostream &operator<<(long long N);
+ raw_ostream &operator<<(const void *P);
+ raw_ostream &operator<<(unsigned int N) {
+ return this->operator<<(static_cast<unsigned long>(N));
+ }
+
+ raw_ostream &operator<<(int N) {
+ return this->operator<<(static_cast<long>(N));
+ }
+
+ raw_ostream &operator<<(double N);
+
+ /// Output \p N in hexadecimal, without any prefix or padding.
+ raw_ostream &write_hex(unsigned long long N);
+
+ /// Output \p Str, turning '\\', '\t', '\n', '"', and anything that doesn't
+ /// satisfy std::isprint into an escape sequence.
+ raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false);
+
+ raw_ostream &write(unsigned char C);
+ raw_ostream &write(const char *Ptr, size_t Size);
+
+ // Formatted output, see the format() function in Support/Format.h.
+ raw_ostream &operator<<(const format_object_base &Fmt);
+
+ // Formatted output, see the leftJustify() function in Support/Format.h.
+ raw_ostream &operator<<(const FormattedString &);
+
+ // Formatted output, see the formatHex() function in Support/Format.h.
+ raw_ostream &operator<<(const FormattedNumber &);
+
+ /// indent - Insert 'NumSpaces' spaces.
+ raw_ostream &indent(unsigned NumSpaces);
+
+ /// Changes the foreground color of text that will be output from this point
+ /// forward.
+ /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to
+ /// change only the bold attribute, and keep colors untouched
+ /// @param Bold bold/brighter text, default false
+ /// @param BG if true change the background, default: change foreground
+ /// @returns itself so it can be used within << invocations
+ virtual raw_ostream &changeColor(enum Colors Color,
+ bool Bold = false,
+ bool BG = false) {
+ (void)Color;
+ (void)Bold;
+ (void)BG;
+ return *this;
+ }
+
+ /// Resets the colors to terminal defaults. Call this when you are done
+ /// outputting colored text, or before program exit.
+ virtual raw_ostream &resetColor() { return *this; }
+
+ /// Reverses the foreground and background colors.
+ virtual raw_ostream &reverseColor() { return *this; }
+
+ /// This function determines if this stream is connected to a "tty" or
+ /// "console" window. That is, the output would be displayed to the user
+ /// rather than being put on a pipe or stored in a file.
+ virtual bool is_displayed() const { return false; }
+
+ /// This function determines if this stream is displayed and supports colors.
+ virtual bool has_colors() const { return is_displayed(); }
+
+ //===--------------------------------------------------------------------===//
+ // Subclass Interface
+ //===--------------------------------------------------------------------===//
+
+private:
+ /// This is the piece of the class that is implemented by subclasses. This
+ /// writes the \p Size bytes starting at \p Ptr to the underlying stream.
+ ///
+ /// This function is guaranteed to only be called at a point at which it is
+ /// safe for the subclass to install a new buffer via SetBuffer.
+ ///
+ /// \param Ptr The start of the data to be written. For buffered streams this
+ /// is guaranteed to be the start of the buffer.
+ ///
+ /// \param Size The number of bytes to be written.
+ ///
+ /// \invariant { Size > 0 }
+ virtual void write_impl(const char *Ptr, size_t Size) = 0;
+
+ // An out-of-line virtual method to provide a home for the class vtable.
+ virtual void handle();
+
+ /// Return the current position within the stream, not counting the bytes
+ /// currently in the buffer.
+ virtual uint64_t current_pos() const = 0;
+
+protected:
+ /// Use the provided buffer as the raw_ostream buffer. This is intended for
+ /// use only by subclasses which can arrange for the output to go directly
+ /// into the desired output buffer, instead of being copied on each flush.
+ void SetBuffer(char *BufferStart, size_t Size) {
+ SetBufferAndMode(BufferStart, Size, ExternalBuffer);
+ }
+
+ /// Return an efficient buffer size for the underlying output mechanism.
+ virtual size_t preferred_buffer_size() const;
+
+ /// Return the beginning of the current stream buffer, or 0 if the stream is
+ /// unbuffered.
+ const char *getBufferStart() const { return OutBufStart; }
+
+ //===--------------------------------------------------------------------===//
+ // Private Interface
+ //===--------------------------------------------------------------------===//
+private:
+ /// Install the given buffer and mode.
+ void SetBufferAndMode(char *BufferStart, size_t Size, BufferKind Mode);
+
+ /// Flush the current buffer, which is known to be non-empty. This outputs the
+ /// currently buffered data and resets the buffer to empty.
+ void flush_nonempty();
+
+ /// Copy data into the buffer. Size must not be greater than the number of
+ /// unused bytes in the buffer.
+ void copy_to_buffer(const char *Ptr, size_t Size);
+};
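+
+// Example (illustrative): the buffering interface in practice. tell() counts
+// bytes still sitting in the buffer, so it is accurate before any flush.
+//
+//   void demo(raw_ostream &OS) {
+//     OS.SetBufferSize(4096);    // buffer writes in 4 KiB chunks
+//     OS << "hello";
+//     uint64_t Pos = OS.tell();  // 5, if the stream started at offset 0
+//     OS.flush();                // force the buffered bytes out
+//     OS.SetUnbuffered();        // flush, then write through from now on
+//     (void)Pos;
+//   }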
+
+/// An abstract base class for stream implementations that also support a
+/// pwrite operation. This is useful for code that can mostly stream out data,
+/// but needs to patch in a header that needs to know the output size.
+class raw_pwrite_stream : public raw_ostream {
+ virtual void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) = 0;
+
+public:
+ explicit raw_pwrite_stream(bool Unbuffered = false)
+ : raw_ostream(Unbuffered) {}
+ void pwrite(const char *Ptr, size_t Size, uint64_t Offset) {
+#ifndef NDEBUG
+ uint64_t Pos = tell();
+ // /dev/null always reports a pos of 0, so we cannot perform this check
+ // in that case.
+ if (Pos)
+ assert(Size + Offset <= Pos && "We don't support extending the stream");
+#endif
+ pwrite_impl(Ptr, Size, Offset);
+ }
+};
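+
+// Example (illustrative): the classic use of pwrite is to stream out a body
+// and then patch a size field back into a previously written header.
+//
+//   void emit(raw_pwrite_stream &OS) {
+//     uint64_t SizeFieldOffset = OS.tell();
+//     OS << "????";                         // placeholder for a 4-byte size
+//     OS << "payload";
+//     char Size[4] = {7, 0, 0, 0};          // little-endian payload size
+//     OS.pwrite(Size, 4, SizeFieldOffset);  // patch; does not extend stream
+//   }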
+
+//===----------------------------------------------------------------------===//
+// File Output Streams
+//===----------------------------------------------------------------------===//
+
+/// A raw_ostream that writes to a file descriptor.
+///
+class raw_fd_ostream : public raw_pwrite_stream {
+ int FD;
+ bool ShouldClose;
+
+ /// This flag is true if an error of any kind has been detected.
+ ///
+ bool Error;
+
+ uint64_t pos;
+
+ bool SupportsSeeking;
+
+ /// See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+ /// Return the current position within the stream, not counting the bytes
+ /// currently in the buffer.
+ uint64_t current_pos() const override { return pos; }
+
+ /// Determine an efficient buffer size.
+ size_t preferred_buffer_size() const override;
+
+ /// Set the flag indicating that an output error has been encountered.
+ void error_detected() { Error = true; }
+
+public:
+ /// Open the specified file for writing. If an error occurs, information
+ /// about the error is put into EC, and the stream should be immediately
+ /// destroyed.
+ /// \p Flags allows optional flags to control how the file will be opened.
+ ///
+ /// As a special case, if Filename is "-", then the stream will use
+ /// STDOUT_FILENO instead of opening a file. Note that it will still consider
+ /// itself to own the file descriptor. In particular, it will close the
+ /// file descriptor when it is done (this is necessary to detect
+ /// output errors).
+ raw_fd_ostream(StringRef Filename, std::error_code &EC,
+ sys::fs::OpenFlags Flags);
+
+ /// FD is the file descriptor that this writes to. If ShouldClose is true,
+ /// this closes the file when the stream is destroyed.
+ raw_fd_ostream(int fd, bool shouldClose, bool unbuffered=false);
+
+ ~raw_fd_ostream() override;
+
+ /// Manually flush the stream and close the file. Note that this does not call
+ /// fsync.
+ void close();
+
+ bool supportsSeeking() { return SupportsSeeking; }
+
+ /// Flushes the stream and repositions the underlying file descriptor position
+ /// to the offset specified from the beginning of the file.
+ uint64_t seek(uint64_t off);
+
+ raw_ostream &changeColor(enum Colors colors, bool bold=false,
+ bool bg=false) override;
+ raw_ostream &resetColor() override;
+
+ raw_ostream &reverseColor() override;
+
+ bool is_displayed() const override;
+
+ bool has_colors() const override;
+
+ /// Return the value of the flag in this raw_fd_ostream indicating whether an
+ /// output error has been encountered.
+ /// This doesn't implicitly flush any pending output. Also, it doesn't
+ /// guarantee to detect all errors unless the stream has been closed.
+ bool has_error() const {
+ return Error;
+ }
+
+ /// Set the flag read by has_error() to false. If the error flag is set at the
+ /// time when this raw_ostream's destructor is called, report_fatal_error is
+ /// called to report the error. Use clear_error() after handling the error to
+ /// avoid this behavior.
+ ///
+ /// "Errors should never pass silently.
+ /// Unless explicitly silenced."
+ /// - from The Zen of Python, by Tim Peters
+ ///
+ void clear_error() {
+ Error = false;
+ }
+};
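+
+// Example (illustrative; sys::fs::F_None is assumed to be the "no special
+// flags" value from Support/FileSystem.h at this revision):
+//
+//   std::error_code EC;
+//   raw_fd_ostream Out("output.txt", EC, sys::fs::F_None);
+//   if (EC) {
+//     errs() << "cannot open: " << EC.message() << "\n";
+//     return;
+//   }
+//   Out << "data\n";
+//   Out.close();
+//   if (Out.has_error())
+//     Out.clear_error();  // acknowledge, or the destructor aborts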
+
+/// This returns a reference to a raw_ostream for standard output. Use it like:
+/// outs() << "foo" << "bar";
+raw_ostream &outs();
+
+/// This returns a reference to a raw_ostream for standard error. Use it like:
+/// errs() << "foo" << "bar";
+raw_ostream &errs();
+
+/// This returns a reference to a raw_ostream which simply discards output.
+raw_ostream &nulls();
+
+//===----------------------------------------------------------------------===//
+// Output Stream Adaptors
+//===----------------------------------------------------------------------===//
+
+/// A raw_ostream that writes to an std::string. This is a simple adaptor
+/// class. This class does not encounter output errors.
+class raw_string_ostream : public raw_ostream {
+ std::string &OS;
+
+ /// See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ /// Return the current position within the stream, not counting the bytes
+ /// currently in the buffer.
+ uint64_t current_pos() const override { return OS.size(); }
+
+public:
+ explicit raw_string_ostream(std::string &O) : OS(O) {}
+ ~raw_string_ostream() override;
+
+ /// Flushes the stream contents to the target string and returns the string's
+ /// reference.
+ std::string& str() {
+ flush();
+ return OS;
+ }
+};
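+
+// Example (illustrative): building a std::string incrementally; str() flushes
+// the stream's buffer into the target string before returning it.
+//
+//   std::string Storage;
+//   raw_string_ostream SOS(Storage);
+//   SOS << "count = " << 42;
+//   const std::string &Result = SOS.str();  // "count = 42"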
+
+/// A raw_ostream that writes to a SmallVector or SmallString. This is a
+/// simple adaptor class. This class does not encounter output errors.
+/// raw_svector_ostream operates without a buffer, delegating all memory
+/// management to the SmallString. Thus the SmallString is always up-to-date,
+/// may be used directly, and there is no need to call flush().
+class raw_svector_ostream : public raw_pwrite_stream {
+ SmallVectorImpl<char> &OS;
+
+ /// See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t Size) override;
+
+ void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+ /// Return the current position within the stream.
+ uint64_t current_pos() const override;
+
+public:
+ /// Construct a new raw_svector_ostream.
+ ///
+ /// \param O The vector to write to; this should generally have at least 128
+ /// bytes free to avoid any extraneous memory overhead.
+ explicit raw_svector_ostream(SmallVectorImpl<char> &O) : OS(O) {
+ SetUnbuffered();
+ }
+ ~raw_svector_ostream() override {}
+
+ void flush() = delete;
+
+ /// Return a StringRef for the vector contents.
+ StringRef str() { return StringRef(OS.data(), OS.size()); }
+};
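+
+// Example (illustrative): the vector is always up to date because the stream
+// is unbuffered, so no flush() is needed (and flush() is deleted).
+//
+//   SmallString<128> Buffer;   // from llvm/ADT/SmallString.h
+//   raw_svector_ostream VOS(Buffer);
+//   VOS << "x = " << 3;
+//   StringRef S = VOS.str();   // "x = 3", valid while Buffer is alive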
+
+/// A raw_ostream that discards all output.
+class raw_null_ostream : public raw_pwrite_stream {
+ /// See raw_ostream::write_impl.
+ void write_impl(const char *Ptr, size_t size) override;
+ void pwrite_impl(const char *Ptr, size_t Size, uint64_t Offset) override;
+
+ /// Return the current position within the stream, not counting the bytes
+ /// currently in the buffer.
+ uint64_t current_pos() const override;
+
+public:
+ explicit raw_null_ostream() {}
+ ~raw_null_ostream() override;
+};
+
+class buffer_ostream : public raw_svector_ostream {
+ raw_ostream &OS;
+ SmallVector<char, 0> Buffer;
+
+public:
+ buffer_ostream(raw_ostream &OS) : raw_svector_ostream(Buffer), OS(OS) {}
+ ~buffer_ostream() override { OS << str(); }
+};
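+
+// Example (illustrative): buffer_ostream accumulates all output in its own
+// SmallVector and hands it to the wrapped stream in a single write when it is
+// destroyed, which helps when the final target wants one bulk write.
+//
+//   {
+//     buffer_ostream BOS(outs());
+//     BOS << "written to outs() on scope exit\n";
+//   }  // ~buffer_ostream: OS << str()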
+
+} // end llvm namespace
+
+#endif // LLVM_SUPPORT_RAW_OSTREAM_H
diff --git a/contrib/llvm/include/llvm/Support/thread.h b/contrib/llvm/include/llvm/Support/thread.h
new file mode 100644
index 0000000..2d13041
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/thread.h
@@ -0,0 +1,66 @@
+//===-- llvm/Support/thread.h - Wrapper for <thread> ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header is a wrapper for <thread> that works around problems with the
+// MSVC headers when exceptions are disabled. It also provides llvm::thread,
+// which is either a typedef of std::thread or a replacement that calls the
+// function synchronously depending on the value of LLVM_ENABLE_THREADS.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREAD_H
+#define LLVM_SUPPORT_THREAD_H
+
+#include "llvm/Config/llvm-config.h"
+
+#if LLVM_ENABLE_THREADS
+
+#ifdef _MSC_VER
+// concrt.h depends on eh.h for __uncaught_exception declaration
+// even if we disable exceptions.
+#include <eh.h>
+
+// Suppress 'C++ exception handler used, but unwind semantics are not enabled.'
+#pragma warning(push)
+#pragma warning(disable:4530)
+#endif
+
+#include <thread>
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+namespace llvm {
+typedef std::thread thread;
+}
+
+#else // !LLVM_ENABLE_THREADS
+
+#include <utility>
+
+namespace llvm {
+
+struct thread {
+ thread() {}
+ thread(thread &&other) {}
+ template <class Function, class... Args>
+ explicit thread(Function &&f, Args &&... args) {
+ f(std::forward<Args>(args)...);
+ }
+ thread(const thread &) = delete;
+
+ void join() {}
+};
+
+}
+
+#endif // LLVM_ENABLE_THREADS
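+
+// Example (illustrative): code written against llvm::thread behaves the same
+// in both configurations: with LLVM_ENABLE_THREADS it is std::thread; without
+// it, the constructor runs the function synchronously and join() is a no-op.
+//
+//   void work(int N);
+//   llvm::thread T(work, 42);
+//   T.join();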
+
+#endif
diff --git a/contrib/llvm/include/llvm/Support/type_traits.h b/contrib/llvm/include/llvm/Support/type_traits.h
new file mode 100644
index 0000000..88385c3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Support/type_traits.h
@@ -0,0 +1,109 @@
+//===- llvm/Support/type_traits.h - Simplified type traits ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides useful additions to the standard type_traits library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_TYPE_TRAITS_H
+#define LLVM_SUPPORT_TYPE_TRAITS_H
+
+#include <type_traits>
+#include <utility>
+
+#ifndef __has_feature
+#define LLVM_DEFINED_HAS_FEATURE
+#define __has_feature(x) 0
+#endif
+
+namespace llvm {
+
+/// isPodLike - This is a type trait that is used to determine whether a given
+/// type can be copied around with memcpy instead of running ctors etc.
+template <typename T>
+struct isPodLike {
+ // std::is_trivially_copyable is available in libc++ when using clang, and in
+ // the libstdc++ that ships with GCC 5.
+#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ // If the compiler supports the is_trivially_copyable trait use it, as it
+ // matches the definition of isPodLike closely.
+ static const bool value = std::is_trivially_copyable<T>::value;
+#elif __has_feature(is_trivially_copyable)
+ // Use the internal name if the compiler supports is_trivially_copyable but we
+ // don't know if the standard library does. This is the case for clang in
+ // conjunction with libstdc++ from GCC 4.x.
+ static const bool value = __is_trivially_copyable(T);
+#else
+ // If we don't know anything else, we can (at least) assume that all non-class
+ // types are PODs.
+ static const bool value = !std::is_class<T>::value;
+#endif
+};
+
+// std::pair's are pod-like if their elements are.
+template<typename T, typename U>
+struct isPodLike<std::pair<T, U> > {
+ static const bool value = isPodLike<T>::value && isPodLike<U>::value;
+};
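+
+// Example (illustrative): a trivially copyable user type can opt in by
+// specializing the trait; containers can then branch on it to use memcpy.
+// (Point is a hypothetical user struct.)
+//
+//   struct Point { int X, Y; };
+//   namespace llvm {
+//   template <> struct isPodLike<Point> { static const bool value = true; };
+//   }
+//   static_assert(llvm::isPodLike<std::pair<int, char>>::value,
+//                 "pairs of PODs are POD-like");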
+
+/// \brief Metafunction that determines whether the given type is either an
+/// integral type or an enumeration type.
+///
+/// Note that this accepts potentially more integral types than is_integral
+/// because it is based on merely being convertible implicitly to an integral
+/// type.
+template <typename T> class is_integral_or_enum {
+ typedef typename std::remove_reference<T>::type UnderlyingT;
+
+public:
+ static const bool value =
+ !std::is_class<UnderlyingT>::value && // Filter conversion operators.
+ !std::is_pointer<UnderlyingT>::value &&
+ !std::is_floating_point<UnderlyingT>::value &&
+ std::is_convertible<UnderlyingT, unsigned long long>::value;
+};
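+
+// Example (illustrative): the trait accepts plain integers and unscoped
+// enums, but rejects classes, pointers, and floating-point types.
+//
+//   enum Color { Red };
+//   static_assert(is_integral_or_enum<int>::value, "int is integral");
+//   static_assert(is_integral_or_enum<Color>::value, "unscoped enum converts");
+//   static_assert(!is_integral_or_enum<float>::value, "float is rejected");
+//   static_assert(!is_integral_or_enum<int *>::value, "pointers are rejected");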
+
+/// \brief If T is a pointer, just return it. If it is not, return T&.
+template<typename T, typename Enable = void>
+struct add_lvalue_reference_if_not_pointer { typedef T &type; };
+
+template <typename T>
+struct add_lvalue_reference_if_not_pointer<
+ T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+ typedef T type;
+};
+
+/// \brief If T is a pointer to X, return a pointer to const X. If it is not,
+/// return const T.
+template<typename T, typename Enable = void>
+struct add_const_past_pointer { typedef const T type; };
+
+template <typename T>
+struct add_const_past_pointer<
+ T, typename std::enable_if<std::is_pointer<T>::value>::type> {
+ typedef const typename std::remove_pointer<T>::type *type;
+};
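+
+// Example (illustrative): how the two metafunctions transform types.
+//
+//   static_assert(std::is_same<add_lvalue_reference_if_not_pointer<int>::type,
+//                              int &>::value, "non-pointer gains a reference");
+//   static_assert(std::is_same<add_lvalue_reference_if_not_pointer<int *>::type,
+//                              int *>::value, "pointer is returned unchanged");
+//   static_assert(std::is_same<add_const_past_pointer<int>::type,
+//                              const int>::value, "non-pointer becomes const");
+//   static_assert(std::is_same<add_const_past_pointer<int *>::type,
+//                              const int *>::value, "pointee becomes const");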
+
+}
+
+// If the compiler supports detecting whether a class is final, define
+// an LLVM_IS_FINAL macro. If it cannot be defined properly, this
+// macro will be left undefined.
+#if __cplusplus >= 201402L
+#define LLVM_IS_FINAL(Ty) std::is_final<Ty>()
+#elif __has_feature(is_final) || LLVM_GNUC_PREREQ(4, 7, 0)
+#define LLVM_IS_FINAL(Ty) __is_final(Ty)
+#endif
+
+#ifdef LLVM_DEFINED_HAS_FEATURE
+#undef __has_feature
+#endif
+
+#endif
diff --git a/contrib/llvm/include/llvm/TableGen/Error.h b/contrib/llvm/include/llvm/TableGen/Error.h
new file mode 100644
index 0000000..3df658d
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/Error.h
@@ -0,0 +1,39 @@
+//===- llvm/TableGen/Error.h - tblgen error handling helpers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains error handling helper routines to pretty-print diagnostic
+// messages from tblgen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_ERROR_H
+#define LLVM_TABLEGEN_ERROR_H
+
+#include "llvm/Support/SourceMgr.h"
+
+namespace llvm {
+
+void PrintWarning(ArrayRef<SMLoc> WarningLoc, const Twine &Msg);
+void PrintWarning(const char *Loc, const Twine &Msg);
+void PrintWarning(const Twine &Msg);
+
+void PrintError(ArrayRef<SMLoc> ErrorLoc, const Twine &Msg);
+void PrintError(const char *Loc, const Twine &Msg);
+void PrintError(const Twine &Msg);
+
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(const Twine &Msg);
+LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef<SMLoc> ErrorLoc,
+ const Twine &Msg);
+
+extern SourceMgr SrcMgr;
+extern unsigned ErrorsPrinted;
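+
+// Example (illustrative): a backend reporting diagnostics against record
+// source locations (Rec is a hypothetical Record * whose getLoc() yields an
+// ArrayRef<SMLoc>).
+//
+//   PrintWarning(Rec->getLoc(), "field is deprecated");
+//   if (SomethingFatal)
+//     PrintFatalError(Rec->getLoc(), "cannot continue");  // does not return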
+
+} // end namespace "llvm"
+
+#endif
diff --git a/contrib/llvm/include/llvm/TableGen/Main.h b/contrib/llvm/include/llvm/TableGen/Main.h
new file mode 100644
index 0000000..866b986
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/Main.h
@@ -0,0 +1,28 @@
+//===- llvm/TableGen/Main.h - tblgen entry point ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the common entry point for tblgen tools.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_MAIN_H
+#define LLVM_TABLEGEN_MAIN_H
+
+namespace llvm {
+
+class RecordKeeper;
+class raw_ostream;
+/// \brief Perform the action using Records, and write output to OS.
+/// \returns true on error, false otherwise.
+typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records);
+
+int TableGenMain(char *argv0, TableGenMainFn *MainFn);
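+
+// Example (illustrative): the usual driver pattern for a tblgen-style tool,
+// with a hypothetical backend callback (cl::ParseCommandLineOptions comes
+// from llvm/Support/CommandLine.h).
+//
+//   static bool EmitMyTables(raw_ostream &OS, RecordKeeper &Records) {
+//     OS << "// generated\n";
+//     return false;  // false means success
+//   }
+//
+//   int main(int argc, char **argv) {
+//     llvm::cl::ParseCommandLineOptions(argc, argv);
+//     return llvm::TableGenMain(argv[0], &EmitMyTables);
+//   }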
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/TableGen/Record.h b/contrib/llvm/include/llvm/TableGen/Record.h
new file mode 100644
index 0000000..eb1c5c7
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/Record.h
@@ -0,0 +1,1592 @@
+//===- llvm/TableGen/Record.h - Classes for Table Records -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the main TableGen data structures, including the TableGen
+// types, values, and high-level data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_RECORD_H
+#define LLVM_TABLEGEN_RECORD_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+namespace llvm {
+
+class ListRecTy;
+struct MultiClass;
+class Record;
+class RecordVal;
+class RecordKeeper;
+
+//===----------------------------------------------------------------------===//
+// Type Classes
+//===----------------------------------------------------------------------===//
+
+class RecTy {
+public:
+ /// \brief Subclass discriminator (for dyn_cast<> et al.)
+ enum RecTyKind {
+ BitRecTyKind,
+ BitsRecTyKind,
+ IntRecTyKind,
+ StringRecTyKind,
+ ListRecTyKind,
+ DagRecTyKind,
+ RecordRecTyKind
+ };
+
+private:
+ RecTyKind Kind;
+ std::unique_ptr<ListRecTy> ListTy;
+
+public:
+ RecTyKind getRecTyKind() const { return Kind; }
+
+ RecTy(RecTyKind K) : Kind(K) {}
+ virtual ~RecTy() {}
+
+ virtual std::string getAsString() const = 0;
+ void print(raw_ostream &OS) const { OS << getAsString(); }
+ void dump() const;
+
+ /// typeIsConvertibleTo - Return true if all values of 'this' type can be
+ /// converted to the specified type.
+ virtual bool typeIsConvertibleTo(const RecTy *RHS) const;
+
+ /// getListTy - Returns the type representing list<this>.
+ ListRecTy *getListTy();
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
+ Ty.print(OS);
+ return OS;
+}
+
+/// BitRecTy - 'bit' - Represent a single bit
+///
+class BitRecTy : public RecTy {
+ static BitRecTy Shared;
+ BitRecTy() : RecTy(BitRecTyKind) {}
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitRecTyKind;
+ }
+
+ static BitRecTy *get() { return &Shared; }
+
+ std::string getAsString() const override { return "bit"; }
+
+ bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// BitsRecTy - 'bits<n>' - Represent a fixed number of bits
+///
+class BitsRecTy : public RecTy {
+ unsigned Size;
+ explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {}
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == BitsRecTyKind;
+ }
+
+ static BitsRecTy *get(unsigned Sz);
+
+ unsigned getNumBits() const { return Size; }
+
+ std::string getAsString() const override;
+
+ bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// IntRecTy - 'int' - Represent an integer value of no particular size
+///
+class IntRecTy : public RecTy {
+ static IntRecTy Shared;
+ IntRecTy() : RecTy(IntRecTyKind) {}
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == IntRecTyKind;
+ }
+
+ static IntRecTy *get() { return &Shared; }
+
+ std::string getAsString() const override { return "int"; }
+
+ bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// StringRecTy - 'string' - Represent a string value
+///
+class StringRecTy : public RecTy {
+ static StringRecTy Shared;
+ StringRecTy() : RecTy(StringRecTyKind) {}
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == StringRecTyKind;
+ }
+
+ static StringRecTy *get() { return &Shared; }
+
+ std::string getAsString() const override;
+};
+
+/// ListRecTy - 'list<Ty>' - Represent a list of values, all of which must be of
+/// the specified type.
+///
+class ListRecTy : public RecTy {
+ RecTy *Ty;
+ explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
+ friend ListRecTy *RecTy::getListTy();
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == ListRecTyKind;
+ }
+
+ static ListRecTy *get(RecTy *T) { return T->getListTy(); }
+ RecTy *getElementType() const { return Ty; }
+
+ std::string getAsString() const override;
+
+ bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// DagRecTy - 'dag' - Represent a dag fragment
+///
+class DagRecTy : public RecTy {
+ static DagRecTy Shared;
+ DagRecTy() : RecTy(DagRecTyKind) {}
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == DagRecTyKind;
+ }
+
+ static DagRecTy *get() { return &Shared; }
+
+ std::string getAsString() const override;
+};
+
+/// RecordRecTy - '[classname]' - Represent an instance of a class, such as:
+/// (R32 X = EAX).
+///
+class RecordRecTy : public RecTy {
+ Record *Rec;
+ explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
+ friend class Record;
+
+public:
+ static bool classof(const RecTy *RT) {
+ return RT->getRecTyKind() == RecordRecTyKind;
+ }
+
+ static RecordRecTy *get(Record *R);
+
+ Record *getRecord() const { return Rec; }
+
+ std::string getAsString() const override;
+
+ bool typeIsConvertibleTo(const RecTy *RHS) const override;
+};
+
+/// resolveTypes - Find a common type that T1 and T2 convert to.
+/// Return null if no such type exists.
+///
+RecTy *resolveTypes(RecTy *T1, RecTy *T2);
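+
+// Example (illustrative): the singleton accessors hand out shared RecTy
+// objects, and typeIsConvertibleTo answers conversion questions.
+//
+//   RecTy *Int = IntRecTy::get();
+//   RecTy *Bit = BitRecTy::get();
+//   bool Widens = Bit->typeIsConvertibleTo(Int);  // bit converts to int
+//   RecTy *Common = resolveTypes(Bit, Int);       // the common type: int
+//   ListRecTy *ListOfInt = Int->getListTy();      // the type 'list<int>'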
+
+//===----------------------------------------------------------------------===//
+// Initializer Classes
+//===----------------------------------------------------------------------===//
+
+class Init {
+protected:
+ /// \brief Discriminator enum (for isa<>, dyn_cast<>, et al.)
+ ///
+ /// This enum is laid out by a preorder traversal of the inheritance
+ /// hierarchy, and does not contain an entry for abstract classes, as per
+ /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst.
+ ///
+ /// We also explicitly include "first" and "last" values for each
+ /// interior node of the inheritance tree, to make it easier to read the
+ /// corresponding classof().
+ ///
+ /// We could pack these a bit tighter by not having the IK_FirstXXXInit
+ /// and IK_LastXXXInit be their own values, but that would degrade
+ /// readability for really no benefit.
+ enum InitKind {
+ IK_BitInit,
+ IK_FirstTypedInit,
+ IK_BitsInit,
+ IK_DagInit,
+ IK_DefInit,
+ IK_FieldInit,
+ IK_IntInit,
+ IK_ListInit,
+ IK_FirstOpInit,
+ IK_BinOpInit,
+ IK_TernOpInit,
+ IK_UnOpInit,
+ IK_LastOpInit,
+ IK_StringInit,
+ IK_VarInit,
+ IK_VarListElementInit,
+ IK_LastTypedInit,
+ IK_UnsetInit,
+ IK_VarBitInit
+ };
+
+private:
+ const InitKind Kind;
+ Init(const Init &) = delete;
+ Init &operator=(const Init &) = delete;
+ virtual void anchor();
+
+public:
+ InitKind getKind() const { return Kind; }
+
+protected:
+ explicit Init(InitKind K) : Kind(K) {}
+
+public:
+ virtual ~Init() {}
+
+ /// isComplete - This virtual method should be overridden by values that may
+ /// not be completely specified yet.
+ virtual bool isComplete() const { return true; }
+
+ /// print - Print out this value.
+ void print(raw_ostream &OS) const { OS << getAsString(); }
+
+ /// getAsString - Convert this value to a string form.
+ virtual std::string getAsString() const = 0;
+ /// getAsUnquotedString - Convert this value to a string form,
+ /// without adding quote markers. This primarily affects
+ /// StringInits, where we will not surround the string value with
+ /// quotes.
+ virtual std::string getAsUnquotedString() const { return getAsString(); }
+
+ /// dump - Debugging method that may be called through a debugger, just
+ /// invokes print on stderr.
+ void dump() const;
+
+ /// convertInitializerTo - This virtual function converts to the appropriate
+ /// Init based on the passed in type.
+ virtual Init *convertInitializerTo(RecTy *Ty) const = 0;
+
+ /// convertInitializerBitRange - This method is used to implement the bitrange
+ /// selection operator. Given an initializer, it selects the specified bits
+ /// out, returning them as a new init of bits type. If it is not legal to use
+ /// the bit subscript operator on this initializer, return null.
+ ///
+ virtual Init *
+ convertInitializerBitRange(const std::vector<unsigned> &Bits) const {
+ return nullptr;
+ }
+
+ /// convertInitListSlice - This method is used to implement the list slice
+ /// selection operator. Given an initializer, it selects the specified list
+ /// elements, returning them as a new init of list type. If it is not legal
+ /// to take a slice of this, return null.
+ ///
+ virtual Init *
+ convertInitListSlice(const std::vector<unsigned> &Elements) const {
+ return nullptr;
+ }
+
+ /// getFieldType - This method is used to implement the FieldInit class.
+ /// Implementors of this method should return the type of the named field if
+ /// they are of record type.
+ ///
+ virtual RecTy *getFieldType(const std::string &FieldName) const {
+ return nullptr;
+ }
+
+ /// getFieldInit - This method complements getFieldType to return the
+ /// initializer for the specified field. If getFieldType returns non-null
+ /// this method should return non-null, otherwise it returns null.
+ ///
+ virtual Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const {
+ return nullptr;
+ }
+
+ /// resolveReferences - This method is used by classes that refer to other
+ /// variables which may not be defined at the time the expression is formed.
+ /// If a value is set for the variable later, this method will be called on
+ /// users of the value to allow the value to propagate out.
+ ///
+ virtual Init *resolveReferences(Record &R, const RecordVal *RV) const {
+ return const_cast<Init *>(this);
+ }
+
+ /// getBit - This method is used to return the initializer for the specified
+ /// bit.
+ virtual Init *getBit(unsigned Bit) const = 0;
+
+ /// getBitVar - This method is used to retrieve the initializer for bit
+ /// reference. For non-VarBitInit, it simply returns itself.
+ virtual Init *getBitVar() const { return const_cast<Init*>(this); }
+
+ /// getBitNum - This method is used to retrieve the bit number of a bit
+ /// reference. For non-VarBitInit, it simply returns 0.
+ virtual unsigned getBitNum() const { return 0; }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
+ I.print(OS); return OS;
+}
+
+/// TypedInit - This is the common super-class of types that have a specific,
+/// explicit, type.
+///
+class TypedInit : public Init {
+ RecTy *Ty;
+
+ TypedInit(const TypedInit &Other) = delete;
+ TypedInit &operator=(const TypedInit &Other) = delete;
+
+protected:
+ explicit TypedInit(InitKind K, RecTy *T) : Init(K), Ty(T) {}
+ ~TypedInit() override {
+ // If this is a DefInit we need to delete the RecordRecTy.
+ if (getKind() == IK_DefInit)
+ delete Ty;
+ }
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstTypedInit &&
+ I->getKind() <= IK_LastTypedInit;
+ }
+ RecTy *getType() const { return Ty; }
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Init *
+ convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
+ Init *
+ convertInitListSlice(const std::vector<unsigned> &Elements) const override;
+
+ /// getFieldType - This method is used to implement the FieldInit class.
+ /// Implementors of this method should return the type of the named field if
+ /// they are of record type.
+ ///
+ RecTy *getFieldType(const std::string &FieldName) const override;
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ virtual Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const = 0;
+};
+
+/// UnsetInit - ? - Represents an uninitialized value
+///
+class UnsetInit : public Init {
+ UnsetInit() : Init(IK_UnsetInit) {}
+ UnsetInit(const UnsetInit &) = delete;
+ UnsetInit &operator=(const UnsetInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnsetInit;
+ }
+ static UnsetInit *get();
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Init *getBit(unsigned Bit) const override {
+ return const_cast<UnsetInit*>(this);
+ }
+
+ bool isComplete() const override { return false; }
+ std::string getAsString() const override { return "?"; }
+};
+
+/// BitInit - true/false - Represent a concrete initializer for a bit.
+///
+class BitInit : public Init {
+ bool Value;
+
+ explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {}
+ BitInit(const BitInit &Other) = delete;
+ BitInit &operator=(BitInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitInit;
+ }
+ static BitInit *get(bool V);
+
+ bool getValue() const { return Value; }
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Init *getBit(unsigned Bit) const override {
+ assert(Bit < 1 && "Bit index out of range!");
+ return const_cast<BitInit*>(this);
+ }
+
+ std::string getAsString() const override { return Value ? "1" : "0"; }
+};
+
+/// BitsInit - { a, b, c } - Represents an initializer for a BitsRecTy value.
+/// It contains a vector of bits, whose size is determined by the type.
+///
+class BitsInit : public TypedInit, public FoldingSetNode {
+ std::vector<Init*> Bits;
+
+ BitsInit(ArrayRef<Init *> Range)
+ : TypedInit(IK_BitsInit, BitsRecTy::get(Range.size())),
+ Bits(Range.begin(), Range.end()) {}
+
+ BitsInit(const BitsInit &Other) = delete;
+ BitsInit &operator=(const BitsInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BitsInit;
+ }
+ static BitsInit *get(ArrayRef<Init *> Range);
+
+ void Profile(FoldingSetNodeID &ID) const;
+
+ unsigned getNumBits() const { return Bits.size(); }
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+ Init *
+ convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
+
+ bool isComplete() const override {
+ for (unsigned i = 0; i != getNumBits(); ++i)
+ if (!getBit(i)->isComplete()) return false;
+ return true;
+ }
+ /// allInComplete - Return true if none of the bits are complete.
+ bool allInComplete() const {
+ for (unsigned i = 0; i != getNumBits(); ++i)
+ if (getBit(i)->isComplete()) return false;
+ return true;
+ }
+ std::string getAsString() const override;
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override {
+ llvm_unreachable("Illegal element reference off bits<n>");
+ }
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ Init *getBit(unsigned Bit) const override {
+ assert(Bit < Bits.size() && "Bit index out of range!");
+ return Bits[Bit];
+ }
+};
+
+/// IntInit - 7 - Represent an initialization by a literal integer value.
+///
+class IntInit : public TypedInit {
+ int64_t Value;
+
+ explicit IntInit(int64_t V)
+ : TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
+
+ IntInit(const IntInit &Other) = delete;
+ IntInit &operator=(const IntInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_IntInit;
+ }
+ static IntInit *get(int64_t V);
+
+ int64_t getValue() const { return Value; }
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+ Init *
+ convertInitializerBitRange(const std::vector<unsigned> &Bits) const override;
+
+ std::string getAsString() const override;
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override {
+ llvm_unreachable("Illegal element reference off int");
+ }
+
+ Init *getBit(unsigned Bit) const override {
+ return BitInit::get((Value & (1ULL << Bit)) != 0);
+ }
+};
+
+/// StringInit - "foo" - Represent an initialization by a string value.
+///
+class StringInit : public TypedInit {
+ std::string Value;
+
+ explicit StringInit(StringRef V)
+ : TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
+
+ StringInit(const StringInit &Other) = delete;
+ StringInit &operator=(const StringInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_StringInit;
+ }
+ static StringInit *get(StringRef);
+
+ const std::string &getValue() const { return Value; }
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ std::string getAsString() const override { return "\"" + Value + "\""; }
+ std::string getAsUnquotedString() const override { return Value; }
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override {
+ llvm_unreachable("Illegal element reference off string");
+ }
+
+ Init *getBit(unsigned Bit) const override {
+ llvm_unreachable("Illegal bit reference off string");
+ }
+};
+
+/// ListInit - [AL, AH, CL] - Represent a list of defs
+///
+class ListInit : public TypedInit, public FoldingSetNode {
+ std::vector<Init*> Values;
+
+public:
+ typedef std::vector<Init*>::const_iterator const_iterator;
+
+private:
+ explicit ListInit(ArrayRef<Init *> Range, RecTy *EltTy)
+ : TypedInit(IK_ListInit, ListRecTy::get(EltTy)),
+ Values(Range.begin(), Range.end()) {}
+
+ ListInit(const ListInit &Other) = delete;
+ ListInit &operator=(const ListInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_ListInit;
+ }
+ static ListInit *get(ArrayRef<Init *> Range, RecTy *EltTy);
+
+ void Profile(FoldingSetNodeID &ID) const;
+
+ Init *getElement(unsigned i) const {
+ assert(i < Values.size() && "List element index out of range!");
+ return Values[i];
+ }
+
+ Record *getElementAsRecord(unsigned i) const;
+
+ Init *
+ convertInitListSlice(const std::vector<unsigned> &Elements) const override;
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ /// resolveReferences - This method is used by classes that refer to other
+ /// variables which may not be defined at the time the expression is formed.
+ /// If a value is set for the variable later, this method will be called on
+ /// users of the value to allow the value to propagate out.
+ ///
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override;
+
+ ArrayRef<Init*> getValues() const { return Values; }
+
+ const_iterator begin() const { return Values.begin(); }
+ const_iterator end () const { return Values.end(); }
+
+ size_t size () const { return Values.size(); }
+ bool empty() const { return Values.empty(); }
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override;
+
+ Init *getBit(unsigned Bit) const override {
+ llvm_unreachable("Illegal bit reference off list");
+ }
+};
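+
+// Example (illustrative): building the TableGen value [1, 2] of type
+// list<int> out of uniqued IntInits.
+//
+//   Init *Elts[] = {IntInit::get(1), IntInit::get(2)};
+//   ListInit *LI = ListInit::get(Elts, IntRecTy::get());
+//   std::string Text = LI->getAsString();  // "[1, 2]"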
+
+/// OpInit - Base class for operators
+///
+class OpInit : public TypedInit {
+ OpInit(const OpInit &Other) = delete;
+ OpInit &operator=(OpInit &Other) = delete;
+
+protected:
+ explicit OpInit(InitKind K, RecTy *Type) : TypedInit(K, Type) {}
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() >= IK_FirstOpInit &&
+ I->getKind() <= IK_LastOpInit;
+ }
+ // Clone - Clone this operator, replacing arguments with the new list
+ virtual OpInit *clone(std::vector<Init *> &Operands) const = 0;
+
+ virtual unsigned getNumOperands() const = 0;
+ virtual Init *getOperand(unsigned i) const = 0;
+
+ // Fold - If possible, fold this to a simpler init. Return this if not
+ // possible to fold.
+ virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const = 0;
+
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override;
+
+ Init *getBit(unsigned Bit) const override;
+};
+
+/// UnOpInit - !op (X) - Transform an init.
+///
+class UnOpInit : public OpInit {
+public:
+ enum UnaryOp { CAST, HEAD, TAIL, EMPTY };
+
+private:
+ UnaryOp Opc;
+ Init *LHS;
+
+ UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type)
+ : OpInit(IK_UnOpInit, Type), Opc(opc), LHS(lhs) {}
+
+ UnOpInit(const UnOpInit &Other) = delete;
+ UnOpInit &operator=(const UnOpInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_UnOpInit;
+ }
+ static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type);
+
+ // Clone - Clone this operator, replacing arguments with the new list
+ OpInit *clone(std::vector<Init *> &Operands) const override {
+ assert(Operands.size() == 1 &&
+ "Wrong number of operands for unary operation");
+ return UnOpInit::get(getOpcode(), *Operands.begin(), getType());
+ }
+
+ unsigned getNumOperands() const override { return 1; }
+ Init *getOperand(unsigned i) const override {
+ assert(i == 0 && "Invalid operand id for unary operator");
+ return getOperand();
+ }
+
+ UnaryOp getOpcode() const { return Opc; }
+ Init *getOperand() const { return LHS; }
+
+ // Fold - If possible, fold this to a simpler init. Return this if not
+ // possible to fold.
+ Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const override;
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override;
+};
+
+/// BinOpInit - !op (X, Y) - Combine two inits.
+///
+class BinOpInit : public OpInit {
+public:
+ enum BinaryOp { ADD, AND, SHL, SRA, SRL, LISTCONCAT, STRCONCAT, CONCAT, EQ };
+
+private:
+ BinaryOp Opc;
+ Init *LHS, *RHS;
+
+ BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) :
+ OpInit(IK_BinOpInit, Type), Opc(opc), LHS(lhs), RHS(rhs) {}
+
+ BinOpInit(const BinOpInit &Other) = delete;
+ BinOpInit &operator=(const BinOpInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_BinOpInit;
+ }
+ static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs,
+ RecTy *Type);
+
+ // Clone - Clone this operator, replacing arguments with the new list
+ OpInit *clone(std::vector<Init *> &Operands) const override {
+ assert(Operands.size() == 2 &&
+ "Wrong number of operands for binary operation");
+ return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType());
+ }
+
+ unsigned getNumOperands() const override { return 2; }
+ Init *getOperand(unsigned i) const override {
+ switch (i) {
+ default: llvm_unreachable("Invalid operand id for binary operator");
+ case 0: return getLHS();
+ case 1: return getRHS();
+ }
+ }
+
+ BinaryOp getOpcode() const { return Opc; }
+ Init *getLHS() const { return LHS; }
+ Init *getRHS() const { return RHS; }
+
+ // Fold - If possible, fold this to a simpler init. Return this if not
+ // possible to fold.
+ Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const override;
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override;
+};
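+
+// Example (illustrative): the TableGen expression !strconcat("a", "b") is
+// represented as a BinOpInit with the STRCONCAT opcode; Fold() collapses it
+// to a plain StringInit when both operands are known.
+//
+//   BinOpInit *Cat =
+//       BinOpInit::get(BinOpInit::STRCONCAT, StringInit::get("a"),
+//                      StringInit::get("b"), StringRecTy::get());
+//   Init *Folded = Cat->Fold(nullptr, nullptr);  // StringInit "ab"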
+
+/// TernOpInit - !op (X, Y, Z) - Combine three inits.
+///
+class TernOpInit : public OpInit {
+public:
+ enum TernaryOp { SUBST, FOREACH, IF };
+
+private:
+ TernaryOp Opc;
+ Init *LHS, *MHS, *RHS;
+
+ TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs,
+ RecTy *Type) :
+ OpInit(IK_TernOpInit, Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
+
+ TernOpInit(const TernOpInit &Other) = delete;
+ TernOpInit &operator=(const TernOpInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_TernOpInit;
+ }
+ static TernOpInit *get(TernaryOp opc, Init *lhs,
+ Init *mhs, Init *rhs,
+ RecTy *Type);
+
+ // Clone - Clone this operator, replacing arguments with the new list
+ OpInit *clone(std::vector<Init *> &Operands) const override {
+ assert(Operands.size() == 3 &&
+ "Wrong number of operands for ternary operation");
+ return TernOpInit::get(getOpcode(), Operands[0], Operands[1], Operands[2],
+ getType());
+ }
+
+ unsigned getNumOperands() const override { return 3; }
+ Init *getOperand(unsigned i) const override {
+ switch (i) {
+ default: llvm_unreachable("Invalid operand id for ternary operator");
+ case 0: return getLHS();
+ case 1: return getMHS();
+ case 2: return getRHS();
+ }
+ }
+
+ TernaryOp getOpcode() const { return Opc; }
+ Init *getLHS() const { return LHS; }
+ Init *getMHS() const { return MHS; }
+ Init *getRHS() const { return RHS; }
+
+ // Fold - If possible, fold this to a simpler init. Return this if not
+ // possible to fold.
+ Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const override;
+
+ bool isComplete() const override { return false; }
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override;
+};
+
+/// VarInit - 'Opcode' - Represent a reference to an entire variable object.
+///
+class VarInit : public TypedInit {
+ Init *VarName;
+
+ explicit VarInit(Init *VN, RecTy *T)
+ : TypedInit(IK_VarInit, T), VarName(VN) {}
+
+ VarInit(const VarInit &Other) = delete;
+ VarInit &operator=(const VarInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarInit;
+ }
+ static VarInit *get(const std::string &VN, RecTy *T);
+ static VarInit *get(Init *VN, RecTy *T);
+
+ const std::string &getName() const;
+ Init *getNameInit() const { return VarName; }
+ std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
+
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override;
+
+ RecTy *getFieldType(const std::string &FieldName) const override;
+ Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const override;
+
+ /// resolveReferences - This method is used by classes that refer to other
+ /// variables which may not be defined at the time the expression is formed.
+ /// If a value is set for the variable later, this method will be called on
+ /// users of the value to allow the value to propagate out.
+ ///
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ Init *getBit(unsigned Bit) const override;
+
+ std::string getAsString() const override { return getName(); }
+};
+
+/// VarBitInit - Opcode{0} - Represent access to one bit of a variable or field.
+///
+class VarBitInit : public Init {
+ TypedInit *TI;
+ unsigned Bit;
+
+ VarBitInit(TypedInit *T, unsigned B) : Init(IK_VarBitInit), TI(T), Bit(B) {
+ assert(T->getType() &&
+ (isa<IntRecTy>(T->getType()) ||
+ (isa<BitsRecTy>(T->getType()) &&
+ cast<BitsRecTy>(T->getType())->getNumBits() > B)) &&
+ "Illegal VarBitInit expression!");
+ }
+
+ VarBitInit(const VarBitInit &Other) = delete;
+ VarBitInit &operator=(const VarBitInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarBitInit;
+ }
+ static VarBitInit *get(TypedInit *T, unsigned B);
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Init *getBitVar() const override { return TI; }
+ unsigned getBitNum() const override { return Bit; }
+
+ std::string getAsString() const override;
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ Init *getBit(unsigned B) const override {
+ assert(B < 1 && "Bit index out of range!");
+ return const_cast<VarBitInit*>(this);
+ }
+};
+
+/// VarListElementInit - List[4] - Represent access to one element of a var or
+/// field.
+class VarListElementInit : public TypedInit {
+ TypedInit *TI;
+ unsigned Element;
+
+ VarListElementInit(TypedInit *T, unsigned E)
+ : TypedInit(IK_VarListElementInit,
+ cast<ListRecTy>(T->getType())->getElementType()),
+ TI(T), Element(E) {
+ assert(T->getType() && isa<ListRecTy>(T->getType()) &&
+ "Illegal VarBitInit expression!");
+ }
+
+ VarListElementInit(const VarListElementInit &Other) = delete;
+ void operator=(const VarListElementInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_VarListElementInit;
+ }
+ static VarListElementInit *get(TypedInit *T, unsigned E);
+
+ TypedInit *getVariable() const { return TI; }
+ unsigned getElementNum() const { return Element; }
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override;
+
+ std::string getAsString() const override;
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ Init *getBit(unsigned Bit) const override;
+};
+
+/// DefInit - AL - Represent a reference to a 'def' in the description
+///
+class DefInit : public TypedInit {
+ Record *Def;
+
+ DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {}
+ friend class Record;
+
+ DefInit(const DefInit &Other) = delete;
+ DefInit &operator=(const DefInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DefInit;
+ }
+ static DefInit *get(Record*);
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Record *getDef() const { return Def; }
+
+ //virtual Init *convertInitializerBitRange(const std::vector<unsigned> &Bits);
+
+ RecTy *getFieldType(const std::string &FieldName) const override;
+ Init *getFieldInit(Record &R, const RecordVal *RV,
+ const std::string &FieldName) const override;
+
+ std::string getAsString() const override;
+
+ Init *getBit(unsigned Bit) const override {
+ llvm_unreachable("Illegal bit reference off def");
+ }
+
+ /// resolveListElementReference - This method is used to implement
+ /// VarListElementInit::resolveReferences. If the list element is resolvable
+ /// now, we return the resolved value, otherwise we return null.
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override {
+ llvm_unreachable("Illegal element reference off def");
+ }
+};
+
+/// FieldInit - X.Y - Represent a reference to a subfield of a variable
+///
+class FieldInit : public TypedInit {
+ Init *Rec; // Record we are referring to
+ std::string FieldName; // Field we are accessing
+
+ FieldInit(Init *R, const std::string &FN)
+ : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) {
+ assert(getType() && "FieldInit with non-record type!");
+ }
+
+ FieldInit(const FieldInit &Other) = delete;
+ FieldInit &operator=(const FieldInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_FieldInit;
+ }
+ static FieldInit *get(Init *R, const std::string &FN);
+
+ Init *getBit(unsigned Bit) const override;
+
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override;
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override {
+ return Rec->getAsString() + "." + FieldName;
+ }
+};
+
+/// DagInit - (v a, b) - Represent a DAG tree value. DAG inits are required
+/// to have at least one value (the operator) followed by a (possibly empty)
+/// list of arguments, each of which can have a name associated with it.
+///
+class DagInit : public TypedInit, public FoldingSetNode {
+ Init *Val;
+ std::string ValName;
+ std::vector<Init*> Args;
+ std::vector<std::string> ArgNames;
+
+ DagInit(Init *V, const std::string &VN,
+ ArrayRef<Init *> ArgRange,
+ ArrayRef<std::string> NameRange)
+ : TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
+ Args(ArgRange.begin(), ArgRange.end()),
+ ArgNames(NameRange.begin(), NameRange.end()) {}
+
+ DagInit(const DagInit &Other) = delete;
+ DagInit &operator=(const DagInit &Other) = delete;
+
+public:
+ static bool classof(const Init *I) {
+ return I->getKind() == IK_DagInit;
+ }
+ static DagInit *get(Init *V, const std::string &VN,
+ ArrayRef<Init *> ArgRange,
+ ArrayRef<std::string> NameRange);
+ static DagInit *get(Init *V, const std::string &VN,
+ const std::vector<
+ std::pair<Init*, std::string> > &args);
+
+ void Profile(FoldingSetNodeID &ID) const;
+
+ Init *convertInitializerTo(RecTy *Ty) const override;
+
+ Init *getOperator() const { return Val; }
+
+ const std::string &getName() const { return ValName; }
+
+ unsigned getNumArgs() const { return Args.size(); }
+ Init *getArg(unsigned Num) const {
+ assert(Num < Args.size() && "Arg number out of range!");
+ return Args[Num];
+ }
+ const std::string &getArgName(unsigned Num) const {
+ assert(Num < ArgNames.size() && "Arg number out of range!");
+ return ArgNames[Num];
+ }
+
+ Init *resolveReferences(Record &R, const RecordVal *RV) const override;
+
+ std::string getAsString() const override;
+
+ typedef std::vector<Init*>::const_iterator const_arg_iterator;
+ typedef std::vector<std::string>::const_iterator const_name_iterator;
+
+ inline const_arg_iterator arg_begin() const { return Args.begin(); }
+ inline const_arg_iterator arg_end () const { return Args.end(); }
+
+ inline size_t arg_size () const { return Args.size(); }
+ inline bool arg_empty() const { return Args.empty(); }
+
+ inline const_name_iterator name_begin() const { return ArgNames.begin(); }
+ inline const_name_iterator name_end () const { return ArgNames.end(); }
+
+ inline size_t name_size () const { return ArgNames.size(); }
+ inline bool name_empty() const { return ArgNames.empty(); }
+
+ Init *getBit(unsigned Bit) const override {
+ llvm_unreachable("Illegal bit reference off dag");
+ }
+
+ Init *resolveListElementReference(Record &R, const RecordVal *RV,
+ unsigned Elt) const override {
+ llvm_unreachable("Illegal element reference off dag");
+ }
+};
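+
+// Example (illustrative): walking a dag value's operator and arguments with
+// the accessors above. The helper name is hypothetical; raw_ostream is
+// declared in llvm/Support/raw_ostream.h.
+//
+//   void printDagArgs(const DagInit *D, raw_ostream &OS) {
+//     OS << D->getOperator()->getAsString() << "\n"; // the operator value
+//     for (unsigned i = 0, e = D->getNumArgs(); i != e; ++i) {
+//       OS << "  " << D->getArg(i)->getAsString();   // argument value
+//       if (!D->getArgName(i).empty())
+//         OS << ":$" << D->getArgName(i);            // optional argument name
+//       OS << "\n";
+//     }
+//   }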
+
+//===----------------------------------------------------------------------===//
+// High-Level Classes
+//===----------------------------------------------------------------------===//
+
+class RecordVal {
+ PointerIntPair<Init *, 1, bool> NameAndPrefix;
+ RecTy *Ty;
+ Init *Value;
+
+public:
+ RecordVal(Init *N, RecTy *T, bool P);
+ RecordVal(const std::string &N, RecTy *T, bool P);
+
+ const std::string &getName() const;
+ const Init *getNameInit() const { return NameAndPrefix.getPointer(); }
+ std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
+
+ bool getPrefix() const { return NameAndPrefix.getInt(); }
+ RecTy *getType() const { return Ty; }
+ Init *getValue() const { return Value; }
+
+ bool setValue(Init *V) {
+ if (V) {
+ Value = V->convertInitializerTo(Ty);
+ return Value == nullptr;
+ }
+ Value = nullptr;
+ return false;
+ }
+
+ void dump() const;
+ void print(raw_ostream &OS, bool PrintSem = true) const;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const RecordVal &RV) {
+ RV.print(OS << " ");
+ return OS;
+}
+
+class Record {
+ static unsigned LastID;
+
+ // Unique record ID.
+ unsigned ID;
+ Init *Name;
+ // Location where record was instantiated, followed by the location of
+ // multiclass prototypes used.
+ SmallVector<SMLoc, 4> Locs;
+ std::vector<Init *> TemplateArgs;
+ std::vector<RecordVal> Values;
+ std::vector<Record *> SuperClasses;
+ std::vector<SMRange> SuperClassRanges;
+
+ // Tracks Record instances. Not owned by Record.
+ RecordKeeper &TrackedRecords;
+
+ std::unique_ptr<DefInit> TheInit;
+ bool IsAnonymous;
+
+ // Class-instance values can be used by other defs. For example, Struct<i>
+ // is used here as a template argument to another class:
+ //
+ // multiclass MultiClass<int i> {
+ // def Def : Class<Struct<i>>;
+  //   }
+  //
+ // These need to get fully resolved before instantiating any other
+ // definitions that use them (e.g. Def). However, inside a multiclass they
+ // can't be immediately resolved so we mark them ResolveFirst to fully
+ // resolve them later as soon as the multiclass is instantiated.
+ bool ResolveFirst;
+
+ void init();
+ void checkName();
+
+public:
+ // Constructs a record.
+ explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
+ bool Anonymous = false) :
+ ID(LastID++), Name(N), Locs(locs.begin(), locs.end()),
+ TrackedRecords(records), IsAnonymous(Anonymous), ResolveFirst(false) {
+ init();
+ }
+ explicit Record(const std::string &N, ArrayRef<SMLoc> locs,
+ RecordKeeper &records, bool Anonymous = false)
+ : Record(StringInit::get(N), locs, records, Anonymous) {}
+
+
+ // When copy-constructing a Record, we must still guarantee a globally unique
+ // ID number. Don't copy TheInit either since it's owned by the original
+ // record. All other fields can be copied normally.
+ Record(const Record &O) :
+ ID(LastID++), Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs),
+ Values(O.Values), SuperClasses(O.SuperClasses),
+ SuperClassRanges(O.SuperClassRanges), TrackedRecords(O.TrackedRecords),
+ IsAnonymous(O.IsAnonymous),
+ ResolveFirst(O.ResolveFirst) { }
+
+ static unsigned getNewUID() { return LastID++; }
+
+ unsigned getID() const { return ID; }
+
+ const std::string &getName() const;
+ Init *getNameInit() const {
+ return Name;
+ }
+ const std::string getNameInitAsString() const {
+ return getNameInit()->getAsUnquotedString();
+ }
+
+ void setName(Init *Name); // Also updates RecordKeeper.
+ void setName(const std::string &Name); // Also updates RecordKeeper.
+
+ ArrayRef<SMLoc> getLoc() const { return Locs; }
+
+ /// get the corresponding DefInit.
+ DefInit *getDefInit();
+
+ ArrayRef<Init *> getTemplateArgs() const {
+ return TemplateArgs;
+ }
+ ArrayRef<RecordVal> getValues() const { return Values; }
+ ArrayRef<Record *> getSuperClasses() const { return SuperClasses; }
+ ArrayRef<SMRange> getSuperClassRanges() const { return SuperClassRanges; }
+
+ bool isTemplateArg(Init *Name) const {
+ for (Init *TA : TemplateArgs)
+ if (TA == Name) return true;
+ return false;
+ }
+ bool isTemplateArg(StringRef Name) const {
+ return isTemplateArg(StringInit::get(Name));
+ }
+
+ const RecordVal *getValue(const Init *Name) const {
+ for (const RecordVal &Val : Values)
+ if (Val.getNameInit() == Name) return &Val;
+ return nullptr;
+ }
+ const RecordVal *getValue(StringRef Name) const {
+ return getValue(StringInit::get(Name));
+ }
+ RecordVal *getValue(const Init *Name) {
+ for (RecordVal &Val : Values)
+ if (Val.getNameInit() == Name) return &Val;
+ return nullptr;
+ }
+ RecordVal *getValue(StringRef Name) {
+ return getValue(StringInit::get(Name));
+ }
+
+ void addTemplateArg(Init *Name) {
+ assert(!isTemplateArg(Name) && "Template arg already defined!");
+ TemplateArgs.push_back(Name);
+ }
+ void addTemplateArg(StringRef Name) {
+ addTemplateArg(StringInit::get(Name));
+ }
+
+ void addValue(const RecordVal &RV) {
+ assert(getValue(RV.getNameInit()) == nullptr && "Value already added!");
+ Values.push_back(RV);
+ if (Values.size() > 1)
+ // Keep NAME at the end of the list. It makes record dumps a
+ // bit prettier and allows TableGen tests to be written more
+ // naturally. Tests can use CHECK-NEXT to look for Record
+ // fields they expect to see after a def. They can't do that if
+ // NAME is the first Record field.
+ std::swap(Values[Values.size() - 2], Values[Values.size() - 1]);
+ }
+
+ void removeValue(Init *Name) {
+ for (unsigned i = 0, e = Values.size(); i != e; ++i)
+ if (Values[i].getNameInit() == Name) {
+ Values.erase(Values.begin()+i);
+ return;
+ }
+ llvm_unreachable("Cannot remove an entry that does not exist!");
+ }
+
+ void removeValue(StringRef Name) {
+ removeValue(StringInit::get(Name));
+ }
+
+ bool isSubClassOf(const Record *R) const {
+ for (const Record *SC : SuperClasses)
+ if (SC == R)
+ return true;
+ return false;
+ }
+
+ bool isSubClassOf(StringRef Name) const {
+ for (const Record *SC : SuperClasses)
+ if (SC->getNameInitAsString() == Name)
+ return true;
+ return false;
+ }
+
+ void addSuperClass(Record *R, SMRange Range) {
+ assert(!isSubClassOf(R) && "Already subclassing record!");
+ SuperClasses.push_back(R);
+ SuperClassRanges.push_back(Range);
+ }
+
+ /// resolveReferences - If there are any field references that refer to fields
+ /// that have been filled in, we can propagate the values now.
+ ///
+ void resolveReferences() { resolveReferencesTo(nullptr); }
+
+ /// resolveReferencesTo - If anything in this record refers to RV, replace the
+ /// reference to RV with the RHS of RV. If RV is null, we resolve all
+ /// possible references.
+ void resolveReferencesTo(const RecordVal *RV);
+
+ RecordKeeper &getRecords() const {
+ return TrackedRecords;
+ }
+
+ bool isAnonymous() const {
+ return IsAnonymous;
+ }
+
+ bool isResolveFirst() const {
+ return ResolveFirst;
+ }
+
+ void setResolveFirst(bool b) {
+ ResolveFirst = b;
+ }
+
+ void dump() const;
+
+ //===--------------------------------------------------------------------===//
+ // High-level methods useful to tablegen back-ends
+ //
+
+ /// getValueInit - Return the initializer for a value with the specified name,
+ /// or throw an exception if the field does not exist.
+ ///
+ Init *getValueInit(StringRef FieldName) const;
+
+ /// Return true if the named field is unset.
+ bool isValueUnset(StringRef FieldName) const {
+ return isa<UnsetInit>(getValueInit(FieldName));
+ }
+
+ /// getValueAsString - This method looks up the specified field and returns
+ /// its value as a string, throwing an exception if the field does not exist
+ /// or if the value is not a string.
+ ///
+ std::string getValueAsString(StringRef FieldName) const;
+
+ /// getValueAsBitsInit - This method looks up the specified field and returns
+ /// its value as a BitsInit, throwing an exception if the field does not exist
+ /// or if the value is not the right type.
+ ///
+ BitsInit *getValueAsBitsInit(StringRef FieldName) const;
+
+ /// getValueAsListInit - This method looks up the specified field and returns
+ /// its value as a ListInit, throwing an exception if the field does not exist
+ /// or if the value is not the right type.
+ ///
+ ListInit *getValueAsListInit(StringRef FieldName) const;
+
+ /// getValueAsListOfDefs - This method looks up the specified field and
+ /// returns its value as a vector of records, throwing an exception if the
+ /// field does not exist or if the value is not the right type.
+ ///
+ std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const;
+
+ /// getValueAsListOfInts - This method looks up the specified field and
+ /// returns its value as a vector of integers, throwing an exception if the
+ /// field does not exist or if the value is not the right type.
+ ///
+ std::vector<int64_t> getValueAsListOfInts(StringRef FieldName) const;
+
+ /// getValueAsListOfStrings - This method looks up the specified field and
+ /// returns its value as a vector of strings, throwing an exception if the
+ /// field does not exist or if the value is not the right type.
+ ///
+ std::vector<std::string> getValueAsListOfStrings(StringRef FieldName) const;
+
+ /// getValueAsDef - This method looks up the specified field and returns its
+ /// value as a Record, throwing an exception if the field does not exist or if
+ /// the value is not the right type.
+ ///
+ Record *getValueAsDef(StringRef FieldName) const;
+
+ /// getValueAsBit - This method looks up the specified field and returns its
+ /// value as a bit, throwing an exception if the field does not exist or if
+ /// the value is not the right type.
+ ///
+ bool getValueAsBit(StringRef FieldName) const;
+
+ /// getValueAsBitOrUnset - This method looks up the specified field and
+ /// returns its value as a bit. If the field is unset, sets Unset to true and
+ /// returns false.
+ ///
+ bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;
+
+ /// getValueAsInt - This method looks up the specified field and returns its
+ /// value as an int64_t, throwing an exception if the field does not exist or
+ /// if the value is not the right type.
+ ///
+ int64_t getValueAsInt(StringRef FieldName) const;
+
+ /// getValueAsDag - This method looks up the specified field and returns its
+  /// value as a Dag, throwing an exception if the field does not exist or if
+ /// the value is not the right type.
+ ///
+ DagInit *getValueAsDag(StringRef FieldName) const;
+};
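+
+// Example (illustrative): a backend reading fields off a def with the
+// high-level accessors above. The field names "Mnemonic" and "Size" are
+// hypothetical; each accessor throws if the field is missing or mistyped.
+//
+//   void emitEntry(const Record &R, raw_ostream &OS) {
+//     if (R.isValueUnset("Mnemonic"))   // declared but never assigned
+//       return;
+//     OS << R.getName() << ": " << R.getValueAsString("Mnemonic")
+//        << ", size=" << R.getValueAsInt("Size") << "\n";
+//   }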
+
+raw_ostream &operator<<(raw_ostream &OS, const Record &R);
+
+struct MultiClass {
+ Record Rec; // Placeholder for template args and Name.
+ typedef std::vector<std::unique_ptr<Record>> RecordVector;
+ RecordVector DefPrototypes;
+
+ void dump() const;
+
+ MultiClass(const std::string &Name, SMLoc Loc, RecordKeeper &Records) :
+ Rec(Name, Loc, Records) {}
+};
+
+class RecordKeeper {
+ typedef std::map<std::string, std::unique_ptr<Record>> RecordMap;
+ RecordMap Classes, Defs;
+
+public:
+ const RecordMap &getClasses() const { return Classes; }
+ const RecordMap &getDefs() const { return Defs; }
+
+ Record *getClass(const std::string &Name) const {
+ auto I = Classes.find(Name);
+ return I == Classes.end() ? nullptr : I->second.get();
+ }
+ Record *getDef(const std::string &Name) const {
+ auto I = Defs.find(Name);
+ return I == Defs.end() ? nullptr : I->second.get();
+ }
+ void addClass(std::unique_ptr<Record> R) {
+ bool Ins = Classes.insert(std::make_pair(R->getName(),
+ std::move(R))).second;
+ (void)Ins;
+ assert(Ins && "Class already exists");
+ }
+ void addDef(std::unique_ptr<Record> R) {
+ bool Ins = Defs.insert(std::make_pair(R->getName(),
+ std::move(R))).second;
+ (void)Ins;
+ assert(Ins && "Record already exists");
+ }
+
+ //===--------------------------------------------------------------------===//
+ // High-level helper methods, useful for tablegen backends...
+
+ /// getAllDerivedDefinitions - This method returns all concrete definitions
+ /// that derive from the specified class name. If a class with the specified
+ /// name does not exist, an exception is thrown.
+ std::vector<Record*>
+ getAllDerivedDefinitions(const std::string &ClassName) const;
+
+ void dump() const;
+};
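+
+// Example (illustrative): the typical backend query. RK is a RecordKeeper
+// reference, and "Instruction" stands in for whatever class name the
+// target's .td files define.
+//
+//   for (Record *R : RK.getAllDerivedDefinitions("Instruction"))
+//     OS << R->getName() << "\n";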
+
+/// LessRecord - Sorting predicate to sort record pointers by name.
+///
+struct LessRecord {
+ bool operator()(const Record *Rec1, const Record *Rec2) const {
+ return StringRef(Rec1->getName()).compare_numeric(Rec2->getName()) < 0;
+ }
+};
+
+/// LessRecordByID - Sorting predicate to sort record pointers by their
+/// unique ID. If you just need a deterministic order, use this, since it
+/// just compares two `unsigned`; the other sorting predicates require
+/// string manipulation.
+struct LessRecordByID {
+ bool operator()(const Record *LHS, const Record *RHS) const {
+ return LHS->getID() < RHS->getID();
+ }
+};
+
+/// LessRecordFieldName - Sorting predicate to sort record pointers by their
+/// name field.
+///
+struct LessRecordFieldName {
+ bool operator()(const Record *Rec1, const Record *Rec2) const {
+ return Rec1->getValueAsString("Name") < Rec2->getValueAsString("Name");
+ }
+};
+
+struct LessRecordRegister {
+ static bool ascii_isdigit(char x) { return x >= '0' && x <= '9'; }
+
+ struct RecordParts {
+    SmallVector<std::pair<bool, StringRef>, 4> Parts;
+
+ RecordParts(StringRef Rec) {
+ if (Rec.empty())
+ return;
+
+ size_t Len = 0;
+ const char *Start = Rec.data();
+ const char *Curr = Start;
+ bool isDigitPart = ascii_isdigit(Curr[0]);
+ for (size_t I = 0, E = Rec.size(); I != E; ++I, ++Len) {
+ bool isDigit = ascii_isdigit(Curr[I]);
+ if (isDigit != isDigitPart) {
+ Parts.push_back(std::make_pair(isDigitPart, StringRef(Start, Len)));
+ Len = 0;
+ Start = &Curr[I];
+ isDigitPart = ascii_isdigit(Curr[I]);
+ }
+ }
+ // Push the last part.
+ Parts.push_back(std::make_pair(isDigitPart, StringRef(Start, Len)));
+ }
+
+ size_t size() { return Parts.size(); }
+
+ std::pair<bool, StringRef> getPart(size_t i) {
+ assert (i < Parts.size() && "Invalid idx!");
+ return Parts[i];
+ }
+ };
+
+ bool operator()(const Record *Rec1, const Record *Rec2) const {
+ RecordParts LHSParts(StringRef(Rec1->getName()));
+ RecordParts RHSParts(StringRef(Rec2->getName()));
+
+ size_t LHSNumParts = LHSParts.size();
+ size_t RHSNumParts = RHSParts.size();
+ assert (LHSNumParts && RHSNumParts && "Expected at least one part!");
+
+ if (LHSNumParts != RHSNumParts)
+ return LHSNumParts < RHSNumParts;
+
+    // We expect the registers to be of the form [_a-zA-Z]+([0-9]*[_a-zA-Z]*)*.
+ for (size_t I = 0, E = LHSNumParts; I < E; I+=2) {
+ std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
+ std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
+ // Expect even part to always be alpha.
+ assert (LHSPart.first == false && RHSPart.first == false &&
+ "Expected both parts to be alpha.");
+ if (int Res = LHSPart.second.compare(RHSPart.second))
+ return Res < 0;
+ }
+ for (size_t I = 1, E = LHSNumParts; I < E; I+=2) {
+ std::pair<bool, StringRef> LHSPart = LHSParts.getPart(I);
+ std::pair<bool, StringRef> RHSPart = RHSParts.getPart(I);
+ // Expect odd part to always be numeric.
+ assert (LHSPart.first == true && RHSPart.first == true &&
+ "Expected both parts to be numeric.");
+ if (LHSPart.second.size() != RHSPart.second.size())
+ return LHSPart.second.size() < RHSPart.second.size();
+
+ unsigned LHSVal, RHSVal;
+
+ bool LHSFailed = LHSPart.second.getAsInteger(10, LHSVal); (void)LHSFailed;
+ assert(!LHSFailed && "Unable to convert LHS to integer.");
+ bool RHSFailed = RHSPart.second.getAsInteger(10, RHSVal); (void)RHSFailed;
+ assert(!RHSFailed && "Unable to convert RHS to integer.");
+
+ if (LHSVal != RHSVal)
+ return LHSVal < RHSVal;
+ }
+ return LHSNumParts < RHSNumParts;
+ }
+};
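+
+// Example (illustrative): sorting a vector of register defs so that numeric
+// suffixes compare by value, e.g. R2 orders before R10, whereas plain string
+// comparison would put R10 first.
+//
+//   std::sort(Regs.begin(), Regs.end(), LessRecordRegister());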
+
+raw_ostream &operator<<(raw_ostream &OS, const RecordKeeper &RK);
+
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ Init *Name, const std::string &Scoper);
+
+/// QualifyName - Return an Init with a qualifier prefix referring
+/// to CurRec's name.
+Init *QualifyName(Record &CurRec, MultiClass *CurMultiClass,
+ const std::string &Name, const std::string &Scoper);
+
+} // end llvm namespace
+
+#endif // LLVM_TABLEGEN_RECORD_H
diff --git a/contrib/llvm/include/llvm/TableGen/SetTheory.h b/contrib/llvm/include/llvm/TableGen/SetTheory.h
new file mode 100644
index 0000000..d4e0f53
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/SetTheory.h
@@ -0,0 +1,142 @@
+//===- SetTheory.h - Generate ordered sets from DAG expressions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SetTheory class that computes ordered sets of
+// Records from DAG expressions. Operators for standard set operations are
+// predefined, and it is possible to add special purpose set operators as well.
+//
+// The user may define named sets as Records of predefined classes. Set
+// expanders can be added to a SetTheory instance to teach it how to find the
+// elements of such a named set.
+//
+// These are the predefined operators. The argument lists can be individual
+// elements (defs), other sets (defs of expandable classes), lists, or DAG
+// expressions that are evaluated recursively.
+//
+// - (add S1, S2 ...) Union sets. This is also how sets are created from element
+// lists.
+//
+// - (sub S1, S2, ...) Set difference. Every element in S1 except for the
+// elements in S2, ...
+//
+// - (and S1, S2) Set intersection. Every element in S1 that is also in S2.
+//
+// - (shl S, N) Shift left. Remove the first N elements from S.
+//
+// - (trunc S, N) Truncate. The first N elements of S.
+//
+// - (rotl S, N) Rotate left. Same as (add (shl S, N), (trunc S, N)).
+//
+// - (rotr S, N) Rotate right.
+//
+// - (decimate S, N) Decimate S by picking every N'th element, starting with
+// the first one. For instance, (decimate S, 2) returns the even elements of
+// S.
+//
+// - (sequence "Format", From, To) Generate a sequence of defs with printf.
+// For instance, (sequence "R%u", 0, 3) -> [ R0, R1, R2, R3 ]
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_SETTHEORY_H
+#define LLVM_TABLEGEN_SETTHEORY_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/SMLoc.h"
+#include <map>
+#include <vector>
+
+namespace llvm {
+
+class DagInit;
+class Init;
+class Record;
+
+class SetTheory {
+public:
+ typedef std::vector<Record*> RecVec;
+ typedef SmallSetVector<Record*, 16> RecSet;
+
+ /// Operator - A callback representing a DAG operator.
+ class Operator {
+ virtual void anchor();
+ public:
+ virtual ~Operator() {}
+
+ /// apply - Apply this operator to Expr's arguments and insert the result
+ /// in Elts.
+ virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts,
+                       ArrayRef<SMLoc> Loc) = 0;
+ };
+
+ /// Expander - A callback function that can transform a Record representing a
+ /// set into a fully expanded list of elements. Expanders provide a way for
+ /// users to define named sets that can be used in DAG expressions.
+ class Expander {
+ virtual void anchor();
+ public:
+ virtual ~Expander() {}
+
+    virtual void expand(SetTheory&, Record*, RecSet &Elts) = 0;
+ };
+
+private:
+ // Map set defs to their fully expanded contents. This serves as a memoization
+ // cache and it makes it possible to return const references on queries.
+ typedef std::map<Record*, RecVec> ExpandMap;
+ ExpandMap Expansions;
+
+ // Known DAG operators by name.
+ StringMap<std::unique_ptr<Operator>> Operators;
+
+ // Typed expanders by class name.
+ StringMap<std::unique_ptr<Expander>> Expanders;
+
+public:
+ /// Create a SetTheory instance with only the standard operators.
+ SetTheory();
+
+ /// addExpander - Add an expander for Records with the named super class.
+ void addExpander(StringRef ClassName, std::unique_ptr<Expander>);
+
+ /// addFieldExpander - Add an expander for ClassName that simply evaluates
+ /// FieldName in the Record to get the set elements. That is all that is
+ /// needed for a class like:
+ ///
+ /// class Set<dag d> {
+ /// dag Elts = d;
+ /// }
+ ///
+ void addFieldExpander(StringRef ClassName, StringRef FieldName);
+
+ /// addOperator - Add a DAG operator.
+ void addOperator(StringRef Name, std::unique_ptr<Operator>);
+
+ /// evaluate - Evaluate Expr and append the resulting set to Elts.
+ void evaluate(Init *Expr, RecSet &Elts, ArrayRef<SMLoc> Loc);
+
+ /// evaluate - Evaluate a sequence of Inits and append to Elts.
+ template<typename Iter>
+ void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef<SMLoc> Loc) {
+ while (begin != end)
+ evaluate(*begin++, Elts, Loc);
+ }
+
+ /// expand - Expand a record into a set of elements if possible. Return a
+ /// pointer to the expanded elements, or NULL if Set cannot be expanded
+ /// further.
+ const RecVec *expand(Record *Set);
+};
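+
+// Example (illustrative): expanding a def of the Set class shown in the
+// addFieldExpander comment above. SetDef is a hypothetical Record*.
+//
+//   SetTheory ST;
+//   ST.addFieldExpander("Set", "Elts");
+//   if (const SetTheory::RecVec *Elements = ST.expand(SetDef))
+//     for (Record *R : *Elements)
+//       OS << R->getName() << "\n";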
+
+} // end namespace llvm
+
+#endif
+
diff --git a/contrib/llvm/include/llvm/TableGen/StringMatcher.h b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
new file mode 100644
index 0000000..b438779
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/StringMatcher.h
@@ -0,0 +1,54 @@
+//===- StringMatcher.h - Generate a matcher for input strings ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the StringMatcher class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_STRINGMATCHER_H
+#define LLVM_TABLEGEN_STRINGMATCHER_H
+
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class raw_ostream;
+
+/// StringMatcher - Given a list of strings and code to execute when they match,
+/// output a simple switch tree to classify the input string.
+///
+/// If a match is found, the code in Vals[i].second is executed; control must
+/// not exit this code fragment. If nothing matches, execution falls through.
+///
+class StringMatcher {
+public:
+ typedef std::pair<std::string, std::string> StringPair;
+
+private:
+ StringRef StrVariableName;
+ const std::vector<StringPair> &Matches;
+ raw_ostream &OS;
+
+public:
+ StringMatcher(StringRef strVariableName,
+ const std::vector<StringPair> &matches, raw_ostream &os)
+ : StrVariableName(strVariableName), Matches(matches), OS(os) {}
+
+ void Emit(unsigned Indent = 0) const;
+
+private:
+ bool EmitStringMatcherForChar(const std::vector<const StringPair*> &Matches,
+ unsigned CharNo, unsigned IndentCount) const;
+};
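+
+// Example (illustrative): classifying two hypothetical strings. Each code
+// fragment returns so that control cannot fall out of the match.
+//
+//   std::vector<StringMatcher::StringPair> Matches;
+//   Matches.push_back(StringMatcher::StringPair("enable", "return 1;"));
+//   Matches.push_back(StringMatcher::StringPair("disable", "return 0;"));
+//   StringMatcher("Str", Matches, OS).Emit();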
+
+} // end llvm namespace.
+
+#endif
diff --git a/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h b/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h
new file mode 100644
index 0000000..e327703
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/StringToOffsetTable.h
@@ -0,0 +1,83 @@
+//===- StringToOffsetTable.h - Emit a big concatenated string ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H
+#define LLVM_TABLEGEN_STRINGTOOFFSETTABLE_H
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cctype>
+
+namespace llvm {
+
+/// StringToOffsetTable - This class uniques a bunch of nul-terminated strings
+/// and keeps track of their offset in a massive contiguous string allocation.
+/// It can then output this string blob and use indexes into the string to
+/// reference each piece.
+class StringToOffsetTable {
+ StringMap<unsigned> StringOffset;
+ std::string AggregateString;
+
+public:
+ unsigned GetOrAddStringOffset(StringRef Str, bool appendZero = true) {
+ auto IterBool =
+ StringOffset.insert(std::make_pair(Str, AggregateString.size()));
+ if (IterBool.second) {
+ // Add the string to the aggregate if this is the first time found.
+ AggregateString.append(Str.begin(), Str.end());
+ if (appendZero)
+ AggregateString += '\0';
+ }
+
+ return IterBool.first->second;
+ }
+
+ void EmitString(raw_ostream &O) {
+ // Escape the string.
+ SmallString<256> Str;
+ raw_svector_ostream(Str).write_escaped(AggregateString);
+ AggregateString = Str.str();
+
+ O << " \"";
+ unsigned CharsPrinted = 0;
+ for (unsigned i = 0, e = AggregateString.size(); i != e; ++i) {
+ if (CharsPrinted > 70) {
+ O << "\"\n \"";
+ CharsPrinted = 0;
+ }
+ O << AggregateString[i];
+ ++CharsPrinted;
+
+ // Print escape sequences all together.
+ if (AggregateString[i] != '\\')
+ continue;
+
+ assert(i+1 < AggregateString.size() && "Incomplete escape sequence!");
+ if (isdigit(AggregateString[i+1])) {
+ assert(isdigit(AggregateString[i+2]) &&
+ isdigit(AggregateString[i+3]) &&
+ "Expected 3 digit octal escape!");
+ O << AggregateString[++i];
+ O << AggregateString[++i];
+ O << AggregateString[++i];
+ CharsPrinted += 3;
+ } else {
+ O << AggregateString[++i];
+ ++CharsPrinted;
+ }
+ }
+ O << "\"";
+ }
+};
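+
+// Example (illustrative): uniquing two strings and emitting the blob. The
+// array name "Strings" is hypothetical.
+//
+//   StringToOffsetTable Table;
+//   unsigned FooIdx = Table.GetOrAddStringOffset("foo");
+//   unsigned BarIdx = Table.GetOrAddStringOffset("bar");
+//   OS << "static const char Strings[] =\n";
+//   Table.EmitString(OS);
+//   OS << ";\n";
+//   // &Strings[FooIdx] and &Strings[BarIdx] are nul-terminated C strings.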
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/TableGen/TableGenBackend.h b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
new file mode 100644
index 0000000..d226f1f
--- /dev/null
+++ b/contrib/llvm/include/llvm/TableGen/TableGenBackend.h
@@ -0,0 +1,28 @@
+//===- llvm/TableGen/TableGenBackend.h - Backend utilities ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Useful utilities for TableGen backends.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TABLEGEN_TABLEGENBACKEND_H
+#define LLVM_TABLEGEN_TABLEGENBACKEND_H
+
+namespace llvm {
+
+class StringRef;
+class raw_ostream;
+
+/// emitSourceFileHeader - Output an LLVM style file header to the specified
+/// raw_ostream.
+void emitSourceFileHeader(StringRef Desc, raw_ostream &OS);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/CostTable.h b/contrib/llvm/include/llvm/Target/CostTable.h
new file mode 100644
index 0000000..2499f5c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/CostTable.h
@@ -0,0 +1,71 @@
+//===-- CostTable.h - Instruction Cost Table handling -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Cost tables and simple lookup functions
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_COSTTABLE_H_
+#define LLVM_TARGET_COSTTABLE_H_
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/MachineValueType.h"
+
+namespace llvm {
+
+/// Cost Table Entry
+struct CostTblEntry {
+ int ISD;
+ MVT::SimpleValueType Type;
+ unsigned Cost;
+};
+
+/// Find an entry in the cost table that matches the given ISD opcode and type.
+inline const CostTblEntry *CostTableLookup(ArrayRef<CostTblEntry> Tbl,
+ int ISD, MVT Ty) {
+ auto I = std::find_if(Tbl.begin(), Tbl.end(),
+ [=](const CostTblEntry &Entry) {
+ return ISD == Entry.ISD && Ty == Entry.Type; });
+ if (I != Tbl.end())
+ return I;
+
+ // Could not find an entry.
+ return nullptr;
+}
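+
+// Example (illustrative): a target cost table consulted before falling back
+// to a default. The ISD opcodes come from ISDOpcodes.h; the costs shown are
+// made up.
+//
+//   static const CostTblEntry MyTbl[] = {
+//     { ISD::ADD, MVT::v4i32, 1 },
+//     { ISD::MUL, MVT::v4i32, 4 },
+//   };
+//   if (const CostTblEntry *Entry = CostTableLookup(MyTbl, ISD::MUL, MVT::v4i32))
+//     return Entry->Cost;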
+
+/// Type Conversion Cost Table
+struct TypeConversionCostTblEntry {
+ int ISD;
+ MVT::SimpleValueType Dst;
+ MVT::SimpleValueType Src;
+ unsigned Cost;
+};
+
+/// Find an entry in the type conversion cost table that matches the given
+/// ISD opcode and source/destination types.
+inline const TypeConversionCostTblEntry *
+ConvertCostTableLookup(ArrayRef<TypeConversionCostTblEntry> Tbl,
+ int ISD, MVT Dst, MVT Src) {
+ auto I = std::find_if(Tbl.begin(), Tbl.end(),
+ [=](const TypeConversionCostTblEntry &Entry) {
+ return ISD == Entry.ISD && Src == Entry.Src &&
+ Dst == Entry.Dst;
+ });
+ if (I != Tbl.end())
+ return I;
+
+ // Could not find an entry.
+ return nullptr;
+}
+
+} // namespace llvm
+
+
+#endif /* LLVM_TARGET_COSTTABLE_H_ */
diff --git a/contrib/llvm/include/llvm/Target/Target.td b/contrib/llvm/include/llvm/Target/Target.td
new file mode 100644
index 0000000..79046b2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/Target.td
@@ -0,0 +1,1237 @@
+//===- Target.td - Target Independent TableGen interface ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces which should be
+// implemented by each target which is using a TableGen based code generator.
+//
+//===----------------------------------------------------------------------===//
+
+// Include all information about LLVM intrinsics.
+include "llvm/IR/Intrinsics.td"
+
+//===----------------------------------------------------------------------===//
+// Register file description - These classes are used to fill in the target
+// description classes.
+
+class RegisterClass; // Forward def
+
+// SubRegIndex - Use instances of SubRegIndex to identify subregisters.
+class SubRegIndex<int size, int offset = 0> {
+ string Namespace = "";
+
+ // Size - Size (in bits) of the sub-registers represented by this index.
+ int Size = size;
+
+ // Offset - Offset of the first bit that is part of this sub-register index.
+ // Set it to -1 if the same index is used to represent sub-registers that can
+ // be at different offsets (for example when using an index to access an
+ // element in a register tuple).
+ int Offset = offset;
+
+ // ComposedOf - A list of two SubRegIndex instances, [A, B].
+ // This indicates that this SubRegIndex is the result of composing A and B.
+ // See ComposedSubRegIndex.
+ list<SubRegIndex> ComposedOf = [];
+
+ // CoveringSubRegIndices - A list of two or more sub-register indexes that
+ // cover this sub-register.
+ //
+ // This field should normally be left blank as TableGen can infer it.
+ //
+ // TableGen automatically detects sub-registers that straddle the registers
+ // in the SubRegs field of a Register definition. For example:
+ //
+ // Q0 = dsub_0 -> D0, dsub_1 -> D1
+ // Q1 = dsub_0 -> D2, dsub_1 -> D3
+ // D1_D2 = dsub_0 -> D1, dsub_1 -> D2
+ // QQ0 = qsub_0 -> Q0, qsub_1 -> Q1
+ //
+ // TableGen will infer that D1_D2 is a sub-register of QQ0. It will be given
+ // the synthetic index dsub_1_dsub_2 unless some SubRegIndex is defined with
+ // CoveringSubRegIndices = [dsub_1, dsub_2].
+ list<SubRegIndex> CoveringSubRegIndices = [];
+}
+
+// ComposedSubRegIndex - A sub-register that is the result of composing A and B.
+// Offset is set to the sum of A and B's Offsets. Size is set to B's Size.
+class ComposedSubRegIndex<SubRegIndex A, SubRegIndex B>
+ : SubRegIndex<B.Size, !if(!eq(A.Offset, -1), -1,
+ !if(!eq(B.Offset, -1), -1,
+ !add(A.Offset, B.Offset)))> {
+ // See SubRegIndex.
+ let ComposedOf = [A, B];
+}
+
+// RegAltNameIndex - The alternate name set to use for register operands of
+// this register class when printing.
+class RegAltNameIndex {
+ string Namespace = "";
+}
+def NoRegAltName : RegAltNameIndex;
+
+// Register - You should define one instance of this class for each register
+// in the target machine. String n will become the "name" of the register.
+class Register<string n, list<string> altNames = []> {
+ string Namespace = "";
+ string AsmName = n;
+ list<string> AltNames = altNames;
+
+ // Aliases - A list of registers that this register overlaps with. A read or
+ // modification of this register can potentially read or modify the aliased
+ // registers.
+ list<Register> Aliases = [];
+
+ // SubRegs - A list of registers that are parts of this register. Note these
+ // are "immediate" sub-registers and the registers within the list do not
+ // themselves overlap. e.g. For X86, EAX's SubRegs list contains only [AX],
+ // not [AX, AH, AL].
+ list<Register> SubRegs = [];
+
+ // SubRegIndices - For each register in SubRegs, specify the SubRegIndex used
+ // to address it. Sub-sub-register indices are automatically inherited from
+ // SubRegs.
+ list<SubRegIndex> SubRegIndices = [];
+
+ // RegAltNameIndices - The alternate name indices which are valid for this
+ // register.
+ list<RegAltNameIndex> RegAltNameIndices = [];
+
+ // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
+ // These values can be determined by locating the <target>.h file in the
+ // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
+  // order of these names corresponds to the enumeration used by gcc. A value of
+ // -1 indicates that the gcc number is undefined and -2 that register number
+ // is invalid for this mode/flavour.
+ list<int> DwarfNumbers = [];
+
+ // CostPerUse - Additional cost of instructions using this register compared
+ // to other registers in its class. The register allocator will try to
+ // minimize the number of instructions using a register with a CostPerUse.
+ // This is used by the x86-64 and ARM Thumb targets where some registers
+ // require larger instruction encodings.
+ int CostPerUse = 0;
+
+ // CoveredBySubRegs - When this bit is set, the value of this register is
+ // completely determined by the value of its sub-registers. For example, the
+ // x86 register AX is covered by its sub-registers AL and AH, but EAX is not
+ // covered by its sub-register AX.
+ bit CoveredBySubRegs = 0;
+
+ // HWEncoding - The target specific hardware encoding for this register.
+ bits<16> HWEncoding = 0;
+}
+
+// RegisterWithSubRegs - This can be used to define instances of Register which
+// need to specify sub-registers.
+// List "subregs" specifies which registers are sub-registers to this one. This
+// is used to populate the SubRegs and AliasSet fields of TargetRegisterDesc.
+// This allows the code generator to be careful not to put two values with
+// overlapping live ranges into registers which alias.
+class RegisterWithSubRegs<string n, list<Register> subregs> : Register<n> {
+ let SubRegs = subregs;
+}
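+
+// For example (illustrative; the registers and sub-register indices are
+// hypothetical, in the spirit of the x86 AL/AH/AX registers above):
+//
+//   def sub_lo : SubRegIndex<8>;
+//   def sub_hi : SubRegIndex<8, 8>;
+//   def AL : Register<"al">;
+//   def AH : Register<"ah">;
+//   let SubRegIndices = [sub_lo, sub_hi] in
+//   def AX : RegisterWithSubRegs<"ax", [AL, AH]>;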
+
+// DAGOperand - An empty base class that unifies RegisterClass's and other forms
+// of Operand's that are legal as type qualifiers in DAG patterns. This should
+// only ever be used for defining multiclasses that are polymorphic over both
+// RegisterClass's and other Operand's.
+class DAGOperand { }
+
+// RegisterClass - Now that all of the registers are defined, and aliases
+// between registers are defined, specify which registers belong to which
+// register classes. This also defines the default allocation order of
+// registers by register allocators.
+//
+class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
+ dag regList, RegAltNameIndex idx = NoRegAltName>
+ : DAGOperand {
+ string Namespace = namespace;
+
+  // RegTypes - Specify the list of ValueTypes of the registers in this register
+ // class. Note that all registers in a register class must have the same
+ // ValueTypes. This is a list because some targets permit storing different
+  // types in the same register, for example vector values with 128-bit total size,
+ // but different count/size of items, like SSE on x86.
+ //
+ list<ValueType> RegTypes = regTypes;
+
+ // Size - Specify the spill size in bits of the registers. A default value of
+  // zero lets tablegen pick an appropriate size.
+ int Size = 0;
+
+ // Alignment - Specify the alignment required of the registers when they are
+ // stored or loaded to memory.
+ //
+ int Alignment = alignment;
+
+ // CopyCost - This value is used to specify the cost of copying a value
+ // between two registers in this register class. The default value is one
+ // meaning it takes a single instruction to perform the copying. A negative
+ // value means copying is extremely expensive or impossible.
+ int CopyCost = 1;
+
+ // MemberList - Specify which registers are in this class. If the
+  // allocation_order_* methods are not specified, this also defines the order of
+ // allocation used by the register allocator.
+ //
+ dag MemberList = regList;
+
+ // AltNameIndex - The alternate register name to use when printing operands
+ // of this register class. Every register in the register class must have
+ // a valid alternate name for the given index.
+ RegAltNameIndex altNameIndex = idx;
+
+ // isAllocatable - Specify that the register class can be used for virtual
+ // registers and register allocation. Some register classes are only used to
+ // model instruction operand constraints, and should have isAllocatable = 0.
+ bit isAllocatable = 1;
+
+ // AltOrders - List of alternative allocation orders. The default order is
+ // MemberList itself, and that is good enough for most targets since the
+ // register allocators automatically remove reserved registers and move
+ // callee-saved registers to the end.
+ list<dag> AltOrders = [];
+
+ // AltOrderSelect - The body of a function that selects the allocation order
+ // to use in a given machine function. The code will be inserted in a
+ // function like this:
+ //
+ // static inline unsigned f(const MachineFunction &MF) { ... }
+ //
+ // The function should return 0 to select the default order defined by
+ // MemberList, 1 to select the first AltOrders entry and so on.
+ code AltOrderSelect = [{}];
+
+ // Specify allocation priority for register allocators using a greedy
+ // heuristic. Classes with higher priority values are assigned first. This is
+ // useful as it is sometimes beneficial to assign registers to highly
+ // constrained classes first. The value has to be in the range [0,63].
+ int AllocationPriority = 0;
+}
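+
+// For example (illustrative; "MyTarget" and registers R0-R3 are
+// hypothetical), a class of four 32-bit registers with 32-bit alignment:
+//
+//   def GPR : RegisterClass<"MyTarget", [i32], 32, (add R0, R1, R2, R3)>;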
+
+// The memberList in a RegisterClass is a dag of set operations. TableGen
+// evaluates these set operations and expands them into register lists. These
+// are the most common operations; see test/TableGen/SetTheory.td for more
+// examples of what is possible:
+//
+// (add R0, R1, R2) - Set Union. Each argument can be an individual register, a
+// register class, or a sub-expression. This is also the way to simply list
+// registers.
+//
+// (sub GPR, SP) - Set difference. Subtract the last arguments from the first.
+//
+// (and GPR, CSR) - Set intersection. All registers from the first set that are
+// also in the second set.
+//
+// (sequence "R%u", 0, 15) -> [R0, R1, ..., R15]. Generate a sequence of
+// numbered registers. Takes an optional 4th operand which is a stride to use
+// when generating the sequence.
+//
+// (shl GPR, 4) - Remove the first N elements.
+//
+// (trunc GPR, 4) - Truncate after the first N elements.
+//
+// (rotl GPR, 1) - Rotate N places to the left.
+//
+// (rotr GPR, 1) - Rotate N places to the right.
+//
+// (decimate GPR, 2) - Pick every N'th element, starting with the first.
+//
+// (interleave A, B, ...) - Interleave the elements from each argument list.
+//
+// All of these operators work on ordered sets, not lists. That means
+// duplicates are removed from sub-expressions.
+
+// Set operators. The rest is defined in TargetSelectionDAG.td.
+def sequence;
+def decimate;
+def interleave;
+
+// RegisterTuples - Automatically generate super-registers by forming tuples of
+// sub-registers. This is useful for modeling register sequence constraints
+// with pseudo-registers that are larger than the architectural registers.
+//
+// The sub-register lists are zipped together:
+//
+// def EvenOdd : RegisterTuples<[sube, subo], [(add R0, R2), (add R1, R3)]>;
+//
+// Generates the same registers as:
+//
+// let SubRegIndices = [sube, subo] in {
+// def R0_R1 : RegisterWithSubRegs<"", [R0, R1]>;
+// def R2_R3 : RegisterWithSubRegs<"", [R2, R3]>;
+// }
+//
+// The generated pseudo-registers inherit super-classes and fields from their
+// first sub-register. Most fields from the Register class are inferred, and
+// the AsmName and Dwarf numbers are cleared.
+//
+// RegisterTuples instances can be used in other set operations to form
+// register classes and so on. This is the only way of using the generated
+// registers.
+class RegisterTuples<list<SubRegIndex> Indices, list<dag> Regs> {
+ // SubRegs - N lists of registers to be zipped up. Super-registers are
+ // synthesized from the first element of each SubRegs list, the second
+ // element and so on.
+ list<dag> SubRegs = Regs;
+
+ // SubRegIndices - N SubRegIndex instances. This provides the names of the
+ // sub-registers in the synthesized super-registers.
+ list<SubRegIndex> SubRegIndices = Indices;
+}
+
+
+//===----------------------------------------------------------------------===//
+// DwarfRegNum - This class provides a mapping of the llvm register enumeration
+// to the register numbering used by gcc and gdb. These values are used by a
+// debug information writer to describe where values may be located during
+// execution.
+class DwarfRegNum<list<int> Numbers> {
+ // DwarfNumbers - Numbers used internally by gcc/gdb to identify the register.
+ // These values can be determined by locating the <target>.h file in the
+ // directory llvmgcc/gcc/config/<target>/ and looking for REGISTER_NAMES. The
+  // order of these names corresponds to the enumeration used by gcc. A value of
+ // -1 indicates that the gcc number is undefined and -2 that register number
+ // is invalid for this mode/flavour.
+ list<int> DwarfNumbers = Numbers;
+}
+
+// DwarfRegAlias - This class declares that a given register uses the same dwarf
+// numbers as another one. This is useful for making it clear that the two
+// registers do have the same number. It also lets us build a mapping
+// from dwarf register number to llvm register.
+class DwarfRegAlias<Register reg> {
+ Register DwarfAlias = reg;
+}
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for scheduling
+//
+include "llvm/Target/TargetSchedule.td"
+
+class Predicate; // Forward def
+
+//===----------------------------------------------------------------------===//
+// Instruction set description - These classes correspond to the C++ classes in
+// the Target/TargetInstrInfo.h file.
+//
+class Instruction {
+ string Namespace = "";
+
+  dag OutOperandList; // A dag containing the MI def operand list.
+  dag InOperandList;  // A dag containing the MI use operand list.
+ string AsmString = ""; // The .s format to print the instruction with.
+
+ // Pattern - Set to the DAG pattern for this instruction, if we know of one,
+ // otherwise, uninitialized.
+ list<dag> Pattern;
+
+  // The following state will eventually be inferred automatically from the
+ // instruction pattern.
+
+ list<Register> Uses = []; // Default to using no non-operand registers
+ list<Register> Defs = []; // Default to modifying no non-operand registers
+
+ // Predicates - List of predicates which will be turned into isel matching
+ // code.
+ list<Predicate> Predicates = [];
+
+ // Size - Size of encoded instruction, or zero if the size cannot be determined
+ // from the opcode.
+ int Size = 0;
+
+ // DecoderNamespace - The "namespace" in which this instruction exists, on
+  // targets like ARM where multiple ISA namespaces exist.
+ string DecoderNamespace = "";
+
+ // Code size, for instruction selection.
+ // FIXME: What does this actually mean?
+ int CodeSize = 0;
+
+ // Added complexity passed onto matching pattern.
+ int AddedComplexity = 0;
+
+ // These bits capture information about the high-level semantics of the
+ // instruction.
+ bit isReturn = 0; // Is this instruction a return instruction?
+ bit isBranch = 0; // Is this instruction a branch instruction?
+ bit isIndirectBranch = 0; // Is this instruction an indirect branch?
+ bit isCompare = 0; // Is this instruction a comparison instruction?
+ bit isMoveImm = 0; // Is this instruction a move immediate instruction?
+ bit isBitcast = 0; // Is this instruction a bitcast instruction?
+ bit isSelect = 0; // Is this instruction a select instruction?
+ bit isBarrier = 0; // Can control flow fall through this instruction?
+ bit isCall = 0; // Is this instruction a call instruction?
+ bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand?
+ bit mayLoad = ?; // Is it possible for this inst to read memory?
+ bit mayStore = ?; // Is it possible for this inst to write memory?
+ bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote?
+ bit isCommutable = 0; // Is this 3 operand instruction commutable?
+ bit isTerminator = 0; // Is this part of the terminator for a basic block?
+ bit isReMaterializable = 0; // Is this instruction re-materializable?
+ bit isPredicable = 0; // Is this instruction predicable?
+  bit hasDelaySlot = 0; // Does this instruction have a delay slot?
+ bit usesCustomInserter = 0; // Pseudo instr needing special help.
+ bit hasPostISelHook = 0; // To be *adjusted* after isel by target hook.
+ bit hasCtrlDep = 0; // Does this instruction r/w ctrl-flow chains?
+ bit isNotDuplicable = 0; // Is it unsafe to duplicate this instruction?
+ bit isConvergent = 0; // Is this instruction convergent?
+ bit isAsCheapAsAMove = 0; // As cheap (or cheaper) than a move instruction.
+ bit hasExtraSrcRegAllocReq = 0; // Sources have special regalloc requirement?
+ bit hasExtraDefRegAllocReq = 0; // Defs have special regalloc requirement?
+ bit isRegSequence = 0; // Is this instruction a kind of reg sequence?
+ // If so, make sure to override
+ // TargetInstrInfo::getRegSequenceLikeInputs.
+ bit isPseudo = 0; // Is this instruction a pseudo-instruction?
+ // If so, won't have encoding information for
+ // the [MC]CodeEmitter stuff.
+ bit isExtractSubreg = 0; // Is this instruction a kind of extract subreg?
+ // If so, make sure to override
+ // TargetInstrInfo::getExtractSubregLikeInputs.
+ bit isInsertSubreg = 0; // Is this instruction a kind of insert subreg?
+ // If so, make sure to override
+ // TargetInstrInfo::getInsertSubregLikeInputs.
+
+ // Side effect flags - When set, the flags have these meanings:
+ //
+ // hasSideEffects - The instruction has side effects that are not
+ // captured by any operands of the instruction or other flags.
+ //
+ bit hasSideEffects = ?;
+
+ // Is this instruction a "real" instruction (with a distinct machine
+ // encoding), or is it a pseudo instruction used for codegen modeling
+ // purposes.
+ // FIXME: For now this is distinct from isPseudo, above, as code-gen-only
+ // instructions can (and often do) still have encoding information
+ // associated with them. Once we've migrated all of them over to true
+ // pseudo-instructions that are lowered to real instructions prior to
+ // the printer/emitter, we can remove this attribute and just use isPseudo.
+ //
+ // The intended use is:
+ // isPseudo: Does not have encoding information and should be expanded,
+ // at the latest, during lowering to MCInst.
+ //
+ // isCodeGenOnly: Does have encoding information and can go through to the
+ // CodeEmitter unchanged, but duplicates a canonical instruction
+ // definition's encoding and should be ignored when constructing the
+ // assembler match tables.
+ bit isCodeGenOnly = 0;
+
+ // Is this instruction a pseudo instruction for use by the assembler parser.
+ bit isAsmParserOnly = 0;
+
+ InstrItinClass Itinerary = NoItinerary;// Execution steps used for scheduling.
+
+ // Scheduling information from TargetSchedule.td.
+ list<SchedReadWrite> SchedRW;
+
+ string Constraints = ""; // OperandConstraint, e.g. $src = $dst.
+
+ /// DisableEncoding - List of operand names (e.g. "$op1,$op2") that should not
+ /// be encoded into the output machineinstr.
+ string DisableEncoding = "";
+
+ string PostEncoderMethod = "";
+ string DecoderMethod = "";
+
+ // Is the instruction decoder method able to completely determine if the
+ // given instruction is valid or not. If the TableGen definition of the
+ // instruction specifies bitpattern A??B where A and B are static bits, the
+ // hasCompleteDecoder flag says whether the decoder method fully handles the
+ // ?? space, i.e. if it is a final arbiter for the instruction validity.
+ // If not then the decoder attempts to continue decoding when the decoder
+ // method fails.
+ //
+  // This makes it possible to handle situations where the encoding is not fully
+ // orthogonal. Example:
+ // * InstA with bitpattern 0b0000????,
+ // * InstB with bitpattern 0b000000?? but the associated decoder method
+ // DecodeInstB() returns Fail when ?? is 0b00 or 0b11.
+ //
+ // The decoder tries to decode a bitpattern that matches both InstA and
+ // InstB bitpatterns first as InstB (because it is the most specific
+ // encoding). In the default case (hasCompleteDecoder = 1), when
+ // DecodeInstB() returns Fail the bitpattern gets rejected. By setting
+ // hasCompleteDecoder = 0 in InstB, the decoder is informed that
+ // DecodeInstB() is not able to determine if all possible values of ?? are
+ // valid or not. If DecodeInstB() returns Fail the decoder will attempt to
+ // decode the bitpattern as InstA too.
+ bit hasCompleteDecoder = 1;
+
+ /// Target-specific flags. This becomes the TSFlags field in TargetInstrDesc.
+ bits<64> TSFlags = 0;
+
+ ///@name Assembler Parser Support
+ ///@{
+
+ string AsmMatchConverter = "";
+
+ /// TwoOperandAliasConstraint - Enable TableGen to auto-generate a
+ /// two-operand matcher inst-alias for a three operand instruction.
+ /// For example, the arm instruction "add r3, r3, r5" can be written
+ /// as "add r3, r5". The constraint is of the same form as a tied-operand
+ /// constraint. For example, "$Rn = $Rd".
+ string TwoOperandAliasConstraint = "";
+
+ ///@}
+
+ /// UseNamedOperandTable - If set, the operand indices of this instruction
+ /// can be queried via the getNamedOperandIdx() function which is generated
+ /// by TableGen.
+ bit UseNamedOperandTable = 0;
+}
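+
+// For example (illustrative; the GPR class and operand names are
+// hypothetical, and the 'set'/'add' pattern operators come from
+// TargetSelectionDAG.td), a register-register add might be defined as:
+//
+//   def ADDrr : Instruction {
+//     let OutOperandList = (outs GPR:$dst);
+//     let InOperandList = (ins GPR:$src1, GPR:$src2);
+//     let AsmString = "add $dst, $src1, $src2";
+//     let Pattern = [(set GPR:$dst, (add GPR:$src1, GPR:$src2))];
+//   }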
+
+/// PseudoInstExpansion - Expansion information for a pseudo-instruction.
+/// Which instruction it expands to and how the operands map from the
+/// pseudo.
+class PseudoInstExpansion<dag Result> {
+ dag ResultInst = Result; // The instruction to generate.
+ bit isPseudo = 1;
+}
+
+/// Predicates - These are extra conditionals which are turned into instruction
+/// selector matching code. Currently each predicate is just a string.
+class Predicate<string cond> {
+ string CondString = cond;
+
+ /// AssemblerMatcherPredicate - If this feature can be used by the assembler
+ /// matcher, this is true. Targets should set this by inheriting their
+ /// feature from the AssemblerPredicate class in addition to Predicate.
+ bit AssemblerMatcherPredicate = 0;
+
+  /// AssemblerCondString - Name of the subtarget feature being tested, used
+  /// as an alternative condition string for the assembler matcher.
+ /// e.g. "ModeThumb" is translated to "(Bits & ModeThumb) != 0".
+ /// "!ModeThumb" is translated to "(Bits & ModeThumb) == 0".
+ /// It can also list multiple features separated by ",".
+ /// e.g. "ModeThumb,FeatureThumb2" is translated to
+ /// "(Bits & ModeThumb) != 0 && (Bits & FeatureThumb2) != 0".
+ string AssemblerCondString = "";
+
+ /// PredicateName - User-level name to use for the predicate. Mainly for use
+ /// in diagnostics such as missing feature errors in the asm matcher.
+ string PredicateName = "";
+}
+
+/// NoHonorSignDependentRounding - This predicate is true if support for
+/// sign-dependent-rounding is not enabled.
+def NoHonorSignDependentRounding
+ : Predicate<"!TM.Options.HonorSignDependentRoundingFPMath()">;
+
+class Requires<list<Predicate> preds> {
+ list<Predicate> Predicates = preds;
+}
+
+/// ops definition - This is just a simple marker used to identify the operand
+/// list for an instruction. outs and ins are identical both syntactically and
+/// semantically; they are used to define def operands and use operands to
+/// improve readability. This should be used like this:
+/// (outs R32:$dst), (ins R32:$src1, R32:$src2) or something similar.
+def ops;
+def outs;
+def ins;
+
+/// variable_ops definition - Mark this instruction as taking a variable number
+/// of operands.
+def variable_ops;
+
+
+/// PointerLikeRegClass - Values that are designed to have pointer width are
+/// derived from this. TableGen treats the register class as having a symbolic
+/// type that it doesn't know, and resolves the actual regclass to use by using
+/// the TargetRegisterInfo::getPointerRegClass() hook at codegen time.
+class PointerLikeRegClass<int Kind> {
+ int RegClassKind = Kind;
+}
+
+
+/// ptr_rc definition - Mark this operand as being a pointer value whose
+/// register class is resolved dynamically via a callback to TargetInstrInfo.
+/// FIXME: We should probably change this to a class which contains a list of
+/// flags. But currently we have but one flag.
+def ptr_rc : PointerLikeRegClass<0>;
+
+/// unknown definition - Mark this operand as being of unknown type, causing
+/// it to be resolved by inference in the context it is used.
+class unknown_class;
+def unknown : unknown_class;
+
+/// AsmOperandClass - Representation for the kinds of operands which the target
+/// specific parser can create and the assembly matcher may need to distinguish.
+///
+/// Operand classes are used to define the order in which instructions are
+/// matched, to ensure that the instruction which gets matched for any
+/// particular list of operands is deterministic.
+///
+/// The target specific parser must be able to classify a parsed operand into a
+/// unique class which does not partially overlap with any other classes. It can
+/// match a subset of some other class, in which case the super class field
+/// should be defined.
+class AsmOperandClass {
+ /// The name to use for this class, which should be usable as an enum value.
+ string Name = ?;
+
+ /// The super classes of this operand.
+ list<AsmOperandClass> SuperClasses = [];
+
+ /// The name of the method on the target specific operand to call to test
+ /// whether the operand is an instance of this class. If not set, this will
+ /// default to "isFoo", where Foo is the AsmOperandClass name. The method
+ /// signature should be:
+ /// bool isFoo() const;
+ string PredicateMethod = ?;
+
+ /// The name of the method on the target specific operand to call to add the
+ /// target specific operand to an MCInst. If not set, this will default to
+ /// "addFooOperands", where Foo is the AsmOperandClass name. The method
+ /// signature should be:
+ /// void addFooOperands(MCInst &Inst, unsigned N) const;
+ string RenderMethod = ?;
+
+ /// The name of the method on the target specific operand to call to custom
+ /// handle the operand parsing. This is useful when the operands do not relate
+ /// to immediates or registers and are very instruction specific (as flags to
+ /// set in a processor register, coprocessor number, ...).
+ string ParserMethod = ?;
+
+ // The diagnostic type to present when referencing this operand in a
+ // match failure error message. By default, use a generic "invalid operand"
+ // diagnostic. The target AsmParser maps these codes to text.
+ string DiagnosticType = "";
+}
+
+def ImmAsmOperand : AsmOperandClass {
+ let Name = "Imm";
+}
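+
+// A sketch of a richer operand class (hypothetical names, for illustration
+// only): a target-specific shifted-immediate class might be declared as:
+//
+//   def ShiftedImmAsmOperand : AsmOperandClass {
+//     let Name = "ShiftedImm";               // matcher enum: MCK_ShiftedImm
+//     let SuperClasses = [ImmAsmOperand];    // a plain Imm is more general
+//     let PredicateMethod = "isShiftedImm";  // bool isShiftedImm() const;
+//     let RenderMethod = "addShiftedImmOperands";
+//     let DiagnosticType = "InvalidShiftedImm";
+//   }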
+
+/// Operand Types - These provide the built-in operand types that may be used
+/// by a target. Targets can optionally provide their own operand types as
+/// needed, though this should not be necessary for RISC targets.
+class Operand<ValueType ty> : DAGOperand {
+ ValueType Type = ty;
+ string PrintMethod = "printOperand";
+ string EncoderMethod = "";
+ string DecoderMethod = "";
+ bit hasCompleteDecoder = 1;
+ string OperandNamespace = "MCOI";
+ string OperandType = "OPERAND_UNKNOWN";
+ dag MIOperandInfo = (ops);
+
+ // MCOperandPredicate - Optionally, a code fragment operating on
+ // const MCOperand &MCOp, and returning a bool, to indicate if
+ // the value of MCOp is valid for the specific subclass of Operand.
+ code MCOperandPredicate;
+
+ // ParserMatchClass - The "match class" that operands of this type fit
+ // in. Match classes are used to define the order in which instructions are
+ // matched, to ensure that the instruction which gets matched is deterministic.
+ //
+ // The target specific parser must be able to classify a parsed operand into
+ // a unique class, which does not partially overlap with any other classes. It
+ // can match a subset of some other class, in which case the AsmOperandClass
+ // should declare the other operand as one of its super classes.
+ AsmOperandClass ParserMatchClass = ImmAsmOperand;
+}
+
+class RegisterOperand<RegisterClass regclass, string pm = "printOperand">
+ : DAGOperand {
+ // RegClass - The register class of the operand.
+ RegisterClass RegClass = regclass;
+ // PrintMethod - The target method to call to print register operands of
+ // this type. The method normally will just use an alt-name index to look
+ // up the name to print. Default to the generic printOperand().
+ string PrintMethod = pm;
+ // ParserMatchClass - The "match class" that operands of this type fit
+ // in. Match classes are used to define the order in which instructions are
+ // matched, to ensure that the instruction which gets matched is deterministic.
+ //
+ // The target specific parser must be able to classify a parsed operand into
+ // a unique class, which does not partially overlap with any other classes. It
+ // can match a subset of some other class, in which case the AsmOperandClass
+ // should declare the other operand as one of its super classes.
+ AsmOperandClass ParserMatchClass;
+
+ string OperandNamespace = "MCOI";
+ string OperandType = "OPERAND_REGISTER";
+}
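+
+// For illustration (hypothetical register class GPR32 and print method, not
+// defined here), a register operand with an alternate printed name:
+//
+//   def GPR32AsmOperand : AsmOperandClass { let Name = "GPR32"; }
+//   def gpr32 : RegisterOperand<GPR32, "printGPR32AltName"> {
+//     let ParserMatchClass = GPR32AsmOperand;
+//   }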
+
+let OperandType = "OPERAND_IMMEDIATE" in {
+def i1imm : Operand<i1>;
+def i8imm : Operand<i8>;
+def i16imm : Operand<i16>;
+def i32imm : Operand<i32>;
+def i64imm : Operand<i64>;
+
+def f32imm : Operand<f32>;
+def f64imm : Operand<f64>;
+}
+
+/// zero_reg definition - Special node to stand for the zero register.
+///
+def zero_reg;
+
+/// All operands which the MC layer classifies as predicates should inherit from
+/// this class in some manner. This is already handled for the most commonly
+/// used PredicateOperand, but may be useful in other circumstances.
+class PredicateOp;
+
+/// OperandWithDefaultOps - This Operand class can be used as the parent class
+/// for an Operand that needs to be initialized with a default value if
+/// no value is supplied in a pattern. This class can be used to simplify the
+/// pattern definitions for instructions that have target specific flags
+/// encoded as immediate operands.
+class OperandWithDefaultOps<ValueType ty, dag defaultops>
+ : Operand<ty> {
+ dag DefaultOps = defaultops;
+}
+
+/// PredicateOperand - This can be used to define a predicate operand for an
+/// instruction. OpTypes specifies the MIOperandInfo for the operand, and
+/// AlwaysVal specifies the value of this predicate when set to "always
+/// execute".
+class PredicateOperand<ValueType ty, dag OpTypes, dag AlwaysVal>
+ : OperandWithDefaultOps<ty, AlwaysVal>, PredicateOp {
+ let MIOperandInfo = OpTypes;
+}
+
+/// OptionalDefOperand - This is used to define an optional definition operand
+/// for an instruction. DefaultOps is the register the operand represents if
+/// none is supplied, e.g. zero_reg.
+class OptionalDefOperand<ValueType ty, dag OpTypes, dag defaultops>
+ : OperandWithDefaultOps<ty, defaultops> {
+ let MIOperandInfo = OpTypes;
+}
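+
+// As a hedged example (modeled loosely on ARM-style predication; the CCR
+// register class and encodings shown are illustrative): a predicate operand
+// whose "always execute" encoding is condition code 14, and an optional
+// flag-setting def operand defaulting to zero_reg, could be written as:
+//
+//   def pred : PredicateOperand<OtherVT, (ops i32imm, CCR),
+//                               (ops (i32 14), zero_reg)>;
+//   def cc_out : OptionalDefOperand<OtherVT, (ops CCR), (ops (i32 zero_reg))>;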
+
+
+// InstrInfo - This class should only be instantiated once to provide parameters
+// which are global to the target machine.
+//
+class InstrInfo {
+ // Target can specify its instructions in either big or little-endian formats.
+ // For instance, while both Sparc and PowerPC are big-endian platforms, the
+ // Sparc manual specifies its instructions in the format [31..0] (big), while
+ // PowerPC specifies them using the format [0..31] (little).
+ bit isLittleEndianEncoding = 0;
+
+ // The instruction properties mayLoad, mayStore, and hasSideEffects are unset
+ // by default, and TableGen will infer their value from the instruction
+ // pattern when possible.
+ //
+ // Normally, TableGen will issue an error if it can't infer the value of a
+ // property that hasn't been set explicitly. When guessInstructionProperties
+ // is set, it will guess a safe value instead.
+ //
+ // This option is a temporary migration help. It will go away.
+ bit guessInstructionProperties = 1;
+
+ // TableGen's instruction encoder generator has support for matching operands
+ // to bit-field variables both by name and by position. While matching by
+ // name is preferred, this is currently not possible for complex operands,
+ // and some targets still rely on the positional encoding rules. When
+ // generating a decoder for such targets, the positional encoding rules must
+ // be used by the decoder generator as well.
+ //
+ // This option is temporary; it will go away once the TableGen decoder
+ // generator has better support for complex operands and targets have
+ // migrated away from using positionally encoded operands.
+ bit decodePositionallyEncodedOperands = 0;
+
+ // When set, this indicates that there will be no overlap between those
+ // operands that are matched by ordering (positional operands) and those
+ // matched by name.
+ //
+ // This option is temporary; it will go away once the TableGen decoder
+ // generator has better support for complex operands and targets have
+ // migrated away from using positionally encoded operands.
+ bit noNamedPositionallyEncodedOperands = 0;
+}
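+
+// Typical usage (hypothetical target, for illustration only):
+//
+//   def MyTargetInstrInfo : InstrInfo {
+//     let isLittleEndianEncoding = 1;
+//     let guessInstructionProperties = 0; // require explicit mayLoad etc.
+//   }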
+
+// Standard Pseudo Instructions.
+// This list must match TargetOpcodes.h and CodeGenTarget.cpp.
+// Only these instructions are allowed in the TargetOpcode namespace.
+let isCodeGenOnly = 1, isPseudo = 1, Namespace = "TargetOpcode" in {
+def PHI : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "PHINODE";
+}
+def INLINEASM : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "";
+ let hasSideEffects = 0; // Note side effect is encoded in an operand.
+}
+def CFI_INSTRUCTION : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "";
+ let hasCtrlDep = 1;
+ let isNotDuplicable = 1;
+}
+def EH_LABEL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "";
+ let hasCtrlDep = 1;
+ let isNotDuplicable = 1;
+}
+def GC_LABEL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "";
+ let hasCtrlDep = 1;
+ let isNotDuplicable = 1;
+}
+def KILL : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "";
+ let hasSideEffects = 0;
+}
+def EXTRACT_SUBREG : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$supersrc, i32imm:$subidx);
+ let AsmString = "";
+ let hasSideEffects = 0;
+}
+def INSERT_SUBREG : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$supersrc, unknown:$subsrc, i32imm:$subidx);
+ let AsmString = "";
+ let hasSideEffects = 0;
+ let Constraints = "$supersrc = $dst";
+}
+def IMPLICIT_DEF : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins);
+ let AsmString = "";
+ let hasSideEffects = 0;
+ let isReMaterializable = 1;
+ let isAsCheapAsAMove = 1;
+}
+def SUBREG_TO_REG : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$implsrc, unknown:$subsrc, i32imm:$subidx);
+ let AsmString = "";
+ let hasSideEffects = 0;
+}
+def COPY_TO_REGCLASS : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src, i32imm:$regclass);
+ let AsmString = "";
+ let hasSideEffects = 0;
+ let isAsCheapAsAMove = 1;
+}
+def DBG_VALUE : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "DBG_VALUE";
+ let hasSideEffects = 0;
+}
+def REG_SEQUENCE : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$supersrc, variable_ops);
+ let AsmString = "";
+ let hasSideEffects = 0;
+ let isAsCheapAsAMove = 1;
+}
+def COPY : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins unknown:$src);
+ let AsmString = "";
+ let hasSideEffects = 0;
+ let isAsCheapAsAMove = 1;
+}
+def BUNDLE : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let AsmString = "BUNDLE";
+}
+def LIFETIME_START : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_START";
+ let hasSideEffects = 0;
+}
+def LIFETIME_END : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i32imm:$id);
+ let AsmString = "LIFETIME_END";
+ let hasSideEffects = 0;
+}
+def STACKMAP : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins i64imm:$id, i32imm:$nbytes, variable_ops);
+ let isCall = 1;
+ let mayLoad = 1;
+ let usesCustomInserter = 1;
+}
+def PATCHPOINT : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins i64imm:$id, i32imm:$nbytes, unknown:$callee,
+ i32imm:$nargs, i32imm:$cc, variable_ops);
+ let isCall = 1;
+ let mayLoad = 1;
+ let usesCustomInserter = 1;
+}
+def STATEPOINT : Instruction {
+ let OutOperandList = (outs);
+ let InOperandList = (ins variable_ops);
+ let usesCustomInserter = 1;
+ let mayLoad = 1;
+ let mayStore = 1;
+ let hasSideEffects = 1;
+ let isCall = 1;
+}
+def LOAD_STACK_GUARD : Instruction {
+ let OutOperandList = (outs ptr_rc:$dst);
+ let InOperandList = (ins);
+ let mayLoad = 1;
+ bit isReMaterializable = 1;
+ let hasSideEffects = 0;
+ bit isPseudo = 1;
+}
+def LOCAL_ESCAPE : Instruction {
+ // This instruction is really just a label. It has to be part of the chain so
+ // that it doesn't get dropped from the DAG, but it produces nothing and has
+ // no side effects.
+ let OutOperandList = (outs);
+ let InOperandList = (ins ptr_rc:$symbol, i32imm:$id);
+ let hasSideEffects = 0;
+ let hasCtrlDep = 1;
+}
+def FAULTING_LOAD_OP : Instruction {
+ let OutOperandList = (outs unknown:$dst);
+ let InOperandList = (ins variable_ops);
+ let usesCustomInserter = 1;
+ let mayLoad = 1;
+}
+}
+
+//===----------------------------------------------------------------------===//
+// AsmParser - This class can be implemented by targets that wish to implement
+// .s file parsing.
+//
+// Subtargets can have multiple different assembly parsers (e.g. AT&T vs Intel
+// syntax on X86 for example).
+//
+class AsmParser {
+ // AsmParserClassName - This specifies the suffix to use for the asmparser
+ // class. Generated AsmParser classes are always prefixed with the target
+ // name.
+ string AsmParserClassName = "AsmParser";
+
+ // AsmParserInstCleanup - If non-empty, this is the name of a custom member
+ // function of the AsmParser class to call on every matched instruction.
+ // This can be used to perform target specific instruction post-processing.
+ string AsmParserInstCleanup = "";
+
+ // ShouldEmitMatchRegisterName - Set to false if the target needs a hand
+ // written register name matcher.
+ bit ShouldEmitMatchRegisterName = 1;
+}
+def DefaultAsmParser : AsmParser;
+
+//===----------------------------------------------------------------------===//
+// AsmParserVariant - Subtargets can have multiple different assembly parsers
+// (e.g. AT&T vs Intel syntax on X86 for example). This class can be
+// implemented by targets to describe such variants.
+//
+class AsmParserVariant {
+ // Variant - AsmParsers can be of multiple different variants. Variants are
+ // used to support targets that need to parse multiple formats for the
+ // assembly language.
+ int Variant = 0;
+
+ // Name - The AsmParser variant name (e.g., AT&T vs Intel).
+ string Name = "";
+
+ // CommentDelimiter - If given, the delimiter string used to recognize
+ // comments which are hard coded in the .td assembler strings for individual
+ // instructions.
+ string CommentDelimiter = "";
+
+ // RegisterPrefix - If given, the token prefix which indicates a register
+ // token. This is used by the matcher to automatically recognize hard coded
+ // register tokens as constrained registers, instead of tokens, for the
+ // purposes of matching.
+ string RegisterPrefix = "";
+
+ // TokenizingCharacters - Characters that are standalone tokens
+ string TokenizingCharacters = "[]*!";
+
+ // SeparatorCharacters - Characters that are not tokens
+ string SeparatorCharacters = " \t,";
+
+ // BreakCharacters - Characters that start new identifiers
+ string BreakCharacters = "";
+}
+def DefaultAsmParserVariant : AsmParserVariant;
+
+/// AssemblerPredicate - This is a Predicate that can be used when the assembler
+/// matches instructions and aliases.
+class AssemblerPredicate<string cond, string name = ""> {
+ bit AssemblerMatcherPredicate = 1;
+ string AssemblerCondString = cond;
+ string PredicateName = name;
+}
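+
+// For illustration (hypothetical subtarget feature): a condition usable by
+// both codegen and the assembler matcher is typically defined by inheriting
+// from both classes:
+//
+//   def HasFPU : Predicate<"Subtarget->hasFPU()">,
+//                AssemblerPredicate<"FeatureFPU", "fpu">;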
+
+/// TokenAlias - This class allows targets to define assembler token
+/// operand aliases. That is, a token literal operand which is equivalent
+/// to another, canonical, token literal. For example, ARM allows:
+/// vmov.u32 s4, #0 -> vmov.i32 s4, #0
+/// 'u32' is a more specific designator for the 32-bit integer type specifier
+/// and is legal for any instruction which accepts 'i32' as a datatype suffix.
+/// def : TokenAlias<".u32", ".i32">;
+///
+/// This works by marking the match class of 'From' as a subclass of the
+/// match class of 'To'.
+class TokenAlias<string From, string To> {
+ string FromToken = From;
+ string ToToken = To;
+}
+
+/// MnemonicAlias - This class allows targets to define assembler mnemonic
+/// aliases. This should be used when all forms of one mnemonic are accepted
+/// with a different mnemonic. For example, X86 allows:
+/// sal %al, 1 -> shl %al, 1
+/// sal %ax, %cl -> shl %ax, %cl
+/// sal %eax, %cl -> shl %eax, %cl
+/// etc. Though "sal" is accepted in many forms, all of them are directly
+/// translated to a shl, so the whole family can be handled with (in the case
+/// of X86, there is actually one alias for each suffix as well):
+/// def : MnemonicAlias<"sal", "shl">;
+///
+/// Mnemonic aliases are mapped before any other translation in the match phase,
+/// and do allow Requires predicates, e.g.:
+///
+/// def : MnemonicAlias<"pushf", "pushfq">, Requires<[In64BitMode]>;
+/// def : MnemonicAlias<"pushf", "pushfl">, Requires<[In32BitMode]>;
+///
+/// Mnemonic aliases can also be constrained to specific variants, e.g.:
+///
+/// def : MnemonicAlias<"pushf", "pushfq", "att">, Requires<[In64BitMode]>;
+///
+/// If no variant (e.g., "att" or "intel") is specified then the alias is
+/// applied unconditionally.
+class MnemonicAlias<string From, string To, string VariantName = ""> {
+ string FromMnemonic = From;
+ string ToMnemonic = To;
+ string AsmVariantName = VariantName;
+
+ // Predicates - Predicates that must be true for this remapping to happen.
+ list<Predicate> Predicates = [];
+}
+
+/// InstAlias - This defines an alternate assembly syntax that is allowed to
+/// match an instruction that has a different (more canonical) assembly
+/// representation.
+class InstAlias<string Asm, dag Result, int Emit = 1> {
+ string AsmString = Asm; // The .s format to match the instruction with.
+ dag ResultInst = Result; // The MCInst to generate.
+
+ // This determines the order in which the InstPrinter detects aliases when
+ // printing. A larger value makes the alias more likely to be
+ // emitted. The Instruction's own definition is notionally 0.5, so 0
+ // disables printing and 1 enables it if there are no conflicting aliases.
+ int EmitPriority = Emit;
+
+ // Predicates - Predicates that must be true for this to match.
+ list<Predicate> Predicates = [];
+
+ // If the instruction specified in Result has defined an AsmMatchConverter
+ // then setting this to 1 will cause the alias to use the AsmMatchConverter
+ // function when converting the OperandVector into an MCInst instead of the
+ // function that is generated by the dag Result.
+ // Setting this to 0 will cause the alias to ignore the Result instruction's
+ // defined AsmMatchConverter and instead use the function generated by the
+ // dag Result.
+ bit UseInstAsmMatchConverter = 1;
+}
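+
+// For example (hypothetical ADDri instruction and GPR class, illustrative
+// only), a register-move alias built on an add-with-zero would be written as:
+//
+//   def : InstAlias<"mov $dst, $src", (ADDri GPR:$dst, GPR:$src, 0)>;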
+
+//===----------------------------------------------------------------------===//
+// AsmWriter - This class can be implemented by targets that need to customize
+// the format of the .s file writer.
+//
+// Subtargets can have multiple different asmwriters (e.g. AT&T vs Intel syntax
+// on X86 for example).
+//
+class AsmWriter {
+ // AsmWriterClassName - This specifies the suffix to use for the asmwriter
+ // class. Generated AsmWriter classes are always prefixed with the target
+ // name.
+ string AsmWriterClassName = "InstPrinter";
+
+ // PassSubtarget - Determines whether MCSubtargetInfo should be passed to
+ // the various print methods.
+ // FIXME: Remove after all ports are updated.
+ int PassSubtarget = 0;
+
+ // Variant - AsmWriters can be of multiple different variants. Variants are
+ // used to support targets that need to emit assembly code in ways that are
+ // mostly the same for different targets, but have minor differences in
+ // syntax. If the asmstring contains {|} characters, this integer
+ // will specify which alternative to use. For example "{x|y|z}" with Variant
+ // == 1, will expand to "y".
+ int Variant = 0;
+}
+def DefaultAsmWriter : AsmWriter;
+
+
+//===----------------------------------------------------------------------===//
+// Target - This class contains the "global" target information
+//
+class Target {
+ // InstructionSet - Instruction set description for this target.
+ InstrInfo InstructionSet;
+
+ // AssemblyParsers - The AsmParser instances available for this target.
+ list<AsmParser> AssemblyParsers = [DefaultAsmParser];
+
+ /// AssemblyParserVariants - The AsmParserVariant instances available for
+ /// this target.
+ list<AsmParserVariant> AssemblyParserVariants = [DefaultAsmParserVariant];
+
+ // AssemblyWriters - The AsmWriter instances available for this target.
+ list<AsmWriter> AssemblyWriters = [DefaultAsmWriter];
+}
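+
+// A minimal sketch (hypothetical target) tying the pieces above together:
+//
+//   def MyTarget : Target {
+//     let InstructionSet = MyTargetInstrInfo;
+//   }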
+
+//===----------------------------------------------------------------------===//
+// SubtargetFeature - A characteristic of the chip set.
+//
+class SubtargetFeature<string n, string a, string v, string d,
+ list<SubtargetFeature> i = []> {
+ // Name - Feature name. Used by command line (-mattr=) to determine the
+ // appropriate target chip.
+ //
+ string Name = n;
+
+ // Attribute - Attribute to be set by feature.
+ //
+ string Attribute = a;
+
+ // Value - Value the attribute to be set to by feature.
+ //
+ string Value = v;
+
+ // Desc - Feature description. Used by command line (-mattr=) to display help
+ // information.
+ //
+ string Desc = d;
+
+ // Implies - Features that this feature implies are present. If one of those
+ // features isn't set, then this one shouldn't be set either.
+ //
+ list<SubtargetFeature> Implies = i;
+}
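+
+// For instance (hypothetical feature, illustrative only), enabling -mattr=+fpu
+// on the command line to set a HasFPU field in the subtarget:
+//
+//   def FeatureFPU : SubtargetFeature<"fpu", "HasFPU", "true",
+//                                     "Enable hardware FP instructions">;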
+
+/// Specifies a Subtarget feature that this instruction is deprecated on.
+class Deprecated<SubtargetFeature dep> {
+ SubtargetFeature DeprecatedFeatureMask = dep;
+}
+
+/// A custom predicate used to determine if an instruction is
+/// deprecated or not.
+class ComplexDeprecationPredicate<string dep> {
+ string ComplexDeprecationPredicate = dep;
+}
+
+//===----------------------------------------------------------------------===//
+// Processor chip sets - These values represent each of the chip sets supported
+// by the scheduler. Each Processor definition requires corresponding
+// instruction itineraries.
+//
+class Processor<string n, ProcessorItineraries pi, list<SubtargetFeature> f> {
+ // Name - Chip set name. Used by command line (-mcpu=) to determine the
+ // appropriate target chip.
+ //
+ string Name = n;
+
+ // SchedModel - The machine model for scheduling and instruction cost.
+ //
+ SchedMachineModel SchedModel = NoSchedModel;
+
+ // ProcItin - The scheduling information for the target processor.
+ //
+ ProcessorItineraries ProcItin = pi;
+
+ // Features - List of subtarget features for this processor.
+ list<SubtargetFeature> Features = f;
+}
+
+// ProcessorModel allows subtargets to specify the more general
+// SchedMachineModel instead of a ProcessorItinerary. Subtargets will
+// gradually move to this newer form.
+//
+// Although this class always passes NoItineraries to the Processor
+// class, the SchedMachineModel may still define valid Itineraries.
+class ProcessorModel<string n, SchedMachineModel m, list<SubtargetFeature> f>
+ : Processor<n, NoItineraries, f> {
+ let SchedModel = m;
+}
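+
+// Illustrative only (hypothetical chip, itinerary, and model names):
+//
+//   def : Processor<"generic", GenericItineraries, []>;
+//   def : ProcessorModel<"fast-chip", FastChipSchedModel, [FeatureFPU]>;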
+
+//===----------------------------------------------------------------------===//
+// InstrMapping - This class is used to create mapping tables to relate
+// instructions with each other based on the values specified in RowFields,
+// ColFields, KeyCol and ValueCols.
+//
+class InstrMapping {
+ // FilterClass - Used to limit search space only to the instructions that
+ // define the relationship modeled by this InstrMapping record.
+ string FilterClass;
+
+ // RowFields - List of fields/attributes that should be same for all the
+ // instructions in a row of the relation table. Think of this as a set of
+ // properties shared by all the instructions related by this relationship
+ // model and is used to categorize instructions into subgroups. For instance,
+ // if we want to define a relation that maps 'Add' instruction to its
+ // predicated forms, we can define RowFields like this:
+ //
+ // let RowFields = ["BaseOp"]
+ // All add instructions, predicated or non-predicated, will have to set their
+ // BaseOp to the same value.
+ //
+ // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' }
+ // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' }
+ // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false' }
+ list<string> RowFields = [];
+
+ // List of fields/attributes that are same for all the instructions
+ // in a column of the relation table.
+ // Ex: let ColFields = 'predSense' -- It means that the columns are arranged
+ // based on the 'predSense' values. All the instructions in a specific
+ // column have the same value, and it is fixed for the column according
+ // to the values set in 'ValueCols'.
+ list<string> ColFields = [];
+
+ // Values for the fields/attributes listed in 'ColFields'.
+ // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction
+ // that models this relation) should be non-predicated.
+ // In the example above, 'Add' is the key instruction.
+ list<string> KeyCol = [];
+
+ // List of values for the fields/attributes listed in 'ColFields', one for
+ // each column in the relation table.
+ //
+ // Ex: let ValueCols = [['true'],['false']] -- It adds two columns in the
+ // table. The first column requires all the instructions to have predSense
+ // set to 'true' and the second column requires it to be 'false'.
+ list<list<string> > ValueCols = [];
+}
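+
+// Pulling the fields above together, the Add relation sketched in the
+// comments could be modeled as (illustrative only; 'PredRel' is a
+// hypothetical filter class):
+//
+//   def getPredOpcode : InstrMapping {
+//     let FilterClass = "PredRel";
+//     let RowFields = ["BaseOp"];
+//     let ColFields = ["predSense"];
+//     let KeyCol = ["nopred"];
+//     let ValueCols = [["true"], ["false"]];
+//   }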
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for calling conventions.
+//
+include "llvm/Target/TargetCallingConv.td"
+
+//===----------------------------------------------------------------------===//
+// Pull in the common support for DAG isel generation.
+//
+include "llvm/Target/TargetSelectionDAG.td"
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.h b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
new file mode 100644
index 0000000..0c6c1f1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.h
@@ -0,0 +1,202 @@
+//===-- llvm/Target/TargetCallingConv.h - Calling Convention ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines types for working with calling-convention information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETCALLINGCONV_H
+#define LLVM_TARGET_TARGETCALLINGCONV_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include <string>
+#include <limits.h>
+
+namespace llvm {
+
+namespace ISD {
+ struct ArgFlagsTy {
+ private:
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t Returned = 1ULL<<6; ///< Always returned
+ static const uint64_t ReturnedOffs = 6;
+ static const uint64_t ByValAlign = 0xFULL<<7; ///< Struct alignment
+ static const uint64_t ByValAlignOffs = 7;
+ static const uint64_t Split = 1ULL<<11;
+ static const uint64_t SplitOffs = 11;
+ static const uint64_t InAlloca = 1ULL<<12; ///< Passed with inalloca
+ static const uint64_t InAllocaOffs = 12;
+ static const uint64_t SplitEnd = 1ULL<<13; ///< Last part of a split
+ static const uint64_t SplitEndOffs = 13;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0x3fffffffULL<<32; ///< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+ static const uint64_t InConsecutiveRegsLast = 0x1ULL<<62; ///< Last part of consecutive regs
+ static const uint64_t InConsecutiveRegsLastOffs = 62;
+ static const uint64_t InConsecutiveRegs = 0x1ULL<<63; ///< Passed in consecutive regs
+ static const uint64_t InConsecutiveRegsOffs = 63;
+
+ static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
+
+ uint64_t Flags;
+ public:
+ ArgFlagsTy() : Flags(0) { }
+
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
+
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
+
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
+
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
+
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
+
+ bool isInAlloca() const { return Flags & InAlloca; }
+ void setInAlloca() { Flags |= One << InAllocaOffs; }
+
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
+
+ bool isReturned() const { return Flags & Returned; }
+ void setReturned() { Flags |= One << ReturnedOffs; }
+
+ bool isInConsecutiveRegs() const { return Flags & InConsecutiveRegs; }
+ void setInConsecutiveRegs() { Flags |= One << InConsecutiveRegsOffs; }
+
+ bool isInConsecutiveRegsLast() const { return Flags & InConsecutiveRegsLast; }
+ void setInConsecutiveRegsLast() { Flags |= One << InConsecutiveRegsLastOffs; }
+
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
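+
+ // Worked example of the alignment encoding: setByValAlign(16) stores
+ // Log2_32(16) + 1 == 5 in the 4-bit ByValAlign field, and getByValAlign()
+ // decodes it as (1 << 5) / 2 == 16. A stored value of 0 decodes to 0,
+ // meaning no alignment has been recorded.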
+
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
+
+ bool isSplitEnd() const { return Flags & SplitEnd; }
+ void setSplitEnd() { Flags |= One << SplitEndOffs; }
+
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
+ }
+ void setOrigAlign(unsigned A) {
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
+ }
+
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
+ };
+
+ /// InputArg - This struct carries flags and type information about a
+ /// single incoming (formal) argument or incoming (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct InputArg {
+ ArgFlagsTy Flags;
+ MVT VT;
+ EVT ArgVT;
+ bool Used;
+
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+ /// Sentinel value for implicit machine-level input arguments.
+ static const unsigned NoArgIndex = UINT_MAX;
+
+ /// Offset in bytes of current input value relative to the beginning of
+ /// original argument. E.g. if an argument was split into four 32-bit
+ /// registers, there are 4 InputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
+ InputArg() : VT(MVT::Other), Used(false) {}
+ InputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool used,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
+ VT = vt.getSimpleVT();
+ ArgVT = argvt;
+ }
+
+ bool isOrigArg() const {
+ return OrigArgIndex != NoArgIndex;
+ }
+
+ unsigned getOrigArgIndex() const {
+ assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+ return OrigArgIndex;
+ }
+ };
+
+ /// OutputArg - This struct carries flags and a value for a
+ /// single outgoing (actual) argument or outgoing (from the perspective
+ /// of the caller) return value virtual register.
+ ///
+ struct OutputArg {
+ ArgFlagsTy Flags;
+ MVT VT;
+ EVT ArgVT;
+
+ /// IsFixed - Is this a "fixed" value, i.e. not passed through a vararg "...".
+ bool IsFixed;
+
+ /// Index of the original Function's argument.
+ unsigned OrigArgIndex;
+
+ /// Offset in bytes of current output value relative to the beginning of
+ /// original argument. E.g. if an argument was split into four 32-bit
+ /// registers, there are 4 OutputArgs with PartOffsets 0, 4, 8 and 12.
+ unsigned PartOffset;
+
+ OutputArg() : IsFixed(false) {}
+ OutputArg(ArgFlagsTy flags, EVT vt, EVT argvt, bool isfixed,
+ unsigned origIdx, unsigned partOffs)
+ : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx),
+ PartOffset(partOffs) {
+ VT = vt.getSimpleVT();
+ ArgVT = argvt;
+ }
+ };
+}
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetCallingConv.td b/contrib/llvm/include/llvm/Target/TargetCallingConv.td
new file mode 100644
index 0000000..2e766c4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetCallingConv.td
@@ -0,0 +1,177 @@
+//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces with which targets
+// describe their calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+class CCAction;
+class CallingConv;
+
+/// CCCustom - Calls a custom arg handling function.
+class CCCustom<string fn> : CCAction {
+ string FuncName = fn;
+}
+
+/// CCPredicateAction - Instances of this class check some predicate, then
+/// delegate to another action if the predicate is true.
+class CCPredicateAction<CCAction A> : CCAction {
+ CCAction SubAction = A;
+}
+
+/// CCIfType - If the current argument is one of the specified types, apply
+/// Action A.
+class CCIfType<list<ValueType> vts, CCAction A> : CCPredicateAction<A> {
+ list<ValueType> VTs = vts;
+}
+
+/// CCIf - If the predicate matches, apply A.
+class CCIf<string predicate, CCAction A> : CCPredicateAction<A> {
+ string Predicate = predicate;
+}
+
+/// CCIfByVal - If the current argument has ByVal parameter attribute, apply
+/// Action A.
+class CCIfByVal<CCAction A> : CCIf<"ArgFlags.isByVal()", A> {
+}
+
+/// CCIfConsecutiveRegs - If the current argument has InConsecutiveRegs
+/// parameter attribute, apply Action A.
+class CCIfConsecutiveRegs<CCAction A> : CCIf<"ArgFlags.isInConsecutiveRegs()", A> {
+}
+
+/// CCIfCC - Match if the current calling convention is 'CC'.
+class CCIfCC<string CC, CCAction A>
+ : CCIf<!strconcat("State.getCallingConv() == ", CC), A> {}
+
+/// CCIfInReg - If this argument is marked with the 'inreg' attribute, apply
+/// the specified action.
+class CCIfInReg<CCAction A> : CCIf<"ArgFlags.isInReg()", A> {}
+
+/// CCIfNest - If this argument is marked with the 'nest' attribute, apply
+/// the specified action.
+class CCIfNest<CCAction A> : CCIf<"ArgFlags.isNest()", A> {}
+
+/// CCIfSplit - If this argument is marked with the 'split' attribute, apply
+/// the specified action.
+class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {}
+
+/// CCIfSRet - If this argument is marked with the 'sret' attribute, apply
+/// the specified action.
+class CCIfSRet<CCAction A> : CCIf<"ArgFlags.isSRet()", A> {}
+
+/// CCIfVarArg - If the current function is vararg, apply the action.
+class CCIfVarArg<CCAction A> : CCIf<"State.isVarArg()", A> {}
+
+/// CCIfNotVarArg - If the current function is not vararg, apply the action.
+class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}
+
+/// CCAssignToReg - This action matches if there is a register in the specified
+/// list that is still available. If so, it assigns the value to the first
+/// available register and succeeds.
+class CCAssignToReg<list<Register> regList> : CCAction {
+ list<Register> RegList = regList;
+}
+
+/// CCAssignToRegWithShadow - Same as CCAssignToReg, but with a list of
+/// registers that become shadowed when a corresponding register is used.
+class CCAssignToRegWithShadow<list<Register> regList,
+ list<Register> shadowList> : CCAction {
+ list<Register> RegList = regList;
+ list<Register> ShadowRegList = shadowList;
+}
+
+/// CCAssignToStack - This action always matches: it assigns the value to a
+/// stack slot of the specified size and alignment on the stack. If size is
+/// zero then the ABI size is used; if align is zero then the ABI alignment
+/// is used - these may depend on the target or subtarget.
+class CCAssignToStack<int size, int align> : CCAction {
+ int Size = size;
+ int Align = align;
+}
+
+/// CCAssignToStackWithShadow - Same as CCAssignToStack, but with a list of
+/// registers to be shadowed. Note that, unlike CCAssignToRegWithShadow, this
+/// shadows ALL of the registers in shadowList.
+class CCAssignToStackWithShadow<int size,
+ int align,
+ list<Register> shadowList> : CCAction {
+ int Size = size;
+ int Align = align;
+ list<Register> ShadowRegList = shadowList;
+}
+
+/// CCPassByVal - This action always matches: it assigns the value to a stack
+/// slot to implement ByVal aggregate parameter passing. Size and alignment
+/// specify the minimum size and alignment for the stack slot.
+class CCPassByVal<int size, int align> : CCAction {
+ int Size = size;
+ int Align = align;
+}
+
+/// CCPromoteToType - If applied, this promotes the specified current value to
+/// the specified type.
+class CCPromoteToType<ValueType destTy> : CCAction {
+ ValueType DestTy = destTy;
+}
+
+/// CCPromoteToUpperBitsInType - If applied, this promotes the specified current
+/// value to the specified type and shifts the value into the upper bits.
+class CCPromoteToUpperBitsInType<ValueType destTy> : CCAction {
+ ValueType DestTy = destTy;
+}
+
+/// CCBitConvertToType - If applied, this bitconverts the specified current
+/// value to the specified type.
+class CCBitConvertToType<ValueType destTy> : CCAction {
+ ValueType DestTy = destTy;
+}
+
+/// CCPassIndirect - If applied, this stores the value to the stack and passes
+/// the pointer as a normal argument.
+class CCPassIndirect<ValueType destTy> : CCAction {
+ ValueType DestTy = destTy;
+}
+
+/// CCDelegateTo - This action invokes the specified sub-calling-convention. It
+/// is successful if the specified CC matches.
+class CCDelegateTo<CallingConv cc> : CCAction {
+ CallingConv CC = cc;
+}
+
+/// CallingConv - An instance of this is used to define each calling convention
+/// that the target supports.
+class CallingConv<list<CCAction> actions> {
+ list<CCAction> Actions = actions;
+ bit Custom = 0;
+}
+
+/// CustomCallingConv - An instance of this is used to declare calling
+/// conventions that are implemented using a custom function of the same name.
+class CustomCallingConv : CallingConv<[]> {
+ let Custom = 1;
+}
+
+/// CalleeSavedRegs - A list of callee saved registers for a given calling
+/// convention. The order of registers is used by PrologEpilogInsertion when
+/// allocating stack slots for saved registers.
+///
+/// For each CalleeSavedRegs def, TableGen will emit a FOO_SaveList array for
+/// returning from getCalleeSavedRegs(), and a FOO_RegMask bit mask suitable for
+/// returning from getCallPreservedMask().
+class CalleeSavedRegs<dag saves> {
+ dag SaveList = saves;
+
+ // Registers that are also preserved across function calls, but should not be
+ // included in the generated FOO_SaveList array. These registers will be
+ // included in the FOO_RegMask bit mask. This can be used for registers that
+ // are saved automatically, like the SPARC register windows.
+ dag OtherPreserved;
+}
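+
+// A hedged end-to-end sketch (hypothetical registers R0-R7, illustrative
+// only): a simple 32-bit calling convention built from the actions above,
+// plus its callee-saved register list:
+//
+//   def CC_MyTarget : CallingConv<[
+//     CCIfType<[i8, i16], CCPromoteToType<i32>>,      // widen small ints
+//     CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
+//     CCAssignToStack<4, 4>                           // overflow to stack
+//   ]>;
+//
+//   def CSR_MyTarget : CalleeSavedRegs<(add R4, R5, R6, R7)>;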
diff --git a/contrib/llvm/include/llvm/Target/TargetFrameLowering.h b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
new file mode 100644
index 0000000..cadd07d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetFrameLowering.h
@@ -0,0 +1,318 @@
+//===-- llvm/Target/TargetFrameLowering.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to describe the layout of a stack frame on the target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETFRAMELOWERING_H
+#define LLVM_TARGET_TARGETFRAMELOWERING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class BitVector;
+ class CalleeSavedInfo;
+ class MachineFunction;
+ class RegScavenger;
+
+/// Information about stack frame layout on the target. It holds the direction
+/// of stack growth, the known stack alignment on entry to each function, and
+/// the offset to the locals area.
+///
+/// The offset to the local area is the offset from the stack pointer on
+/// function entry to the first location where function data (local variables,
+/// spill locations) can be stored.
+class TargetFrameLowering {
+public:
+ enum StackDirection {
+ StackGrowsUp, // Adding to the stack increases the stack address
+ StackGrowsDown // Adding to the stack decreases the stack address
+ };
+
+ // Maps a callee saved register to a stack slot with a fixed offset.
+ struct SpillSlot {
+ unsigned Reg;
+ int Offset; // Offset relative to stack pointer on function entry.
+ };
+private:
+ StackDirection StackDir;
+ unsigned StackAlignment;
+ unsigned TransientStackAlignment;
+ int LocalAreaOffset;
+ bool StackRealignable;
+public:
+ TargetFrameLowering(StackDirection D, unsigned StackAl, int LAO,
+ unsigned TransAl = 1, bool StackReal = true)
+ : StackDir(D), StackAlignment(StackAl), TransientStackAlignment(TransAl),
+ LocalAreaOffset(LAO), StackRealignable(StackReal) {}
+
+ virtual ~TargetFrameLowering();
+
+ // These methods return information that describes the abstract stack layout
+ // of the target machine.
+
+ /// getStackGrowthDirection - Return the direction the stack grows
+ ///
+ StackDirection getStackGrowthDirection() const { return StackDir; }
+
+ /// getStackAlignment - This method returns the number of bytes to which the
+ /// stack pointer must be aligned on entry to a function. Typically, this
+ /// is the largest alignment for any data object in the target.
+ ///
+ unsigned getStackAlignment() const { return StackAlignment; }
+
+ /// alignSPAdjust - This method aligns the stack adjustment to the correct
+ /// alignment.
+ ///
+ int alignSPAdjust(int SPAdj) const {
+ if (SPAdj < 0) {
+ SPAdj = -RoundUpToAlignment(-SPAdj, StackAlignment);
+ } else {
+ SPAdj = RoundUpToAlignment(SPAdj, StackAlignment);
+ }
+ return SPAdj;
+ }
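+
+ // For example, with StackAlignment == 16, alignSPAdjust(20) returns 32 and
+ // alignSPAdjust(-20) returns -32: adjustments are rounded away from zero to
+ // a multiple of the stack alignment.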
+
+ /// getTransientStackAlignment - This method returns the number of bytes to
+ /// which the stack pointer must be aligned at all times, even between
+ /// calls.
+ ///
+ unsigned getTransientStackAlignment() const {
+ return TransientStackAlignment;
+ }
+
+ /// isStackRealignable - This method returns whether the stack can be
+ /// realigned.
+ bool isStackRealignable() const {
+ return StackRealignable;
+ }
+
+ /// Return the skew that has to be applied to stack alignment under
+ /// certain conditions (e.g. stack was adjusted before function \p MF
+ /// was called).
+ virtual unsigned getStackAlignmentSkew(const MachineFunction &MF) const;
+
+ /// getOffsetOfLocalArea - This method returns the offset of the local area
+ /// from the stack pointer on entrance to a function.
+ ///
+ int getOffsetOfLocalArea() const { return LocalAreaOffset; }
+
+ /// isFPCloseToIncomingSP - Return true if the frame pointer is close to
+ /// the incoming stack pointer, false if it is close to the post-prologue
+ /// stack pointer.
+ virtual bool isFPCloseToIncomingSP() const { return true; }
+
+ /// assignCalleeSavedSpillSlots - Allows target to override spill slot
+ /// assignment logic. If implemented, assignCalleeSavedSpillSlots() should
+ /// assign frame slots to all CSI entries and return true. If this method
+ /// returns false, spill slots will be assigned using the generic implementation.
+ /// assignCalleeSavedSpillSlots() may add, delete or rearrange elements of
+ /// CSI.
+ virtual bool
+ assignCalleeSavedSpillSlots(MachineFunction &MF,
+ const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const {
+ return false;
+ }
+
+ /// getCalleeSavedSpillSlots - This method returns a pointer to an array of
+ /// pairs that contains an entry for each callee saved register that must be
+ /// spilled to a particular stack location if it is spilled.
+ ///
+ /// Each entry in this array contains a <register,offset> pair, indicating the
+ /// fixed offset from the incoming stack pointer that each register should be
+ /// spilled at. If a register is not listed here, the code generator is
+ /// allowed to spill it anywhere it chooses.
+ ///
+ virtual const SpillSlot *
+ getCalleeSavedSpillSlots(unsigned &NumEntries) const {
+ NumEntries = 0;
+ return nullptr;
+ }
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ virtual bool targetHandlesStackFrameRounding() const {
+ return false;
+ }
+
+ /// Returns true if the target will correctly handle shrink wrapping.
+ virtual bool enableShrinkWrapping(const MachineFunction &MF) const {
+ return false;
+ }
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ virtual void emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const = 0;
+ virtual void emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const = 0;
+
+ /// Replace a StackProbe stub (if any) with the actual probe code inline
+ virtual void inlineStackProbe(MachineFunction &MF,
+ MachineBasicBlock &PrologueMBB) const {}
+
+ /// Adjust the prologue to have the function use segmented stacks. This works
+ /// by adding a check even before the "normal" function prologue.
+ virtual void adjustForSegmentedStacks(MachineFunction &MF,
+ MachineBasicBlock &PrologueMBB) const {}
+
+ /// Adjust the prologue to add Erlang Run-Time System (ERTS) specific code in
+ /// the assembly prologue to explicitly handle the stack.
+ virtual void adjustForHiPEPrologue(MachineFunction &MF,
+ MachineBasicBlock &PrologueMBB) const {}
+
+ /// Adjust the prologue to add an allocation at a fixed offset from the frame
+ /// pointer.
+ virtual void
+ adjustForFrameAllocatePrologue(MachineFunction &MF,
+ MachineBasicBlock &PrologueMBB) const {}
+
+ /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of store instructions via
+ /// storeRegToStackSlot(). Returns false otherwise.
+ virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// restoreCalleeSavedRegisters - Issues instruction(s) to restore all callee
+ /// saved registers and returns true if it isn't possible / profitable to do
+ /// so by issuing a series of load instructions via loadRegFromStackSlot().
+ /// Returns false otherwise.
+ virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ /// Return true if the target needs to disable frame pointer elimination.
+ virtual bool noFramePointerElim(const MachineFunction &MF) const;
+
+ /// hasFP - Return true if the specified function should have a dedicated
+ /// frame pointer register. For most targets this is true only if the function
+ /// has variable sized allocas or if frame pointer elimination is disabled.
+ virtual bool hasFP(const MachineFunction &MF) const = 0;
+
+ /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
+ /// not required, we reserve argument space for call sites in the function
+ /// immediately on entry to the current function. This eliminates the need for
+ /// add/sub sp brackets around call sites. Returns true if the call frame is
+ /// included as part of the stack frame.
+ virtual bool hasReservedCallFrame(const MachineFunction &MF) const {
+ return !hasFP(MF);
+ }
+
+ /// canSimplifyCallFramePseudos - When possible, it's best to simplify the
+ /// call frame pseudo ops before doing frame index elimination. This is
+ /// possible only when frame index references between the pseudos won't
+ /// need adjusting for the call frame adjustments. Normally, that's true
+ /// if the function has a reserved call frame or a frame pointer. Some
+ /// targets (Thumb2, for example) may have more complicated criteria,
+ /// however, and can override this behavior.
+ virtual bool canSimplifyCallFramePseudos(const MachineFunction &MF) const {
+ return hasReservedCallFrame(MF) || hasFP(MF);
+ }
+
+ // needsFrameIndexResolution - Do we need to perform FI resolution for
+ // this function? Normally, this is required only when the function
+ // has any stack objects. However, targets may want to override this.
+ virtual bool needsFrameIndexResolution(const MachineFunction &MF) const;
+
+ /// getFrameIndexReference - This method should return the base register
+ /// and offset used to reference a frame index location. The offset is
+ /// returned directly, and the base register is returned via FrameReg.
+ virtual int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const;
+
+ /// Same as above, except that the 'base register' will always be RSP, not
+ /// RBP on x86. This is generally used for emitting statepoint or EH tables
+ /// that use offsets from RSP.
+ /// TODO: This should really be a parameterizable choice.
+ virtual int getFrameIndexReferenceFromSP(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ // default to calling normal version, we override this on x86 only
+ llvm_unreachable("unimplemented for non-x86");
+ return 0;
+ }
+
+ /// This method determines which of the registers reported by
+ /// TargetRegisterInfo::getCalleeSavedRegs() should actually get saved.
+ /// The default implementation populates the \p SavedRegs bitset with
+ /// all registers which are modified in the function; targets may override
+ /// this function to save additional registers.
+ /// This method also sets up the register scavenger ensuring there is a free
+ /// register or a frame index available.
+ virtual void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS = nullptr) const;
+
+ /// processFunctionBeforeFrameFinalized - This method is called immediately
+ /// before the specified function's frame layout (MF.getFrameInfo()) is
+ /// finalized. Once the frame is finalized, MO_FrameIndex operands are
+ /// replaced with direct constants. This method is optional.
+ ///
+ virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
+ RegScavenger *RS = nullptr) const {
+ }
+
+ virtual unsigned getWinEHParentFrameOffset(const MachineFunction &MF) const {
+ report_fatal_error("WinEH not implemented for this target");
+ }
+
+ /// eliminateCallFramePseudoInstr - This method is called during prolog/epilog
+ /// code insertion to eliminate call frame setup and destroy pseudo
+ /// instructions (but only if the Target is using them). It is responsible
+ /// for eliminating these instructions, replacing them with concrete
+ /// instructions. This method need only be implemented if using call frame
+ /// setup/destroy pseudo instructions.
+ ///
+ virtual void
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const {
+ llvm_unreachable("Call Frame Pseudo Instructions do not exist on this "
+ "target!");
+ }
+
+ /// Check whether or not the given \p MBB can be used as a prologue
+ /// for the target.
+ /// The prologue will be inserted first in this basic block.
+ /// This method is used by the shrink-wrapping pass to decide if
+ /// \p MBB will be correctly handled by the target.
+ /// If the target enables shrink-wrapping without overriding this method,
+ /// every basic block is assumed to be a valid prologue.
+ virtual bool canUseAsPrologue(const MachineBasicBlock &MBB) const {
+ return true;
+ }
+
+ /// Check whether or not the given \p MBB can be used as an epilogue
+ /// for the target.
+ /// The epilogue will be inserted before the first terminator of that block.
+ /// This method is used by the shrink-wrapping pass to decide if
+ /// \p MBB will be correctly handled by the target.
+ /// If the target enables shrink-wrapping without overriding this method,
+ /// every basic block is assumed to be a valid epilogue.
+ virtual bool canUseAsEpilogue(const MachineBasicBlock &MBB) const {
+ return true;
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetInstrInfo.h b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
new file mode 100644
index 0000000..0cebcf1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetInstrInfo.h
@@ -0,0 +1,1440 @@
+//===-- llvm/Target/TargetInstrInfo.h - Instruction Info --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target machine instruction set to the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETINSTRINFO_H
+#define LLVM_TARGET_TARGETINSTRINFO_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineCombinerPattern.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/BranchProbability.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+
+namespace llvm {
+
+class InstrItineraryData;
+class LiveVariables;
+class MCAsmInfo;
+class MachineMemOperand;
+class MachineRegisterInfo;
+class MDNode;
+class MCInst;
+struct MCSchedModel;
+class MCSymbolRefExpr;
+class SDNode;
+class ScheduleHazardRecognizer;
+class SelectionDAG;
+class ScheduleDAG;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSubtargetInfo;
+class TargetSchedModel;
+class DFAPacketizer;
+
+template<class T> class SmallVectorImpl;
+
+
+//---------------------------------------------------------------------------
+///
+/// TargetInstrInfo - Interface to description of machine instruction set
+///
+class TargetInstrInfo : public MCInstrInfo {
+ TargetInstrInfo(const TargetInstrInfo &) = delete;
+ void operator=(const TargetInstrInfo &) = delete;
+public:
+ TargetInstrInfo(unsigned CFSetupOpcode = ~0u, unsigned CFDestroyOpcode = ~0u,
+ unsigned CatchRetOpcode = ~0u)
+ : CallFrameSetupOpcode(CFSetupOpcode),
+ CallFrameDestroyOpcode(CFDestroyOpcode),
+ CatchRetOpcode(CatchRetOpcode) {}
+
+ virtual ~TargetInstrInfo();
+
+ static bool isGenericOpcode(unsigned Opc) {
+ return Opc <= TargetOpcode::GENERIC_OP_END;
+ }
+
+ /// Given a machine instruction descriptor, returns the register
+ /// class constraint for OpNum, or NULL.
+ const TargetRegisterClass *getRegClass(const MCInstrDesc &TID,
+ unsigned OpNum,
+ const TargetRegisterInfo *TRI,
+ const MachineFunction &MF) const;
+
+ /// Return true if the instruction is trivially rematerializable, meaning it
+ /// has no side effects and requires no operands that aren't always available.
+ /// This means the only allowed uses are constants and unallocatable physical
+ /// registers so that the instruction's result is independent of the place
+ /// in the function.
+ bool isTriviallyReMaterializable(const MachineInstr *MI,
+ AliasAnalysis *AA = nullptr) const {
+ return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
+ (MI->getDesc().isRematerializable() &&
+ (isReallyTriviallyReMaterializable(MI, AA) ||
+ isReallyTriviallyReMaterializableGeneric(MI, AA)));
+ }
+
+protected:
+ /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+ /// set, this hook lets the target specify whether the instruction is actually
+ /// trivially rematerializable, taking into consideration its operands. This
+ /// predicate must return false if the instruction has any side effects other
+ /// than producing a value, or if it requires any address registers that are
+ /// not always available.
+ /// Requirements must be checked as stated in isTriviallyReMaterializable().
+ virtual bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+ AliasAnalysis *AA) const {
+ return false;
+ }
+
+ /// This method commutes the operands of the given machine instruction MI.
+ /// The operands to be commuted are specified by their indices OpIdx1 and
+ /// OpIdx2.
+ ///
+ /// If a target has any instructions that are commutable but require
+ /// converting to different instructions or making non-trivial changes
+  /// to commute them, this method can be overridden to do that.
+ /// The default implementation simply swaps the commutable operands.
+ ///
+ /// If NewMI is false, MI is modified in place and returned; otherwise, a
+ /// new machine instruction is created and returned.
+ ///
+ /// Do not call this method for a non-commutable instruction.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+ virtual MachineInstr *commuteInstructionImpl(MachineInstr *MI,
+ bool NewMI,
+ unsigned OpIdx1,
+ unsigned OpIdx2) const;
+
+ /// Assigns the (CommutableOpIdx1, CommutableOpIdx2) pair of commutable
+ /// operand indices to (ResultIdx1, ResultIdx2).
+ /// One or both input values of the pair: (ResultIdx1, ResultIdx2) may be
+ /// predefined to some indices or be undefined (designated by the special
+ /// value 'CommuteAnyOperandIndex').
+ /// The predefined result indices cannot be re-defined.
+ /// The function returns true iff after the result pair redefinition
+ /// the fixed result pair is equal to or equivalent to the source pair of
+ /// indices: (CommutableOpIdx1, CommutableOpIdx2). It is assumed here that
+ /// the pairs (x,y) and (y,x) are equivalent.
+ static bool fixCommutedOpIndices(unsigned &ResultIdx1,
+ unsigned &ResultIdx2,
+ unsigned CommutableOpIdx1,
+ unsigned CommutableOpIdx2);
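+
+  // A sketch of the intended use inside a target's findCommutedOpIndices
+  // override (hypothetical opcode whose operands 1 and 2 are commutable):
+  //   bool MyTargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
+  //                                                 unsigned &SrcOpIdx1,
+  //                                                 unsigned &SrcOpIdx2) const {
+  //     return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
+  //   }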
+
+private:
+ /// For instructions with opcodes for which the M_REMATERIALIZABLE flag is
+ /// set and the target hook isReallyTriviallyReMaterializable returns false,
+ /// this function does target-independent tests to determine if the
+ /// instruction is really trivially rematerializable.
+ bool isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
+ AliasAnalysis *AA) const;
+
+public:
+ /// These methods return the opcode of the frame setup/destroy instructions
+ /// if they exist (-1 otherwise). Some targets use pseudo instructions in
+ /// order to abstract away the difference between operating with a frame
+ /// pointer and operating without, through the use of these two instructions.
+ ///
+ unsigned getCallFrameSetupOpcode() const { return CallFrameSetupOpcode; }
+ unsigned getCallFrameDestroyOpcode() const { return CallFrameDestroyOpcode; }
+
+ unsigned getCatchReturnOpcode() const { return CatchRetOpcode; }
+
+ /// Returns the actual stack pointer adjustment made by an instruction
+ /// as part of a call sequence. By default, only call frame setup/destroy
+ /// instructions adjust the stack, but targets may want to override this
+ /// to enable more fine-grained adjustment, or adjust by a different value.
+ virtual int getSPAdjust(const MachineInstr *MI) const;
+
+ /// Return true if the instruction is a "coalescable" extension instruction.
+ /// That is, it's like a copy where it's legal for the source to overlap the
+ /// destination. e.g. X86::MOVSX64rr32. If this returns true, then it's
+ /// expected the pre-extension value is available as a subreg of the result
+ /// register. This also returns the sub-register index in SubIdx.
+ virtual bool isCoalescableExtInstr(const MachineInstr &MI,
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SubIdx) const {
+ return false;
+ }
+
+ /// If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// Check for post-frame ptr elimination stack locations as well.
+  /// This uses a heuristic, so it isn't reliable for correctness.
+ virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// If the specified machine instruction has a load from a stack slot,
+ /// return true along with the FrameIndex of the loaded stack slot and the
+ /// machine mem operand containing the reference.
+ /// If not, return false. Unlike isLoadFromStackSlot, this returns true for
+  /// any instruction that loads from the stack. This is just a hint, as some
+ /// cases may be missed.
+ virtual bool hasLoadFromStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const;
+
+ /// If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+  /// the source reg along with the FrameIndex of the stack slot stored to. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// Check for post-frame ptr elimination stack locations as well.
+ /// This uses a heuristic, so it isn't reliable for correctness.
+ virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+ int &FrameIndex) const {
+ return 0;
+ }
+
+ /// If the specified machine instruction has a store to a stack slot,
+  /// return true along with the FrameIndex of the accessed stack slot and the
+ /// machine mem operand containing the reference.
+ /// If not, return false. Unlike isStoreToStackSlot,
+  /// this returns true for any instruction that stores to the
+ /// stack. This is just a hint, as some cases may be missed.
+ virtual bool hasStoreToStackSlot(const MachineInstr *MI,
+ const MachineMemOperand *&MMO,
+ int &FrameIndex) const;
+
+ /// Return true if the specified machine instruction
+ /// is a copy of one stack slot to another and has no other effect.
+ /// Provide the identity of the two frame indices.
+ virtual bool isStackSlotCopy(const MachineInstr *MI, int &DestFrameIndex,
+ int &SrcFrameIndex) const {
+ return false;
+ }
+
+ /// Compute the size in bytes and offset within a stack slot of a spilled
+ /// register or subregister.
+ ///
+ /// \param [out] Size in bytes of the spilled value.
+ /// \param [out] Offset in bytes within the stack slot.
+ /// \returns true if both Size and Offset are successfully computed.
+ ///
+ /// Not all subregisters have computable spill slots. For example,
+  /// subregisters may not be byte-sized, and a pair of discontiguous
+ /// subregisters has no single offset.
+ ///
+  /// Targets with nontrivial big-endian implementations may need to override
+ /// this, particularly to support spilled vector registers.
+ virtual bool getStackSlotRange(const TargetRegisterClass *RC, unsigned SubIdx,
+ unsigned &Size, unsigned &Offset,
+ const MachineFunction &MF) const;
+
+ /// Return true if the instruction is as cheap as a move instruction.
+ ///
+  /// Targets for different archs need to override this, and the decision can
+  /// also be tuned for particular micro-architectures.
+ virtual bool isAsCheapAsAMove(const MachineInstr *MI) const {
+ return MI->isAsCheapAsAMove();
+ }
+
+ /// Re-issue the specified 'original' instruction at the
+ /// specific location targeting a new destination register.
+ /// The register in Orig->getOperand(0).getReg() will be substituted by
+ /// DestReg:SubIdx. Any existing subreg index is preserved or composed with
+ /// SubIdx.
+ virtual void reMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, unsigned SubIdx,
+ const MachineInstr *Orig,
+ const TargetRegisterInfo &TRI) const;
+
+ /// Create a duplicate of the Orig instruction in MF. This is like
+ /// MachineFunction::CloneMachineInstr(), but the target may update operands
+ /// that are required to be unique.
+ ///
+ /// The instruction must be duplicable as indicated by isNotDuplicable().
+ virtual MachineInstr *duplicate(MachineInstr *Orig,
+ MachineFunction &MF) const;
+
+ /// This method must be implemented by targets that
+ /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
+ /// may be able to convert a two-address instruction into one or more true
+ /// three-address instructions on demand. This allows the X86 target (for
+ /// example) to convert ADD and SHL instructions into LEA instructions if they
+ /// would require register copies due to two-addressness.
+ ///
+ /// This method returns a null pointer if the transformation cannot be
+ /// performed, otherwise it returns the last new instruction.
+ ///
+ virtual MachineInstr *
+ convertToThreeAddress(MachineFunction::iterator &MFI,
+ MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
+ return nullptr;
+ }
+
+ // This constant can be used as an input value of operand index passed to
+ // the method findCommutedOpIndices() to tell the method that the
+ // corresponding operand index is not pre-defined and that the method
+ // can pick any commutable operand.
+ static const unsigned CommuteAnyOperandIndex = ~0U;
+
+ /// This method commutes the operands of the given machine instruction MI.
+ ///
+ /// The operands to be commuted are specified by their indices OpIdx1 and
+ /// OpIdx2. OpIdx1 and OpIdx2 arguments may be set to a special value
+ /// 'CommuteAnyOperandIndex', which means that the method is free to choose
+  /// any commutable operand. If both arguments are set to
+  /// 'CommuteAnyOperandIndex' then the method looks for 2 different commutable
+  /// operands and commutes them if such operands can be found.
+ ///
+ /// If NewMI is false, MI is modified in place and returned; otherwise, a
+ /// new machine instruction is created and returned.
+ ///
+ /// Do not call this method for a non-commutable instruction or
+  /// for non-commutable operands.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+ MachineInstr *
+ commuteInstruction(MachineInstr *MI,
+ bool NewMI = false,
+ unsigned OpIdx1 = CommuteAnyOperandIndex,
+ unsigned OpIdx2 = CommuteAnyOperandIndex) const;
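+
+  // A usage sketch (hypothetical caller; 'TII' and 'MI' assumed valid):
+  //   unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex;
+  //   unsigned Idx2 = TargetInstrInfo::CommuteAnyOperandIndex;
+  //   if (TII->findCommutedOpIndices(MI, Idx1, Idx2))
+  //     if (MachineInstr *NewMI = TII->commuteInstruction(MI, false, Idx1, Idx2))
+  //       ...use NewMI (== MI here, since NewMI=false commutes in place)...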
+
+ /// Returns true iff the routine could find two commutable operands in the
+ /// given machine instruction.
+ /// The 'SrcOpIdx1' and 'SrcOpIdx2' are INPUT and OUTPUT arguments.
+ /// If any of the INPUT values is set to the special value
+ /// 'CommuteAnyOperandIndex' then the method arbitrarily picks a commutable
+ /// operand, then returns its index in the corresponding argument.
+  /// If both INPUT values are set to 'CommuteAnyOperandIndex' then the method
+ /// looks for 2 commutable operands.
+ /// If INPUT values refer to some operands of MI, then the method simply
+ /// returns true if the corresponding operands are commutable and returns
+ /// false otherwise.
+ ///
+ /// For example, calling this method this way:
+ /// unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
+ /// findCommutedOpIndices(MI, Op1, Op2);
+ /// can be interpreted as a query asking to find an operand that would be
+  /// commutable with operand #1.
+ virtual bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+ unsigned &SrcOpIdx2) const;
+
+ /// A pair composed of a register and a sub-register index.
+ /// Used to give some type checking when modeling Reg:SubReg.
+ struct RegSubRegPair {
+ unsigned Reg;
+ unsigned SubReg;
+ RegSubRegPair(unsigned Reg = 0, unsigned SubReg = 0)
+ : Reg(Reg), SubReg(SubReg) {}
+ };
+ /// A pair composed of a pair of a register and a sub-register index,
+ /// and another sub-register index.
+ /// Used to give some type checking when modeling Reg:SubReg1, SubReg2.
+ struct RegSubRegPairAndIdx : RegSubRegPair {
+ unsigned SubIdx;
+ RegSubRegPairAndIdx(unsigned Reg = 0, unsigned SubReg = 0,
+ unsigned SubIdx = 0)
+ : RegSubRegPair(Reg, SubReg), SubIdx(SubIdx) {}
+ };
+
+ /// Build the equivalent inputs of a REG_SEQUENCE for the given \p MI
+ /// and \p DefIdx.
+ /// \p [out] InputRegs of the equivalent REG_SEQUENCE. Each element of
+ /// the list is modeled as <Reg:SubReg, SubIdx>.
+ /// E.g., REG_SEQUENCE vreg1:sub1, sub0, vreg2, sub1 would produce
+ /// two elements:
+ /// - vreg1:sub1, sub0
+ /// - vreg2<:0>, sub1
+ ///
+ /// \returns true if it is possible to build such an input sequence
+ /// with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isRegSequence() or MI.isRegSequenceLike().
+ ///
+ /// \note The generic implementation does not provide any support for
+ /// MI.isRegSequenceLike(). In other words, one has to override
+ /// getRegSequenceLikeInputs for target specific instructions.
+ bool
+ getRegSequenceInputs(const MachineInstr &MI, unsigned DefIdx,
+ SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const;
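+
+  // A usage sketch (hypothetical caller; DefIdx 0 names MI's only def):
+  //   SmallVector<TargetInstrInfo::RegSubRegPairAndIdx, 4> Inputs;
+  //   if (MI.isRegSequence() && TII->getRegSequenceInputs(MI, 0, Inputs))
+  //     for (const auto &In : Inputs)
+  //       ...In.Reg/In.SubReg is inserted at sub-register index In.SubIdx...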
+
+  /// Build the equivalent inputs of an EXTRACT_SUBREG for the given \p MI
+ /// and \p DefIdx.
+ /// \p [out] InputReg of the equivalent EXTRACT_SUBREG.
+ /// E.g., EXTRACT_SUBREG vreg1:sub1, sub0, sub1 would produce:
+ /// - vreg1:sub1, sub0
+ ///
+ /// \returns true if it is possible to build such an input sequence
+ /// with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isExtractSubreg() or MI.isExtractSubregLike().
+ ///
+ /// \note The generic implementation does not provide any support for
+ /// MI.isExtractSubregLike(). In other words, one has to override
+ /// getExtractSubregLikeInputs for target specific instructions.
+ bool
+ getExtractSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+ RegSubRegPairAndIdx &InputReg) const;
+
+  /// Build the equivalent inputs of an INSERT_SUBREG for the given \p MI
+ /// and \p DefIdx.
+ /// \p [out] BaseReg and \p [out] InsertedReg contain
+ /// the equivalent inputs of INSERT_SUBREG.
+ /// E.g., INSERT_SUBREG vreg0:sub0, vreg1:sub1, sub3 would produce:
+ /// - BaseReg: vreg0:sub0
+ /// - InsertedReg: vreg1:sub1, sub3
+ ///
+ /// \returns true if it is possible to build such an input sequence
+ /// with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isInsertSubreg() or MI.isInsertSubregLike().
+ ///
+ /// \note The generic implementation does not provide any support for
+ /// MI.isInsertSubregLike(). In other words, one has to override
+ /// getInsertSubregLikeInputs for target specific instructions.
+ bool
+ getInsertSubregInputs(const MachineInstr &MI, unsigned DefIdx,
+ RegSubRegPair &BaseReg,
+ RegSubRegPairAndIdx &InsertedReg) const;
+
+
+ /// Return true if two machine instructions would produce identical values.
+ /// By default, this is only true when the two instructions
+ /// are deemed identical except for defs. If this function is called when the
+ /// IR is still in SSA form, the caller can pass the MachineRegisterInfo for
+ /// aggressive checks.
+ virtual bool produceSameValue(const MachineInstr *MI0,
+ const MachineInstr *MI1,
+ const MachineRegisterInfo *MRI = nullptr) const;
+
+ /// Analyze the branching code at the end of MBB, returning
+ /// true if it cannot be understood (e.g. it's a switch dispatch or isn't
+ /// implemented for a target). Upon success, this returns false and returns
+ /// with the following information in various cases:
+ ///
+  /// 1. If this block ends with no branches (it just falls through to its
+  ///    successor), return false, leaving TBB/FBB null.
+ /// 2. If this block ends with only an unconditional branch, it sets TBB to be
+ /// the destination block.
+ /// 3. If this block ends with a conditional branch and it falls through to a
+ /// successor block, it sets TBB to be the branch destination block and a
+ /// list of operands that evaluate the condition. These operands can be
+ /// passed to other TargetInstrInfo methods to create new branches.
+ /// 4. If this block ends with a conditional branch followed by an
+ /// unconditional branch, it returns the 'true' destination in TBB, the
+ /// 'false' destination in FBB, and a list of operands that evaluate the
+ /// condition. These operands can be passed to other TargetInstrInfo
+ /// methods to create new branches.
+ ///
+ /// Note that RemoveBranch and InsertBranch must be implemented to support
+ /// cases where this method returns success.
+ ///
+ /// If AllowModify is true, then this routine is allowed to modify the basic
+ /// block (e.g. delete instructions after the unconditional branch).
+ ///
+ virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const {
+ return true;
+ }
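+
+  // A typical client-side sketch (hypothetical; 'NewDest' and 'DL' are
+  // assumed to come from the surrounding pass):
+  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
+  //   SmallVector<MachineOperand, 4> Cond;
+  //   if (!TII->AnalyzeBranch(MBB, TBB, FBB, Cond, /*AllowModify=*/false)) {
+  //     TII->RemoveBranch(MBB);                       // drop old terminators
+  //     TII->InsertBranch(MBB, NewDest, nullptr, Cond, DL); // re-target them
+  //   }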
+
+ /// Represents a predicate at the MachineFunction level. The control flow a
+ /// MachineBranchPredicate represents is:
+ ///
+ /// Reg <def>= LHS `Predicate` RHS == ConditionDef
+ /// if Reg then goto TrueDest else goto FalseDest
+ ///
+ struct MachineBranchPredicate {
+ enum ComparePredicate {
+ PRED_EQ, // True if two values are equal
+ PRED_NE, // True if two values are not equal
+ PRED_INVALID // Sentinel value
+ };
+
+ ComparePredicate Predicate;
+ MachineOperand LHS;
+ MachineOperand RHS;
+ MachineBasicBlock *TrueDest;
+ MachineBasicBlock *FalseDest;
+ MachineInstr *ConditionDef;
+
+ /// SingleUseCondition is true if ConditionDef is dead except for the
+ /// branch(es) at the end of the basic block.
+ ///
+ bool SingleUseCondition;
+
+ explicit MachineBranchPredicate()
+ : Predicate(PRED_INVALID), LHS(MachineOperand::CreateImm(0)),
+ RHS(MachineOperand::CreateImm(0)), TrueDest(nullptr),
+ FalseDest(nullptr), ConditionDef(nullptr), SingleUseCondition(false) {
+ }
+ };
+
+ /// Analyze the branching code at the end of MBB and parse it into the
+ /// MachineBranchPredicate structure if possible. Returns false on success
+ /// and true on failure.
+ ///
+ /// If AllowModify is true, then this routine is allowed to modify the basic
+ /// block (e.g. delete instructions after the unconditional branch).
+ ///
+ virtual bool AnalyzeBranchPredicate(MachineBasicBlock &MBB,
+ MachineBranchPredicate &MBP,
+ bool AllowModify = false) const {
+ return true;
+ }
+
+ /// Remove the branching code at the end of the specific MBB.
+ /// This is only invoked in cases where AnalyzeBranch returns success. It
+ /// returns the number of instructions that were removed.
+ virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::RemoveBranch!");
+ }
+
+ /// Insert branch code into the end of the specified MachineBasicBlock.
+ /// The operands to this method are the same as those
+ /// returned by AnalyzeBranch. This is only invoked in cases where
+ /// AnalyzeBranch returns success. It returns the number of instructions
+ /// inserted.
+ ///
+ /// It is also invoked by tail merging to add unconditional branches in
+ /// cases where AnalyzeBranch doesn't apply because there was no original
+ /// branch to analyze. At least this much must be implemented, else tail
+ /// merging needs to be disabled.
+ virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ DebugLoc DL) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::InsertBranch!");
+ }
+
+ /// Delete the instruction OldInst and everything after it, replacing it with
+ /// an unconditional branch to NewDest. This is used by the tail merging pass.
+ virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
+ MachineBasicBlock *NewDest) const;
+
+ /// Get an instruction that performs an unconditional branch to the given
+ /// symbol.
+ virtual void
+ getUnconditionalBranch(MCInst &MI,
+ const MCSymbolRefExpr *BranchTarget) const {
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::getUnconditionalBranch!");
+ }
+
+ /// Get a machine trap instruction.
+ virtual void getTrap(MCInst &MI) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
+ }
+
+ /// Get a number of bytes that suffices to hold
+ /// either the instruction returned by getUnconditionalBranch or the
+ /// instruction returned by getTrap. This only makes sense because
+ /// getUnconditionalBranch returns a single, specific instruction. This
+ /// information is needed by the jumptable construction code, since it must
+ /// decide how many bytes to use for a jumptable entry so it can generate the
+ /// right mask.
+ ///
+ /// Note that if the jumptable instruction requires alignment, then that
+ /// alignment should be factored into this required bound so that the
+ /// resulting bound gives the right alignment for the instruction.
+ virtual unsigned getJumpInstrTableEntryBound() const {
+ // This method gets called by LLVMTargetMachine always, so it can't fail
+ // just because there happens to be no implementation for this target.
+ // Any code that tries to use a jumptable annotation without defining
+ // getUnconditionalBranch on the appropriate Target will fail anyway, and
+ // the value returned here won't matter in that case.
+ return 0;
+ }
+
+ /// Return true if it's legal to split the given basic
+ /// block at the specified instruction (i.e. instruction would be the start
+ /// of a new basic block).
+ virtual bool isLegalToSplitMBBAt(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI) const {
+ return true;
+ }
+
+ /// Return true if it's profitable to predicate
+ /// instructions with accumulated instruction latency of "NumCycles"
+ /// of the specified basic block, where the probability of the instructions
+  /// being executed is given by Probability.
+ virtual
+ bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ unsigned ExtraPredCycles,
+ BranchProbability Probability) const {
+ return false;
+ }
+
+ /// Second variant of isProfitableToIfCvt. This one
+  /// checks for the case where two basic blocks from the true and false paths
+  /// of an if-then-else (diamond) are predicated on mutually exclusive
+ /// predicates, where the probability of the true path being taken is given
+  /// by Probability.
+ virtual bool
+ isProfitableToIfCvt(MachineBasicBlock &TMBB,
+ unsigned NumTCycles, unsigned ExtraTCycles,
+ MachineBasicBlock &FMBB,
+ unsigned NumFCycles, unsigned ExtraFCycles,
+ BranchProbability Probability) const {
+ return false;
+ }
+
+ /// Return true if it's profitable for if-converter to duplicate instructions
+ /// of specified accumulated instruction latencies in the specified MBB to
+ /// enable if-conversion.
+ /// The probability of the instructions being executed is given by
+  /// Probability.
+ virtual bool
+ isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+ BranchProbability Probability) const {
+ return false;
+ }
+
+ /// Return true if it's profitable to unpredicate
+ /// one side of a 'diamond', i.e. two sides of if-else predicated on mutually
+ /// exclusive predicates.
+ /// e.g.
+ /// subeq r0, r1, #1
+ /// addne r0, r1, #1
+ /// =>
+ /// sub r0, r1, #1
+ /// addne r0, r1, #1
+ ///
+  /// This may be profitable if conditional instructions are always executed.
+ virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+ MachineBasicBlock &FMBB) const {
+ return false;
+ }
+
+ /// Return true if it is possible to insert a select
+ /// instruction that chooses between TrueReg and FalseReg based on the
+ /// condition code in Cond.
+ ///
+ /// When successful, also return the latency in cycles from TrueReg,
+ /// FalseReg, and Cond to the destination register. In most cases, a select
+ /// instruction will be 1 cycle, so CondCycles = TrueCycles = FalseCycles = 1
+ ///
+ /// Some x86 implementations have 2-cycle cmov instructions.
+ ///
+ /// @param MBB Block where select instruction would be inserted.
+ /// @param Cond Condition returned by AnalyzeBranch.
+ /// @param TrueReg Virtual register to select when Cond is true.
+ /// @param FalseReg Virtual register to select when Cond is false.
+ /// @param CondCycles Latency from Cond+Branch to select output.
+ /// @param TrueCycles Latency from TrueReg to select output.
+ /// @param FalseCycles Latency from FalseReg to select output.
+ virtual bool canInsertSelect(const MachineBasicBlock &MBB,
+ ArrayRef<MachineOperand> Cond,
+ unsigned TrueReg, unsigned FalseReg,
+ int &CondCycles,
+ int &TrueCycles, int &FalseCycles) const {
+ return false;
+ }
+
+ /// Insert a select instruction into MBB before I that will copy TrueReg to
+ /// DstReg when Cond is true, and FalseReg to DstReg when Cond is false.
+ ///
+ /// This function can only be called after canInsertSelect() returned true.
+ /// The condition in Cond comes from AnalyzeBranch, and it can be assumed
+ /// that the same flags or registers required by Cond are available at the
+ /// insertion point.
+ ///
+ /// @param MBB Block where select instruction should be inserted.
+ /// @param I Insertion point.
+ /// @param DL Source location for debugging.
+ /// @param DstReg Virtual register to be defined by select instruction.
+ /// @param Cond Condition as computed by AnalyzeBranch.
+ /// @param TrueReg Virtual register to copy when Cond is true.
+  /// @param FalseReg Virtual register to copy when Cond is false.
+ virtual void insertSelect(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, DebugLoc DL,
+ unsigned DstReg, ArrayRef<MachineOperand> Cond,
+ unsigned TrueReg, unsigned FalseReg) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::insertSelect!");
+ }
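+
+  // The two hooks above are meant to be used together (sketch; all names
+  // besides the hooks are assumed from the caller's context):
+  //   int CondCy, TrueCy, FalseCy;
+  //   if (TII->canInsertSelect(MBB, Cond, TrueReg, FalseReg,
+  //                            CondCy, TrueCy, FalseCy))
+  //     TII->insertSelect(MBB, InsertPt, DL, DstReg, Cond, TrueReg, FalseReg);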
+
+ /// Analyze the given select instruction, returning true if
+ /// it cannot be understood. It is assumed that MI->isSelect() is true.
+ ///
+ /// When successful, return the controlling condition and the operands that
+ /// determine the true and false result values.
+ ///
+ /// Result = SELECT Cond, TrueOp, FalseOp
+ ///
+ /// Some targets can optimize select instructions, for example by predicating
+ /// the instruction defining one of the operands. Such targets should set
+ /// Optimizable.
+ ///
+ /// @param MI Select instruction to analyze.
+ /// @param Cond Condition controlling the select.
+ /// @param TrueOp Operand number of the value selected when Cond is true.
+ /// @param FalseOp Operand number of the value selected when Cond is false.
+ /// @param Optimizable Returned as true if MI is optimizable.
+ /// @returns False on success.
+ virtual bool analyzeSelect(const MachineInstr *MI,
+ SmallVectorImpl<MachineOperand> &Cond,
+ unsigned &TrueOp, unsigned &FalseOp,
+ bool &Optimizable) const {
+ assert(MI && MI->getDesc().isSelect() && "MI must be a select instruction");
+ return true;
+ }
+
+ /// Given a select instruction that was understood by
+ /// analyzeSelect and returned Optimizable = true, attempt to optimize MI by
+ /// merging it with one of its operands. Returns NULL on failure.
+ ///
+ /// When successful, returns the new select instruction. The client is
+ /// responsible for deleting MI.
+ ///
+ /// If both sides of the select can be optimized, PreferFalse is used to pick
+ /// a side.
+ ///
+ /// @param MI Optimizable select instruction.
+  /// @param NewMIs Set that records all MIs in the basic block up to \p
+ /// MI. Has to be updated with any newly created MI or deleted ones.
+ /// @param PreferFalse Try to optimize FalseOp instead of TrueOp.
+ /// @returns Optimized instruction or NULL.
+ virtual MachineInstr *optimizeSelect(MachineInstr *MI,
+ SmallPtrSetImpl<MachineInstr *> &NewMIs,
+ bool PreferFalse = false) const {
+ // This function must be implemented if Optimizable is ever set.
+ llvm_unreachable("Target must implement TargetInstrInfo::optimizeSelect!");
+ }
+
+ /// Emit instructions to copy a pair of physical registers.
+ ///
+ /// This function should support copies within any legal register class as
+ /// well as any cross-class copies created during instruction selection.
+ ///
+ /// The source and destination registers may overlap, which may require a
+ /// careful implementation when multiple copy instructions are required for
+ /// large registers. See for example the ARM target.
+ virtual void copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI, DebugLoc DL,
+ unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const {
+ llvm_unreachable("Target didn't implement TargetInstrInfo::copyPhysReg!");
+ }
+
+ /// Store the specified register of the given register class to the specified
+ /// stack frame index. The store instruction is to be added to the given
+ /// machine basic block before the specified machine instruction. If isKill
+ /// is true, the register operand is the last use and must be marked kill.
+ virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::storeRegToStackSlot!");
+ }
+
+ /// Load the specified register of the given register class from the specified
+ /// stack frame index. The load instruction is to be added to the given
+ /// machine basic block before the specified machine instruction.
+ virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ llvm_unreachable("Target didn't implement "
+ "TargetInstrInfo::loadRegFromStackSlot!");
+ }
+
+ /// This function is called for all pseudo instructions
+ /// that remain after register allocation. Many pseudo instructions are
+ /// created to help register allocation. This is the place to convert them
+ /// into real instructions. The target can edit MI in place, or it can insert
+ /// new instructions and erase MI. The function should return true if
+ /// anything was changed.
+ virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+ return false;
+ }
+
+ /// Attempt to fold a load or store of the specified stack
+ /// slot into the specified machine instruction for the specified operand(s).
+ /// If this is possible, a new instruction is returned with the specified
+ /// operand folded, otherwise NULL is returned.
+ /// The new instruction is inserted before MI, and the client is responsible
+ /// for removing the old instruction.
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops, int FrameIndex) const;
+
+ /// Same as the previous version except it allows folding of any load and
+ /// store from / to any address, not just from a specific stack slot.
+ MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
+ ArrayRef<unsigned> Ops,
+ MachineInstr *LoadMI) const;
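+
+  // A usage sketch for the stack-slot variant (hypothetical caller; 'MI' is
+  // a MachineBasicBlock::iterator and 'FI' a spill slot's frame index):
+  //   if (MachineInstr *Folded = TII->foldMemoryOperand(MI, /*Ops=*/{1}, FI))
+  //     MI->eraseFromParent(); // Folded was inserted before MI by the hook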
+
+ /// Return true when there is potentially a faster code sequence
+ /// for an instruction chain ending in \p Root. All potential patterns are
+  /// returned in the \p Patterns vector. Patterns should be sorted in priority
+ /// order since the pattern evaluator stops checking as soon as it finds a
+ /// faster sequence.
+ /// \param Root - Instruction that could be combined with one of its operands
+ /// \param Patterns - Vector of possible combination patterns
+ virtual bool getMachineCombinerPatterns(
+ MachineInstr &Root,
+ SmallVectorImpl<MachineCombinerPattern> &Patterns) const;
+
+  /// Return true if the input \p Inst is part of a chain of dependent ops
+  /// that are suitable for reassociation, otherwise return false.
+  /// If the instruction's operands must be commuted to have a previous
+  /// instruction of the same type define the first source operand, \p Commuted
+ /// will be set to true.
+ bool isReassociationCandidate(const MachineInstr &Inst, bool &Commuted) const;
+
+  /// Return true when \p Inst is both associative and commutative.
+ virtual bool isAssociativeAndCommutative(const MachineInstr &Inst) const {
+ return false;
+ }
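+
+  // A sketch of a target override (hypothetical 'MyTarget' opcodes):
+  //   bool MyTargetInstrInfo::isAssociativeAndCommutative(
+  //       const MachineInstr &Inst) const {
+  //     switch (Inst.getOpcode()) {
+  //     case MyTarget::ADDrr:   // integer add, reg-reg form
+  //     case MyTarget::MULrr:   // integer multiply, reg-reg form
+  //       return true;
+  //     default:
+  //       return false;
+  //     }
+  //   }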
+
+  /// Return true when \p Inst has reassociable operands in the same \p MBB.
+ virtual bool hasReassociableOperands(const MachineInstr &Inst,
+ const MachineBasicBlock *MBB) const;
+
+  /// Return true when \p Inst has a reassociable sibling.
+ bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const;
+
+ /// When getMachineCombinerPatterns() finds patterns, this function generates
+ /// the instructions that could replace the original code sequence. The client
+ /// has to decide whether the actual replacement is beneficial or not.
+ /// \param Root - Instruction that could be combined with one of its operands
+ /// \param Pattern - Combination pattern for Root
+  /// \param InsInstrs - Vector of new instructions that implement \p Pattern
+  /// \param DelInstrs - Old instructions, including Root, that could be
+  /// replaced by InsInstrs
+  /// \param InstrIdxForVirtReg - map of virtual register to instruction in
+  /// InsInstrs that defines it
+ virtual void genAlternativeCodeSequence(
+ MachineInstr &Root, MachineCombinerPattern Pattern,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
+ /// Attempt to reassociate \P Root and \P Prev according to \P Pattern to
+ /// reduce critical path length.
+ void reassociateOps(MachineInstr &Root, MachineInstr &Prev,
+ MachineCombinerPattern Pattern,
+ SmallVectorImpl<MachineInstr *> &InsInstrs,
+ SmallVectorImpl<MachineInstr *> &DelInstrs,
+ DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const;
+
+ /// This is an architecture-specific helper function of reassociateOps.
+ /// Set special operand attributes for new instructions after reassociation.
+ virtual void setSpecialOperandAttr(MachineInstr &OldMI1, MachineInstr &OldMI2,
+ MachineInstr &NewMI1,
+ MachineInstr &NewMI2) const {
+  }
+
+ /// Return true when a target supports MachineCombiner.
+ virtual bool useMachineCombiner() const { return false; }
+
+protected:
+ /// Target-dependent implementation for foldMemoryOperand.
+ /// Target-independent code in foldMemoryOperand will
+ /// take care of adding a MachineMemOperand to the newly created instruction.
+ /// The instruction and any auxiliary instructions necessary will be inserted
+ /// at InsertPt.
+ virtual MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+ return nullptr;
+ }
+
+ /// Target-dependent implementation for foldMemoryOperand.
+ /// Target-independent code in foldMemoryOperand will
+ /// take care of adding a MachineMemOperand to the newly created instruction.
+ /// The instruction and any auxiliary instructions necessary will be inserted
+ /// at InsertPt.
+ virtual MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+ return nullptr;
+ }
+
+ /// \brief Target-dependent implementation of getRegSequenceInputs.
+ ///
+ /// \returns true if it is possible to build the equivalent
+ /// REG_SEQUENCE inputs with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isRegSequenceLike().
+ ///
+ /// \see TargetInstrInfo::getRegSequenceInputs.
+ virtual bool getRegSequenceLikeInputs(
+ const MachineInstr &MI, unsigned DefIdx,
+ SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
+ return false;
+ }
+
+ /// \brief Target-dependent implementation of getExtractSubregInputs.
+ ///
+ /// \returns true if it is possible to build the equivalent
+ /// EXTRACT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isExtractSubregLike().
+ ///
+ /// \see TargetInstrInfo::getExtractSubregInputs.
+ virtual bool getExtractSubregLikeInputs(
+ const MachineInstr &MI, unsigned DefIdx,
+ RegSubRegPairAndIdx &InputReg) const {
+ return false;
+ }
+
+ /// \brief Target-dependent implementation of getInsertSubregInputs.
+ ///
+ /// \returns true if it is possible to build the equivalent
+ /// INSERT_SUBREG inputs with the pair \p MI, \p DefIdx. False otherwise.
+ ///
+ /// \pre MI.isInsertSubregLike().
+ ///
+ /// \see TargetInstrInfo::getInsertSubregInputs.
+ virtual bool
+ getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx,
+ RegSubRegPair &BaseReg,
+ RegSubRegPairAndIdx &InsertedReg) const {
+ return false;
+ }
+
+public:
+  /// Separate a single instruction which folded a load or
+  /// a store or a load and a store into two or more instructions. If this is
+ /// possible, returns true as well as the new instructions by reference.
+ virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+ unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ return false;
+ }
+
+ virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+ SmallVectorImpl<SDNode*> &NewNodes) const {
+ return false;
+ }
+
+ /// Returns the opcode of the would be new
+ /// instruction after load / store are unfolded from an instruction of the
+ /// specified opcode. It returns zero if the specified unfolding is not
+ /// possible. If LoadRegIndex is non-null, it is filled in with the operand
+ /// index of the operand which will hold the register holding the loaded
+ /// value.
+ virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+ bool UnfoldLoad, bool UnfoldStore,
+ unsigned *LoadRegIndex = nullptr) const {
+ return 0;
+ }
+
+ /// This is used by the pre-regalloc scheduler to determine if two loads are
+ /// loading from the same base address. It should only return true if the base
+  /// pointers are the same and the only difference between the two addresses
+  /// is the offset. It also returns the offsets by reference.
+ virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+ int64_t &Offset1, int64_t &Offset2) const {
+ return false;
+ }
+
+  /// This is used by the pre-regalloc scheduler to determine (in conjunction
+ /// with areLoadsFromSameBasePtr) if two loads should be scheduled together.
+ /// On some targets if two loads are loading from
+ /// addresses in the same cache line, it's better if they are scheduled
+ /// together. This function takes two integers that represent the load offsets
+ /// from the common base address. It returns true if it decides it's desirable
+ /// to schedule the two loads together. "NumLoads" is the number of loads that
+ /// have already been scheduled after Load1.
+ virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+ int64_t Offset1, int64_t Offset2,
+ unsigned NumLoads) const {
+ return false;
+ }
+
+ /// Get the base register and byte offset of an instruction that reads/writes
+ /// memory.
+ virtual bool getMemOpBaseRegImmOfs(MachineInstr *MemOp, unsigned &BaseReg,
+ unsigned &Offset,
+ const TargetRegisterInfo *TRI) const {
+ return false;
+ }
+
+ virtual bool enableClusterLoads() const { return false; }
+
+ virtual bool shouldClusterLoads(MachineInstr *FirstLdSt,
+ MachineInstr *SecondLdSt,
+ unsigned NumLoads) const {
+ return false;
+ }
+
+  /// Return true if this target can fuse the given instructions when they are
+  /// scheduled adjacently.
+  virtual bool shouldScheduleAdjacent(MachineInstr *First,
+ MachineInstr *Second) const {
+ return false;
+ }
+
+ /// Reverses the branch condition of the specified condition list,
+ /// returning false on success and true if it cannot be reversed.
+ virtual
+ bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
+ return true;
+ }
+
+ /// Insert a noop into the instruction stream at the specified point.
+ virtual void insertNoop(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI) const;
+
+
+  /// Return the target's noop instruction.
+ virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+
+
+ /// Returns true if the instruction is already predicated.
+ virtual bool isPredicated(const MachineInstr *MI) const {
+ return false;
+ }
+
+ /// Returns true if the instruction is a
+ /// terminator instruction that has not been predicated.
+ virtual bool isUnpredicatedTerminator(const MachineInstr *MI) const;
+
+ /// Convert the instruction into a predicated instruction.
+ /// It returns true if the operation was successful.
+ virtual
+ bool PredicateInstruction(MachineInstr *MI,
+ ArrayRef<MachineOperand> Pred) const;
+
+ /// Returns true if the first specified predicate
+ /// subsumes the second, e.g. GE subsumes GT.
+ virtual
+ bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+ ArrayRef<MachineOperand> Pred2) const {
+ return false;
+ }
+
+ /// If the specified instruction defines any predicate
+ /// or condition code register(s) used for predication, returns true as well
+ /// as the definition predicate(s) by reference.
+ virtual bool DefinesPredicate(MachineInstr *MI,
+ std::vector<MachineOperand> &Pred) const {
+ return false;
+ }
+
+ /// Return true if the specified instruction can be predicated.
+ /// By default, this returns true for every instruction with a
+ /// PredicateOperand.
+ virtual bool isPredicable(MachineInstr *MI) const {
+ return MI->getDesc().isPredicable();
+ }
+
+ /// Return true if it's safe to move a machine
+ /// instruction that defines the specified register class.
+ virtual bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+ return true;
+ }
+
+ /// Test if the given instruction should be considered a scheduling boundary.
+ /// This primarily includes labels and terminators.
+ virtual bool isSchedulingBoundary(const MachineInstr *MI,
+ const MachineBasicBlock *MBB,
+ const MachineFunction &MF) const;
+
+ /// Measure the specified inline asm to determine an approximation of its
+ /// length.
+ virtual unsigned getInlineAsmLength(const char *Str,
+ const MCAsmInfo &MAI) const;
+
+ /// Allocate and return a hazard recognizer to use for this target when
+ /// scheduling the machine instructions before register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+ const ScheduleDAG *DAG) const;
+
+ /// Allocate and return a hazard recognizer to use for this target when
+ /// scheduling the machine instructions before register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetMIHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG *DAG) const;
+
+ /// Allocate and return a hazard recognizer to use for this target when
+ /// scheduling the machine instructions after register allocation.
+ virtual ScheduleHazardRecognizer*
+ CreateTargetPostRAHazardRecognizer(const InstrItineraryData*,
+ const ScheduleDAG *DAG) const;
+
+ /// Provide a global flag for disabling the PreRA hazard recognizer that
+ /// targets may choose to honor.
+ bool usePreRAHazardRecognizer() const;
+
+ /// For a comparison instruction, return the source registers
+  /// in SrcReg and SrcReg2 if it has two register operands, and the value it
+  /// compares against in Mask and Value. Return true if the comparison
+  /// instruction can be analyzed.
+ virtual bool analyzeCompare(const MachineInstr *MI,
+ unsigned &SrcReg, unsigned &SrcReg2,
+ int &Mask, int &Value) const {
+ return false;
+ }
+
+ /// See if the comparison instruction can be converted
+ /// into something more efficient. E.g., on ARM most instructions can set the
+ /// flags register, obviating the need for a separate CMP.
+ virtual bool optimizeCompareInstr(MachineInstr *CmpInstr,
+ unsigned SrcReg, unsigned SrcReg2,
+ int Mask, int Value,
+ const MachineRegisterInfo *MRI) const {
+ return false;
+ }
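+
+  // The peephole flow these two hooks support, as a sketch (hypothetical
+  // caller; 'CmpMI' and 'MRI' assumed valid):
+  //   unsigned SrcReg, SrcReg2;
+  //   int Mask, Value;
+  //   if (TII->analyzeCompare(CmpMI, SrcReg, SrcReg2, Mask, Value) &&
+  //       TII->optimizeCompareInstr(CmpMI, SrcReg, SrcReg2, Mask, Value, MRI))
+  //     ...the compare was folded into a flag-setting instruction...
+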
+ virtual bool optimizeCondBranch(MachineInstr *MI) const { return false; }
+
+ /// Try to remove the load by folding it to a register operand at the use.
+ /// We fold the load instructions if and only if the
+ /// def and use are in the same BB. We only look at one load and see
+ /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+  /// defined by the load we are trying to fold. DefMI is set to the machine
+  /// instruction that defines FoldAsLoadDefReg, and the function returns
+ /// the machine instruction generated due to folding.
+ virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+ const MachineRegisterInfo *MRI,
+ unsigned &FoldAsLoadDefReg,
+ MachineInstr *&DefMI) const {
+ return nullptr;
+ }
+
+ /// 'Reg' is known to be defined by a move immediate instruction,
+ /// try to fold the immediate into the use instruction.
+ /// If MRI->hasOneNonDBGUse(Reg) is true, and this function returns true,
+ /// then the caller may assume that DefMI has been erased from its parent
+ /// block. The caller may assume that it will not be erased by this
+ /// function otherwise.
+ virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+ unsigned Reg, MachineRegisterInfo *MRI) const {
+ return false;
+ }
+
+  /// Return the number of micro-operations the given machine
+  /// instruction will be decoded to on the target CPU. The itinerary's
+ /// IssueWidth is the number of microops that can be dispatched each
+ /// cycle. An instruction with zero microops takes no dispatch resources.
+ virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
+ const MachineInstr *MI) const;
+
+ /// Return true for pseudo instructions that don't consume any
+ /// machine resources in their current form. These are common cases that the
+ /// scheduler should consider free, rather than conservatively handling them
+ /// as instructions with no itinerary.
+ bool isZeroCost(unsigned Opcode) const {
+ return Opcode <= TargetOpcode::COPY;
+ }
+
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ SDNode *DefNode, unsigned DefIdx,
+ SDNode *UseNode, unsigned UseIdx) const;
+
+ /// Compute and return the use operand latency of a given pair of def and use.
+  /// In most cases, the static scheduling itinerary is enough to determine the
+  /// operand latency. But it may not be possible for instructions with a
+  /// variable number of defs / uses.
+ ///
+ /// This is a raw interface to the itinerary that may be directly overridden
+ /// by a target. Use computeOperandLatency to get the best estimate of
+ /// latency.
+ virtual int getOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI,
+ unsigned UseIdx) const;
+
+  /// Compute and return the latency of the given data-dependent def and use
+  /// when the operand indices are already known.
+ unsigned computeOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx)
+ const;
+
+ /// Compute the instruction latency of a given instruction.
+ /// If the instruction has higher cost when predicated, it's returned via
+ /// PredCost.
+ virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *MI,
+ unsigned *PredCost = nullptr) const;
+
+ virtual unsigned getPredicationCost(const MachineInstr *MI) const;
+
+ virtual int getInstrLatency(const InstrItineraryData *ItinData,
+ SDNode *Node) const;
+
+  /// Return the default expected latency for a def based on its opcode.
+ unsigned defaultDefLatency(const MCSchedModel &SchedModel,
+ const MachineInstr *DefMI) const;
+
+ int computeDefOperandLatency(const InstrItineraryData *ItinData,
+ const MachineInstr *DefMI) const;
+
+ /// Return true if this opcode has high latency to its result.
+ virtual bool isHighLatencyDef(int opc) const { return false; }
+
+ /// Compute operand latency between a def of 'Reg'
+ /// and a use in the current loop. Return true if the target considered
+ /// it 'high'. This is used by optimization passes such as machine LICM to
+ /// determine whether it makes sense to hoist an instruction out even in a
+ /// high register pressure situation.
+ virtual
+ bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
+ const MachineRegisterInfo *MRI,
+ const MachineInstr *DefMI, unsigned DefIdx,
+ const MachineInstr *UseMI, unsigned UseIdx) const {
+ return false;
+ }
+
+ /// Compute operand latency of a def of 'Reg'. Return true
+ /// if the target considered it 'low'.
+ virtual
+ bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+ const MachineInstr *DefMI, unsigned DefIdx) const;
+
+ /// Perform target-specific instruction verification.
+ virtual
+ bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const {
+ return true;
+ }
+
+ /// Return the current execution domain and bit mask of
+ /// possible domains for instruction.
+ ///
+ /// Some micro-architectures have multiple execution domains, and multiple
+ /// opcodes that perform the same operation in different domains. For
+ /// example, the x86 architecture provides the por, orps, and orpd
+ /// instructions that all do the same thing. There is a latency penalty if a
+ /// register is written in one domain and read in another.
+ ///
+ /// This function returns a pair (domain, mask) containing the execution
+ /// domain of MI, and a bit mask of possible domains. The setExecutionDomain
+ /// function can be used to change the opcode to one of the domains in the
+ /// bit mask. Instructions whose execution domain can't be changed should
+ /// return a 0 mask.
+ ///
+  /// The execution domain numbers don't have any special meaning except that
+  /// domain 0 is used for instructions that are not associated with any
+  /// interesting execution domain.
+ ///
+ virtual std::pair<uint16_t, uint16_t>
+ getExecutionDomain(const MachineInstr *MI) const {
+ return std::make_pair(0, 0);
+ }
+
+ /// Change the opcode of MI to execute in Domain.
+ ///
+ /// The bit (1 << Domain) must be set in the mask returned from
+ /// getExecutionDomain(MI).
+ virtual void setExecutionDomain(MachineInstr *MI, unsigned Domain) const {}
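+
+  // Together these form a domain-fixing sketch (hypothetical caller;
+  // 'Wanted' is the domain the surrounding pass prefers):
+  //   uint16_t Domain, Mask;
+  //   std::tie(Domain, Mask) = TII->getExecutionDomain(MI);
+  //   if (Domain != Wanted && (Mask & (1u << Wanted)))
+  //     TII->setExecutionDomain(MI, Wanted);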
+
+
+ /// Returns the preferred minimum clearance
+ /// before an instruction with an unwanted partial register update.
+ ///
+ /// Some instructions only write part of a register, and implicitly need to
+ /// read the other parts of the register. This may cause unwanted stalls
+ /// preventing otherwise unrelated instructions from executing in parallel in
+ /// an out-of-order CPU.
+ ///
+ /// For example, the x86 instruction cvtsi2ss writes its result to bits
+ /// [31:0] of the destination xmm register. Bits [127:32] are unaffected, so
+ /// the instruction needs to wait for the old value of the register to become
+ /// available:
+ ///
+ /// addps %xmm1, %xmm0
+ /// movaps %xmm0, (%rax)
+ /// cvtsi2ss %rbx, %xmm0
+ ///
+ /// In the code above, the cvtsi2ss instruction needs to wait for the addps
+ /// instruction before it can issue, even though the high bits of %xmm0
+ /// probably aren't needed.
+ ///
+ /// This hook returns the preferred clearance before MI, measured in
+ /// instructions. Other defs of MI's operand OpNum are avoided in the last N
+ /// instructions before MI. It should only return a positive value for
+ /// unwanted dependencies. If the old bits of the defined register have
+ /// useful values, or if MI is determined to otherwise read the dependency,
+ /// the hook should return 0.
+ ///
+ /// The unwanted dependency may be handled by:
+ ///
+ /// 1. Allocating the same register for an MI def and use. That makes the
+ /// unwanted dependency identical to a required dependency.
+ ///
+ /// 2. Allocating a register for the def that has no defs in the previous N
+ /// instructions.
+ ///
+ /// 3. Calling breakPartialRegDependency() with the same arguments. This
+ /// allows the target to insert a dependency breaking instruction.
+ ///
+ virtual unsigned
+ getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {
+ // The default implementation returns 0 for no partial register dependency.
+ return 0;
+ }
+
+ /// \brief Return the minimum clearance before an instruction that reads an
+ /// unused register.
+ ///
+ /// For example, AVX instructions may copy part of a register operand into
+ /// the unused high bits of the destination register.
+ ///
+ /// vcvtsi2sdq %rax, %xmm0<undef>, %xmm14
+ ///
+ /// In the code above, vcvtsi2sdq copies %xmm0[127:64] into %xmm14 creating a
+ /// false dependence on any previous write to %xmm0.
+ ///
+ /// This hook works similarly to getPartialRegUpdateClearance, except that it
+  /// does not take an operand index. Instead, it sets \p OpNum to the index of
+  /// the unused register.
+ virtual unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+ const TargetRegisterInfo *TRI) const {
+ // The default implementation returns 0 for no undef register dependency.
+ return 0;
+ }
+
+ /// Insert a dependency-breaking instruction
+ /// before MI to eliminate an unwanted dependency on OpNum.
+ ///
+ /// If it wasn't possible to avoid a def in the last N instructions before MI
+ /// (see getPartialRegUpdateClearance), this hook will be called to break the
+ /// unwanted dependency.
+ ///
+ /// On x86, an xorps instruction can be used as a dependency breaker:
+ ///
+ /// addps %xmm1, %xmm0
+ /// movaps %xmm0, (%rax)
+ /// xorps %xmm0, %xmm0
+ /// cvtsi2ss %rbx, %xmm0
+ ///
+ /// An <imp-kill> operand should be added to MI if an instruction was
+ /// inserted. This ties the instructions together in the post-ra scheduler.
+ ///
+ virtual void
+ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+ const TargetRegisterInfo *TRI) const {}
+
+ /// Create machine specific model for scheduling.
+ virtual DFAPacketizer *
+ CreateTargetScheduleState(const TargetSubtargetInfo &) const {
+ return nullptr;
+ }
+
+ // Sometimes, it is possible for the target
+ // to tell, even without aliasing information, that two MIs access different
+ // memory addresses. This function returns true if two MIs access different
+ // memory addresses and false otherwise.
+ virtual bool
+ areMemAccessesTriviallyDisjoint(MachineInstr *MIa, MachineInstr *MIb,
+ AliasAnalysis *AA = nullptr) const {
+ assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
+ "MIa must load from or modify a memory location");
+ assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
+ "MIb must load from or modify a memory location");
+ return false;
+ }
+
+ /// \brief Return the value to use for the MachineCSE's LookAheadLimit,
+ /// which is a heuristic used for CSE'ing phys reg defs.
+  virtual unsigned getMachineCSELookAheadLimit() const {
+ // The default lookahead is small to prevent unprofitable quadratic
+ // behavior.
+ return 5;
+ }
+
+ /// Return an array that contains the ids of the target indices (used for the
+ /// TargetIndex machine operand) and their names.
+ ///
+ /// MIR Serialization is able to serialize only the target indices that are
+ /// defined by this method.
+ virtual ArrayRef<std::pair<int, const char *>>
+ getSerializableTargetIndices() const {
+ return None;
+ }
+
+ /// Decompose the machine operand's target flags into two values - the direct
+ /// target flag value and any of bit flags that are applied.
+ virtual std::pair<unsigned, unsigned>
+ decomposeMachineOperandsTargetFlags(unsigned /*TF*/) const {
+ return std::make_pair(0u, 0u);
+ }
+
+ /// Return an array that contains the direct target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ virtual ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableDirectMachineOperandTargetFlags() const {
+ return None;
+ }
+
+ /// Return an array that contains the bitmask target flag values and their
+ /// names.
+ ///
+ /// MIR Serialization is able to serialize only the target flags that are
+ /// defined by this method.
+ virtual ArrayRef<std::pair<unsigned, const char *>>
+ getSerializableBitmaskMachineOperandTargetFlags() const {
+ return None;
+ }
+
+private:
+ unsigned CallFrameSetupOpcode, CallFrameDestroyOpcode;
+ unsigned CatchRetOpcode;
+};
+
+/// \brief Provide DenseMapInfo for TargetInstrInfo::RegSubRegPair.
+template<>
+struct DenseMapInfo<TargetInstrInfo::RegSubRegPair> {
+ typedef DenseMapInfo<unsigned> RegInfo;
+
+ static inline TargetInstrInfo::RegSubRegPair getEmptyKey() {
+ return TargetInstrInfo::RegSubRegPair(RegInfo::getEmptyKey(),
+ RegInfo::getEmptyKey());
+ }
+ static inline TargetInstrInfo::RegSubRegPair getTombstoneKey() {
+ return TargetInstrInfo::RegSubRegPair(RegInfo::getTombstoneKey(),
+ RegInfo::getTombstoneKey());
+ }
+ /// \brief Reuse getHashValue implementation from
+ /// std::pair<unsigned, unsigned>.
+ static unsigned getHashValue(const TargetInstrInfo::RegSubRegPair &Val) {
+ std::pair<unsigned, unsigned> PairVal =
+ std::make_pair(Val.Reg, Val.SubReg);
+ return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(PairVal);
+ }
+ static bool isEqual(const TargetInstrInfo::RegSubRegPair &LHS,
+ const TargetInstrInfo::RegSubRegPair &RHS) {
+ return RegInfo::isEqual(LHS.Reg, RHS.Reg) &&
+ RegInfo::isEqual(LHS.SubReg, RHS.SubReg);
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h b/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h
new file mode 100644
index 0000000..c630f5b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetIntrinsicInfo.h
@@ -0,0 +1,65 @@
+//===-- llvm/Target/TargetIntrinsicInfo.h - Instruction Info ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the target intrinsic instructions to the code generator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETINTRINSICINFO_H
+#define LLVM_TARGET_TARGETINTRINSICINFO_H
+
+#include "llvm/Support/Compiler.h"
+#include <string>
+
+namespace llvm {
+
+class Function;
+class Module;
+class Type;
+
+//---------------------------------------------------------------------------
+///
+/// TargetIntrinsicInfo - Interface to the description of target intrinsics
+///
+class TargetIntrinsicInfo {
+ TargetIntrinsicInfo(const TargetIntrinsicInfo &) = delete;
+ void operator=(const TargetIntrinsicInfo &) = delete;
+public:
+ TargetIntrinsicInfo();
+ virtual ~TargetIntrinsicInfo();
+
+ /// Return the name of a target intrinsic, e.g. "llvm.bfin.ssync".
+ /// The Tys and numTys parameters are for intrinsics with overloaded types
+ /// (e.g., those using iAny or fAny). For a declaration for an overloaded
+ /// intrinsic, Tys should point to an array of numTys pointers to Type,
+ /// and must provide exactly one type for each overloaded type in the
+ /// intrinsic.
+ virtual std::string getName(unsigned IID, Type **Tys = nullptr,
+ unsigned numTys = 0) const = 0;
+
+ /// Look up target intrinsic by name. Return intrinsic ID or 0 for unknown
+ /// names.
+ virtual unsigned lookupName(const char *Name, unsigned Len) const = 0;
+
+ /// Return the target intrinsic ID of a function, or 0.
+ virtual unsigned getIntrinsicID(Function *F) const;
+
+ /// Returns true if the intrinsic can be overloaded.
+ virtual bool isOverloaded(unsigned IID) const = 0;
+
+ /// Create or insert an LLVM Function declaration for an intrinsic,
+ /// and return it. The Tys and numTys are for intrinsics with overloaded
+ /// types. See above for more information.
+ virtual Function *getDeclaration(Module *M, unsigned ID, Type **Tys = nullptr,
+ unsigned numTys = 0) const = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetItinerary.td b/contrib/llvm/include/llvm/Target/TargetItinerary.td
new file mode 100644
index 0000000..a37bbf2
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetItinerary.td
@@ -0,0 +1,152 @@
+//===- TargetItinerary.td - Target Itinerary Description ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces
+// which should be implemented by each target that uses instruction
+// itineraries for scheduling. Itineraries are detailed reservation
+// tables for each instruction class. They are most appropriate for
+// in-order machines with complicated scheduling or bundling constraints.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Processor functional unit - These values represent the functional units
+// available across all chip sets for the target, e.g., IntUnit, FPUnit, ...
+// These may be independent values for each chip set or may be shared across
+// all chip sets of the target. Each functional unit is treated as a resource
+// during scheduling and affects instruction ordering based on its
+// availability during a time interval.
+//
+class FuncUnit;
+
+//===----------------------------------------------------------------------===//
+// Pipeline bypass / forwarding - These values specify the symbolic names of
+// pipeline bypasses which can be used to forward the results of instructions
+// to their uses.
+class Bypass;
+def NoBypass : Bypass;
+
+class ReservationKind<bits<1> val> {
+ int Value = val;
+}
+
+def Required : ReservationKind<0>;
+def Reserved : ReservationKind<1>;
+
+//===----------------------------------------------------------------------===//
+// Instruction stage - These values represent a non-pipelined step in
+// the execution of an instruction. Cycles represents the number of
+// discrete time slots needed to complete the stage. Units represent
+// the choice of functional units that can be used to complete the
+// stage, e.g. IntUnit1, IntUnit2. TimeInc indicates how many
+// cycles should elapse from the start of this stage to the start of
+// the next stage in the itinerary.
+//
+// A stage is specified in one of two ways:
+//
+// InstrStage<1, [FU_x, FU_y]> - TimeInc defaults to Cycles
+// InstrStage<1, [FU_x, FU_y], 0> - TimeInc explicit
+//
+
+class InstrStage<int cycles, list<FuncUnit> units,
+ int timeinc = -1,
+ ReservationKind kind = Required> {
+ int Cycles = cycles; // length of stage in machine cycles
+ list<FuncUnit> Units = units; // choice of functional units
+ int TimeInc = timeinc; // cycles till start of next stage
+ int Kind = kind.Value; // kind of FU reservation
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary - An itinerary represents a sequential series of steps
+// required to complete an instruction. Itineraries are represented as lists of
+// instruction stages.
+//
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary classes - These values represent 'named' instruction
+// itineraries. Using named itineraries simplifies managing groups of
+// instructions across chip sets. An instruction uses the same itinerary class
+// across all chip sets. Thus a new chip set can be added without modifying
+// instruction information.
+//
+class InstrItinClass;
+def NoItinerary : InstrItinClass;
+
+//===----------------------------------------------------------------------===//
+// Instruction itinerary data - These values provide a runtime map of an
+// instruction itinerary class (name) to its itinerary data.
+//
+// NumMicroOps represents the number of micro-operations that each instruction
+// in the class is decoded to. If the number is zero, it means the instruction
+// can decode into a variable number of micro-ops and the count must be
+// determined dynamically. This directly relates to the itineraries' global
+// IssueWidth property, which constrains the number of micro-ops that can
+// issue per cycle.
+//
+// OperandCycles are optional "cycle counts". They specify the cycle after
+// instruction issue at which the values corresponding to specific operand
+// indices are defined or read. Bypasses are optional "pipeline forwarding
+// paths"; if a def by an instruction is available on a specific bypass and
+// the use can read from the same bypass, then the operand use latency is
+// reduced by one.
+//
+// InstrItinData<IIC_iLoad_i , [InstrStage<1, [A9_Pipe1]>,
+// InstrStage<1, [A9_AGU]>],
+// [3, 1], [A9_LdBypass]>,
+// InstrItinData<IIC_iMVNr , [InstrStage<1, [A9_Pipe0, A9_Pipe1]>],
+// [1, 1], [NoBypass, A9_LdBypass]>,
+//
+// In this example, an instruction of class IIC_iLoad_i reads its input on cycle 1
+// (after issue) and the result of the load is available on cycle 3. The result
+// is available via forwarding path A9_LdBypass. If it's used by the first
+// source operand of instructions of IIC_iMVNr class, then the operand latency
+// is reduced by 1.
+class InstrItinData<InstrItinClass Class, list<InstrStage> stages,
+ list<int> operandcycles = [],
+ list<Bypass> bypasses = [], int uops = 1> {
+ InstrItinClass TheClass = Class;
+ int NumMicroOps = uops;
+ list<InstrStage> Stages = stages;
+ list<int> OperandCycles = operandcycles;
+ list<Bypass> Bypasses = bypasses;
+}
+
+//===----------------------------------------------------------------------===//
+// Processor itineraries - These values represent the set of all itinerary
+// classes for a given chip set.
+//
+// Set property values to -1 to use the default.
+// See InstrItineraryProps for comments and defaults.
+class ProcessorItineraries<list<FuncUnit> fu, list<Bypass> bp,
+ list<InstrItinData> iid> {
+ list<FuncUnit> FU = fu;
+ list<Bypass> BP = bp;
+ list<InstrItinData> IID = iid;
+}
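+
+// For example, a minimal single-issue processor could be described as
+// (a sketch with made-up names):
+//
+//   def ALU : FuncUnit;
+//   def IIC_Simple : InstrItinClass;
+//   def SimpleItins : ProcessorItineraries<[ALU], [],
+//     [InstrItinData<IIC_Simple, [InstrStage<1, [ALU]>]>]>;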
+
+// NoItineraries - A marker that can be used by processors without schedule
+// info. Subtargets using NoItineraries can bypass the scheduler's
+// expensive HazardRecognizer because no reservation table is needed.
+def NoItineraries : ProcessorItineraries<[], [], []>;
+
+//===----------------------------------------------------------------------===//
+// Combo Function Unit data - This is a map of combo function unit names to
+// the list of functional units that are included in the combination.
+//
+class ComboFuncData<FuncUnit ComboFunc, list<FuncUnit> funclist> {
+ FuncUnit TheComboFunc = ComboFunc;
+ list<FuncUnit> FuncList = funclist;
+}
+
+//===----------------------------------------------------------------------===//
+// Combo Function Units - This is a list of all combo function unit data.
+class ComboFuncUnits<list<ComboFuncData> cfd> {
+ list<ComboFuncData> CFD = cfd;
+}
+
diff --git a/contrib/llvm/include/llvm/Target/TargetLowering.h b/contrib/llvm/include/llvm/Target/TargetLowering.h
new file mode 100644
index 0000000..140c3659
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetLowering.h
@@ -0,0 +1,2879 @@
+//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file describes how to lower LLVM code to machine code. This has three
+/// main components:
+///
+/// 1. Which ValueTypes are natively supported by the target.
+/// 2. Which operations are supported for supported ValueTypes.
+/// 3. Cost thresholds for alternative implementations of certain operations.
+///
+/// In addition it has a few other components, like information about FP
+/// immediates.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETLOWERING_H
+#define LLVM_TARGET_TARGETLOWERING_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/DAGCombine.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Target/TargetCallingConv.h"
+#include "llvm/Target/TargetMachine.h"
+#include <climits>
+#include <map>
+#include <vector>
+
+namespace llvm {
+ class CallInst;
+ class CCState;
+ class FastISel;
+ class FunctionLoweringInfo;
+ class ImmutableCallSite;
+ class IntrinsicInst;
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class MachineJumpTableInfo;
+ class MachineLoop;
+ class Mangler;
+ class MCContext;
+ class MCExpr;
+ class MCSymbol;
+ template<typename T> class SmallVectorImpl;
+ class DataLayout;
+ class TargetRegisterClass;
+ class TargetLibraryInfo;
+ class TargetLoweringObjectFile;
+ class Value;
+
+ namespace Sched {
+ enum Preference {
+ None, // No preference
+ Source, // Follow source order.
+ RegPressure, // Scheduling for lowest register pressure.
+ Hybrid, // Scheduling for both latency and register pressure.
+ ILP, // Scheduling for ILP in low register pressure mode.
+ VLIW // Scheduling for VLIW targets.
+ };
+ }
+
+/// This base class for TargetLowering contains the SelectionDAG-independent
+/// parts that can be used from the rest of CodeGen.
+class TargetLoweringBase {
+ TargetLoweringBase(const TargetLoweringBase&) = delete;
+ void operator=(const TargetLoweringBase&) = delete;
+
+public:
+ /// This enum indicates whether operations are valid for a target, and if not,
+ /// what action should be used to make them valid.
+ enum LegalizeAction : uint8_t {
+ Legal, // The target natively supports this operation.
+ Promote, // This operation should be executed in a larger type.
+ Expand, // Try to expand this to other ops, otherwise use a libcall.
+ LibCall, // Don't try to expand this to other ops, always use a libcall.
+ Custom // Use the LowerOperation hook to implement custom lowering.
+ };
+
+ /// This enum indicates whether a type is legal for a target, and if not,
+ /// what action should be used to make it valid.
+ enum LegalizeTypeAction : uint8_t {
+ TypeLegal, // The target natively supports this type.
+ TypePromoteInteger, // Replace this integer with a larger one.
+ TypeExpandInteger, // Split this integer into two of half the size.
+ TypeSoftenFloat, // Convert this float to a same size integer type,
+ // if an operation is not supported in target HW.
+ TypeExpandFloat, // Split this float into two of half the size.
+ TypeScalarizeVector, // Replace this one-element vector with its element.
+ TypeSplitVector, // Split this vector into two of half the size.
+ TypeWidenVector, // This vector should be widened into a larger vector.
+ TypePromoteFloat // Replace this float with a larger one.
+ };
+
+ /// LegalizeKind holds the legalization kind that needs to happen to EVT
+ /// in order to type-legalize it.
+ typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
+
+ /// Enum that describes how the target represents true/false values.
+ enum BooleanContent {
+ UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
+ ZeroOrOneBooleanContent, // All bits zero except for bit 0.
+ ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
+ };
+
+ /// Enum that describes what type of support for selects the target has.
+ enum SelectSupportKind {
+ ScalarValSelect, // The target supports scalar selects (ex: cmov).
+ ScalarCondVectorVal, // The target supports selects with a scalar condition
+ // and vector values (ex: cmov).
+ VectorMaskSelect // The target supports vector selects with a vector
+ // mask (ex: x86 blends).
+ };
+
+ /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
+ /// to, if at all. Exists because different targets have different levels of
+ /// support for these atomic instructions, and also have different options
+ /// w.r.t. what they should expand to.
+ enum class AtomicExpansionKind {
+ None, // Don't expand the instruction.
+ LLSC, // Expand the instruction into load-linked/store-conditional; used
+ // by ARM/AArch64.
+ LLOnly, // Expand the (load) instruction into just a load-linked, which has
+ // greater atomic guarantees than a normal load.
+ CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
+ };
+
+ static ISD::NodeType getExtendForContent(BooleanContent Content) {
+ switch (Content) {
+ case UndefinedBooleanContent:
+ // Extend by adding rubbish bits.
+ return ISD::ANY_EXTEND;
+ case ZeroOrOneBooleanContent:
+ // Extend by adding zero bits.
+ return ISD::ZERO_EXTEND;
+ case ZeroOrNegativeOneBooleanContent:
+ // Extend by copying the sign bit.
+ return ISD::SIGN_EXTEND;
+ }
+ llvm_unreachable("Invalid content kind");
+ }
+
+ /// NOTE: The TargetMachine owns TLOF.
+ explicit TargetLoweringBase(const TargetMachine &TM);
+ virtual ~TargetLoweringBase() {}
+
+protected:
+ /// \brief Initialize all of the actions to default values.
+ void initActions();
+
+public:
+ const TargetMachine &getTargetMachine() const { return TM; }
+
+ virtual bool useSoftFloat() const { return false; }
+
+ /// Return the pointer type for the given address space, defaults to
+ /// the pointer type from the data layout.
+ /// FIXME: The default needs to be removed once all the code is updated.
+ MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
+ return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
+ }
+
+ /// EVT is not used in-tree, but is used by out-of-tree targets.
+ /// Documentation for this function would be nice...
+ virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;
+
+ EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;
+
+ /// Returns the type to be used for the index operand of:
+ /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
+ /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
+ virtual MVT getVectorIdxTy(const DataLayout &DL) const {
+ return getPointerTy(DL);
+ }
+
+ /// Return true if the select operation is expensive for this target.
+ bool isSelectExpensive() const { return SelectIsExpensive; }
+
+ virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
+ return true;
+ }
+
+ /// Return true if multiple condition registers are available.
+ bool hasMultipleConditionRegisters() const {
+ return HasMultipleConditionRegisters;
+ }
+
+ /// Return true if the target has BitExtract instructions.
+ bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
+
+ /// Return the preferred vector type legalization action.
+ virtual TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(EVT VT) const {
+ // The default action for one-element vectors is to scalarize.
+ if (VT.getVectorNumElements() == 1)
+ return TypeScalarizeVector;
+ // The default action for other vectors is to promote.
+ return TypePromoteInteger;
+ }
+
+ // There are two general methods for expanding a BUILD_VECTOR node:
+ // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
+ // them together.
+ // 2. Build the vector on the stack and then load it.
+ // If this function returns true, then method (1) will be used, subject to
+ // the constraint that all of the necessary shuffles are legal (as determined
+ // by isShuffleMaskLegal). If this function returns false, then method (2) is
+ // always used. The vector type, and the number of defined values, are
+ // provided.
+ virtual bool
+ shouldExpandBuildVectorWithShuffles(EVT /* VT */,
+ unsigned DefinedValues) const {
+ return DefinedValues < 3;
+ }
+
+ /// Return true if integer divide is usually cheaper than a sequence of
+ /// several shifts, adds, and multiplies for this target.
+ /// The definition of "cheaper" may depend on whether we're optimizing
+ /// for speed or for size.
+ virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const {
+ return false;
+ }
+
+ /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x)
+ bool isFsqrtCheap() const {
+ return FsqrtIsCheap;
+ }
+
+ /// Returns true if the target has indicated that at least one type should be
+ /// bypassed.
+ bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }
+
+ /// Returns the map of slow types for division or remainder with their
+ /// corresponding fast types.
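+ /// (For example, on some x86 subtargets the map contains 64 -> 32: a slow
+ /// 64-bit divide is bypassed with a 32-bit divide when the operands fit.)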
+ const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
+ return BypassSlowDivWidths;
+ }
+
+ /// Return true if Flow Control is an expensive operation that should be
+ /// avoided.
+ bool isJumpExpensive() const { return JumpIsExpensive; }
+
+ /// Return true if selects are only cheaper than branches if the branch is
+ /// unlikely to be predicted right.
+ bool isPredictableSelectExpensive() const {
+ return PredictableSelectIsExpensive;
+ }
+
+ /// isLoadBitCastBeneficial() - Return true if the following transform
+ /// is beneficial:
+ /// fold (conv (load x)) -> (load (conv*)x)
+ /// On architectures that don't natively support some vector loads
+ /// efficiently, casting the load to a smaller vector of larger types and
+ /// loading is more efficient; however, this can be undone by optimizations
+ /// in the DAG combiner.
+ virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+ EVT /* Bitcast */) const {
+ return true;
+ }
+
+ /// Return true if it is expected to be cheaper to do a store of a non-zero
+ /// vector constant with the given size and type for the address space than to
+ /// store the individual scalar element constants.
+ virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
+ unsigned NumElem,
+ unsigned AddrSpace) const {
+ return false;
+ }
+
+ /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
+ virtual bool isCheapToSpeculateCttz() const {
+ return false;
+ }
+
+ /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
+ virtual bool isCheapToSpeculateCtlz() const {
+ return false;
+ }
+
+ /// \brief Return if the target supports combining a
+ /// chain like:
+ /// \code
+ /// %andResult = and %val1, #imm-with-one-bit-set;
+ /// %icmpResult = icmp %andResult, 0
+ /// br i1 %icmpResult, label %dest1, label %dest2
+ /// \endcode
+ /// into a single machine instruction of a form like:
+ /// \code
+ /// brOnBitSet %register, #bitNumber, dest
+ /// \endcode
+ bool isMaskAndBranchFoldingLegal() const {
+ return MaskAndBranchFoldingIsLegal;
+ }
+
+ /// \brief Return true if the target wants to use the optimization that
+ /// turns ext(promotableInst1(...(promotableInstN(load)))) into
+ /// promotedInst1(...(promotedInstN(ext(load)))).
+ bool enableExtLdPromotion() const { return EnableExtLdPromotion; }
+
+ /// Return true if the target can combine store(extractelement VectorTy,
+ /// Idx).
+ /// \p Cost[out] gives the cost of that transformation when this is true.
+ virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
+ unsigned &Cost) const {
+ return false;
+ }
+
+ /// Return true if target supports floating point exceptions.
+ bool hasFloatingPointExceptions() const {
+ return HasFloatingPointExceptions;
+ }
+
+ /// Return true if the target always benefits from combining into FMA for a
+ /// given value type. This must typically return false on targets where FMA
+ /// takes more cycles to execute than FADD.
+ virtual bool enableAggressiveFMAFusion(EVT VT) const {
+ return false;
+ }
+
+ /// Return the ValueType of the result of SETCC operations.
+ virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+ EVT VT) const;
+
+ /// Return the ValueType for comparison libcalls. Comparison libcalls include
+ /// floating point comparison calls, and Ordered/Unordered check calls on
+ /// floating point numbers.
+ virtual
+ MVT::SimpleValueType getCmpLibcallReturnType() const;
+
+ /// For targets without i1 registers, this gives the nature of the high-bits
+ /// of boolean values held in types wider than i1.
+ ///
+ /// "Boolean values" are special true/false values produced by nodes like
+ /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
+ /// Not to be confused with general values promoted from i1. Some cpus
+ /// distinguish between vectors of boolean and scalars; the isVec parameter
+ /// selects between the two kinds. For example on X86 a scalar boolean should
+ /// be zero extended from i1, while the elements of a vector of booleans
+ /// should be sign extended from i1.
+ ///
+ /// Some cpus also treat floating point types the same way as they treat
+ /// vectors instead of the way they treat scalars.
+ BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
+ if (isVec)
+ return BooleanVectorContents;
+ return isFloat ? BooleanFloatContents : BooleanContents;
+ }
+
+ BooleanContent getBooleanContents(EVT Type) const {
+ return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
+ }
+
+ /// Return target scheduling preference.
+ Sched::Preference getSchedulingPreference() const {
+ return SchedPreferenceInfo;
+ }
+
+ /// Some schedulers, e.g. hybrid, can switch to different scheduling
+ /// heuristics for different nodes. This function returns the preference (or
+ /// none) for the given node.
+ virtual Sched::Preference getSchedulingPreference(SDNode *) const {
+ return Sched::None;
+ }
+
+ /// Return the register class that should be used for the specified value
+ /// type.
+ virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
+ const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
+ assert(RC && "This value type is not natively supported!");
+ return RC;
+ }
+
+ /// Return the 'representative' register class for the specified value
+ /// type.
+ ///
+ /// The 'representative' register class is the largest legal super-reg
+ /// register class for the register class of the value type. For example, on
+ /// i386 the rep register class for i8, i16, and i32 is GR32, while the rep
+ /// register class is GR64 on x86_64.
+ virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
+ const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
+ return RC;
+ }
+
+ /// Return the cost of the 'representative' register class for the specified
+ /// value type.
+ virtual uint8_t getRepRegClassCostFor(MVT VT) const {
+ return RepRegClassCostForVT[VT.SimpleTy];
+ }
+
+ /// Return true if the target has native support for the specified value type.
+ /// This means that it has a register that directly holds it without
+ /// promotions or expansions.
+ bool isTypeLegal(EVT VT) const {
+ assert(!VT.isSimple() ||
+ (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
+ return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
+ }
+
+ class ValueTypeActionImpl {
+ /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
+ /// that indicates how instruction selection should deal with the type.
+ LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];
+
+ public:
+ ValueTypeActionImpl() {
+ std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
+ TypeLegal);
+ }
+
+ LegalizeTypeAction getTypeAction(MVT VT) const {
+ return ValueTypeActions[VT.SimpleTy];
+ }
+
+ void setTypeAction(MVT VT, LegalizeTypeAction Action) {
+ ValueTypeActions[VT.SimpleTy] = Action;
+ }
+ };
+
+ const ValueTypeActionImpl &getValueTypeActions() const {
+ return ValueTypeActions;
+ }
+
+ /// Return how we should legalize values of this type, either it is already
+ /// legal (return 'Legal') or we need to promote it to a larger type (return
+ /// 'Promote'), or we need to expand it into multiple registers of smaller
+ /// integer type (return 'Expand'). 'Custom' is not an option.
+ LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
+ return getTypeConversion(Context, VT).first;
+ }
+ LegalizeTypeAction getTypeAction(MVT VT) const {
+ return ValueTypeActions.getTypeAction(VT);
+ }
+
+ /// For types supported by the target, this is an identity function. For
+ /// types that must be promoted to larger types, this returns the larger type
+ /// to promote to. For integer types that are larger than the largest integer
+ /// register, this contains one step in the expansion to get to the smaller
+ /// register. For illegal floating point types, this returns the integer type
+ /// to transform to.
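+ ///
+ /// For example, on a typical 32-bit target (illustrative only; the results
+ /// are target-dependent):
+ /// \code
+ ///   getTypeToTransformTo(Ctx, MVT::i64);   // i32 (one expansion step)
+ ///   getTypeToTransformTo(Ctx, MVT::i8);    // i32 (promotion)
+ ///   getTypeToTransformTo(Ctx, MVT::v1i32); // i32 (scalarization)
+ /// \endcode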
+ EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
+ return getTypeConversion(Context, VT).second;
+ }
+
+ /// For types supported by the target, this is an identity function. For
+ /// types that must be expanded (i.e. integer types that are larger than the
+ /// largest integer register or illegal floating point types), this returns
+ /// the largest legal type it will be expanded to.
+ EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
+ assert(!VT.isVector());
+ while (true) {
+ switch (getTypeAction(Context, VT)) {
+ case TypeLegal:
+ return VT;
+ case TypeExpandInteger:
+ VT = getTypeToTransformTo(Context, VT);
+ break;
+ default:
+ llvm_unreachable("Type is not legal nor is it to be expanded!");
+ }
+ }
+ }
+
+ /// Vector types are broken down into some number of legal first class types.
+ /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
+ /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
+ /// turns into 4 EVT::i32 values with both PPC and X86.
+ ///
+ /// This method returns the number of registers needed, and the VT for each
+ /// register. It also returns the VT and quantity of the intermediate values
+ /// before they are promoted/expanded.
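+ ///
+ /// For example, breaking down EVT::v8f32 with SSE1 sets IntermediateVT and
+ /// RegisterVT to v4f32, NumIntermediates to 2, and returns 2 (illustrative;
+ /// the exact results depend on the target's legal types).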
+ unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
+ EVT &IntermediateVT,
+ unsigned &NumIntermediates,
+ MVT &RegisterVT) const;
+
+ struct IntrinsicInfo {
+ unsigned opc; // target opcode
+ EVT memVT; // memory VT
+ const Value* ptrVal; // value representing memory location
+ int offset; // offset off of ptrVal
+ unsigned size; // the size of the memory location
+ // (taken from memVT if zero)
+ unsigned align; // alignment
+ bool vol; // is volatile?
+ bool readMem; // reads memory?
+ bool writeMem; // writes memory?
+
+ IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
+ vol(false), readMem(false), writeMem(false) {}
+ };
+
+ /// Given an intrinsic, checks if on the target the intrinsic will need to map
+ /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
+ /// true and store the intrinsic information into the IntrinsicInfo that was
+ /// passed to the function.
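+ ///
+ /// A typical override fills in the struct along these lines (a sketch for a
+ /// hypothetical vector-load intrinsic; I is the CallInst argument):
+ /// \code
+ ///   Info.opc = ISD::INTRINSIC_W_CHAIN;
+ ///   Info.memVT = MVT::v4i32;
+ ///   Info.ptrVal = I.getArgOperand(0);
+ ///   Info.align = 16;
+ ///   Info.readMem = true;
+ ///   Info.writeMem = false;
+ ///   return true;
+ /// \endcode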
+ virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
+ unsigned /*Intrinsic*/) const {
+ return false;
+ }
+
+ /// Returns true if the target can instruction select the specified FP
+ /// immediate natively. If false, the legalizer will materialize the FP
+ /// immediate as a load from a constant pool.
+ virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
+ return false;
+ }
+
+ /// Targets can use this to indicate that they only support *some*
+ /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
+ /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
+ /// legal.
+ virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
+ EVT /*VT*/) const {
+ return true;
+ }
+
+ /// Returns true if the operation can trap for the value type.
+ ///
+ /// VT must be a legal type. By default, we optimistically assume most
+ /// operations don't trap except for divide and remainder.
+ virtual bool canOpTrap(unsigned Op, EVT VT) const;
+
+ /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
+ /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
+ /// constant pool entry.
+ virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
+ EVT /*VT*/) const {
+ return false;
+ }
+
+ /// Return how this operation should be treated: either it is legal, needs to
+ /// be promoted to a larger size, needs to be expanded to some other code
+ /// sequence, or the target has a custom expander for it.
+ LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
+ if (VT.isExtended()) return Expand;
+ // If a target-specific SDNode requires legalization, require the target
+ // to provide custom legalization for it.
+ if (Op >= array_lengthof(OpActions[0])) return Custom;
+ return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
+ }
+
+ /// Return true if the specified operation is legal on this target or can be
+ /// made legal with custom lowering. This is used to help guide high-level
+ /// lowering decisions.
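+ ///
+ /// For example, a DAG combine might guard node creation with (a sketch):
+ /// \code
+ ///   if (TLI.isOperationLegalOrCustom(ISD::FMA, VT))
+ ///     return DAG.getNode(ISD::FMA, DL, VT, A, B, C);
+ /// \endcode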
+ bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
+ return (VT == MVT::Other || isTypeLegal(VT)) &&
+ (getOperationAction(Op, VT) == Legal ||
+ getOperationAction(Op, VT) == Custom);
+ }
+
+ /// Return true if the specified operation is legal on this target or can be
+ /// made legal using promotion. This is used to help guide high-level lowering
+ /// decisions.
+ bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
+ return (VT == MVT::Other || isTypeLegal(VT)) &&
+ (getOperationAction(Op, VT) == Legal ||
+ getOperationAction(Op, VT) == Promote);
+ }
+
+ /// Return true if the specified operation is illegal on this target or
+ /// unlikely to be made legal with custom lowering. This is used to help guide
+ /// high-level lowering decisions.
+ bool isOperationExpand(unsigned Op, EVT VT) const {
+ return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
+ }
+
+ /// Return true if the specified operation is legal on this target.
+ bool isOperationLegal(unsigned Op, EVT VT) const {
+ return (VT == MVT::Other || isTypeLegal(VT)) &&
+ getOperationAction(Op, VT) == Legal;
+ }
+
+ /// Return how this load with extension should be treated: either it is legal,
+ /// needs to be promoted to a larger size, needs to be expanded to some other
+ /// code sequence, or the target has a custom expander for it.
+ LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+ EVT MemVT) const {
+ if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+ unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+ unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
+ MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
+ return LoadExtActions[ValI][MemI][ExtType];
+ }
+
+ /// Return true if the specified load with extension is legal on this target.
+ bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+ return ValVT.isSimple() && MemVT.isSimple() &&
+ getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
+ }
+
+ /// Return true if the specified load with extension is legal or custom
+ /// on this target.
+ bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+ return ValVT.isSimple() && MemVT.isSimple() &&
+ (getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
+ getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
+ }
+
+ /// Return how this store with truncation should be treated: either it is
+ /// legal, needs to be promoted to a larger size, needs to be expanded to some
+ /// other code sequence, or the target has a custom expander for it.
+ LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
+ if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+ unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+ unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+ assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
+ "Table isn't big enough!");
+ return TruncStoreActions[ValI][MemI];
+ }
+
+ /// Return true if the specified store with truncation is legal on this
+ /// target.
+ bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
+ return isTypeLegal(ValVT) && MemVT.isSimple() &&
+ getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
+ }
+
+ /// Return how the indexed load should be treated: either it is legal, needs
+ /// to be promoted to a larger size, needs to be expanded to some other code
+ /// sequence, or the target has a custom expander for it.
+ LegalizeAction
+ getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
+ assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+ "Table isn't big enough!");
+ unsigned Ty = (unsigned)VT.SimpleTy;
+ return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
+ }
+
+ /// Return true if the specified indexed load is legal on this target.
+ bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
+ return VT.isSimple() &&
+ (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
+ getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
+ }
+
+ /// Return how the indexed store should be treated: either it is legal, needs
+ /// to be promoted to a larger size, needs to be expanded to some other code
+ /// sequence, or the target has a custom expander for it.
+ LegalizeAction
+ getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
+ assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
+ "Table isn't big enough!");
+ unsigned Ty = (unsigned)VT.SimpleTy;
+ return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
+ }
+
+ /// Return true if the specified indexed store is legal on this target.
+ bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
+ return VT.isSimple() &&
+ (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
+ getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
+ }
+
+ /// Return how the condition code should be treated: either it is legal, needs
+ /// to be expanded to some other code sequence, or the target has a custom
+ /// expander for it.
+ LegalizeAction
+ getCondCodeAction(ISD::CondCode CC, MVT VT) const {
+ assert((unsigned)CC < array_lengthof(CondCodeActions) &&
+ ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
+ "Table isn't big enough!");
+ // See setCondCodeAction for how this is encoded.
+ uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+ uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
+ LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
+ assert(Action != Promote && "Can't promote condition code!");
+ return Action;
+ }
+
+ /// Return true if the specified condition code is legal on this target.
+ bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
+ return
+ getCondCodeAction(CC, VT) == Legal ||
+ getCondCodeAction(CC, VT) == Custom;
+ }
+
+ /// If the action for this operation is to promote, this method returns the
+ /// ValueType to promote to.
+ MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
+ assert(getOperationAction(Op, VT) == Promote &&
+ "This operation isn't promoted!");
+
+ // See if this has an explicit type specified.
+ std::map<std::pair<unsigned, MVT::SimpleValueType>,
+ MVT::SimpleValueType>::const_iterator PTTI =
+ PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
+ if (PTTI != PromoteToType.end()) return PTTI->second;
+
+ assert((VT.isInteger() || VT.isFloatingPoint()) &&
+ "Cannot autopromote this type, add it with AddPromotedToType.");
+
+ MVT NVT = VT;
+ do {
+ NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
+ assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
+ "Didn't find type to promote to!");
+ } while (!isTypeLegal(NVT) ||
+ getOperationAction(Op, NVT) == Promote);
+ return NVT;
+ }
+
+ /// Return the EVT corresponding to this LLVM type. The mapping is fixed by
+ /// LLVM except for the pointer size, which is target-dependent. If
+ /// AllowUnknown is true, this will return MVT::Other for types with no EVT
+ /// counterpart (e.g. structs), otherwise it will assert.
+ EVT getValueType(const DataLayout &DL, Type *Ty,
+ bool AllowUnknown = false) const {
+ // Lower scalar pointers to native pointer types.
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ return getPointerTy(DL, PTy->getAddressSpace());
+
+ if (Ty->isVectorTy()) {
+ VectorType *VTy = cast<VectorType>(Ty);
+ Type *Elm = VTy->getElementType();
+ // Lower vectors of pointers to native pointer types.
+ if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
+ EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
+ Elm = PointerTy.getTypeForEVT(Ty->getContext());
+ }
+
+ return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
+ VTy->getNumElements());
+ }
+ return EVT::getEVT(Ty, AllowUnknown);
+ }
+
+ /// Return the MVT corresponding to this LLVM type. See getValueType.
+ MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
+ bool AllowUnknown = false) const {
+ return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
+ }
+
+ /// Return the desired alignment for ByVal or InAlloca aggregate function
+ /// arguments in the caller parameter area. This is the actual alignment, not
+ /// its logarithm.
+ virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;
+
+ /// Return the type of registers that this ValueType will eventually require.
+ MVT getRegisterType(MVT VT) const {
+ assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
+ return RegisterTypeForVT[VT.SimpleTy];
+ }
+
+ /// Return the type of registers that this ValueType will eventually require.
+ MVT getRegisterType(LLVMContext &Context, EVT VT) const {
+ if (VT.isSimple()) {
+ assert((unsigned)VT.getSimpleVT().SimpleTy <
+ array_lengthof(RegisterTypeForVT));
+ return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
+ }
+ if (VT.isVector()) {
+ EVT VT1;
+ MVT RegisterVT;
+ unsigned NumIntermediates;
+ (void)getVectorTypeBreakdown(Context, VT, VT1,
+ NumIntermediates, RegisterVT);
+ return RegisterVT;
+ }
+ if (VT.isInteger()) {
+ return getRegisterType(Context, getTypeToTransformTo(Context, VT));
+ }
+ llvm_unreachable("Unsupported extended type!");
+ }
+
+ /// Return the number of registers that this ValueType will eventually
+ /// require.
+ ///
+ /// This is one for any types promoted to live in larger registers, but may be
+ /// more than one for types (like i64) that are split into pieces. For types
+ /// like i140, which are first promoted then expanded, it is the number of
+ /// registers needed to hold all the bits of the original type. For an i140
+ /// on a 32 bit machine this means 5 registers.
+ unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
+ if (VT.isSimple()) {
+ assert((unsigned)VT.getSimpleVT().SimpleTy <
+ array_lengthof(NumRegistersForVT));
+ return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
+ }
+ if (VT.isVector()) {
+ EVT VT1;
+ MVT VT2;
+ unsigned NumIntermediates;
+ return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
+ }
+ if (VT.isInteger()) {
+ unsigned BitWidth = VT.getSizeInBits();
+ unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
+ return (BitWidth + RegWidth - 1) / RegWidth;
+ }
+ llvm_unreachable("Unsupported extended type!");
+ }
+
+ /// If true, then instruction selection should seek to shrink the FP constant
+ /// of the specified type to a smaller type in order to save space and/or
+ /// reduce runtime.
+ virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
+
+ // Return true if it is profitable to reduce the given load node to a smaller
+ // type.
+ //
+ // e.g. (i16 (trunc (i32 (load x)))) -> (i16 load x) should be performed
+ virtual bool shouldReduceLoadWidth(SDNode *Load,
+ ISD::LoadExtType ExtTy,
+ EVT NewVT) const {
+ return true;
+ }
+
+ /// When splitting a value of the specified type into parts, does the Lo
+ /// or Hi part come first? This usually follows the endianness, except
+ /// for ppcf128, where the Hi part always comes first.
+ bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
+ return DL.isBigEndian() || VT == MVT::ppcf128;
+ }
+
+ /// If true, the target has custom DAG combine transformations that it can
+ /// perform for the specified node.
+ bool hasTargetDAGCombine(ISD::NodeType NT) const {
+ assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+ return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
+ }
+
+ unsigned getGatherAllAliasesMaxDepth() const {
+ return GatherAllAliasesMaxDepth;
+ }
+
+ /// \brief Get maximum # of store operations permitted for llvm.memset
+ ///
+ /// This function returns the maximum number of store operations permitted
+ /// to replace a call to llvm.memset. The value is set by the target at the
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
+ unsigned getMaxStoresPerMemset(bool OptSize) const {
+ return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
+ }
+
+ /// \brief Get maximum # of store operations permitted for llvm.memcpy
+ ///
+ /// This function returns the maximum number of store operations permitted
+ /// to replace a call to llvm.memcpy. The value is set by the target at the
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
+ unsigned getMaxStoresPerMemcpy(bool OptSize) const {
+ return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
+ }
+
+ /// \brief Get maximum # of store operations permitted for llvm.memmove
+ ///
+ /// This function returns the maximum number of store operations permitted
+ /// to replace a call to llvm.memmove. The value is set by the target at the
+ /// performance threshold for such a replacement. If OptSize is true,
+ /// return the limit for functions that have OptSize attribute.
+ unsigned getMaxStoresPerMemmove(bool OptSize) const {
+ return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
+ }
+
+ /// \brief Determine if the target supports unaligned memory accesses.
+ ///
+ /// This function returns true if the target allows unaligned memory accesses
+ /// of the specified type in the given address space. If true, it also returns
+ /// whether the unaligned memory access is "fast" in the last argument by
+ /// reference. This is used, for example, in situations where an array
+ /// copy/move/set is converted to a sequence of store operations. Its use
+ /// helps to ensure that such replacements don't generate code that causes an
+ /// alignment error (trap) on the target machine.
+ virtual bool allowsMisalignedMemoryAccesses(EVT,
+ unsigned AddrSpace = 0,
+ unsigned Align = 1,
+ bool * /*Fast*/ = nullptr) const {
+ return false;
+ }
+
+ /// Return true if the target supports a memory access of this type for the
+ /// given address space and alignment. If the access is allowed, the optional
+ /// final parameter returns if the access is also fast (as defined by the
+ /// target).
+ bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+ unsigned AddrSpace = 0, unsigned Alignment = 1,
+ bool *Fast = nullptr) const;
+
+ /// Returns the target specific optimal type for load and store operations as
+ /// a result of memset, memcpy, and memmove lowering.
+ ///
+ /// If DstAlign is zero, the destination alignment can satisfy any
+ /// constraint. Similarly, if SrcAlign is zero, there is no need to check it
+ /// against an alignment requirement, probably because the source does not
+ /// need to be loaded. If 'IsMemset' is true, that means it's
+ /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
+ /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
+ /// does not need to be loaded. It returns EVT::Other if the type should be
+ /// determined using generic target-independent logic.
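+ ///
+ /// A hypothetical override that prefers 128-bit vector stores for large,
+ /// suitably aligned memsets might do (a sketch):
+ /// \code
+ ///   if (IsMemset && Size >= 16 && DstAlign >= 16)
+ ///     return MVT::v4i32;
+ ///   return MVT::Other;
+ /// \endcode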
+ virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
+ unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
+ bool /*IsMemset*/,
+ bool /*ZeroMemset*/,
+ bool /*MemcpyStrSrc*/,
+ MachineFunction &/*MF*/) const {
+ return MVT::Other;
+ }
+
+ /// Returns true if it's safe to use load / store of the specified type to
+ /// expand memcpy / memset inline.
+ ///
+ /// This is mostly true for all types except for some special cases. For
+ /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
+ /// fstpl which also does type conversion. Note the specified type doesn't
+ /// have to be legal as the hook is used before type legalization.
+ virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }
+
+ /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
+ bool usesUnderscoreSetJmp() const {
+ return UseUnderscoreSetJmp;
+ }
+
+ /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
+ bool usesUnderscoreLongJmp() const {
+ return UseUnderscoreLongJmp;
+ }
+
+ /// Return the integer threshold on the number of blocks at which to use jump
+ /// tables rather than an if sequence.
+ int getMinimumJumpTableEntries() const {
+ return MinimumJumpTableEntries;
+ }
+
+ /// If a physical register, this specifies the register that
+ /// llvm.stacksave/llvm.stackrestore should save and restore.
+ unsigned getStackPointerRegisterToSaveRestore() const {
+ return StackPointerRegisterToSaveRestore;
+ }
+
+ /// If a physical register, this returns the register that receives the
+ /// exception address on entry to an EH pad.
+ virtual unsigned
+ getExceptionPointerRegister(const Constant *PersonalityFn) const {
+ // 0 is guaranteed to be the NoRegister value on all targets
+ return 0;
+ }
+
+ /// If a physical register, this returns the register that receives the
+ /// exception typeid on entry to a landing pad.
+ virtual unsigned
+ getExceptionSelectorRegister(const Constant *PersonalityFn) const {
+ // 0 is guaranteed to be the NoRegister value on all targets
+ return 0;
+ }
+
+ /// Returns the target's jmp_buf size in bytes (if never set, the default is
+ /// 200)
+ unsigned getJumpBufSize() const {
+ return JumpBufSize;
+ }
+
+ /// Returns the target's jmp_buf alignment in bytes (if never set, the default
+ /// is 0)
+ unsigned getJumpBufAlignment() const {
+ return JumpBufAlignment;
+ }
+
+ /// Return the minimum stack alignment of an argument.
+ unsigned getMinStackArgumentAlignment() const {
+ return MinStackArgumentAlignment;
+ }
+
+ /// Return the minimum function alignment.
+ unsigned getMinFunctionAlignment() const {
+ return MinFunctionAlignment;
+ }
+
+ /// Return the preferred function alignment.
+ unsigned getPrefFunctionAlignment() const {
+ return PrefFunctionAlignment;
+ }
+
+ /// Return the preferred loop alignment.
+ virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
+ return PrefLoopAlignment;
+ }
+
+ /// Return whether the DAG builder should automatically insert fences and
+ /// reduce ordering for atomics.
+ bool getInsertFencesForAtomic() const {
+ return InsertFencesForAtomic;
+ }
+
+ /// Return true if the target stores stack protector cookies at a fixed offset
+ /// in some non-standard address space, and populates the address space and
+ /// offset as appropriate.
+ virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
+ unsigned &/*Offset*/) const {
+ return false;
+ }
+
+ /// If the target has a standard location for the unsafe stack pointer,
+ /// returns the address of that location. Otherwise, returns nullptr.
+ virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
+
+ /// Returns true if a cast between SrcAS and DestAS is a noop.
+ virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+ return false;
+ }
+
+ /// Return true if the pointer arguments to CI should be aligned by aligning
+ /// the object whose address is being passed. If so then MinSize is set to the
+ /// minimum size the object must be to be aligned and PrefAlign is set to the
+ /// preferred alignment.
+ virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
+ unsigned & /*PrefAlign*/) const {
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ /// \name Helpers for TargetTransformInfo implementations
+ /// @{
+
+ /// Get the ISD node that corresponds to the Instruction class opcode.
+ int InstructionOpcodeToISD(unsigned Opcode) const;
+
+ /// Estimate the cost of type-legalization and the legalized type.
+ std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
+ Type *Ty) const;
+
+ /// @}
+
+ //===--------------------------------------------------------------------===//
+ /// \name Helpers for atomic expansion.
+ /// @{
+
+ /// Perform a load-linked operation on Addr, returning a "Value *" with the
+ /// corresponding pointee type. This may entail some non-trivial operations to
+ /// truncate or reconstruct types that will be illegal in the backend. See
+ /// ARMISelLowering for an example implementation.
+ virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Load linked unimplemented on this target");
+ }
+
+ /// Perform a store-conditional operation to Addr. Return the status of the
+ /// store. This should be 0 if the store succeeded, non-zero otherwise.
+ virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const {
+ llvm_unreachable("Store conditional unimplemented on this target");
+ }
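+
+ // When AtomicExpandPass expands a cmpxchg through the two hooks above, the
+ // result is roughly the following loop (a conceptual IR-level sketch):
+ //
+ //  loop:
+ //    %loaded = <emitLoadLinked>(%addr)
+ //    %should = icmp eq %loaded, %expected
+ //    br i1 %should, label %try_store, label %failure
+ //  try_store:
+ //    %status = <emitStoreConditional>(%new, %addr) ; 0 on success
+ //    %ok = icmp eq %status, 0
+ //    br i1 %ok, label %success, label %loop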
+
+ /// Inserts in the IR a target-specific intrinsic specifying a fence.
+ /// It is called by AtomicExpandPass before expanding an
+ /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
+ /// RMW and CmpXchg set both IsStore and IsLoad to true.
+ /// This function should either return nullptr, or a pointer to an IR-level
+ /// Instruction. Even complex fence sequences can be represented by a
+ /// single Instruction through an intrinsic to be lowered later.
+ /// Backends with !getInsertFencesForAtomic() should leave this as a no-op.
+ /// Backends should override this method to produce target-specific intrinsic
+ /// for their fences.
+ /// FIXME: Please note that the default implementation here in terms of
+ /// IR-level fences exists for historical/compatibility reasons and is
+ /// *unsound*! Fences cannot, in general, be used to restore sequential
+ /// consistency. For example:
+ /// atomic<int> x = y = 0;
+ /// int r1, r2, r3, r4;
+ /// Thread 0:
+ /// x.store(1);
+ /// Thread 1:
+ /// y.store(1);
+ /// Thread 2:
+ /// r1 = x.load();
+ /// r2 = y.load();
+ /// Thread 3:
+ /// r3 = y.load();
+ /// r4 = x.load();
+ /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
+ /// seq_cst. But if they are lowered to monotonic accesses, no amount of
+ /// IR-level fences can prevent it.
+ /// @{
+ virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
+ if (!getInsertFencesForAtomic())
+ return nullptr;
+
+ if (isAtLeastRelease(Ord) && IsStore)
+ return Builder.CreateFence(Ord);
+ else
+ return nullptr;
+ }
+
+ virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+ AtomicOrdering Ord, bool IsStore,
+ bool IsLoad) const {
+ if (!getInsertFencesForAtomic())
+ return nullptr;
+
+ if (isAtLeastAcquire(Ord))
+ return Builder.CreateFence(Ord);
+ else
+ return nullptr;
+ }
+ /// @}
+
+ // Emits code that executes when the comparison result in the ll/sc
+ // expansion of a cmpxchg instruction is such that the store-conditional will
+ // not execute. This makes it possible to balance out the load-linked with
+ // a dedicated instruction, if desired.
+ // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
+ // be unnecessarily held, except if clrex, inserted by this hook, is executed.
+ virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}
+
+ /// Returns true if the given (atomic) store should be expanded by the
+ /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
+ virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+ return false;
+ }
+
+ /// Returns true if arguments should be sign-extended in lib calls.
+ virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
+ return IsSigned;
+ }
+
+ /// Returns how the given (atomic) load should be expanded by the
+ /// IR-level AtomicExpand pass.
+ virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+ return AtomicExpansionKind::None;
+ }
+
+ /// Returns true if the given atomic cmpxchg should be expanded by the
+ /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
+ /// (through emitLoadLinked() and emitStoreConditional()).
+ virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
+ return false;
+ }
+
+ /// Returns how the IR-level AtomicExpand pass should expand the given
+ /// AtomicRMW, if at all. Default is to never expand.
+ virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
+ return AtomicExpansionKind::None;
+ }
+
+ /// On some platforms, an AtomicRMW that never actually modifies the value
+ /// (such as fetch_add of 0) can be turned into a fence followed by an
+ /// atomic load. This may sound useless, but it makes it possible for the
+ /// processor to keep the cacheline shared, dramatically improving
+ /// performance. And such idempotent RMWs are useful for implementing some
+ /// kinds of locks, see for example (justification + benchmarks):
+ /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+ /// This method tries doing that transformation, returning the atomic load if
+ /// it succeeds, and nullptr otherwise.
+ /// If shouldExpandAtomicLoadInIR indicates that the resulting load should be
+ /// expanded, it will undergo another round of expansion.
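+ ///
+ /// Conceptually (a sketch; the exact fence and load ordering are chosen by
+ /// the target):
+ /// \code
+ ///   %old = atomicrmw or i32* %ptr, i32 0 seq_cst
+ ///   ; ...may become...
+ ///   fence seq_cst
+ ///   %old = load atomic i32, i32* %ptr seq_cst, align 4
+ /// \endcode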
+ virtual LoadInst *
+ lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+ return nullptr;
+ }
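+
+ // Illustrative only (the exact form is target-dependent): a hypothetical
+ //   atomicrmw add i32* %p, i32 0 seq_cst
+ // might become
+ //   fence seq_cst
+ //   %v = load atomic i32* %p monotonic, align 4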
+
+ /// Returns true if we should normalize
+ /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
+ /// select(N0|N1, X, Y) => select(N0, select(N1, X, Y, Y)) if it is likely
+ /// that it saves us from materializing N0 and N1 in an integer register.
+ /// Targets that are able to perform and/or on flags should return false here.
+ virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
+ EVT VT) const {
+ // If a target has multiple condition registers, then it likely has logical
+ // operations on those registers.
+ if (hasMultipleConditionRegisters())
+ return false;
+ // Only do the transform if the value won't be split into multiple
+ // registers.
+ LegalizeTypeAction Action = getTypeAction(Context, VT);
+ return Action != TypeExpandInteger && Action != TypeExpandFloat &&
+ Action != TypeSplitVector;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // TargetLowering Configuration Methods - These methods should be invoked by
+ // the derived class constructor to configure this object for the target.
+ //
+protected:
+ /// Specify how the target extends the result of integer and floating point
+ /// boolean values from i1 to a wider type. See getBooleanContents.
+ void setBooleanContents(BooleanContent Ty) {
+ BooleanContents = Ty;
+ BooleanFloatContents = Ty;
+ }
+
+ /// Specify how the target extends the result of integer and floating point
+ /// boolean values from i1 to a wider type. See getBooleanContents.
+ void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
+ BooleanContents = IntTy;
+ BooleanFloatContents = FloatTy;
+ }
+
+ /// Specify how the target extends the result of a vector boolean value from a
+ /// vector of i1 to a wider type. See getBooleanContents.
+ void setBooleanVectorContents(BooleanContent Ty) {
+ BooleanVectorContents = Ty;
+ }
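+
+ // A hypothetical target constructor might configure these as, e.g.:
+ //   setBooleanContents(ZeroOrOneBooleanContent);
+ //   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);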
+
+ /// Specify the target scheduling preference.
+ void setSchedulingPreference(Sched::Preference Pref) {
+ SchedPreferenceInfo = Pref;
+ }
+
+ /// Indicate whether this target prefers to use _setjmp to implement
+ /// llvm.setjmp or the version without _. Defaults to false.
+ void setUseUnderscoreSetJmp(bool Val) {
+ UseUnderscoreSetJmp = Val;
+ }
+
+ /// Indicate whether this target prefers to use _longjmp to implement
+ /// llvm.longjmp or the version without _. Defaults to false.
+ void setUseUnderscoreLongJmp(bool Val) {
+ UseUnderscoreLongJmp = Val;
+ }
+
+ /// Indicate the minimum number of case blocks required before a jump table
+ /// is generated rather than an if sequence.
+ void setMinimumJumpTableEntries(int Val) {
+ MinimumJumpTableEntries = Val;
+ }
+
+ /// If set to a physical register, this specifies the register that
+ /// llvm.stacksave/llvm.stackrestore should save and restore.
+ void setStackPointerRegisterToSaveRestore(unsigned R) {
+ StackPointerRegisterToSaveRestore = R;
+ }
+
+ /// Tells the code generator not to expand operations into sequences that use
+ /// the select operations if possible.
+ void setSelectIsExpensive(bool isExpensive = true) {
+ SelectIsExpensive = isExpensive;
+ }
+
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// condition registers that can be used to store the results of comparisons
+ /// for use by selects and conditional branches. With multiple condition
+ /// registers, the code generator will not aggressively sink comparisons into
+ /// the blocks of their users.
+ void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
+ HasMultipleConditionRegisters = hasManyRegs;
+ }
+
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ void setHasExtractBitsInsn(bool hasExtractInsn = true) {
+ HasExtractBitsInsn = hasExtractInsn;
+ }
+
+ /// Tells the code generator not to expand logic operations on comparison
+ /// predicates into separate sequences that increase the amount of flow
+ /// control.
+ void setJumpIsExpensive(bool isExpensive = true);
+
+ /// Tells the code generator that fsqrt is cheap, and should not be replaced
+ /// with an alternative sequence of instructions.
+ void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }
+
+ /// Tells the code generator that this target supports floating point
+ /// exceptions and cares about preserving floating point exception behavior.
+ void setHasFloatingPointExceptions(bool FPExceptions = true) {
+ HasFloatingPointExceptions = FPExceptions;
+ }
+
+ /// Tells the code generator which bitwidths to bypass.
+ void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
+ BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
+ }
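+
+ // For example (illustrative), addBypassSlowDiv(32, 8) asks that 32-bit
+ // div/rem be guarded by a runtime check and performed as 8-bit unsigned
+ // div/rem whenever both operands are positive and fit in 8 bits.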
+
+ /// Add the specified register class as an available regclass for the
+ /// specified value type. This indicates the selector can handle values of
+ /// that class natively.
+ void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
+ assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
+ AvailableRegClasses.push_back(std::make_pair(VT, RC));
+ RegClassForVT[VT.SimpleTy] = RC;
+ }
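+
+ // Typical use from a target constructor (illustrative; register class names
+ // are target-specific):
+ //   addRegisterClass(MVT::i32, &X86::GR32RegClass);
+ //   addRegisterClass(MVT::f64, &X86::FR64RegClass);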
+
+ /// Remove all register classes.
+ void clearRegisterClasses() {
+ std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
+
+ AvailableRegClasses.clear();
+ }
+
+ /// \brief Remove all operation actions.
+ void clearOperationActions() {
+ }
+
+ /// Return the largest legal super-reg register class of the register class
+ /// for the specified type and its associated "cost".
+ virtual std::pair<const TargetRegisterClass *, uint8_t>
+ findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
+
+ /// Once all of the register classes are added, this allows us to compute
+ /// derived properties we expose.
+ void computeRegisterProperties(const TargetRegisterInfo *TRI);
+
+ /// Indicate that the specified operation does not work with the specified
+ /// type and indicate what to do about it.
+ void setOperationAction(unsigned Op, MVT VT,
+ LegalizeAction Action) {
+ assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
+ OpActions[(unsigned)VT.SimpleTy][Op] = Action;
+ }
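+
+ // For instance (illustrative), a target without a native i32 divide might
+ // write:
+ //   setOperationAction(ISD::SDIV, MVT::i32, Expand);  // use libcall/SDIVREM
+ //   setOperationAction(ISD::CTPOP, MVT::i32, Custom); // see LowerOperation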
+
+ /// Indicate that the specified load with extension does not work with the
+ /// specified type and indicate what to do about it.
+ void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
+ LegalizeAction Action) {
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
+ MemVT.isValid() && "Table isn't big enough!");
+ LoadExtActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = Action;
+ }
+
+ /// Indicate that the specified truncating store does not work with the
+ /// specified type and indicate what to do about it.
+ void setTruncStoreAction(MVT ValVT, MVT MemVT,
+ LegalizeAction Action) {
+ assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
+ TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
+ }
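+
+ // Illustrative examples of both hooks:
+ //   // sign-extending loads from i1 memory are not natively supported:
+ //   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);
+ //   // there is no truncating f64 -> f32 store instruction:
+ //   setTruncStoreAction(MVT::f64, MVT::f32, Expand);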
+
+ /// Indicate that the specified indexed load does or does not work with the
+ /// specified type and indicate what to do about it.
+ ///
+ /// NOTE: All indexed mode loads are initialized to Expand in
+ /// TargetLowering.cpp
+ void setIndexedLoadAction(unsigned IdxMode, MVT VT,
+ LegalizeAction Action) {
+ assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
+ // Load actions are kept in the upper half.
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
+ }
+
+ /// Indicate that the specified indexed store does or does not work with the
+ /// specified type and indicate what to do about it.
+ ///
+ /// NOTE: All indexed mode stores are initialized to Expand in
+ /// TargetLowering.cpp
+ void setIndexedStoreAction(unsigned IdxMode, MVT VT,
+ LegalizeAction Action) {
+ assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
+ (unsigned)Action < 0xf && "Table isn't big enough!");
+ // Store actions are kept in the lower half.
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
+ IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
+ }
+
+ /// Indicate that the specified condition code is or isn't supported on the
+ /// target and indicate what to do about it.
+ void setCondCodeAction(ISD::CondCode CC, MVT VT,
+ LegalizeAction Action) {
+ assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
+ "Table isn't big enough!");
+ assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
+ // The lower 3 bits of SimpleTy select the Nth 4-bit chunk within the 32-bit
+ // value, and the remaining upper bits index into the second dimension of
+ // the array to select which 32-bit value to use.
+ uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
+ CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
+ CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
+ }
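+
+ // Worked example of the packing above (value purely illustrative): for a
+ // SimpleTy of 13, Shift = 4 * (13 & 0x7) = 20, so the 4-bit action occupies
+ // bits [23:20] of CondCodeActions[CC][13 >> 3], i.e. CondCodeActions[CC][1].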
+
+ /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
+ /// to trying a larger integer/fp until it can find one that works. If that
+ /// default is insufficient, this method can be used by the target to override
+ /// the default.
+ void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
+ PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
+ }
+
+ /// Targets should invoke this method for each target independent node that
+ /// they want to provide a custom DAG combiner for by implementing the
+ /// PerformDAGCombine virtual method.
+ void setTargetDAGCombine(ISD::NodeType NT) {
+ assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
+ TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
+ }
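+
+ // E.g. (illustrative) a target that wants PerformDAGCombine invoked for
+ // shifts would register:
+ //   setTargetDAGCombine(ISD::SHL);
+ //   setTargetDAGCombine(ISD::SRL);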
+
+ /// Set the target's required jmp_buf buffer size (in bytes); default is 200
+ void setJumpBufSize(unsigned Size) {
+ JumpBufSize = Size;
+ }
+
+ /// Set the target's required jmp_buf buffer alignment (in bytes); default is
+ /// 0
+ void setJumpBufAlignment(unsigned Align) {
+ JumpBufAlignment = Align;
+ }
+
+ /// Set the target's minimum function alignment (in log2(bytes))
+ void setMinFunctionAlignment(unsigned Align) {
+ MinFunctionAlignment = Align;
+ }
+
+ /// Set the target's preferred function alignment. This should be set if
+ /// there is a performance benefit to higher-than-minimum alignment (in
+ /// log2(bytes))
+ void setPrefFunctionAlignment(unsigned Align) {
+ PrefFunctionAlignment = Align;
+ }
+
+ /// Set the target's preferred loop alignment. Default alignment is zero, it
+ /// means the target does not care about loop alignment. The alignment is
+ /// specified in log2(bytes). The target may also override
+ /// getPrefLoopAlignment to provide per-loop values.
+ void setPrefLoopAlignment(unsigned Align) {
+ PrefLoopAlignment = Align;
+ }
+
+ /// Set the minimum stack alignment of an argument (in log2(bytes)).
+ void setMinStackArgumentAlignment(unsigned Align) {
+ MinStackArgumentAlignment = Align;
+ }
+
+ /// Set if the DAG builder should automatically insert fences and reduce the
+ /// order of atomic memory operations to Monotonic.
+ void setInsertFencesForAtomic(bool fence) {
+ InsertFencesForAtomic = fence;
+ }
+
+public:
+ //===--------------------------------------------------------------------===//
+ // Addressing mode description hooks (used by LSR etc).
+ //
+
+ /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
+ /// instructions reading the address. This allows as much computation as
+ /// possible to be done in the address mode for that operand. This hook lets
+ /// targets also pass back when this should be done on intrinsics which
+ /// load/store.
+ virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
+ SmallVectorImpl<Value*> &/*Ops*/,
+ Type *&/*AccessTy*/,
+ unsigned AddrSpace = 0) const {
+ return false;
+ }
+
+ /// This represents an addressing mode of:
+ /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+ /// If BaseGV is null, there is no BaseGV.
+ /// If BaseOffs is zero, there is no base offset.
+ /// If HasBaseReg is false, there is no base register.
+ /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
+ /// no scale.
+ struct AddrMode {
+ GlobalValue *BaseGV;
+ int64_t BaseOffs;
+ bool HasBaseReg;
+ int64_t Scale;
+ AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
+ };
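+
+ // For example (illustrative), the address "GV + 4 + %base + 2 * %index"
+ // corresponds to:
+ //   AddrMode AM;
+ //   AM.BaseGV = GV; AM.BaseOffs = 4; AM.HasBaseReg = true; AM.Scale = 2;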
+
+ /// Return true if the addressing mode represented by AM is legal for this
+ /// target, for a load/store of the specified type.
+ ///
+ /// The type may be VoidTy, in which case only return true if the addressing
+ /// mode is legal for a load/store of any legal type. TODO: Handle
+ /// pre/postinc as well.
+ ///
+ /// If the address space cannot be determined, it will be -1.
+ ///
+ /// TODO: Remove default argument
+ virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
+ Type *Ty, unsigned AddrSpace) const;
+
+ /// \brief Return the cost of the scaling factor used in the addressing mode
+ /// represented by AM for this target, for a load/store of the specified type.
+ ///
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ /// TODO: Handle pre/postinc as well.
+ /// TODO: Remove default argument
+ virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
+ Type *Ty, unsigned AS = 0) const {
+ // Default: assume that any scaling factor used in a legal AM is free.
+ if (isLegalAddressingMode(DL, AM, Ty, AS))
+ return 0;
+ return -1;
+ }
+
+ /// Return true if the specified immediate is legal icmp immediate, that is
+ /// the target has icmp instructions which can compare a register against the
+ /// immediate without having to materialize the immediate into a register.
+ virtual bool isLegalICmpImmediate(int64_t) const {
+ return true;
+ }
+
+ /// Return true if the specified immediate is legal add immediate, that is the
+ /// target has add instructions which can add a register with the immediate
+ /// without having to materialize the immediate into a register.
+ virtual bool isLegalAddImmediate(int64_t) const {
+ return true;
+ }
+
+ /// Return true if it's significantly cheaper to shift a vector by a uniform
+ /// scalar than by an amount which will vary across each lane. On x86, for
+ /// example, there is a "psllw" instruction for the former case, but no simple
+ /// instruction for a general "a << b" operation on vectors.
+ virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
+ return false;
+ }
+
+ /// Return true if it's free to truncate a value of type FromTy to type
+ /// ToTy. E.g., on x86 it's free to truncate an i32 value in register EAX to
+ /// i16 by referencing its sub-register AX.
+ /// Targets must return false when FromTy <= ToTy.
+ virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
+ return false;
+ }
+
+ /// Return true if a truncation from FromTy to ToTy is permitted when deciding
+ /// whether a call is in tail position. Typically this means that both results
+ /// would be assigned to the same register or stack slot, but it could mean
+ /// the target performs adequate checks of its own before proceeding with the
+ /// tail call. Targets must return false when FromTy <= ToTy.
+ virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
+ return false;
+ }
+
+ virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
+ return false;
+ }
+
+ virtual bool isProfitableToHoist(Instruction *I) const { return true; }
+
+ /// Return true if the extension represented by \p I is free.
+ /// Unlike the is[Z|FP]ExtFree family, which is based on types, this method
+ /// can use the context provided by \p I to decide whether or not \p I is
+ /// free. This method extends the behavior of the is[Z|FP]ExtFree family.
+ /// In other words, if is[Z|FP]ExtFree returns true, then this method
+ /// returns true as well. The converse is not true.
+ /// The target can perform the adequate checks by overriding isExtFreeImpl.
+ /// \pre \p I must be a sign, zero, or fp extension.
+ bool isExtFree(const Instruction *I) const {
+ switch (I->getOpcode()) {
+ case Instruction::FPExt:
+ if (isFPExtFree(EVT::getEVT(I->getType())))
+ return true;
+ break;
+ case Instruction::ZExt:
+ if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
+ return true;
+ break;
+ case Instruction::SExt:
+ break;
+ default:
+ llvm_unreachable("Instruction is not an extension");
+ }
+ return isExtFreeImpl(I);
+ }
+
+ /// Return true if any actual instruction that defines a value of type FromTy
+ /// implicitly zero-extends the value to ToTy in the result register.
+ ///
+ /// The function should return true when it is likely that the zero-extend
+ /// can be freely folded with an instruction defining a value of FromTy. If
+ /// the defining instruction is unknown (because you're looking at a
+ /// function argument, PHI, etc.) then the target may require an
+ /// explicit zero-extend, which is not necessarily free, but this function
+ /// does not deal with those cases.
+ /// Targets must return false when FromTy >= ToTy.
+ virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
+ return false;
+ }
+
+ virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
+ return false;
+ }
+
+ /// Return true if the target supports combining two values of type
+ /// LoadedType, loaded next to each other in memory, into a single paired
+ /// load. RequiredAlignment gives the minimal alignment constraints that
+ /// must be met to be able to select this paired load.
+ ///
+ /// This information is *not* used to generate actual paired loads, but it is
+ /// used to generate a sequence of loads that is easier to combine into a
+ /// paired load.
+ /// For instance, something like this:
+ /// a = load i64* addr
+ /// b = trunc i64 a to i32
+ /// c = lshr i64 a, 32
+ /// d = trunc i64 c to i32
+ /// will be optimized into:
+ /// b = load i32* addr1
+ /// d = load i32* addr2
+ /// Where addr1 = addr2 +/- sizeof(i32).
+ ///
+ /// In other words, unless the target performs a post-isel load combining,
+ /// this information should not be provided because it will generate more
+ /// loads.
+ virtual bool hasPairedLoad(Type * /*LoadedType*/,
+ unsigned & /*RequiredAlignment*/) const {
+ return false;
+ }
+
+ virtual bool hasPairedLoad(EVT /*LoadedType*/,
+ unsigned & /*RequiredAlignment*/) const {
+ return false;
+ }
+
+ /// \brief Get the maximum supported factor for interleaved memory accesses.
+ /// Default to be the minimum interleave factor: 2.
+ virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
+
+ /// \brief Lower an interleaved load to target specific intrinsics. Return
+ /// true on success.
+ ///
+ /// \p LI is the vector load instruction.
+ /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
+ /// \p Indices is the corresponding indices for each shufflevector.
+ /// \p Factor is the interleave factor.
+ virtual bool lowerInterleavedLoad(LoadInst *LI,
+ ArrayRef<ShuffleVectorInst *> Shuffles,
+ ArrayRef<unsigned> Indices,
+ unsigned Factor) const {
+ return false;
+ }
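+
+ // Illustrative IR for Factor == 2: this hook would be called with
+ // Shuffles = {%even, %odd} and Indices = {0, 1} for
+ //   %wide = load <8 x i32>* %ptr
+ //   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
+ //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ //   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
+ //                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>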
+
+ /// \brief Lower an interleaved store to target specific intrinsics. Return
+ /// true on success.
+ ///
+ /// \p SI is the vector store instruction.
+ /// \p SVI is the shufflevector to RE-interleave the stored vector.
+ /// \p Factor is the interleave factor.
+ virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
+ unsigned Factor) const {
+ return false;
+ }
+
+ /// Return true if zero-extending the specific node Val to type VT2 is free
+ /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
+ /// because it's folded such as X86 zero-extending loads).
+ virtual bool isZExtFree(SDValue Val, EVT VT2) const {
+ return isZExtFree(Val.getValueType(), VT2);
+ }
+
+ /// Return true if an fpext operation is free (for instance, because
+ /// single-precision floating-point numbers are implicitly extended to
+ /// double-precision).
+ virtual bool isFPExtFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return false;
+ }
+
+ /// Return true if folding a vector load into ExtVal (a sign, zero, or any
+ /// extend node) is profitable.
+ virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
+
+ /// Return true if an fneg operation is free to the point where it is never
+ /// worthwhile to replace it with a bitwise operation.
+ virtual bool isFNegFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return false;
+ }
+
+ /// Return true if an fabs operation is free to the point where it is never
+ /// worthwhile to replace it with a bitwise operation.
+ virtual bool isFAbsFree(EVT VT) const {
+ assert(VT.isFloatingPoint());
+ return false;
+ }
+
+ /// Return true if an FMA operation is faster than a pair of fmul and fadd
+ /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
+ /// returns true, otherwise fmuladd is expanded to fmul + fadd.
+ ///
+ /// NOTE: This may be called before legalization on types for which FMAs are
+ /// not legal, but should return true if those types will eventually legalize
+ /// to types that support FMAs. After legalization, it will only be called on
+ /// types that support FMAs (via Legal or Custom actions)
+ virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
+ return false;
+ }
+
+ /// Return true if it's profitable to narrow operations of type VT1 to
+ /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
+ /// i32 to i16.
+ virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
+ return false;
+ }
+
+ /// \brief Return true if it is beneficial to convert a load of a constant to
+ /// just the constant itself.
+ /// On some targets it might be more efficient to use a combination of
+ /// arithmetic instructions to materialize the constant instead of loading it
+ /// from a constant pool.
+ virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ return false;
+ }
+
+ /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
+ /// with this index. This is needed because EXTRACT_SUBVECTOR usually
+ /// has custom lowering that depends on the index of the first element,
+ /// and only the target knows which lowering is cheap.
+ virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
+ return false;
+ }
+
+ // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
+ // even if the vector itself has multiple uses.
+ virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Runtime Library hooks
+ //
+
+ /// Rename the default libcall routine name for the specified libcall.
+ void setLibcallName(RTLIB::Libcall Call, const char *Name) {
+ LibcallRoutineNames[Call] = Name;
+ }
+
+ /// Get the libcall routine name for the specified libcall.
+ const char *getLibcallName(RTLIB::Libcall Call) const {
+ return LibcallRoutineNames[Call];
+ }
+
+ /// Override the default CondCode to be used to test the result of the
+ /// comparison libcall against zero.
+ void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
+ CmpLibcallCCs[Call] = CC;
+ }
+
+ /// Get the CondCode that's to be used to test the result of the comparison
+ /// libcall against zero.
+ ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
+ return CmpLibcallCCs[Call];
+ }
+
+ /// Set the CallingConv that should be used for the specified libcall.
+ void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
+ LibcallCallingConvs[Call] = CC;
+ }
+
+ /// Get the CallingConv that should be used for the specified libcall.
+ CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
+ return LibcallCallingConvs[Call];
+ }
+
+private:
+ const TargetMachine &TM;
+
+ /// Tells the code generator not to expand operations into sequences that use
+ /// the select operations if possible.
+ bool SelectIsExpensive;
+
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// condition registers that can be used to store the results of comparisons
+ /// for use by selects and conditional branches. With multiple condition
+ /// registers, the code generator will not aggressively sink comparisons into
+ /// the blocks of their users.
+ bool HasMultipleConditionRegisters;
+
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ bool HasExtractBitsInsn;
+
+ // Don't expand fsqrt with an approximation based on the inverse sqrt.
+ bool FsqrtIsCheap;
+
+ /// Tells the code generator to bypass slow divide or remainder
+ /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
+ /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
+ /// div/rem when the operands are positive and less than 256.
+ DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
+
+ /// Tells the code generator that it shouldn't generate extra flow control
+ /// instructions and should attempt to combine flow control instructions via
+ /// predication.
+ bool JumpIsExpensive;
+
+ /// Whether the target supports or cares about preserving floating point
+ /// exception behavior.
+ bool HasFloatingPointExceptions;
+
+ /// This target prefers to use _setjmp to implement llvm.setjmp.
+ ///
+ /// Defaults to false.
+ bool UseUnderscoreSetJmp;
+
+ /// This target prefers to use _longjmp to implement llvm.longjmp.
+ ///
+ /// Defaults to false.
+ bool UseUnderscoreLongJmp;
+
+ /// Minimum number of case blocks required before jump tables are used.
+ int MinimumJumpTableEntries;
+
+ /// Information about the contents of the high-bits in boolean values held in
+ /// a type wider than i1. See getBooleanContents.
+ BooleanContent BooleanContents;
+
+ /// Information about the contents of the high-bits in boolean values held in
+ /// a type wider than i1. See getBooleanContents.
+ BooleanContent BooleanFloatContents;
+
+ /// Information about the contents of the high-bits in boolean vector values
+ /// when the element type is wider than i1. See getBooleanContents.
+ BooleanContent BooleanVectorContents;
+
+ /// The target scheduling preference: shortest possible total cycles or lowest
+ /// register usage.
+ Sched::Preference SchedPreferenceInfo;
+
+ /// The size, in bytes, of the target's jmp_buf buffers
+ unsigned JumpBufSize;
+
+ /// The alignment, in bytes, of the target's jmp_buf buffers
+ unsigned JumpBufAlignment;
+
+ /// The minimum alignment that any argument on the stack needs to have.
+ unsigned MinStackArgumentAlignment;
+
+ /// The minimum function alignment (used when optimizing for size, and to
+ /// prevent explicitly provided alignment from leading to incorrect code).
+ unsigned MinFunctionAlignment;
+
+ /// The preferred function alignment (used when alignment unspecified and
+ /// optimizing for speed).
+ unsigned PrefFunctionAlignment;
+
+ /// The preferred loop alignment.
+ unsigned PrefLoopAlignment;
+
+ /// Whether the DAG builder should automatically insert fences and reduce
+ /// ordering for atomics. (This will be set for most architectures with
+ /// weak memory ordering.)
+ bool InsertFencesForAtomic;
+
+ /// If set to a physical register, this specifies the register that
+ /// llvm.stacksave/llvm.stackrestore should save and restore.
+ unsigned StackPointerRegisterToSaveRestore;
+
+ /// This indicates the default register class to use for each ValueType the
+ /// target supports natively.
+ const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
+ unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
+ MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
+
+ /// This indicates the "representative" register class to use for each
+ /// ValueType the target supports natively. This information is used by the
+ /// scheduler to track register pressure. By default, the representative
+ /// register class is the largest legal super-reg register class of the
+ /// register class of the specified type. e.g. On x86, i8, i16, and i32's
+ /// representative class would be GR32.
+ const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
+
+ /// This indicates the "cost" of the "representative" register class for each
+ /// ValueType. The cost is used by the scheduler to approximate register
+ /// pressure.
+ uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
+
+ /// For any value types we are promoting or expanding, this contains the value
+ /// type that we are changing to. For Expanded types, this contains one step
+ /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
+ /// (e.g. i64 -> i16). For types natively supported by the system, this holds
+ /// the same type (e.g. i32 -> i32).
+ MVT TransformToType[MVT::LAST_VALUETYPE];
+
+ /// For each operation and each value type, keep a LegalizeAction that
+ /// indicates how instruction selection should deal with the operation. Most
+ /// operations are Legal (aka, supported natively by the target), but
+ /// operations that are not should be described. Note that operations on
+ /// non-legal value types are not described here.
+ LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
+
+ /// For each load extension type and each value type, keep a LegalizeAction
+ /// that indicates how instruction selection should deal with a load of a
+ /// specific value type and extension type.
+ LegalizeAction LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
+ [ISD::LAST_LOADEXT_TYPE];
+
+ /// For each value type pair keep a LegalizeAction that indicates whether a
+ /// truncating store of a specific value type and truncating type is legal.
+ LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
+
+ /// For each indexed mode and each value type, keep a pair of LegalizeAction
+ /// that indicates how instruction selection should deal with the load /
+ /// store.
+ ///
+ /// The first dimension is the value_type for the reference. The second
+ /// dimension represents the various modes for load store.
+ uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
+
+ /// For each condition code (ISD::CondCode) keep a LegalizeAction that
+ /// indicates how instruction selection should deal with the condition code.
+ ///
+ /// Because each CC action takes up 4 bits, we need to have the array size be
+ /// large enough to fit all of the value types. This can be done by rounding
+ /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
+ uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
+
+protected:
+ ValueTypeActionImpl ValueTypeActions;
+
+private:
+ LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
+
+private:
+ std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
+
+ /// Targets can specify ISD nodes that they would like PerformDAGCombine
+ /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
+ /// array.
+ unsigned char
+ TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
+
+ /// For operations that must be promoted to a specific type, this holds the
+ /// destination type. This map should be sparse, so don't hold it as an
+ /// array.
+ ///
+ /// Targets add entries to this map with AddPromotedToType(..), clients access
+ /// this with getTypeToPromoteTo(..).
+ std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
+ PromoteToType;
+
+ /// Stores the name of each libcall.
+ const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
+
+ /// The ISD::CondCode that should be used to test the result of each
+ /// comparison libcall against zero.
+ ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
+
+ /// Stores the CallingConv that should be used for each libcall.
+ CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
+
+protected:
+ /// Return true if the extension represented by \p I is free.
+ /// \pre \p I is a sign, zero, or fp extension and
+ /// is[Z|FP]ExtFree of the related types is not true.
+ virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
+
+ /// Depth to which GatherAllAliases should continue looking for chain
+ /// dependencies when trying to find a more preferable chain. As an
+ /// approximation, this should be more than the number of consecutive stores
+ /// expected to be merged.
+ unsigned GatherAllAliasesMaxDepth;
+
+ /// \brief Specify maximum number of store instructions per memset call.
+ ///
+ /// When lowering \@llvm.memset this field specifies the maximum number of
+ /// store operations that may be substituted for the call to memset. Targets
+ /// must set this value based on the cost threshold for that target. Targets
+ /// should assume that the memset will be done using as many of the largest
+ /// store operations first, followed by smaller ones, if necessary, per
+ /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
+ /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
+ /// store. This only applies to setting a constant array of a constant size.
+ unsigned MaxStoresPerMemset;
+
+ /// Maximum number of store operations that may be substituted for the call
+ /// to memset, used for functions with OptSize attribute.
+ unsigned MaxStoresPerMemsetOptSize;
+
+ /// \brief Specify maximum number of store instructions per memcpy call.
+ ///
+ /// When lowering \@llvm.memcpy this field specifies the maximum number of
+ /// store operations that may be substituted for a call to memcpy. Targets
+ /// must set this value based on the cost threshold for that target. Targets
+ /// should assume that the memcpy will be done using as many of the largest
+ /// store operations first, followed by smaller ones, if necessary, per
+ /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
+ /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
+ /// and one 1-byte store. This only applies to copying a constant array of
+ /// constant size.
+ unsigned MaxStoresPerMemcpy;
+
+ /// Maximum number of store operations that may be substituted for a call to
+ /// memcpy, used for functions with OptSize attribute.
+ unsigned MaxStoresPerMemcpyOptSize;
+
+ /// \brief Specify maximum number of store instructions per memmove call.
+ ///
+ /// When lowering \@llvm.memmove this field specifies the maximum number of
+ /// store instructions that may be substituted for a call to memmove. Targets
+ /// must set this value based on the cost threshold for that target. Targets
+ /// should assume that the memmove will be done using as many of the largest
+ /// store operations first, followed by smaller ones, if necessary, per
+ /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
+ /// with 8-bit alignment would result in nine 1-byte stores. This only
+ /// applies to copying a constant array of constant size.
+ unsigned MaxStoresPerMemmove;
+
+ /// Maximum number of store instructions that may be substituted for a call to
+ /// memmove, used for functions with OptSize attribute.
+ unsigned MaxStoresPerMemmoveOptSize;
+
+ /// Tells the code generator that select is more expensive than a branch if
+ /// the branch is usually predicted right.
+ bool PredictableSelectIsExpensive;
+
+ /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
+ /// a mask of a single bit, a compare, and a branch into a single instruction.
+ bool MaskAndBranchFoldingIsLegal;
+
+ /// \see enableExtLdPromotion.
+ bool EnableExtLdPromotion;
+
+protected:
+ /// Return true if the value types that can be represented by the specified
+ /// register class are all legal.
+ bool isLegalRC(const TargetRegisterClass *RC) const;
+
+ /// Replace/modify any TargetFrameIndex operands with a target-dependent
+ /// sequence of memory operands that is recognized by PrologEpilogInserter.
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+};
+
+/// This class defines information used to lower LLVM code to legal SelectionDAG
+/// operators that the target instruction selector can accept natively.
+///
+/// This class also defines callbacks that targets must implement to lower
+/// target-specific constructs to SelectionDAG operators.
+class TargetLowering : public TargetLoweringBase {
+ TargetLowering(const TargetLowering&) = delete;
+ void operator=(const TargetLowering&) = delete;
+
+public:
+ /// NOTE: The TargetMachine owns TLOF.
+ explicit TargetLowering(const TargetMachine &TM);
+
+ /// Returns true by value and, by reference, the base pointer, the offset
+ /// pointer, and the addressing mode, if the node's address can be legally
+ /// represented as a pre-indexed load / store address.
+ virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
+ SDValue &/*Offset*/,
+ ISD::MemIndexedMode &/*AM*/,
+ SelectionDAG &/*DAG*/) const {
+ return false;
+ }
+
+ /// Returns true by value and, by reference, the base pointer, the offset
+ /// pointer, and the addressing mode, if this node can be combined with a
+ /// load / store to form a post-indexed load / store.
+ virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
+ SDValue &/*Base*/,
+ SDValue &/*Offset*/,
+ ISD::MemIndexedMode &/*AM*/,
+ SelectionDAG &/*DAG*/) const {
+ return false;
+ }
+
+ /// Return the entry encoding for a jump table in the current function. The
+ /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
+ virtual unsigned getJumpTableEncoding() const;
+
+ virtual const MCExpr *
+ LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
+ const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
+ MCContext &/*Ctx*/) const {
+ llvm_unreachable("Need to implement this hook if target has custom JTIs");
+ }
+
+ /// Returns relocation base for the given PIC jumptable.
+ virtual SDValue getPICJumpTableRelocBase(SDValue Table,
+ SelectionDAG &DAG) const;
+
+ /// This returns the relocation base for the given PIC jumptable, the same as
+ /// getPICJumpTableRelocBase, but as an MCExpr.
+ virtual const MCExpr *
+ getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
+ unsigned JTI, MCContext &Ctx) const;
+
+ /// Return true if folding a constant offset with the given GlobalAddress is
+ /// legal. It is frequently not legal in PIC relocation models.
+ virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
+
+ bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
+ SDValue &Chain) const;
+
+ void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
+ SDValue &NewLHS, SDValue &NewRHS,
+ ISD::CondCode &CCCode, SDLoc DL) const;
+
+ /// Returns a pair of (return value, chain).
+ /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
+ std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
+ EVT RetVT, ArrayRef<SDValue> Ops,
+ bool isSigned, SDLoc dl,
+ bool doesNotReturn = false,
+ bool isReturnValueUsed = true) const;
+
+ //===--------------------------------------------------------------------===//
+ // TargetLowering Optimization Methods
+ //
+
+ /// A convenience struct that encapsulates a DAG and two SDValues for
+ /// returning information from TargetLowering to its clients that want to
+ /// combine.
+ struct TargetLoweringOpt {
+ SelectionDAG &DAG;
+ bool LegalTys;
+ bool LegalOps;
+ SDValue Old;
+ SDValue New;
+
+ explicit TargetLoweringOpt(SelectionDAG &InDAG,
+ bool LT, bool LO) :
+ DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
+
+ bool LegalTypes() const { return LegalTys; }
+ bool LegalOperations() const { return LegalOps; }
+
+ bool CombineTo(SDValue O, SDValue N) {
+ Old = O;
+ New = N;
+ return true;
+ }
+
+ /// Check to see if the specified operand of the specified instruction is a
+ /// constant integer. If so, check to see if there are any bits set in the
+ /// constant that are not demanded. If so, shrink the constant and return
+ /// true.
+ bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
+
+ /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
+ /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
+ /// generalized for targets with other types of implicit widening casts.
+ bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
+ SDLoc dl);
+ };
+
+ /// Look at Op. At this point, we know that only the DemandedMask bits of the
+ /// result of Op are ever used downstream. If we can use this information to
+ /// simplify Op, create a new simplified DAG node and return true, returning
+ /// the original and new nodes in Old and New. Otherwise, analyze the
+ /// expression and return a mask of KnownOne and KnownZero bits for the
+ /// expression (used to simplify the caller). The KnownZero/One bits may only
+ /// be accurate for those bits in the DemandedMask.
+ bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
+ APInt &KnownZero, APInt &KnownOne,
+ TargetLoweringOpt &TLO, unsigned Depth = 0) const;
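+
+ // Small worked example (illustrative): if Op = (and X, 0xFF) and
+ // DemandedMask = 0x0F, every demanded bit passes through the mask, so Op
+ // can be simplified to X, recorded via TLO.CombineTo(Op, X).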
+
+ /// Determine which of the bits specified in Mask are known to be either zero
+ /// or one and return them in the KnownZero/KnownOne bitsets.
+ virtual void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const;
+
+ /// This method can be implemented by targets that want to expose additional
+ /// information about sign bits to the DAG Combiner.
+ virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const;
+
+ struct DAGCombinerInfo {
+ void *DC; // The DAG Combiner object.
+ CombineLevel Level;
+ bool CalledByLegalizer;
+ public:
+ SelectionDAG &DAG;
+
+ DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
+ : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
+
+ bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
+ bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
+ bool isAfterLegalizeVectorOps() const {
+ return Level == AfterLegalizeDAG;
+ }
+ CombineLevel getDAGCombineLevel() { return Level; }
+ bool isCalledByLegalizer() const { return CalledByLegalizer; }
+
+ void AddToWorklist(SDNode *N);
+ void RemoveFromWorklist(SDNode *N);
+ SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
+ SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
+ SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
+
+ void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
+ };
+
+ /// Return true if N is a constant or constant vector equal to the true
+ /// value from getBooleanContents().
+ bool isConstTrueVal(const SDNode *N) const;
+
+ /// Return true if N is a constant or constant vector equal to the false
+ /// value from getBooleanContents().
+ bool isConstFalseVal(const SDNode *N) const;
+
+ /// Try to simplify a setcc built with the specified operands and cc. If it is
+ /// unable to simplify it, return a null SDValue.
+ SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
+ ISD::CondCode Cond, bool foldBooleans,
+ DAGCombinerInfo &DCI, SDLoc dl) const;
+
+ /// Returns true (and the GlobalValue and the offset) if the node is a
+ /// GlobalAddress + offset.
+ virtual bool
+ isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+
+ /// This method will be invoked for all target nodes and for any
+ /// target-independent nodes that the target has registered with invoke it
+ /// for.
+ ///
+ /// The semantics are as follows:
+ /// Return Value:
+ /// SDValue.Val == 0 - No change was made
+ /// SDValue.Val == N - N was replaced, is dead, and is already handled.
+ /// otherwise - N should be replaced by the returned Operand.
+ ///
+ /// In addition, methods provided by DAGCombinerInfo may be used to perform
+ /// more complex transformations.
+ ///
+ virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+
+ /// Return true if it is profitable to move a following shift through this
+ /// node, adjusting any immediate operands as necessary to preserve
+ /// semantics. This transformation may not be desirable if it disrupts a
+ /// particularly auspicious target-specific tree (e.g. bitfield extraction
+ /// in AArch64). By default, it returns true.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
+ return true;
+ }
+
+ /// Return true if the target has native support for the specified value type
+ /// and it is 'desirable' to use the type for the given node type. e.g. On x86
+ /// i16 is legal, but undesirable since i16 instruction encodings are longer
+ /// and some i16 instructions are slow.
+ virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
+ // By default, assume all legal types are desirable.
+ return isTypeLegal(VT);
+ }
+
+ /// Return true if it is profitable for the dag combiner to transform a
+ /// floating point op of the specified opcode to an equivalent op of an
+ /// integer type. E.g. f32 load -> i32 load can be profitable on ARM.
+ virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
+ EVT /*VT*/) const {
+ return false;
+ }
+
+ /// This method queries the target whether it is beneficial for the dag
+ /// combiner to promote the specified node. If true, it should return the
+ /// desired promotion type by reference.
+ virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
+ return false;
+ }
+
+ /// Return true if the target supports handling a subset of CSRs for the
+ /// given machine function explicitly via copies.
+ virtual bool supportSplitCSR(MachineFunction *MF) const {
+ return false;
+ }
+
+ /// Perform necessary initialization to handle a subset of CSRs explicitly
+ /// via copies. This function is called at the beginning of instruction
+ /// selection.
+ virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
+ llvm_unreachable("Not Implemented");
+ }
+
+ /// Insert explicit copies in entry and exit blocks. We copy a subset of
+ /// CSRs to virtual registers in the entry block, and copy them back to
+ /// physical registers in the exit blocks. This function is called at the end
+ /// of instruction selection.
+ virtual void insertCopiesSplitCSR(
+ MachineBasicBlock *Entry,
+ const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
+ llvm_unreachable("Not Implemented");
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Lowering methods - These methods must be implemented by targets so that
+ // the SelectionDAGBuilder code knows how to lower these.
+ //
+
+ /// This hook must be implemented to lower the incoming (formal) arguments,
+ /// described by the Ins array, into the specified DAG. The implementation
+ /// should fill in the InVals array with legal-type argument values, and
+ /// return the resulting token chain value.
+ ///
+ virtual SDValue
+ LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+ bool /*isVarArg*/,
+ const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
+ SDLoc /*dl*/, SelectionDAG &/*DAG*/,
+ SmallVectorImpl<SDValue> &/*InVals*/) const {
+ llvm_unreachable("Not Implemented");
+ }
+
+ struct ArgListEntry {
+ SDValue Node;
+ Type* Ty;
+ bool isSExt : 1;
+ bool isZExt : 1;
+ bool isInReg : 1;
+ bool isSRet : 1;
+ bool isNest : 1;
+ bool isByVal : 1;
+ bool isInAlloca : 1;
+ bool isReturned : 1;
+ uint16_t Alignment;
+
+ ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
+ isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
+ isReturned(false), Alignment(0) { }
+
+ void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
+ };
+ typedef std::vector<ArgListEntry> ArgListTy;
+
+ /// This structure contains all information that is necessary for lowering
+ /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
+ /// needs to lower a call, and targets will see this struct in their LowerCall
+ /// implementation.
+ struct CallLoweringInfo {
+ SDValue Chain;
+ Type *RetTy;
+ bool RetSExt : 1;
+ bool RetZExt : 1;
+ bool IsVarArg : 1;
+ bool IsInReg : 1;
+ bool DoesNotReturn : 1;
+ bool IsReturnValueUsed : 1;
+
+ // IsTailCall should be modified by implementations of
+ // TargetLowering::LowerCall that perform tail call conversions.
+ bool IsTailCall;
+
+ unsigned NumFixedArgs;
+ CallingConv::ID CallConv;
+ SDValue Callee;
+ ArgListTy Args;
+ SelectionDAG &DAG;
+ SDLoc DL;
+ ImmutableCallSite *CS;
+ bool IsPatchPoint;
+ SmallVector<ISD::OutputArg, 32> Outs;
+ SmallVector<SDValue, 32> OutVals;
+ SmallVector<ISD::InputArg, 32> Ins;
+
+ CallLoweringInfo(SelectionDAG &DAG)
+ : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
+ IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
+ IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
+ DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
+
+ CallLoweringInfo &setDebugLoc(SDLoc dl) {
+ DL = dl;
+ return *this;
+ }
+
+ CallLoweringInfo &setChain(SDValue InChain) {
+ Chain = InChain;
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
+ SDValue Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = -1) {
+ RetTy = ResultType;
+ Callee = Target;
+ CallConv = CC;
+ NumFixedArgs =
+ (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
+ Args = std::move(ArgsList);
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
+ SDValue Target, ArgListTy &&ArgsList,
+ ImmutableCallSite &Call) {
+ RetTy = ResultType;
+
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
+ IsVarArg = FTy->isVarArg();
+ IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
+
+ Callee = Target;
+
+ CallConv = Call.getCallingConv();
+ NumFixedArgs = FTy->getNumParams();
+ Args = std::move(ArgsList);
+
+ CS = &Call;
+
+ return *this;
+ }
+
+ CallLoweringInfo &setInRegister(bool Value = true) {
+ IsInReg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setNoReturn(bool Value = true) {
+ DoesNotReturn = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setVarArg(bool Value = true) {
+ IsVarArg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setTailCall(bool Value = true) {
+ IsTailCall = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setDiscardResult(bool Value = true) {
+ IsReturnValueUsed = !Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setSExtResult(bool Value = true) {
+ RetSExt = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setZExtResult(bool Value = true) {
+ RetZExt = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setIsPatchPoint(bool Value = true) {
+ IsPatchPoint = Value;
+ return *this;
+ }
+
+ ArgListTy &getArgs() {
+ return Args;
+ }
+
+ };
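+
+ // Typical builder-style use when lowering a call (an illustrative sketch;
+ // Chain, RetTy, Callee, Args and dl are assumed to be in scope):
+ //   TargetLowering::CallLoweringInfo CLI(DAG);
+ //   CLI.setDebugLoc(dl)
+ //      .setChain(Chain)
+ //      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
+ //      .setDiscardResult(true);
+ //   std::pair<SDValue, SDValue> Call = LowerCallTo(CLI);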
+
+ /// This function lowers an abstract call to a function into an actual call.
+ /// This returns a pair of operands. The first element is the return value
+ /// for the function (if RetTy is not VoidTy). The second element is the
+ /// outgoing token chain. It calls LowerCall to do the actual lowering.
+ std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
+
+ /// This hook must be implemented to lower calls into the specified
+ /// DAG. The outgoing arguments to the call are described by the Outs array,
+ /// and the values to be returned by the call are described by the Ins
+ /// array. The implementation should fill in the InVals array with legal-type
+ /// return values from the call, and return the resulting token chain value.
+ virtual SDValue
+ LowerCall(CallLoweringInfo &/*CLI*/,
+ SmallVectorImpl<SDValue> &/*InVals*/) const {
+ llvm_unreachable("Not Implemented");
+ }
+
+ /// Target-specific cleanup for formal ByVal parameters.
+ virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
+
+ /// This hook should be implemented to check whether the return values
+ /// described by the Outs array can fit into the return registers. If false
+ /// is returned, an sret-demotion is performed.
+ virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
+ MachineFunction &/*MF*/, bool /*isVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
+ LLVMContext &/*Context*/) const
+ {
+ // Return true by default to get preexisting behavior.
+ return true;
+ }
+
+ /// This hook must be implemented to lower outgoing return values, described
+ /// by the Outs array, into the specified DAG. The implementation should
+ /// return the resulting token chain value.
+ virtual SDValue
+ LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
+ bool /*isVarArg*/,
+ const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
+ const SmallVectorImpl<SDValue> &/*OutVals*/,
+ SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
+ llvm_unreachable("Not Implemented");
+ }
+
+ /// Return true if the result of the specified node is used by a return node
+ /// only. It also computes and returns the input chain for the tail call.
+ ///
+ /// This is used to determine whether it is possible to codegen a libcall as
+ /// tail call at legalization time.
+ virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
+ return false;
+ }
+
+ /// Return true if the target may be able to emit the call instruction as a
+ /// tail call. This is used by optimization passes to determine if it's profitable
+ /// to duplicate return instructions to enable tailcall optimization.
+ virtual bool mayBeEmittedAsTailCall(CallInst *) const {
+ return false;
+ }
+
+ /// Return the builtin name for the __builtin___clear_cache intrinsic.
+ /// Default is to invoke the clear cache library call.
+ virtual const char * getClearCacheBuiltinName() const {
+ return "__clear_cache";
+ }
+
+ /// Return the register ID for the name passed in. This is used by the named
+ /// register global variables extension. There is no target-independent
+ /// behaviour, so the default action is to bail.
+ virtual unsigned getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const {
+ report_fatal_error("Named registers not implemented for this target");
+ }
+
+ /// Return the type that should be used to zero or sign extend a
+ /// zeroext/signext integer argument or return value. FIXME: Most C calling
+ /// conventions require the return type to be promoted, but this is not
+ /// always true, e.g. i1 on x86-64. It is also not necessary for non-C
+ /// calling conventions. The frontend should handle this and include all of
+ /// the necessary information.
+ virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
+ ISD::NodeType /*ExtendKind*/) const {
+ EVT MinVT = getRegisterType(Context, MVT::i32);
+ return VT.bitsLT(MinVT) ? MinVT : VT;
+ }
+
+ /// For some targets, an LLVM struct type must be broken down into multiple
+ /// simple types, but the calling convention specifies that the entire struct
+ /// must be passed in a block of consecutive registers.
+ virtual bool
+ functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
+ bool isVarArg) const {
+ return false;
+ }
+
+ /// Returns a 0 terminated array of registers that can be safely used as
+ /// scratch registers.
+ virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
+ return nullptr;
+ }
+
+ /// This callback is used to prepare for a volatile or atomic load.
+ /// It takes a chain node as input and returns the chain for the load itself.
+ ///
+ /// Having a callback like this is necessary for targets like SystemZ,
+ /// which allows a CPU to reuse the result of a previous load indefinitely,
+ /// even if a cache-coherent store is performed by another CPU. The default
+ /// implementation does nothing.
+ virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
+ SelectionDAG &DAG) const {
+ return Chain;
+ }
+
+ /// This callback is invoked by the type legalizer to legalize nodes with an
+ /// illegal operand type but legal result types. It replaces the
+ /// LowerOperation callback in the type Legalizer. The reason we cannot do
+ /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
+ /// use this callback.
+ ///
+ /// TODO: Consider merging with ReplaceNodeResults.
+ ///
+ /// The target places new result values for the node in Results (their number
+ /// and types must exactly match those of the original return values of
+ /// the node), or leaves Results empty, which indicates that the node is not
+ /// to be custom lowered after all.
+ /// The default implementation calls LowerOperation.
+ virtual void LowerOperationWrapper(SDNode *N,
+ SmallVectorImpl<SDValue> &Results,
+ SelectionDAG &DAG) const;
+
+ /// This callback is invoked for operations that are unsupported by the
+ /// target, which are registered to use 'custom' lowering, and whose defined
+ /// values are all legal. If the target has no operations that require custom
+ /// lowering, it need not implement this. The default implementation of this
+ /// aborts.
+ virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+
+ /// This callback is invoked when a node result type is illegal for the
+ /// target, and the operation was registered to use 'custom' lowering for that
+ /// result type. The target places new result values for the node in Results
+ /// (their number and types must exactly match those of the original return
+ /// values of the node), or leaves Results empty, which indicates that the
+ /// node is not to be custom lowered after all.
+ ///
+ /// If the target has no operations that require custom lowering, it need not
+ /// implement this. The default implementation aborts.
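+ ///
+ /// As a hedged sketch, a target with illegal i64 result types might custom
+ /// lower a single opcode like this (the XYZ target and its lowerCTPOP64
+ /// helper are hypothetical):
+ /// \code
+ ///   void XYZTargetLowering::ReplaceNodeResults(
+ ///       SDNode *N, SmallVectorImpl<SDValue> &Results,
+ ///       SelectionDAG &DAG) const {
+ ///     if (N->getOpcode() != ISD::CTPOP)
+ ///       return; // leave Results empty: not custom lowered after all
+ ///     // The new value's type must match the node's original result type.
+ ///     Results.push_back(lowerCTPOP64(SDValue(N, 0), DAG));
+ ///   }
+ /// \endcode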
+ virtual void ReplaceNodeResults(SDNode * /*N*/,
+ SmallVectorImpl<SDValue> &/*Results*/,
+ SelectionDAG &/*DAG*/) const {
+ llvm_unreachable("ReplaceNodeResults not implemented for this target!");
+ }
+
+ /// This method returns the name of a target specific DAG node.
+ virtual const char *getTargetNodeName(unsigned Opcode) const;
+
+ /// This method returns a target specific FastISel object, or null if the
+ /// target does not support "fast" ISel.
+ virtual FastISel *createFastISel(FunctionLoweringInfo &,
+ const TargetLibraryInfo *) const {
+ return nullptr;
+ }
+
+ bool verifyReturnAddressArgumentIsConstant(SDValue Op,
+ SelectionDAG &DAG) const;
+
+ //===--------------------------------------------------------------------===//
+ // Inline Asm Support hooks
+ //
+
+ /// This hook allows the target to expand an inline asm call to be explicit
+ /// llvm code if it wants to. This is useful for turning simple inline asms
+ /// into LLVM intrinsics, which gives the compiler more information about the
+ /// behavior of the code.
+ virtual bool ExpandInlineAsm(CallInst *) const {
+ return false;
+ }
+
+ enum ConstraintType {
+ C_Register, // Constraint represents specific register(s).
+ C_RegisterClass, // Constraint represents any register in a class.
+ C_Memory, // Memory constraint.
+ C_Other, // Something else.
+ C_Unknown // Unsupported constraint.
+ };
+
+ enum ConstraintWeight {
+ // Generic weights.
+ CW_Invalid = -1, // No match.
+ CW_Okay = 0, // Acceptable.
+ CW_Good = 1, // Good weight.
+ CW_Better = 2, // Better weight.
+ CW_Best = 3, // Best weight.
+
+ // Well-known weights.
+ CW_SpecificReg = CW_Okay, // Specific register operands.
+ CW_Register = CW_Good, // Register operands.
+ CW_Memory = CW_Better, // Memory operands.
+ CW_Constant = CW_Best, // Constant operand.
+ CW_Default = CW_Okay // Default or don't know type.
+ };
+
+ /// This contains information for each constraint that we are lowering.
+ struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
+ /// This contains the actual string for the code, like "m". TargetLowering
+ /// picks the 'best' code from ConstraintInfo::Codes that most closely
+ /// matches the operand.
+ std::string ConstraintCode;
+
+ /// Information about the constraint code, e.g. Register, RegisterClass,
+ /// Memory, Other, Unknown.
+ TargetLowering::ConstraintType ConstraintType;
+
+ /// If this is the result output operand or a clobber, this is null,
+ /// otherwise it is the incoming operand to the CallInst. This gets
+ /// modified as the asm is processed.
+ Value *CallOperandVal;
+
+ /// The ValueType for the operand value.
+ MVT ConstraintVT;
+
+ /// Return true if this is an input operand that is a matching constraint
+ /// like "4".
+ bool isMatchingInputConstraint() const;
+
+ /// If this is an input matching constraint, this method returns the output
+ /// operand it matches.
+ unsigned getMatchedOperand() const;
+
+ /// Copy constructor for copying from a ConstraintInfo.
+ AsmOperandInfo(InlineAsm::ConstraintInfo Info)
+ : InlineAsm::ConstraintInfo(std::move(Info)),
+ ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
+ ConstraintVT(MVT::Other) {}
+ };
+
+ typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
+
+ /// Split up the constraint string from the inline assembly value into the
+ /// specific constraints and their prefixes, and also tie in the associated
+ /// operand values. If this returns an empty vector, and if the constraint
+ /// string itself isn't empty, there was an error parsing.
+ virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
+ const TargetRegisterInfo *TRI,
+ ImmutableCallSite CS) const;
+
+ /// Examine constraint type and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getMultipleConstraintMatchWeight(
+ AsmOperandInfo &info, int maIndex) const;
+
+ /// Examine constraint string and operand type and determine a weight value.
+ /// The operand object must already have been set up with the operand type.
+ virtual ConstraintWeight getSingleConstraintMatchWeight(
+ AsmOperandInfo &info, const char *constraint) const;
+
+ /// Determines the constraint code and constraint type to use for the specific
+ /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
+ /// If the actual operand being passed in is available, it can be passed in as
+ /// Op, otherwise an empty SDValue can be passed.
+ virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
+ SDValue Op,
+ SelectionDAG *DAG = nullptr) const;
+
+ /// Given a constraint, return the type of constraint it is for this target.
+ virtual ConstraintType getConstraintType(StringRef Constraint) const;
+
+ /// Given a physical register constraint (e.g. {edx}), return the register
+ /// number and the register class for the register.
+ ///
+ /// Given a register class constraint, like 'r', if this corresponds directly
+ /// to an LLVM register class, return a register of 0 and the register class
+ /// pointer.
+ ///
+ /// This should only be used for C_Register constraints. On error, this
+ /// returns a register number of 0 and a null register class pointer.
+ virtual std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const;
+
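+ /// Map an inline asm memory constraint code to the target-independent
+ /// InlineAsm::Constraint_* encoding. A sketch of a target extension,
+ /// assuming a hypothetical "Zy" constraint that behaves like plain "m":
+ /// \code
+ ///   unsigned XYZTargetLowering::getInlineAsmMemConstraint(
+ ///       StringRef ConstraintCode) const {
+ ///     if (ConstraintCode == "Zy")
+ ///       return InlineAsm::Constraint_m; // treat as ordinary memory
+ ///     return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+ ///   }
+ /// \endcode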
+ virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
+ if (ConstraintCode == "i")
+ return InlineAsm::Constraint_i;
+ else if (ConstraintCode == "m")
+ return InlineAsm::Constraint_m;
+ return InlineAsm::Constraint_Unknown;
+ }
+
+ /// Try to replace an X constraint, which matches anything, with another that
+ /// has more specific requirements based on the type of the corresponding
+ /// operand. This returns null if there is no replacement to make.
+ virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+
+ /// Lower the specified operand into the Ops vector. If it is invalid, don't
+ /// add anything to Ops.
+ virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const;
+
+ //===--------------------------------------------------------------------===//
+ // Div utility functions
+ //
+ SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+ SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+
+ /// Targets may override this function to provide custom SDIV lowering for
+ /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
+ /// assumes SDIV is expensive and replaces it with a series of other integer
+ /// operations.
+ virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG,
+ std::vector<SDNode *> *Created) const;
+
+ /// Indicate whether this target prefers to combine FDIVs with the same
+ /// divisor. If the transform should never be done, return zero. If the
+ /// transform should be done, return the minimum number of divisor uses
+ /// that must exist.
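+ ///
+ /// For example, a target where FDIV is much slower than FMUL might require
+ /// at least two uses before combining (a sketch; the XYZ backend is
+ /// hypothetical):
+ /// \code
+ ///   unsigned XYZTargetLowering::combineRepeatedFPDivisors() const {
+ ///     return 2; // x/D and y/D become t = 1/D; x*t; y*t
+ ///   }
+ /// \endcode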
+ virtual unsigned combineRepeatedFPDivisors() const {
+ return 0;
+ }
+
+ /// Hooks for building estimates in place of slower divisions and square
+ /// roots.
+
+ /// Return a reciprocal square root estimate value for the input operand.
+ /// The RefinementSteps output is the number of Newton-Raphson refinement
+ /// iterations required to generate a sufficient (though not necessarily
+ /// IEEE-754 compliant) estimate for the value type.
+ /// The boolean UseOneConstNR output is used to select a Newton-Raphson
+ /// algorithm implementation that uses one constant or two constants.
+ /// A target may choose to implement its own refinement within this function.
+ /// If that's true, then return '0' as the number of RefinementSteps to avoid
+ /// any further refinement of the estimate.
+ /// An empty SDValue return means no estimate sequence can be created.
+ virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps,
+ bool &UseOneConstNR) const {
+ return SDValue();
+ }
+
+ /// Return a reciprocal estimate value for the input operand.
+ /// The RefinementSteps output is the number of Newton-Raphson refinement
+ /// iterations required to generate a sufficient (though not necessarily
+ /// IEEE-754 compliant) estimate for the value type.
+ /// A target may choose to implement its own refinement within this function.
+ /// If that's true, then return '0' as the number of RefinementSteps to avoid
+ /// any further refinement of the estimate.
+ /// An empty SDValue return means no estimate sequence can be created.
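+ ///
+ /// A hedged sketch of an override, assuming a hypothetical XYZISD::FRCP
+ /// estimate node that is accurate enough after one refinement step:
+ /// \code
+ ///   SDValue XYZTargetLowering::getRecipEstimate(
+ ///       SDValue Operand, DAGCombinerInfo &DCI,
+ ///       unsigned &RefinementSteps) const {
+ ///     if (Operand.getValueType() != MVT::f32)
+ ///       return SDValue(); // no estimate sequence for other types
+ ///     RefinementSteps = 1; // one Newton-Raphson iteration
+ ///     return DCI.DAG.getNode(XYZISD::FRCP, SDLoc(Operand), MVT::f32,
+ ///                            Operand);
+ ///   }
+ /// \endcode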
+ virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
+ unsigned &RefinementSteps) const {
+ return SDValue();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Legalization utility functions
+ //
+
+ /// Expand a MUL into two nodes. One that computes the high bits of
+ /// the result and one that computes the low bits.
+ /// \param HiLoVT The value type to use for the Lo and Hi nodes.
+ /// \param LL Low bits of the LHS of the MUL. You can use this parameter
+ /// if you want to control how low bits are extracted from the LHS.
+ /// \param LH High bits of the LHS of the MUL. See LL for meaning.
+ /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
+ /// \param RH High bits of the RHS of the MUL. See LL for meaning.
+ /// \returns true if the node has been expanded, false if it has not.
+ bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+ SelectionDAG &DAG, SDValue LL = SDValue(),
+ SDValue LH = SDValue(), SDValue RL = SDValue(),
+ SDValue RH = SDValue()) const;
+
+ /// Expand a float (f32) to SINT (i64) conversion.
+ /// \param N Node to expand.
+ /// \param Result Output after conversion.
+ /// \returns true if the expansion was successful, false otherwise.
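+ ///
+ /// A sketch of typical use from a target's LowerOperation, assuming the
+ /// conversion was registered as Custom:
+ /// \code
+ ///   SDValue Res;
+ ///   if (expandFP_TO_SINT(Op.getNode(), Res, DAG))
+ ///     return Res;     // expansion succeeded
+ ///   return SDValue(); // let the common code handle it
+ /// \endcode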
+ bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
+
+ //===--------------------------------------------------------------------===//
+ // Instruction Emitting Hooks
+ //
+
+ /// This method should be implemented by targets that mark instructions with
+ /// the 'usesCustomInserter' flag. These instructions are special in various
+ /// ways, which require special support to insert. The specified MachineInstr
+ /// is created but not inserted into any basic blocks, and this method is
+ /// called to expand it into a sequence of instructions, potentially also
+ /// creating new basic blocks and control flow.
+ /// As long as the returned basic block is different (i.e., we created a new
+ /// one), the custom inserter is free to modify the rest of \p MBB.
+ virtual MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
+
+ /// This method should be implemented by targets that mark instructions with
+ /// the 'hasPostISelHook' flag. These instructions must be adjusted after
+ /// instruction selection by target hooks, e.g. to fill in optional defs for
+ /// ARM 's' setting instructions.
+ virtual void
+ AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
+
+ /// If this function returns true, SelectionDAGBuilder emits a
+ /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
+ virtual bool useLoadStackGuardNode() const {
+ return false;
+ }
+
+ /// Lower TLS global address SDNode for target independent emulated TLS model.
+ virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
+ SelectionDAG &DAG) const;
+};
+
+/// Given an LLVM IR type and return type attributes, compute the return value
+/// EVTs and flags, and optionally also the offsets, if the return value is
+/// being lowered to memory.
+void GetReturnInfo(Type *ReturnType, AttributeSet attr,
+ SmallVectorImpl<ISD::OutputArg> &Outs,
+ const TargetLowering &TLI, const DataLayout &DL);
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
new file mode 100644
index 0000000..cb52698
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetLoweringObjectFile.h
@@ -0,0 +1,194 @@
+//===-- llvm/Target/TargetLoweringObjectFile.h - Object Info ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements classes used to handle lowerings specific to common
+// object file formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
+#define LLVM_TARGET_TARGETLOWERINGOBJECTFILE_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/SectionKind.h"
+
+namespace llvm {
+ class MachineModuleInfo;
+ class Mangler;
+ class MCContext;
+ class MCExpr;
+ class MCSection;
+ class MCSymbol;
+ class MCSymbolRefExpr;
+ class MCStreamer;
+ class MCValue;
+ class ConstantExpr;
+ class GlobalValue;
+ class TargetMachine;
+
+class TargetLoweringObjectFile : public MCObjectFileInfo {
+ MCContext *Ctx;
+
+ TargetLoweringObjectFile(
+ const TargetLoweringObjectFile&) = delete;
+ void operator=(const TargetLoweringObjectFile&) = delete;
+
+protected:
+ bool SupportIndirectSymViaGOTPCRel;
+ bool SupportGOTPCRelWithOffset;
+
+public:
+ MCContext &getContext() const { return *Ctx; }
+
+ TargetLoweringObjectFile()
+ : MCObjectFileInfo(), Ctx(nullptr), SupportIndirectSymViaGOTPCRel(false),
+ SupportGOTPCRelWithOffset(true) {}
+
+ virtual ~TargetLoweringObjectFile();
+
+ /// This method must be called before any actual lowering is done. This
+ /// specifies the current context for codegen, and gives the lowering
+ /// implementations a chance to set up their default sections.
+ virtual void Initialize(MCContext &ctx, const TargetMachine &TM);
+
+ virtual void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &DL,
+ const MCSymbol *Sym) const;
+
+ /// Emit the module flags that the platform cares about.
+ virtual void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> Flags,
+ Mangler &Mang, const TargetMachine &TM) const {}
+
+ /// Given a constant with the SectionKind, return a section that it should be
+ /// placed in.
+ virtual MCSection *getSectionForConstant(const DataLayout &DL,
+ SectionKind Kind,
+ const Constant *C) const;
+
+ /// Classify the specified global variable into a set of target independent
+ /// categories embodied in SectionKind.
+ static SectionKind getKindForGlobal(const GlobalValue *GV,
+ const TargetMachine &TM);
+
+ /// This method computes the appropriate section to emit the specified global
+ /// variable or function definition. This should not be passed external (or
+ /// available externally) globals.
+ MCSection *SectionForGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang, const TargetMachine &TM) const;
+
+ /// This method computes the appropriate section to emit the specified global
+ /// variable or function definition. This should not be passed external (or
+ /// available externally) globals.
+ MCSection *SectionForGlobal(const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM) const {
+ return SectionForGlobal(GV, getKindForGlobal(GV, TM), Mang, TM);
+ }
+
+ virtual void getNameWithPrefix(SmallVectorImpl<char> &OutName,
+ const GlobalValue *GV, Mangler &Mang,
+ const TargetMachine &TM) const;
+
+ virtual MCSection *getSectionForJumpTable(const Function &F, Mangler &Mang,
+ const TargetMachine &TM) const;
+
+ virtual bool shouldPutJumpTableInFunctionSection(bool UsesLabelDifference,
+ const Function &F) const;
+
+ /// Targets should implement this method to assign a section to globals with
+ /// an explicit section specified. The implementation of this method can
+ /// assume that GV->hasSection() is true.
+ virtual MCSection *
+ getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
+ Mangler &Mang, const TargetMachine &TM) const = 0;
+
+ /// Allow the target to completely override section assignment of a global.
+ virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
+ SectionKind Kind,
+ Mangler &Mang) const {
+ return nullptr;
+ }
+
+ /// Return an MCExpr to use for a reference to the specified global variable
+ /// from exception handling information.
+ virtual const MCExpr *
+ getTTypeGlobalReference(const GlobalValue *GV, unsigned Encoding,
+ Mangler &Mang, const TargetMachine &TM,
+ MachineModuleInfo *MMI, MCStreamer &Streamer) const;
+
+ /// Return the MCSymbol for a private symbol with global value name as its
+ /// base, with the specified suffix.
+ MCSymbol *getSymbolWithGlobalValueBase(const GlobalValue *GV,
+ StringRef Suffix, Mangler &Mang,
+ const TargetMachine &TM) const;
+
+ /// The symbol that gets passed to .cfi_personality.
+ virtual MCSymbol *getCFIPersonalitySymbol(const GlobalValue *GV,
+ Mangler &Mang,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI) const;
+
+ const MCExpr *
+ getTTypeReference(const MCSymbolRefExpr *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const;
+
+ virtual MCSection *getStaticCtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const {
+ return StaticCtorSection;
+ }
+
+ virtual MCSection *getStaticDtorSection(unsigned Priority,
+ const MCSymbol *KeySym) const {
+ return StaticDtorSection;
+ }
+
+ /// \brief Create a symbol reference to describe the given TLS variable when
+ /// emitting the address in debug info.
+ virtual const MCExpr *getDebugThreadLocalSymbol(const MCSymbol *Sym) const;
+
+ virtual const MCExpr *
+ getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
+ const TargetMachine &TM) const {
+ return nullptr;
+ }
+
+ /// \brief Target supports replacing a data "PC"-relative access to a symbol
+ /// through another symbol, by accessing the latter via a GOT entry instead?
+ bool supportIndirectSymViaGOTPCRel() const {
+ return SupportIndirectSymViaGOTPCRel;
+ }
+
+ /// \brief Target GOT "PC"-relative relocation supports encoding an additional
+ /// binary expression with an offset?
+ bool supportGOTPCRelWithOffset() const {
+ return SupportGOTPCRelWithOffset;
+ }
+
+ /// \brief Get the target specific PC relative GOT entry relocation
+ virtual const MCExpr *getIndirectSymViaGOTPCRel(const MCSymbol *Sym,
+ const MCValue &MV,
+ int64_t Offset,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const {
+ return nullptr;
+ }
+
+ virtual void emitLinkerFlagsForGlobal(raw_ostream &OS, const GlobalValue *GV,
+ const Mangler &Mang) const {}
+
+protected:
+ virtual MCSection *SelectSectionForGlobal(const GlobalValue *GV,
+ SectionKind Kind, Mangler &Mang,
+ const TargetMachine &TM) const = 0;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetMachine.h b/contrib/llvm/include/llvm/Target/TargetMachine.h
new file mode 100644
index 0000000..74e91b5
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetMachine.h
@@ -0,0 +1,311 @@
+//===-- llvm/Target/TargetMachine.h - Target Information --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TargetMachine and LLVMTargetMachine classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETMACHINE_H
+#define LLVM_TARGET_TARGETMACHINE_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+
+class InstrItineraryData;
+class GlobalValue;
+class Mangler;
+class MachineFunctionInitializer;
+class MCAsmInfo;
+class MCCodeGenInfo;
+class MCContext;
+class MCInstrInfo;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class MCSymbol;
+class Target;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetFrameLowering;
+class TargetIRAnalysis;
+class TargetIntrinsicInfo;
+class TargetLowering;
+class TargetPassConfig;
+class TargetRegisterInfo;
+class TargetSelectionDAGInfo;
+class TargetSubtargetInfo;
+class TargetTransformInfo;
+class formatted_raw_ostream;
+class raw_ostream;
+class raw_pwrite_stream;
+class TargetLoweringObjectFile;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class PassManagerBase;
+}
+using legacy::PassManagerBase;
+
+//===----------------------------------------------------------------------===//
+///
+/// Primary interface to the complete machine description for the target
+/// machine. All target-specific information should be accessible through this
+/// interface.
+///
+class TargetMachine {
+ TargetMachine(const TargetMachine &) = delete;
+ void operator=(const TargetMachine &) = delete;
+protected: // Can only create subclasses.
+ TargetMachine(const Target &T, StringRef DataLayoutString,
+ const Triple &TargetTriple, StringRef CPU, StringRef FS,
+ const TargetOptions &Options);
+
+ /// The Target that this machine was created for.
+ const Target &TheTarget;
+
+ /// DataLayout for the target: keep ABI type size and alignment.
+ ///
+ /// The DataLayout is created based on the string representation provided
+ /// during construction. It is kept here only to avoid reparsing the string
+ /// but should not really be used during compilation, because it has an
+ /// internal cache that is context specific.
+ const DataLayout DL;
+
+ /// Triple string, CPU name, and target feature strings the TargetMachine
+ /// instance is created with.
+ Triple TargetTriple;
+ std::string TargetCPU;
+ std::string TargetFS;
+
+ /// Low level target information such as relocation model. Non-const to
+ /// allow resetting optimization level per-function.
+ MCCodeGenInfo *CodeGenInfo;
+
+ /// Contains target specific asm information.
+ const MCAsmInfo *AsmInfo;
+
+ const MCRegisterInfo *MRI;
+ const MCInstrInfo *MII;
+ const MCSubtargetInfo *STI;
+
+ unsigned RequireStructuredCFG : 1;
+ unsigned O0WantsFastISel : 1;
+
+ /// This API is here to support the C API, deprecated in the 3.7 release.
+ /// It should never be used outside of existing legacy clients.
+ const DataLayout &getDataLayout() const { return DL; }
+ friend struct C_API_PRIVATE_ACCESS;
+
+public:
+ mutable TargetOptions Options;
+
+ virtual ~TargetMachine();
+
+ const Target &getTarget() const { return TheTarget; }
+
+ const Triple &getTargetTriple() const { return TargetTriple; }
+ StringRef getTargetCPU() const { return TargetCPU; }
+ StringRef getTargetFeatureString() const { return TargetFS; }
+
+ /// Virtual method implemented by subclasses that returns a pointer to that
+ /// target's TargetSubtargetInfo-derived member variable.
+ virtual const TargetSubtargetInfo *getSubtargetImpl(const Function &) const {
+ return nullptr;
+ }
+ virtual TargetLoweringObjectFile *getObjFileLowering() const {
+ return nullptr;
+ }
+
+ /// This method returns a pointer to the specified type of
+ /// TargetSubtargetInfo. In debug builds, it verifies that the object being
+ /// returned is of the correct type.
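+ ///
+ /// For example (a sketch; XYZSubtarget is a hypothetical subclass):
+ /// \code
+ ///   const XYZSubtarget &ST = TM.getSubtarget<XYZSubtarget>(F);
+ /// \endcode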
+ template <typename STC> const STC &getSubtarget(const Function &F) const {
+ return *static_cast<const STC*>(getSubtargetImpl(F));
+ }
+
+ /// Create a DataLayout.
+ const DataLayout createDataLayout() const { return DL; }
+
+ /// Test if a DataLayout is compatible with the CodeGen for this target.
+ ///
+ /// The LLVM Module owns a DataLayout that is used for the target independent
+ /// optimizations and code generation. This hook provides a target specific
+ /// check on the validity of this DataLayout.
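+ ///
+ /// Frontends typically stamp the module with the target's layout right after
+ /// creating the TargetMachine, so this check then holds (a sketch):
+ /// \code
+ ///   M.setDataLayout(TM->createDataLayout());
+ ///   assert(TM->isCompatibleDataLayout(M.getDataLayout()));
+ /// \endcode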
+ bool isCompatibleDataLayout(const DataLayout &Candidate) const {
+ return DL == Candidate;
+ }
+
+ /// Get the pointer size for this target.
+ ///
+ /// This is the only time the DataLayout in the TargetMachine is used.
+ unsigned getPointerSize() const { return DL.getPointerSize(); }
+
+ /// \brief Reset the target options based on the function's attributes.
+ // FIXME: Remove TargetOptions that affect per-function code generation
+ // from TargetMachine.
+ void resetTargetOptions(const Function &F) const;
+
+ /// Return target specific asm information.
+ const MCAsmInfo *getMCAsmInfo() const { return AsmInfo; }
+
+ const MCRegisterInfo *getMCRegisterInfo() const { return MRI; }
+ const MCInstrInfo *getMCInstrInfo() const { return MII; }
+ const MCSubtargetInfo *getMCSubtargetInfo() const { return STI; }
+
+ /// If intrinsic information is available, return it. If not, return null.
+ virtual const TargetIntrinsicInfo *getIntrinsicInfo() const {
+ return nullptr;
+ }
+
+ bool requiresStructuredCFG() const { return RequireStructuredCFG; }
+ void setRequiresStructuredCFG(bool Value) { RequireStructuredCFG = Value; }
+
+ /// Returns the code generation relocation model. The choices are static,
+ /// PIC, dynamic-no-pic, and target default.
+ Reloc::Model getRelocationModel() const;
+
+ /// Returns the code model. The choices are small, kernel, medium, large, and
+ /// target default.
+ CodeModel::Model getCodeModel() const;
+
+ /// Returns the TLS model which should be used for the given global variable.
+ TLSModel::Model getTLSModel(const GlobalValue *GV) const;
+
+ /// Returns the optimization level: None, Less, Default, or Aggressive.
+ CodeGenOpt::Level getOptLevel() const;
+
+ /// \brief Overrides the optimization level.
+ void setOptLevel(CodeGenOpt::Level Level) const;
+
+ void setFastISel(bool Enable) { Options.EnableFastISel = Enable; }
+ bool getO0WantsFastISel() { return O0WantsFastISel; }
+ void setO0WantsFastISel(bool Enable) { O0WantsFastISel = Enable; }
+
+ bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }
+
+ /// Returns the default value of asm verbosity.
+ bool getAsmVerbosityDefault() const {
+ return Options.MCOptions.AsmVerbose;
+ }
+
+ bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }
+
+ /// Return true if data objects should be emitted into their own section,
+ /// corresponding to -fdata-sections.
+ bool getDataSections() const {
+ return Options.DataSections;
+ }
+
+ /// Return true if functions should be emitted into their own section,
+ /// corresponding to -ffunction-sections.
+ bool getFunctionSections() const {
+ return Options.FunctionSections;
+ }
+
+ /// \brief Get a \c TargetIRAnalysis appropriate for the target.
+ ///
+ /// This is used to construct the new pass manager's target IR analysis pass,
+ /// set up appropriately for this target machine. Even the old pass manager
+ /// uses this to answer queries about the IR.
+ virtual TargetIRAnalysis getTargetIRAnalysis();
+
+ /// These enums are meant to be passed into addPassesToEmitFile to indicate
+ /// what type of file to emit, and returned by it to indicate what type of
+ /// file could actually be made.
+ enum CodeGenFileType {
+ CGFT_AssemblyFile,
+ CGFT_ObjectFile,
+ CGFT_Null // Do not emit any output.
+ };
+
+ /// Add passes to the specified pass manager to get the specified file
+ /// emitted. Typically this will involve several steps of code generation.
+ /// This method should return true if emission of this file type is not
+ /// supported, or false on success.
+ virtual bool addPassesToEmitFile(
+ PassManagerBase &, raw_pwrite_stream &, CodeGenFileType,
+ bool /*DisableVerify*/ = true, AnalysisID /*StartBefore*/ = nullptr,
+ AnalysisID /*StartAfter*/ = nullptr, AnalysisID /*StopAfter*/ = nullptr,
+ MachineFunctionInitializer * /*MFInitializer*/ = nullptr) {
+ return true;
+ }
+
+ /// Add passes to the specified pass manager to get machine code emitted with
+ /// the MCJIT. This method returns true if machine code is not supported. It
+ /// fills the MCContext Ctx pointer which can be used to build a custom
+ /// MCStreamer.
+ virtual bool addPassesToEmitMC(PassManagerBase &, MCContext *&,
+ raw_pwrite_stream &,
+ bool /*DisableVerify*/ = true) {
+ return true;
+ }
+
+ /// True if subtarget inserts the final scheduling pass on its own.
+ ///
+ /// Branch relaxation, which must happen after block placement, can
+ /// on some targets (e.g. SystemZ) expose additional post-RA
+ /// scheduling opportunities.
+ virtual bool targetSchedulesPostRAScheduling() const { return false; }
+
+ void getNameWithPrefix(SmallVectorImpl<char> &Name, const GlobalValue *GV,
+ Mangler &Mang, bool MayAlwaysUsePrivate = false) const;
+ MCSymbol *getSymbol(const GlobalValue *GV, Mangler &Mang) const;
+};
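+
+// A minimal sketch of driving code generation through this interface; error
+// handling is elided, OS is assumed to be a caller-owned raw_pwrite_stream,
+// and legacy::PassManager comes from llvm/IR/LegacyPassManager.h:
+//
+//   legacy::PassManager PM;
+//   if (TM->addPassesToEmitFile(PM, OS, TargetMachine::CGFT_ObjectFile))
+//     report_fatal_error("target does not support object file emission");
+//   PM.run(M); // run codegen over Module M, writing to OS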
+
+/// This class describes a target machine that is implemented with the LLVM
+/// target-independent code generator.
+///
+class LLVMTargetMachine : public TargetMachine {
+protected: // Can only create subclasses.
+ LLVMTargetMachine(const Target &T, StringRef DataLayoutString,
+ const Triple &TargetTriple, StringRef CPU, StringRef FS,
+ TargetOptions Options, Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL);
+
+ void initAsmInfo();
+public:
+ /// \brief Get a TargetIRAnalysis implementation for the target.
+ ///
+ /// This analysis will produce a TTI result which uses the common code
+ /// generator to answer queries about the IR.
+ TargetIRAnalysis getTargetIRAnalysis() override;
+
+ /// Create a pass configuration object to be used by addPassToEmitX methods
+ /// for generating a pipeline of CodeGen passes.
+ virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
+
+ /// Add passes to the specified pass manager to get the specified file
+ /// emitted. Typically this will involve several steps of code generation.
+ bool addPassesToEmitFile(
+ PassManagerBase &PM, raw_pwrite_stream &Out, CodeGenFileType FileType,
+ bool DisableVerify = true, AnalysisID StartBefore = nullptr,
+ AnalysisID StartAfter = nullptr, AnalysisID StopAfter = nullptr,
+ MachineFunctionInitializer *MFInitializer = nullptr) override;
+
+ /// Add passes to the specified pass manager to get machine code emitted with
+ /// the MCJIT. This method returns true if machine code is not supported. It
+ /// fills the MCContext Ctx pointer which can be used to build a custom
+ /// MCStreamer.
+ bool addPassesToEmitMC(PassManagerBase &PM, MCContext *&Ctx,
+ raw_pwrite_stream &OS,
+ bool DisableVerify = true) override;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetOpcodes.h b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
new file mode 100644
index 0000000..db37bdb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetOpcodes.h
@@ -0,0 +1,139 @@
+//===-- llvm/Target/TargetOpcodes.h - Target Indep Opcodes ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target independent instruction opcodes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETOPCODES_H
+#define LLVM_TARGET_TARGETOPCODES_H
+
+namespace llvm {
+
+/// Invariant opcodes: All instruction sets have these as their low opcodes.
+///
+/// Every instruction defined here must also appear in Target.td and the order
+/// must be the same as in CodeGenTarget.cpp.
+///
+namespace TargetOpcode {
+enum {
+ PHI = 0,
+ INLINEASM = 1,
+ CFI_INSTRUCTION = 2,
+ EH_LABEL = 3,
+ GC_LABEL = 4,
+
+ /// KILL - This instruction is a noop that is used only to adjust the
+ /// liveness of registers. This can be useful when dealing with
+ /// sub-registers.
+ KILL = 5,
+
+ /// EXTRACT_SUBREG - This instruction takes two operands: a register
+ /// that has subregisters, and a subregister index. It returns the
+ /// extracted subregister value. This is commonly used to implement
+ /// truncation operations on target architectures which support it.
+ EXTRACT_SUBREG = 6,
+
+ /// INSERT_SUBREG - This instruction takes three operands: a register that
+ /// has subregisters, a register providing an insert value, and a
+ /// subregister index. It returns the value of the first register with the
+ /// value of the second register inserted. The first register is often
+ /// defined by an IMPLICIT_DEF, because it is commonly used to implement
+ /// anyext operations on target architectures which support it.
+ INSERT_SUBREG = 7,
+
+ /// IMPLICIT_DEF - This is the MachineInstr-level equivalent of undef.
+ IMPLICIT_DEF = 8,
+
+ /// SUBREG_TO_REG - This instruction is similar to INSERT_SUBREG except that
+ /// the first operand is an immediate integer constant. This constant is
+ /// often zero, because it is commonly used to assert that the instruction
+ /// defining the register implicitly clears the high bits.
+ SUBREG_TO_REG = 9,
+
+ /// COPY_TO_REGCLASS - This instruction is a placeholder for a plain
+ /// register-to-register copy into a specific register class. This is only
+ /// used between instruction selection and MachineInstr creation, before
+ /// virtual registers have been created for all the instructions, and it's
+ /// only needed in cases where the register classes implied by the
+ /// instructions are insufficient. It is emitted as a COPY MachineInstr.
+ COPY_TO_REGCLASS = 10,
+
+ /// DBG_VALUE - a mapping of the llvm.dbg.value intrinsic
+ DBG_VALUE = 11,
+
+ /// REG_SEQUENCE - This variadic instruction is used to form a register that
+ /// represents a consecutive sequence of sub-registers. It's used as a
+ /// register coalescing / allocation aid and must be eliminated before code
+ /// emission.
+ // In SDNode form, the first operand encodes the register class created by
+ // the REG_SEQUENCE, while each subsequent pair names a vreg + subreg index
+ // pair. Once it has been lowered to a MachineInstr, the regclass operand
+ // is no longer present.
+ /// e.g. v1027 = REG_SEQUENCE v1024, 3, v1025, 4, v1026, 5
+ /// After register coalescing, references of v1024 should be replaced with
+ /// v1027:3, v1025 with v1027:4, etc.
+ REG_SEQUENCE = 12,
+
+ /// COPY - Target-independent register copy. This instruction can also be
+ /// used to copy between subregisters of virtual registers.
+ COPY = 13,
+
+ /// BUNDLE - This instruction represents an instruction bundle. Instructions
+ /// which immediately follow a BUNDLE instruction which are marked with
+ /// 'InsideBundle' flag are inside the bundle.
+ BUNDLE = 14,
+
+ /// Lifetime markers.
+ LIFETIME_START = 15,
+ LIFETIME_END = 16,
+
+ /// A Stackmap instruction captures the location of live variables at its
+ /// position in the instruction stream. It is followed by a shadow of bytes
+ /// that must lie within the function and not contain another stackmap.
+ STACKMAP = 17,
+
+ /// Patchable call instruction - this instruction represents a call to a
+ /// constant address, followed by a series of NOPs. It is intended to
+ /// support optimizations for dynamic languages (such as JavaScript) that
+ /// rewrite calls to runtimes with more efficient code sequences.
+ /// This also implies a stack map.
+ PATCHPOINT = 18,
+
+ /// This pseudo-instruction loads the stack guard value. Targets which need
+ /// to prevent the stack guard value or address from being spilled to the
+ /// stack should override TargetLowering::emitLoadStackGuardNode and
+ /// additionally expand this pseudo after register allocation.
+ LOAD_STACK_GUARD = 19,
+
+ /// Call instruction with associated vm state for deoptimization and list
+ /// of live pointers for relocation by the garbage collector. It is
+ /// intended to support garbage collection with fully precise relocating
+ /// collectors and deoptimizations in either the callee or caller.
+ STATEPOINT = 20,
+
+ /// Instruction that records the offset of a local stack allocation passed to
+ /// llvm.localescape. It has two arguments: the symbol for the label and the
+ /// frame index of the local stack allocation.
+ LOCAL_ESCAPE = 21,
+
+ /// Loading instruction that may page fault, bundled with associated
+ /// information on how to handle such a page fault. It is intended to support
+ /// "zero cost" null checks in managed languages by allowing LLVM to fold
+ /// comparisons into existing memory operations.
+ FAULTING_LOAD_OP = 22,
+
+ /// GENERIC_OP_END - This must be the last enum value in this list.
+ /// The target-specific post-isel opcode values start here.
+ GENERIC_OP_END = FAULTING_LOAD_OP,
+};
+} // end namespace TargetOpcode
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetOptions.h b/contrib/llvm/include/llvm/Target/TargetOptions.h
new file mode 100644
index 0000000..d98d0fa
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetOptions.h
@@ -0,0 +1,292 @@
+//===-- llvm/Target/TargetOptions.h - Target Options ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines command line option flags that are shared across various
+// targets.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETOPTIONS_H
+#define LLVM_TARGET_TARGETOPTIONS_H
+
+#include "llvm/Target/TargetRecip.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include <string>
+
+namespace llvm {
+ class MachineFunction;
+ class Module;
+ class StringRef;
+
+ namespace FloatABI {
+ enum ABIType {
+ Default, // Target-specific (either soft or hard depending on triple, etc).
+ Soft, // Soft float.
+ Hard // Hard float.
+ };
+ }
+
+ namespace FPOpFusion {
+ enum FPOpFusionMode {
+ Fast, // Enable fusion of FP ops wherever it's profitable.
+ Standard, // Only allow fusion of 'blessed' ops (currently just fmuladd).
+ Strict // Never fuse FP-ops.
+ };
+ }
+
+ namespace JumpTable {
+ enum JumpTableType {
+ Single, // Use a single table for all indirect jumptable calls.
+ Arity, // Use one table per number of function parameters.
+ Simplified, // Use one table per function type, with types projected
+ // into 4 types: pointer to non-function, struct,
+ // primitive, and function pointer.
+ Full // Use one table per unique function type
+ };
+ }
+
+ namespace ThreadModel {
+ enum Model {
+ POSIX, // POSIX Threads
+ Single // Single Threaded Environment
+ };
+ }
+
+ enum class EABI {
+ Unknown,
+ Default, // Default means not specified
+ EABI4, // Target-specific (either 4, 5 or gnu depending on triple).
+ EABI5,
+ GNU
+ };
+
+ /// Identify a debugger for "tuning" the debug info.
+ ///
+ /// The "debugger tuning" concept allows us to present a more intuitive
+ /// interface that unpacks into different sets of defaults for the various
+ /// individual feature-flag settings, that suit the preferences of the
+ /// various debuggers. However, it's worth remembering that debuggers are
+ /// not the only consumers of debug info, and some variations in DWARF might
+ /// better be treated as target/platform issues. Fundamentally,
+ /// o if the feature is useful (or not) to a particular debugger, regardless
+ /// of the target, that's a tuning decision;
+ /// o if the feature is useful (or not) on a particular platform, regardless
+ /// of the debugger, that's a target decision.
+ /// It's not impossible to see both factors in some specific case.
+ ///
+ /// The "tuning" should be used to set defaults for individual feature flags
+ /// in DwarfDebug; if a given feature has a more specific command-line option,
+ /// that option should take precedence over the tuning.
+ enum class DebuggerKind {
+ Default, // No specific tuning requested.
+ GDB, // Tune debug info for gdb.
+ LLDB, // Tune debug info for lldb.
+ SCE // Tune debug info for SCE targets (e.g. PS4).
+ };
+
+ class TargetOptions {
+ public:
+ TargetOptions()
+ : PrintMachineCode(false), LessPreciseFPMADOption(false),
+ UnsafeFPMath(false), NoInfsFPMath(false), NoNaNsFPMath(false),
+ HonorSignDependentRoundingFPMathOption(false), NoZerosInBSS(false),
+ GuaranteedTailCallOpt(false), StackAlignmentOverride(0),
+ EnableFastISel(false), PositionIndependentExecutable(false),
+ UseInitArray(false), DisableIntegratedAS(false),
+ CompressDebugSections(false), FunctionSections(false),
+ DataSections(false), UniqueSectionNames(true), TrapUnreachable(false),
+ EmulatedTLS(false), FloatABIType(FloatABI::Default),
+ AllowFPOpFusion(FPOpFusion::Standard), Reciprocals(TargetRecip()),
+ JTType(JumpTable::Single), ThreadModel(ThreadModel::POSIX),
+ EABIVersion(EABI::Default), DebuggerTuning(DebuggerKind::Default) {}
+
+ /// PrintMachineCode - This flag is enabled when the -print-machineinstrs
+ /// option is specified on the command line, and should enable debugging
+ /// output from the code generator.
+ unsigned PrintMachineCode : 1;
+
+ /// DisableFramePointerElim - This returns true if frame pointer elimination
+ /// optimization should be disabled for the given machine function.
+ bool DisableFramePointerElim(const MachineFunction &MF) const;
+
+ /// LessPreciseFPMAD - This flag is enabled when the
+ /// -enable-fp-mad option is specified on the command line. When this flag is
+ /// off
+ /// (the default), the code generator is not allowed to generate mad
+ /// (multiply add) if the result is "less precise" than doing those
+ /// operations individually.
+ unsigned LessPreciseFPMADOption : 1;
+ bool LessPreciseFPMAD() const;
+
+ /// UnsafeFPMath - This flag is enabled when the
+ /// -enable-unsafe-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// produce results that are "less precise" than IEEE allows. This includes
+ /// use of X86 instructions like FSIN and FCOS instead of libcalls.
+ /// UnsafeFPMath implies LessPreciseFPMAD.
+ unsigned UnsafeFPMath : 1;
+
+ /// NoInfsFPMath - This flag is enabled when the
+ /// -enable-no-infs-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never +-Infs.
+ unsigned NoInfsFPMath : 1;
+
+ /// NoNaNsFPMath - This flag is enabled when the
+ /// -enable-no-nans-fp-math flag is specified on the command line. When
+ /// this flag is off (the default), the code generator is not allowed to
+ /// assume the FP arithmetic arguments and results are never NaNs.
+ unsigned NoNaNsFPMath : 1;
+
+ /// HonorSignDependentRoundingFPMath - This returns true when the
+ /// -enable-sign-dependent-rounding-fp-math is specified. If this returns
+ /// false (the default), the code generator is allowed to assume that the
+ /// rounding behavior is the default (round-to-zero for all floating point
+ /// to integer conversions, and round-to-nearest for all other arithmetic
+ /// truncations). If this is enabled (set to true), the code generator must
+ /// assume that the rounding mode may dynamically change.
+ unsigned HonorSignDependentRoundingFPMathOption : 1;
+ bool HonorSignDependentRoundingFPMath() const;
+
+ /// NoZerosInBSS - By default some codegens place zero-initialized data in
+ /// the .bss section. This flag disables such behaviour (necessary, e.g. for
+ /// crt*.o compiling).
+ unsigned NoZerosInBSS : 1;
+
+ /// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
+ /// specified on the commandline. When the flag is on, participating targets
+ /// will perform tail call optimization on all calls which use the fastcc
+ /// calling convention and which satisfy certain target-independent
+ /// criteria (being at the end of a function, having the same return type
+ /// as their parent function, etc.), using an alternate ABI if necessary.
+ unsigned GuaranteedTailCallOpt : 1;
+
+ /// StackAlignmentOverride - Override default stack alignment for target.
+ unsigned StackAlignmentOverride;
+
+ /// EnableFastISel - This flag enables fast-path instruction selection
+ /// which trades away generated code quality in favor of reducing
+ /// compile time.
+ unsigned EnableFastISel : 1;
+
+ /// PositionIndependentExecutable - This flag indicates whether the code
+ /// will eventually be linked into a single executable, despite the PIC
+ /// relocation model being in use. Its value is undefined (and irrelevant)
+ /// if the relocation model is anything other than PIC.
+ unsigned PositionIndependentExecutable : 1;
+
+ /// UseInitArray - Use .init_array instead of .ctors for static
+ /// constructors.
+ unsigned UseInitArray : 1;
+
+ /// Disable the integrated assembler.
+ unsigned DisableIntegratedAS : 1;
+
+ /// Compress DWARF debug sections.
+ unsigned CompressDebugSections : 1;
+
+ /// Emit functions into separate sections.
+ unsigned FunctionSections : 1;
+
+ /// Emit data into separate sections.
+ unsigned DataSections : 1;
+
+ unsigned UniqueSectionNames : 1;
+
+ /// Emit target-specific trap instruction for 'unreachable' IR instructions.
+ unsigned TrapUnreachable : 1;
+
+ /// EmulatedTLS - This flag enables the emulated TLS model, using the emutls
+ /// functions in the runtime library.
+ unsigned EmulatedTLS : 1;
+
+ /// FloatABIType - This setting is set when the -float-abi=xxx option is
+ /// specified on the command line. It may be Default, Soft, or Hard.
+ /// Default selects the target's default behavior. Soft selects the ABI for
+ /// software floating point, but does not indicate that FP hardware may not
+ /// be used. Such a combination is unfortunately popular (e.g.
+ /// arm-apple-darwin). Hard presumes that the normal FP ABI is used.
+ FloatABI::ABIType FloatABIType;
+
+ /// AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
+ /// This controls the creation of fused FP ops that store intermediate
+ /// results in higher precision than IEEE allows (E.g. FMAs).
+ ///
+ /// Fast mode - allows formation of fused FP ops whenever they're
+ /// profitable.
+ /// Standard mode - allow fusion only for 'blessed' FP ops. At present the
+ /// only blessed op is the fmuladd intrinsic. In the future more blessed ops
+ /// may be added.
+ /// Strict mode - allow fusion only if/when it can be proven that the excess
+ /// precision won't affect the result.
+ ///
+ /// Note: This option only controls formation of fused ops by the
+ /// optimizers. Fused operations that are explicitly specified (e.g. FMA
+ /// via the llvm.fma.* intrinsic) will always be honored, regardless of
+ /// the value of this option.
+ FPOpFusion::FPOpFusionMode AllowFPOpFusion;
+
+ /// This class encapsulates options for reciprocal-estimate code generation.
+ TargetRecip Reciprocals;
+
+ /// JTType - This flag specifies the type of jump-instruction table to
+ /// create for functions that have the jumptable attribute.
+ JumpTable::JumpTableType JTType;
+
+ /// ThreadModel - This flag specifies the type of threading model to assume
+ /// for things like atomics.
+ ThreadModel::Model ThreadModel;
+
+ /// EABIVersion - This flag specifies the EABI version.
+ EABI EABIVersion;
+
+ /// Which debugger to tune for.
+ DebuggerKind DebuggerTuning;
+
+ /// Machine level options.
+ MCTargetOptions MCOptions;
+ };
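+
+ // A sketch of typical client code: configure options, then hand them to a
+ // looked-up Target when constructing a TargetMachine (TheTarget and
+ // TripleStr are assumed to exist in the caller):
+ //
+ //   TargetOptions Opts;
+ //   Opts.UnsafeFPMath = 1;
+ //   Opts.FloatABIType = FloatABI::Hard;
+ //   TargetMachine *TM =
+ //       TheTarget->createTargetMachine(TripleStr, "generic", "", Opts);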
+
+// Comparison operators:
+
+inline bool operator==(const TargetOptions &LHS,
+ const TargetOptions &RHS) {
+#define ARE_EQUAL(X) LHS.X == RHS.X
+ return
+ ARE_EQUAL(UnsafeFPMath) &&
+ ARE_EQUAL(NoInfsFPMath) &&
+ ARE_EQUAL(NoNaNsFPMath) &&
+ ARE_EQUAL(HonorSignDependentRoundingFPMathOption) &&
+ ARE_EQUAL(NoZerosInBSS) &&
+ ARE_EQUAL(GuaranteedTailCallOpt) &&
+ ARE_EQUAL(StackAlignmentOverride) &&
+ ARE_EQUAL(EnableFastISel) &&
+ ARE_EQUAL(PositionIndependentExecutable) &&
+ ARE_EQUAL(UseInitArray) &&
+ ARE_EQUAL(TrapUnreachable) &&
+ ARE_EQUAL(EmulatedTLS) &&
+ ARE_EQUAL(FloatABIType) &&
+ ARE_EQUAL(AllowFPOpFusion) &&
+ ARE_EQUAL(Reciprocals) &&
+ ARE_EQUAL(JTType) &&
+ ARE_EQUAL(ThreadModel) &&
+ ARE_EQUAL(EABIVersion) &&
+ ARE_EQUAL(DebuggerTuning) &&
+ ARE_EQUAL(MCOptions);
+#undef ARE_EQUAL
+}
+
+inline bool operator!=(const TargetOptions &LHS,
+ const TargetOptions &RHS) {
+ return !(LHS == RHS);
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetRecip.h b/contrib/llvm/include/llvm/Target/TargetRecip.h
new file mode 100644
index 0000000..210d493
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetRecip.h
@@ -0,0 +1,73 @@
+//===--------------------- llvm/Target/TargetRecip.h ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class is used to customize machine-specific reciprocal estimate code
+// generation in a target-independent way.
+// If a target does not support operations in this specification, then code
+// generation will default to using supported operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETRECIP_H
+#define LLVM_TARGET_TARGETRECIP_H
+
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+#include <string>
+#include <map>
+
+namespace llvm {
+
+struct TargetRecip {
+public:
+ TargetRecip();
+
+ /// Initialize all or part of the operations from command-line options or
+ /// a front end.
+ TargetRecip(const std::vector<std::string> &Args);
+
+ /// Set whether a particular reciprocal operation is enabled and how many
+ /// refinement steps are needed when using it. Use "all" to set enablement
+ /// and refinement steps for all operations.
+ void setDefaults(StringRef Key, bool Enable, unsigned RefSteps);
+
+ /// Return true if the reciprocal operation has been enabled by default or
+ /// from the command-line. Return false if the operation has been disabled
+ /// by default or from the command-line.
+ bool isEnabled(StringRef Key) const;
+
+ /// Return the number of iterations necessary to refine the result of a
+ /// machine instruction for the given reciprocal operation.
+ unsigned getRefinementSteps(StringRef Key) const;
+
+ bool operator==(const TargetRecip &Other) const;
+
+private:
+ enum {
+ Uninitialized = -1
+ };
+
+ struct RecipParams {
+ int8_t Enabled;
+ int8_t RefinementSteps;
+
+ RecipParams() : Enabled(Uninitialized), RefinementSteps(Uninitialized) {}
+ };
+
+ std::map<StringRef, RecipParams> RecipMap;
+ typedef std::map<StringRef, RecipParams>::iterator RecipIter;
+ typedef std::map<StringRef, RecipParams>::const_iterator ConstRecipIter;
+
+ bool parseGlobalParams(const std::string &Arg);
+ void parseIndividualParams(const std::vector<std::string> &Args);
+};
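+
+// Usage sketch: the keys and "key:steps" strings mirror the -recip command
+// line syntax this class parses; the exact key names here are illustrative:
+//
+//   std::vector<std::string> Args;
+//   Args.push_back("sqrtf:1"); // enable f32 rsqrt estimate, 1 refinement step
+//   Args.push_back("!divd");   // explicitly disable f64 division estimates
+//   TargetRecip Recips(Args);
+//   if (Recips.isEnabled("sqrtf"))
+//     assert(Recips.getRefinementSteps("sqrtf") == 1);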
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
new file mode 100644
index 0000000..fccaad4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetRegisterInfo.h
@@ -0,0 +1,982 @@
+//=== Target/TargetRegisterInfo.h - Target Register Information -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes an abstract interface used to get information about a
+// target machine's register file. This information is used for a variety of
+// purposes, especially register allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETREGISTERINFO_H
+#define LLVM_TARGET_TARGETREGISTERINFO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineValueType.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Printable.h"
+#include <cassert>
+#include <functional>
+
+namespace llvm {
+
+class BitVector;
+class MachineFunction;
+class RegScavenger;
+template<class T> class SmallVectorImpl;
+class VirtRegMap;
+class raw_ostream;
+class LiveRegMatrix;
+
+/// A bitmask representing the covering of a register with sub-registers.
+///
+/// This is typically used to track liveness at sub-register granularity.
+/// Lane masks for sub-register indices are similar to register units for
+/// physical registers. The individual bits in a lane mask can't be assigned
+/// any specific meaning. They can be used to check if two sub-register
+/// indices overlap.
+///
+/// Iff the target has a register such that:
+///
+/// getSubReg(Reg, A) overlaps getSubReg(Reg, B)
+///
+/// then:
+///
+/// (getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B)) != 0
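+///
+/// For example, given a TargetRegisterInfo &TRI, two sub-register indices can
+/// be tested for overlap without assigning meaning to individual bits
+/// (a sketch):
+/// \code
+///   LaneBitmask MaskA = TRI.getSubRegIndexLaneMask(SubRegIdxA);
+///   LaneBitmask MaskB = TRI.getSubRegIndexLaneMask(SubRegIdxB);
+///   bool Overlap = (MaskA & MaskB) != 0;
+/// \endcode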
+typedef unsigned LaneBitmask;
+
+class TargetRegisterClass {
+public:
+ typedef const MCPhysReg* iterator;
+ typedef const MCPhysReg* const_iterator;
+ typedef const MVT::SimpleValueType* vt_iterator;
+ typedef const TargetRegisterClass* const * sc_iterator;
+
+ // Instance variables filled by tablegen, do not use!
+ const MCRegisterClass *MC;
+ const vt_iterator VTs;
+ const uint32_t *SubClassMask;
+ const uint16_t *SuperRegIndices;
+ const LaneBitmask LaneMask;
+ /// Classes with a higher priority value are assigned first by register
+ /// allocators using a greedy heuristic. The value is in the range [0,63].
+ const uint8_t AllocationPriority;
+ /// Whether the class supports two (or more) disjunct subregister indices.
+ const bool HasDisjunctSubRegs;
+ const sc_iterator SuperClasses;
+ ArrayRef<MCPhysReg> (*OrderFunc)(const MachineFunction&);
+
+ /// Return the register class ID number.
+ unsigned getID() const { return MC->getID(); }
+
+ /// begin/end - Return all of the registers in this class.
+ ///
+ iterator begin() const { return MC->begin(); }
+ iterator end() const { return MC->end(); }
+
+ /// Return the number of registers in this class.
+ unsigned getNumRegs() const { return MC->getNumRegs(); }
+
+ /// Return the specified register in the class.
+ unsigned getRegister(unsigned i) const {
+ return MC->getRegister(i);
+ }
+
+ /// Return true if the specified register is included in this register class.
+ /// This does not include virtual registers.
+ bool contains(unsigned Reg) const {
+ return MC->contains(Reg);
+ }
+
+ /// Return true if both registers are in this class.
+ bool contains(unsigned Reg1, unsigned Reg2) const {
+ return MC->contains(Reg1, Reg2);
+ }
+
+ /// Return the size of the register in bytes, which is also the size
+ /// of a stack slot allocated to hold a spilled copy of this register.
+ unsigned getSize() const { return MC->getSize(); }
+
+ /// Return the minimum required alignment for a register of this class.
+ unsigned getAlignment() const { return MC->getAlignment(); }
+
+ /// Return the cost of copying a value between two registers in this class.
+ /// A negative number means the register class is very expensive
+ /// to copy, e.g. status flag register classes.
+ int getCopyCost() const { return MC->getCopyCost(); }
+
+ /// Return true if this register class may be used to create virtual
+ /// registers.
+ bool isAllocatable() const { return MC->isAllocatable(); }
+
+ /// Return true if this TargetRegisterClass has the ValueType vt.
+ bool hasType(MVT vt) const {
+ for (int i = 0; VTs[i] != MVT::Other; ++i)
+ if (MVT(VTs[i]) == vt)
+ return true;
+ return false;
+ }
+
+ /// vt_begin / vt_end - Loop over all of the value types that can be
+ /// represented by values in this register class.
+ vt_iterator vt_begin() const {
+ return VTs;
+ }
+
+ vt_iterator vt_end() const {
+ vt_iterator I = VTs;
+ while (*I != MVT::Other) ++I;
+ return I;
+ }
+
+ /// Return true if the specified TargetRegisterClass
+ /// is a proper sub-class of this TargetRegisterClass.
+ bool hasSubClass(const TargetRegisterClass *RC) const {
+ return RC != this && hasSubClassEq(RC);
+ }
+
+ /// Returns true if RC is a sub-class of or equal to this class.
+ bool hasSubClassEq(const TargetRegisterClass *RC) const {
+ unsigned ID = RC->getID();
+ return (SubClassMask[ID / 32] >> (ID % 32)) & 1;
+ }
+
+ /// Return true if the specified TargetRegisterClass is a
+ /// proper super-class of this TargetRegisterClass.
+ bool hasSuperClass(const TargetRegisterClass *RC) const {
+ return RC->hasSubClass(this);
+ }
+
+ /// Returns true if RC is a super-class of or equal to this class.
+ bool hasSuperClassEq(const TargetRegisterClass *RC) const {
+ return RC->hasSubClassEq(this);
+ }
+
+ /// Returns a bit vector of subclasses, including this one.
+ /// The vector is indexed by class IDs, see hasSubClassEq() above for how to
+ /// use it.
+ const uint32_t *getSubClassMask() const {
+ return SubClassMask;
+ }
+
+ /// Returns a 0-terminated list of sub-register indices that project some
+ /// super-register class into this register class. The list has an entry for
+ /// each Idx such that:
+ ///
+ /// There exists SuperRC where:
+ /// For all Reg in SuperRC:
+ /// this->contains(Reg:Idx)
+ ///
+ const uint16_t *getSuperRegIndices() const {
+ return SuperRegIndices;
+ }
+
+ /// Returns a NULL-terminated list of super-classes. The
+ /// classes are ordered by ID which is also a topological ordering from large
+ /// to small classes. The list does NOT include the current class.
+ sc_iterator getSuperClasses() const {
+ return SuperClasses;
+ }
+
+ /// Return true if this TargetRegisterClass is a subset
+ /// class of at least one other TargetRegisterClass.
+ bool isASubClass() const {
+ return SuperClasses[0] != nullptr;
+ }
+
+ /// Returns the preferred order for allocating registers from this register
+ /// class in MF. The raw order comes directly from the .td file and may
+ /// include reserved registers that are not allocatable.
+ /// Register allocators should also make sure to allocate
+ /// callee-saved registers only after all the volatiles are used. The
+ /// RegisterClassInfo class provides filtered allocation orders with
+ /// callee-saved registers moved to the end.
+ ///
+ /// The MachineFunction argument can be used to tune the allocatable
+ /// registers based on the characteristics of the function, subtarget, or
+ /// other criteria.
+ ///
+ /// By default, this method returns all registers in the class.
+ ///
+ ArrayRef<MCPhysReg> getRawAllocationOrder(const MachineFunction &MF) const {
+ return OrderFunc ? OrderFunc(MF) : makeArrayRef(begin(), getNumRegs());
+ }
+
+ /// Returns the combination of all lane masks of the registers in this class.
+ /// The lane masks of the registers are the combination of all lane masks
+ /// of their subregisters.
+ LaneBitmask getLaneMask() const {
+ return LaneMask;
+ }
+};
+
+/// Extra information, not in MCRegisterDesc, about registers.
+/// These are used by codegen, not by MC.
+struct TargetRegisterInfoDesc {
+ unsigned CostPerUse; // Extra cost of instructions using register.
+ bool inAllocatableClass; // Register belongs to an allocatable regclass.
+};
+
+/// Each TargetRegisterClass has a per-register weight and a weight limit,
+/// which must be less than the limits of its pressure sets.
+struct RegClassWeight {
+ unsigned RegWeight;
+ unsigned WeightLimit;
+};
+
+/// TargetRegisterInfo base class - We assume that the target defines a static
+/// array of TargetRegisterDesc objects that represent all of the machine
+/// registers that the target has. As such, we simply have to track a pointer
+/// to this array so that we can turn a register number into a register
+/// descriptor.
+///
+class TargetRegisterInfo : public MCRegisterInfo {
+public:
+ typedef const TargetRegisterClass * const * regclass_iterator;
+private:
+ const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen
+ const char *const *SubRegIndexNames; // Names of subreg indexes.
+ // Pointer to array of lane masks, one per sub-reg index.
+ const LaneBitmask *SubRegIndexLaneMasks;
+
+ regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses
+ unsigned CoveringLanes;
+
+protected:
+ TargetRegisterInfo(const TargetRegisterInfoDesc *ID,
+ regclass_iterator RegClassBegin,
+ regclass_iterator RegClassEnd,
+ const char *const *SRINames,
+ const LaneBitmask *SRILaneMasks,
+ unsigned CoveringLanes);
+ virtual ~TargetRegisterInfo();
+public:
+
+ // Register numbers can represent physical registers, virtual registers, and
+ // sometimes stack slots. The unsigned values are divided into these ranges:
+ //
+ // 0 Not a register, can be used as a sentinel.
+ // [1;2^30) Physical registers assigned by TableGen.
+ // [2^30;2^31) Stack slots. (Rarely used.)
+ // [2^31;2^32) Virtual registers assigned by MachineRegisterInfo.
+ //
+ // Further sentinels can be allocated from the small negative integers.
+ // DenseMapInfo<unsigned> uses -1u and -2u.
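+ //
+ // For illustration (a sketch, not normative): the encoding round-trips
+ // through the conversion helpers declared below.
+ //
+ //   unsigned VReg = TargetRegisterInfo::index2VirtReg(0); // == 1u << 31
+ //   assert(TargetRegisterInfo::isVirtualRegister(VReg));
+ //   assert(TargetRegisterInfo::virtReg2Index(VReg) == 0);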
+
+ /// isStackSlot - Sometimes it is useful to be able to store a non-negative
+ /// frame index in a variable that normally holds a register. isStackSlot()
+ /// returns true if Reg is in the range used for stack slots.
+ ///
+ /// Note that isVirtualRegister() and isPhysicalRegister() cannot handle stack
+ /// slots, so if a variable may contain a stack slot, always check
+ /// isStackSlot() first.
+ ///
+ static bool isStackSlot(unsigned Reg) {
+ return int(Reg) >= (1 << 30);
+ }
+
+ /// Compute the frame index from a register value representing a stack slot.
+ static int stackSlot2Index(unsigned Reg) {
+ assert(isStackSlot(Reg) && "Not a stack slot");
+ return int(Reg - (1u << 30));
+ }
+
+ /// Convert a non-negative frame index to a stack slot register value.
+ static unsigned index2StackSlot(int FI) {
+ assert(FI >= 0 && "Cannot hold a negative frame index.");
+ return FI + (1u << 30);
+ }
+
+ /// Return true if the specified register number is in
+ /// the physical register namespace.
+ static bool isPhysicalRegister(unsigned Reg) {
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) > 0;
+ }
+
+ /// Return true if the specified register number is in
+ /// the virtual register namespace.
+ static bool isVirtualRegister(unsigned Reg) {
+ assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first.");
+ return int(Reg) < 0;
+ }
+
+ /// Convert a virtual register number to a 0-based index.
+ /// The first virtual register in a function will get the index 0.
+ static unsigned virtReg2Index(unsigned Reg) {
+ assert(isVirtualRegister(Reg) && "Not a virtual register");
+ return Reg & ~(1u << 31);
+ }
+
+ /// Convert a 0-based index to a virtual register number.
+ /// This is the inverse operation of VirtReg2IndexFunctor below.
+ static unsigned index2VirtReg(unsigned Index) {
+ return Index | (1u << 31);
+ }
+
+ /// Returns the Register Class of a physical register of the given type,
+ /// picking the smallest (most specific) register class of the right type
+ /// that contains this physreg.
+ const TargetRegisterClass *
+ getMinimalPhysRegClass(unsigned Reg, MVT VT = MVT::Other) const;
+
+ /// Return the maximal subclass of the given register class that is
+ /// allocatable or NULL.
+ const TargetRegisterClass *
+ getAllocatableClass(const TargetRegisterClass *RC) const;
+
+ /// Returns a bitset indexed by register number indicating if a register is
+ /// allocatable or not. If a register class is specified, returns the subset
+ /// for the class.
+ BitVector getAllocatableSet(const MachineFunction &MF,
+ const TargetRegisterClass *RC = nullptr) const;
+
+ /// Return the additional cost of using this register instead
+ /// of other registers in its class.
+ unsigned getCostPerUse(unsigned RegNo) const {
+ return InfoDesc[RegNo].CostPerUse;
+ }
+
+ /// Return true if the register is in an allocatable register class.
+ bool isInAllocatableClass(unsigned RegNo) const {
+ return InfoDesc[RegNo].inAllocatableClass;
+ }
+
+ /// Return the human-readable symbolic target-specific
+ /// name for the specified SubRegIndex.
+ const char *getSubRegIndexName(unsigned SubIdx) const {
+ assert(SubIdx && SubIdx < getNumSubRegIndices() &&
+ "This is not a subregister index");
+ return SubRegIndexNames[SubIdx-1];
+ }
+
+ /// Return a bitmask representing the parts of a register that are covered
+ /// by SubIdx; \see LaneBitmask.
+ ///
+ /// SubIdx == 0 is allowed; it has the lane mask ~0u.
+ LaneBitmask getSubRegIndexLaneMask(unsigned SubIdx) const {
+ assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index");
+ return SubRegIndexLaneMasks[SubIdx];
+ }
+
+ /// The lane masks returned by getSubRegIndexLaneMask() above can only be
+ /// used to determine if sub-registers overlap - they can't be used to
+ /// determine if a set of sub-registers completely cover another
+ /// sub-register.
+ ///
+ /// The X86 general purpose registers have two lanes corresponding to the
+ /// sub_8bit and sub_8bit_hi sub-registers. Both sub_32bit and sub_16bit have
+ /// lane masks '3', but the sub_16bit sub-register doesn't fully cover the
+ /// sub_32bit sub-register.
+ ///
+ /// On the other hand, the ARM NEON lanes fully cover their registers: The
+ /// dsub_0 sub-register is completely covered by the ssub_0 and ssub_1 lanes.
+ /// This is related to the CoveredBySubRegs property on register definitions.
+ ///
+ /// This function returns a bit mask of lanes that completely cover their
+ /// sub-registers. More precisely, given:
+ ///
+ /// Covering = getCoveringLanes();
+ /// MaskA = getSubRegIndexLaneMask(SubA);
+ /// MaskB = getSubRegIndexLaneMask(SubB);
+ ///
+ /// If (MaskA & ~(MaskB & Covering)) == 0, then SubA is completely covered by
+ /// SubB.
+ LaneBitmask getCoveringLanes() const { return CoveringLanes; }
+
+ /// Returns true if the two registers are equal or alias each other.
+ /// The registers may be virtual registers.
+ bool regsOverlap(unsigned regA, unsigned regB) const {
+ if (regA == regB) return true;
+ if (isVirtualRegister(regA) || isVirtualRegister(regB))
+ return false;
+
+ // Regunits are numerically ordered. Find a common unit.
+ MCRegUnitIterator RUA(regA, this);
+ MCRegUnitIterator RUB(regB, this);
+ do {
+ if (*RUA == *RUB) return true;
+ if (*RUA < *RUB) ++RUA;
+ else ++RUB;
+ } while (RUA.isValid() && RUB.isValid());
+ return false;
+ }
+
+ /// Returns true if Reg contains RegUnit.
+ bool hasRegUnit(unsigned Reg, unsigned RegUnit) const {
+ for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units)
+ if (*Units == RegUnit)
+ return true;
+ return false;
+ }
+
+ /// Return a null-terminated list of all of the callee-saved registers on
+ /// this target. The registers should be listed in the order of desired
+ /// callee-save stack frame offsets: the first register is closest to the
+ /// incoming stack pointer if the stack grows down, and vice versa.
+ ///
+ virtual const MCPhysReg*
+ getCalleeSavedRegs(const MachineFunction *MF) const = 0;
+
+ virtual const MCPhysReg*
+ getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
+ return nullptr;
+ }
+
+ /// Return a mask of call-preserved registers for the given calling convention
+ /// on the current function. The mask should include all call-preserved
+ /// aliases. This is used by the register allocator to determine which
+ /// registers can be live across a call.
+ ///
+ /// The mask is an array containing (TRI::getNumRegs()+31)/32 entries.
+ /// A set bit indicates that all bits of the corresponding register are
+ /// preserved across the function call. The bit mask is expected to be
+ /// sub-register complete, i.e. if A is preserved, so are all its
+ /// sub-registers.
+ ///
+ /// Bits are numbered from the LSB, so the bit for physical register Reg can
+ /// be found as (Mask[Reg / 32] >> Reg % 32) & 1.
+ ///
+ /// A NULL pointer means that no register mask will be used, and call
+ /// instructions should use implicit-def operands to indicate call clobbered
+ /// registers.
+ ///
+ virtual const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID) const {
+ // The default mask clobbers everything. All targets should override.
+ return nullptr;
+ }
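+
+ // A hedged usage sketch (Reg, MF, and TRI are assumed valid): a register is
+ // preserved across a C-convention call when the target provides a mask and
+ // the register's bit is set, per the bit numbering described above.
+ //
+ //   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallingConv::C);
+ //   bool Preserved = Mask && ((Mask[Reg / 32] >> (Reg % 32)) & 1);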
+
+ /// Return a register mask that clobbers everything.
+ virtual const uint32_t *getNoPreservedMask() const {
+ llvm_unreachable("target does not provide no presered mask");
+ }
+
+ /// Return all the call-preserved register masks defined for this target.
+ virtual ArrayRef<const uint32_t *> getRegMasks() const = 0;
+ virtual ArrayRef<const char *> getRegMaskNames() const = 0;
+
+ /// Returns a bitset indexed by physical register number indicating if a
+ /// register is a special register that has particular uses and should be
+ /// considered unavailable at all times, e.g. SP, RA. This is
+ /// used by the register scavenger to determine what registers are free.
+ virtual BitVector getReservedRegs(const MachineFunction &MF) const = 0;
+
+ /// Prior to adding the live-out mask to a stackmap or patchpoint
+ /// instruction, provide the target the opportunity to adjust it (mainly to
+ /// remove pseudo-registers that should be ignored).
+ virtual void adjustStackMapLiveOutMask(uint32_t *Mask) const { }
+
+ /// Return a super-register of the specified register
+ /// Reg so its sub-register of index SubIdx is Reg.
+ unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx,
+ const TargetRegisterClass *RC) const {
+ return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC);
+ }
+
+ /// Return a subclass of the specified register
+ /// class A so that each register in it has a sub-register of the
+ /// specified sub-register index which is in the specified register class B.
+ ///
+ /// TableGen will synthesize missing A sub-classes.
+ virtual const TargetRegisterClass *
+ getMatchingSuperRegClass(const TargetRegisterClass *A,
+ const TargetRegisterClass *B, unsigned Idx) const;
+
+ // For a copy-like instruction that defines a register of class DefRC with
+ // subreg index DefSubReg, reading from another source with class SrcRC and
+ // subregister SrcSubReg, return true if this is a preferable copy
+ // instruction, or false if an earlier use should be used instead.
+ virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
+ unsigned DefSubReg,
+ const TargetRegisterClass *SrcRC,
+ unsigned SrcSubReg) const;
+
+ /// Returns the largest legal sub-class of RC that
+ /// supports the sub-register index Idx.
+ /// If no such sub-class exists, return NULL.
+ /// If all registers in RC already have an Idx sub-register, return RC.
+ ///
+ /// TableGen generates a version of this function that is good enough in most
+ /// cases. Targets can override if they have constraints that TableGen
+ /// doesn't understand. For example, the x86 sub_8bit sub-register index is
+ /// supported by the full GR32 register class in 64-bit mode, but only by the
+ /// GR32_ABCD register class in 32-bit mode.
+ ///
+ /// TableGen will synthesize missing RC sub-classes.
+ virtual const TargetRegisterClass *
+ getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const {
+ assert(Idx == 0 && "Target has no sub-registers");
+ return RC;
+ }
+
+ /// Return the subregister index you get from composing
+ /// two subregister indices.
+ ///
+ /// The special null sub-register index composes as the identity.
+ ///
+ /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b)
+ /// returns c. Note that composeSubRegIndices does not detect illegal
+ /// compositions: if R does not have a subreg a, or R:a does not have a
+ /// subreg b, it will not report the problem.
+ ///
+ /// The ARM register Q0 has two D subregs dsub_0:D0 and dsub_1:D1. It also has
+ /// ssub_0:S0 - ssub_3:S3 subregs.
+ /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2.
+ ///
+ unsigned composeSubRegIndices(unsigned a, unsigned b) const {
+ if (!a) return b;
+ if (!b) return a;
+ return composeSubRegIndicesImpl(a, b);
+ }
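+
+ // A sketch of the ARM example above (assumes the ARM backend's generated
+ // sub-register index enumerators; illustrative, not checked here):
+ //
+ //   unsigned Idx = TRI->composeSubRegIndices(ARM::dsub_1, ARM::ssub_0);
+ //   // Idx == ARM::ssub_2 for the Q0 layout described above.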
+
+ /// Transforms a lane mask computed for one subregister into the lane mask
+ /// that would have been computed when composing the sub-subregisters with
+ /// IdxA first. @sa composeSubRegIndices()
+ LaneBitmask composeSubRegIndexLaneMask(unsigned IdxA,
+ LaneBitmask Mask) const {
+ if (!IdxA)
+ return Mask;
+ return composeSubRegIndexLaneMaskImpl(IdxA, Mask);
+ }
+
+ /// Debugging helper: dump register in human readable form to dbgs() stream.
+ static void dumpReg(unsigned Reg, unsigned SubRegIndex = 0,
+ const TargetRegisterInfo* TRI = nullptr);
+
+protected:
+ /// Overridden by TableGen in targets that have sub-registers.
+ virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const {
+ llvm_unreachable("Target has no sub-registers");
+ }
+
+ /// Overridden by TableGen in targets that have sub-registers.
+ virtual LaneBitmask
+ composeSubRegIndexLaneMaskImpl(unsigned, LaneBitmask) const {
+ llvm_unreachable("Target has no sub-registers");
+ }
+
+public:
+ /// Find a common super-register class if it exists.
+ ///
+ /// Find a register class, SuperRC and two sub-register indices, PreA and
+ /// PreB, such that:
+ ///
+ /// 1. PreA + SubA == PreB + SubB (using composeSubRegIndices()), and
+ ///
+ /// 2. For all Reg in SuperRC: Reg:PreA in RCA and Reg:PreB in RCB, and
+ ///
+ /// 3. SuperRC->getSize() >= max(RCA->getSize(), RCB->getSize()).
+ ///
+ /// SuperRC will be chosen such that no super-class of SuperRC satisfies the
+ /// requirements, and there is no register class with a smaller spill size
+ /// that satisfies the requirements.
+ ///
+ /// SubA and SubB must not be 0. Use getMatchingSuperRegClass() instead.
+ ///
+ /// Either of the PreA and PreB sub-register indices may be returned as 0. In
+ /// that case, the returned register class will be a sub-class of the
+ /// corresponding argument register class.
+ ///
+ /// The function returns NULL if no register class can be found.
+ ///
+ const TargetRegisterClass*
+ getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
+ const TargetRegisterClass *RCB, unsigned SubB,
+ unsigned &PreA, unsigned &PreB) const;
+
+ //===--------------------------------------------------------------------===//
+ // Register Class Information
+ //
+
+ /// Register class iterators
+ ///
+ regclass_iterator regclass_begin() const { return RegClassBegin; }
+ regclass_iterator regclass_end() const { return RegClassEnd; }
+
+ unsigned getNumRegClasses() const {
+ return (unsigned)(regclass_end()-regclass_begin());
+ }
+
+ /// Returns the register class associated with the enumeration value.
+ /// See class MCOperandInfo.
+ const TargetRegisterClass *getRegClass(unsigned i) const {
+ assert(i < getNumRegClasses() && "Register Class ID out of range");
+ return RegClassBegin[i];
+ }
+
+ /// Returns the name of the register class.
+ const char *getRegClassName(const TargetRegisterClass *Class) const {
+ return MCRegisterInfo::getRegClassName(Class->MC);
+ }
+
+ /// Find the largest common subclass of A and B.
+ /// Return NULL if there is no common subclass.
+ /// The common subclass should contain
+ /// simple value type SVT if it is not the Any type.
+ const TargetRegisterClass *
+ getCommonSubClass(const TargetRegisterClass *A,
+ const TargetRegisterClass *B,
+ const MVT::SimpleValueType SVT =
+ MVT::SimpleValueType::Any) const;
+
+ /// Returns a TargetRegisterClass used for pointer values.
+ /// If a target supports multiple different pointer register classes,
+ /// Kind specifies which one is returned.
+ virtual const TargetRegisterClass *
+ getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const {
+ llvm_unreachable("Target didn't implement getPointerRegClass!");
+ }
+
+ /// Returns a legal register class to copy a register in the specified class
+ /// to or from. If it is possible to copy the register directly without using
+ /// a cross register class copy, return the specified RC. Returns NULL if it
+ /// is not possible to copy between two registers of the specified class.
+ virtual const TargetRegisterClass *
+ getCrossCopyRegClass(const TargetRegisterClass *RC) const {
+ return RC;
+ }
+
+ /// Returns the largest super class of RC that is legal to use in the current
+ /// sub-target and has the same spill size.
+ /// The returned register class can be used to create virtual registers which
+ /// means that all its registers can be copied and spilled.
+ virtual const TargetRegisterClass *
+ getLargestLegalSuperClass(const TargetRegisterClass *RC,
+ const MachineFunction &) const {
+ // The default implementation is very conservative and doesn't allow the
+ // register allocator to inflate register classes.
+ return RC;
+ }
+
+ /// Return the register pressure "high water mark" for the specific register
+ /// class. The scheduler is in high register pressure mode (for the specific
+ /// register class) if it goes over the limit.
+ ///
+ /// Note: this is the old register pressure model that relies on a manually
+ /// specified representative register class per value type.
+ virtual unsigned getRegPressureLimit(const TargetRegisterClass *RC,
+ MachineFunction &MF) const {
+ return 0;
+ }
+
+ /// Return a heuristic for the machine scheduler to compare the profitability
+ /// of increasing one register pressure set versus another. The scheduler
+ /// will prefer increasing the register pressure of the set which returns
+ /// the largest value for this function.
+ virtual unsigned getRegPressureSetScore(const MachineFunction &MF,
+ unsigned PSetID) const {
+ return PSetID;
+ }
+
+ /// Get the weight in units of pressure for this register class.
+ virtual const RegClassWeight &getRegClassWeight(
+ const TargetRegisterClass *RC) const = 0;
+
+ /// Get the weight in units of pressure for this register unit.
+ virtual unsigned getRegUnitWeight(unsigned RegUnit) const = 0;
+
+ /// Get the number of dimensions of register pressure.
+ virtual unsigned getNumRegPressureSets() const = 0;
+
+ /// Get the name of this register unit pressure set.
+ virtual const char *getRegPressureSetName(unsigned Idx) const = 0;
+
+ /// Get the register unit pressure limit for this dimension.
+ /// This limit must be adjusted dynamically for reserved registers.
+ virtual unsigned getRegPressureSetLimit(const MachineFunction &MF,
+ unsigned Idx) const = 0;
+
+ /// Get the dimensions of register pressure impacted by this register class.
+ /// Returns a -1 terminated array of pressure set IDs.
+ virtual const int *getRegClassPressureSets(
+ const TargetRegisterClass *RC) const = 0;
+
+ /// Get the dimensions of register pressure impacted by this register unit.
+ /// Returns a -1 terminated array of pressure set IDs.
+ virtual const int *getRegUnitPressureSets(unsigned RegUnit) const = 0;
+
+ /// Get a list of 'hint' registers that the register allocator should try
+ /// first when allocating a physical register for the virtual register
+ /// VirtReg. These registers are effectively moved to the front of the
+ /// allocation order.
+ ///
+ /// The Order argument is the allocation order for VirtReg's register class
+ /// as returned from RegisterClassInfo::getOrder(). The hint registers must
+ /// come from Order, and they must not be reserved.
+ ///
+ /// The default implementation of this function can resolve
+ /// target-independent hints provided to MRI::setRegAllocationHint with
+ /// HintType == 0. Targets that override this function should defer to the
+ /// default implementation if they have no reason to change the allocation
+ /// order for VirtReg. There may be target-independent hints.
+ virtual void getRegAllocationHints(unsigned VirtReg,
+ ArrayRef<MCPhysReg> Order,
+ SmallVectorImpl<MCPhysReg> &Hints,
+ const MachineFunction &MF,
+ const VirtRegMap *VRM = nullptr,
+ const LiveRegMatrix *Matrix = nullptr)
+ const;
+
+ /// A callback to allow the target a chance to update register allocation
+ /// hints when a register is "changed" (e.g. coalesced) to another register.
+ /// e.g. On ARM, some virtual registers should target register pairs: if one
+ /// register of the pair is coalesced to another register, the allocation
+ /// hint of the other half of the pair should be changed to point to the
+ /// new register.
+ virtual void updateRegAllocHint(unsigned Reg, unsigned NewReg,
+ MachineFunction &MF) const {
+ // Do nothing.
+ }
+
+ /// Allow the target to reverse allocation order of local live ranges. This
+ /// will generally allocate shorter local live ranges first. For targets with
+ /// many registers, this could reduce regalloc compile time by a large
+ /// factor. It is disabled by default for three reasons:
+ /// (1) Top-down allocation is simpler and easier to debug for targets that
+ /// don't benefit from reversing the order.
+ /// (2) Bottom-up allocation could result in poor eviction decisions on some
+ /// targets affecting the performance of compiled code.
+ /// (3) Bottom-up allocation is no longer guaranteed to optimally color.
+ virtual bool reverseLocalAssignment() const { return false; }
+
+ /// Allow the target to override the cost of using a callee-saved register for
+ /// the first time. Default value of 0 means we will use a callee-saved
+ /// register if it is available.
+ virtual unsigned getCSRFirstUseCost() const { return 0; }
+
+ /// Returns true if the target requires (and can make use of) the register
+ /// scavenger.
+ virtual bool requiresRegisterScavenging(const MachineFunction &MF) const {
+ return false;
+ }
+
+ /// Returns true if the target wants to use frame pointer based accesses to
+ /// spill to the scavenger emergency spill slot.
+ virtual bool useFPForScavengingIndex(const MachineFunction &MF) const {
+ return true;
+ }
+
+ /// Returns true if the target requires post PEI scavenging of registers for
+ /// materializing frame index constants.
+ virtual bool requiresFrameIndexScavenging(const MachineFunction &MF) const {
+ return false;
+ }
+
+ /// Returns true if the target wants the LocalStackAllocation pass to be run
+ /// and virtual base registers used for more efficient stack access.
+ virtual bool requiresVirtualBaseRegisters(const MachineFunction &MF) const {
+ return false;
+ }
+
+ /// Return true if the target has reserved a spill slot in the stack frame of
+ /// the given function for the specified register. e.g. On x86, if the frame
+ /// register is required, the first fixed stack object is reserved as its
+ /// spill slot. This tells PEI not to create a new stack frame
+ /// object for the given register. It should be called only after
+ /// determineCalleeSaves().
+ virtual bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
+ int &FrameIdx) const {
+ return false;
+ }
+
+ /// Returns true if the live-ins should be tracked after register allocation.
+ virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
+ return false;
+ }
+
+ /// True if the stack can be realigned for the target.
+ virtual bool canRealignStack(const MachineFunction &MF) const;
+
+ /// True if storage within the function requires the stack pointer to be
+ /// aligned more than the normal calling convention calls for.
+ /// This cannot be overridden by the target, but canRealignStack can be
+ /// overridden.
+ bool needsStackRealignment(const MachineFunction &MF) const;
+
+ /// Get the offset from the referenced frame index in the instruction,
+ /// if there is one.
+ virtual int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
+ int Idx) const {
+ return 0;
+ }
+
+ /// Returns true if the instruction's frame index reference would be better
+ /// served by a base register other than FP or SP.
+ /// Used by LocalStackFrameAllocation to determine which frame index
+ /// references it should create new base registers for.
+ virtual bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
+ return false;
+ }
+
+ /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx
+ /// before insertion point I.
+ virtual void materializeFrameBaseRegister(MachineBasicBlock *MBB,
+ unsigned BaseReg, int FrameIdx,
+ int64_t Offset) const {
+ llvm_unreachable("materializeFrameBaseRegister does not exist on this "
+ "target");
+ }
+
+ /// Resolve a frame index operand of an instruction
+ /// to reference the indicated base register plus offset instead.
+ virtual void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
+ int64_t Offset) const {
+ llvm_unreachable("resolveFrameIndex does not exist on this target");
+ }
+
+ /// Determine whether a given base register plus offset immediate is
+ /// encodable to resolve a frame index.
+ virtual bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
+ int64_t Offset) const {
+ llvm_unreachable("isFrameOffsetLegal does not exist on this target");
+ }
+
+ /// Spill the register so it can be used by the register scavenger.
+ /// Return true if the register was spilled, false otherwise.
+ /// If this function does not spill the register, the scavenger
+ /// will instead spill it to the emergency spill slot.
+ ///
+ virtual bool saveScavengerRegister(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ MachineBasicBlock::iterator &UseMI,
+ const TargetRegisterClass *RC,
+ unsigned Reg) const {
+ return false;
+ }
+
+ /// This method must be overridden to eliminate abstract frame indices from
+ /// instructions which may use them. The instruction referenced by the
+ /// iterator contains an MO_FrameIndex operand which must be eliminated by
+ /// this method. This method may modify or replace the specified instruction,
+ /// as long as it keeps the iterator pointing at the finished product.
+ /// SPAdj is the SP adjustment due to the call frame setup instruction.
+ /// FIOperandNum is the FI operand number.
+ virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const = 0;
+
+ //===--------------------------------------------------------------------===//
+ /// Subtarget Hooks
+
+ /// \brief SrcRC and DstRC will be morphed into NewRC if this returns true.
+ virtual bool shouldCoalesce(MachineInstr *MI,
+ const TargetRegisterClass *SrcRC,
+ unsigned SubReg,
+ const TargetRegisterClass *DstRC,
+ unsigned DstSubReg,
+ const TargetRegisterClass *NewRC) const
+ { return true; }
+
+ //===--------------------------------------------------------------------===//
+ /// Debug information queries.
+
+ /// getFrameRegister - This method should return the register used as a base
+ /// for values allocated in the current stack frame.
+ virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
+};
+
+
+//===----------------------------------------------------------------------===//
+// SuperRegClassIterator
+//===----------------------------------------------------------------------===//
+//
+// Iterate over the possible super-registers for a given register class. The
+// iterator will visit a list of pairs (Idx, Mask) corresponding to the
+// possible classes of super-registers.
+//
+// Each bit mask will have at least one set bit, and each set bit in Mask
+// corresponds to a SuperRC such that:
+//
+// For all Reg in SuperRC: Reg:Idx is in RC.
+//
+// The iterator can include (0, RC->getSubClassMask()) as the first entry which
+// also satisfies the above requirement, assuming Reg:0 == Reg.
+//
+class SuperRegClassIterator {
+ const unsigned RCMaskWords;
+ unsigned SubReg;
+ const uint16_t *Idx;
+ const uint32_t *Mask;
+
+public:
+ /// Create a SuperRegClassIterator that visits all the super-register classes
+ /// of RC. When IncludeSelf is set, also include the (0, sub-classes) entry.
+ SuperRegClassIterator(const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI,
+ bool IncludeSelf = false)
+ : RCMaskWords((TRI->getNumRegClasses() + 31) / 32),
+ SubReg(0),
+ Idx(RC->getSuperRegIndices()),
+ Mask(RC->getSubClassMask()) {
+ if (!IncludeSelf)
+ ++*this;
+ }
+
+ /// Returns true if this iterator is still pointing at a valid entry.
+ bool isValid() const { return Idx; }
+
+ /// Returns the current sub-register index.
+ unsigned getSubReg() const { return SubReg; }
+
+ /// Returns the bit mask of register classes that getSubReg() projects into
+ /// RC.
+ const uint32_t *getMask() const { return Mask; }
+
+ /// Advance iterator to the next entry.
+ void operator++() {
+ assert(isValid() && "Cannot move iterator past end.");
+ Mask += RCMaskWords;
+ SubReg = *Idx++;
+ if (!SubReg)
+ Idx = nullptr;
+ }
+};
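+
+// A typical traversal (sketch; RC and TRI are assumed valid):
+//
+//   for (SuperRegClassIterator SRI(RC, TRI); SRI.isValid(); ++SRI) {
+//     unsigned SubIdx = SRI.getSubReg();    // current sub-register index
+//     const uint32_t *Mask = SRI.getMask(); // classes projecting into RC
+//   }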
+
+// This is useful when building IndexedMaps keyed on virtual registers
+struct VirtReg2IndexFunctor : public std::unary_function<unsigned, unsigned> {
+ unsigned operator()(unsigned Reg) const {
+ return TargetRegisterInfo::virtReg2Index(Reg);
+ }
+};
+
+/// Prints virtual and physical registers with or without a TRI instance.
+///
+/// The format is:
+/// %noreg - NoRegister
+/// %vreg5 - a virtual register.
+/// %vreg5:sub_8bit - a virtual register with sub-register index (with TRI).
+/// %EAX - a physical register
+/// %physreg17 - a physical register when no TRI instance given.
+///
+/// Usage: OS << PrintReg(Reg, TRI) << '\n';
+Printable PrintReg(unsigned Reg, const TargetRegisterInfo *TRI = nullptr,
+ unsigned SubRegIdx = 0);
+
+/// Create Printable object to print register units on a \ref raw_ostream.
+///
+/// Register units are named after their root registers:
+///
+/// AL - Single root.
+/// FP0~ST7 - Dual roots.
+///
+/// Usage: OS << PrintRegUnit(Unit, TRI) << '\n';
+Printable PrintRegUnit(unsigned Unit, const TargetRegisterInfo *TRI);
+
+/// \brief Create Printable object to print virtual registers and physical
+/// registers on a \ref raw_ostream.
+Printable PrintVRegOrUnit(unsigned VRegOrUnit, const TargetRegisterInfo *TRI);
+
+/// Create Printable object to print LaneBitmasks on a \ref raw_ostream.
+Printable PrintLaneMask(LaneBitmask LaneMask);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetSchedule.td b/contrib/llvm/include/llvm/Target/TargetSchedule.td
new file mode 100644
index 0000000..89db37c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetSchedule.td
@@ -0,0 +1,419 @@
+//===- TargetSchedule.td - Target Independent Scheduling ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent scheduling interfaces which should
+// be implemented by each target that uses TableGen-based scheduling.
+//
+// The SchedMachineModel is defined by subtargets for three categories of data:
+// 1. Basic properties for coarse grained instruction cost model.
+// 2. Scheduler Read/Write resources for simple per-opcode cost model.
+// 3. Instruction itineraries for detailed reservation tables.
+//
+// (1) Basic properties are defined by the SchedMachineModel
+// class. Target hooks allow subtargets to associate opcodes with
+// those properties.
+//
+// (2) A per-operand machine model can be implemented in any
+// combination of the following ways:
+//
+// A. Associate per-operand SchedReadWrite types with Instructions by
+// modifying the Instruction definition to inherit from Sched. For
+// each subtarget, define WriteRes and ReadAdvance to associate
+// processor resources and latency with each SchedReadWrite type.
+//
+// B. In each instruction definition, name an ItineraryClass. For each
+// subtarget, define ItinRW entries to map ItineraryClass to
+// per-operand SchedReadWrite types. Unlike method A, these types may
+// be subtarget specific and can be directly associated with resources
+// by defining SchedWriteRes and SchedReadAdvance.
+//
+// C. In the subtarget, map SchedReadWrite types to specific
+// opcodes. This overrides any SchedReadWrite types or
+// ItineraryClasses defined by the Instruction. As in method B, the
+// subtarget can directly associate resources with SchedReadWrite
+// types by defining SchedWriteRes and SchedReadAdvance.
+//
+// D. In either the target or subtarget, define SchedWriteVariant or
+// SchedReadVariant to map one SchedReadWrite type onto another
+// sequence of SchedReadWrite types. This allows dynamic selection of
+// an instruction's machine model via custom C++ code. It also allows
+// a machine-independent SchedReadWrite type to map to a sequence of
+// machine-dependent types.
+//
+// (3) A per-pipeline-stage machine model can be implemented by providing
+// Itineraries in addition to mapping instructions to ItineraryClasses.
+//===----------------------------------------------------------------------===//
+
+// Include legacy support for instruction itineraries.
+include "llvm/Target/TargetItinerary.td"
+
+class Instruction; // Forward def
+
+// DAG operator that interprets the DAG args as Instruction defs.
+def instrs;
+
+// DAG operator that interprets each DAG arg as a regex pattern for
+// matching Instruction opcode names.
+// The regex must match the beginning of the opcode (as in Python re.match).
+// To avoid matching prefixes, append '$' to the pattern.
+def instregex;
+
+// Define the SchedMachineModel and provide basic properties for
+// coarse grained instruction cost model. Default values for the
+// properties are defined in MCSchedModel. A value of "-1" in the
+// target description's SchedMachineModel indicates that the property
+// is not overridden by the target.
+//
+// Target hooks allow subtargets to associate LoadLatency and
+// HighLatency with groups of opcodes.
+//
+// See MCSchedule.h for detailed comments.
+class SchedMachineModel {
+ int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle.
+ int MinLatency = -1; // Determines which instructions are allowed in a group.
+ // (-1) in-order, (0) out-of-order, (1) in-order with variable latencies.
+ int MicroOpBufferSize = -1; // Max micro-ops that can be buffered.
+ int LoopMicroOpBufferSize = -1; // Max micro-ops that can be buffered for
+ // optimized loop dispatch/execution.
+ int LoadLatency = -1; // Cycles for loads to access the cache.
+ int HighLatency = -1; // Approximation of cycles for "high latency" ops.
+ int MispredictPenalty = -1; // Extra cycles for a mispredicted branch.
+
+ // Per-cycle resources tables.
+ ProcessorItineraries Itineraries = NoItineraries;
+
+ bit PostRAScheduler = 0; // Enable Post RegAlloc Scheduler pass.
+
+ // Subtargets that define a model for only a subset of instructions
+ // that have a scheduling class (itinerary class or SchedRW list)
+ // and may actually be generated for that subtarget must clear this
+ // bit. Otherwise, the scheduler considers an unmodelled opcode to
+ // be an error. The bit should only be cleared during initial bringup,
+ // or there will be no way to catch simple errors in the model
+ // resulting from changes to the instruction definitions.
+ bit CompleteModel = 1;
+
+ bit NoModel = 0; // Special tag to indicate missing machine model.
+}
+
+def NoSchedModel : SchedMachineModel {
+ let NoModel = 1;
+}
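+
+// A minimal subtarget model sketch (the def name and values are hypothetical,
+// not taken from any in-tree target):
+//
+//   def MyTargetModel : SchedMachineModel {
+//     let IssueWidth = 2;
+//     let LoadLatency = 3;
+//     let MispredictPenalty = 10;
+//   }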
+
+// Define a kind of processor resource that may be common across
+// similar subtargets.
+class ProcResourceKind;
+
+// Define a number of interchangeable processor resources. NumUnits
+// determines the throughput of instructions that require the resource.
+//
+// An optional Super resource may be given to model these resources as
+// a subset of the more general super resources. Using one of these
+// resources implies using one of the super resources.
+//
+// ProcResourceUnits normally model a few buffered resources within an
+// out-of-order engine. Buffered resources may be held for multiple
+// clock cycles, but the scheduler does not pin them to a particular
+// clock cycle relative to instruction dispatch. Setting BufferSize=0
+// changes this to an in-order issue/dispatch resource. In this case,
+// the scheduler counts down from the cycle that the instruction
+// issues in-order, forcing a stall whenever a subsequent instruction
+// requires the same resource until the number of ResourceCycles
+// specified in WriteRes expire. Setting BufferSize=1 changes this to
+// an in-order latency resource. In this case, the scheduler models
+// producer/consumer stalls between instructions that use the
+// resource.
+//
+// Examples (all assume an out-of-order engine):
+//
+// Use BufferSize = -1 for "issue ports" fed by a unified reservation
+// station. Here the size of the reservation station is modeled by
+// MicroOpBufferSize, which should be the minimum size of either the
+// register rename pool, unified reservation station, or reorder
+// buffer.
+//
+// Use BufferSize = 0 for resources that force "dispatch/issue
+// groups". (Different processors define dispatch/issue
+// differently. Here we refer to the stage between decoding into micro-ops
+// and moving them into a reservation station.) Normally NumMicroOps
+// is sufficient to limit dispatch/issue groups. However, some
+// processors can only form groups with certain combinations of
+// instruction types, e.g. POWER7.
+//
+// Use BufferSize = 1 for in-order execution units. This is used for
+// an in-order pipeline within an out-of-order core where scheduling
+// dependent operations back-to-back is guaranteed to cause a
+// bubble, e.g. Cortex-A9 floating-point.
+//
+// Use BufferSize > 1 for out-of-order execution units with a
+// separate reservation station. This simply models the size of the
+// reservation station.
+//
+// To model both dispatch/issue groups and in-order execution units,
+// create two types of units, one with BufferSize=0 and one with
+// BufferSize=1.
+//
+// SchedModel ties these units to a processor for any stand-alone defs
+// of this class. Instances of subclass ProcResource will be automatically
+// attached to a processor, so SchedModel is not needed.
+class ProcResourceUnits<ProcResourceKind kind, int num> {
+ ProcResourceKind Kind = kind;
+ int NumUnits = num;
+ ProcResourceKind Super = ?;
+ int BufferSize = -1;
+ SchedMachineModel SchedModel = ?;
+}
+
+// EponymousProcResourceKind helps implement ProcResourceUnits by
+// allowing a ProcResourceUnits definition to reference itself. It
+// should not be referenced anywhere else.
+def EponymousProcResourceKind : ProcResourceKind;
+
+// Subtargets typically define processor resource kind and number of
+// units in one place.
+class ProcResource<int num> : ProcResourceKind,
+ ProcResourceUnits<EponymousProcResourceKind, num>;
+
+class ProcResGroup<list<ProcResource> resources> : ProcResourceKind {
+ list<ProcResource> Resources = resources;
+ SchedMachineModel SchedModel = ?;
+ int BufferSize = -1;
+}
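+
+// A sketch of typical resource definitions (hypothetical names): two
+// interchangeable ALUs, one in-order load/store unit, and a group matching
+// either.
+//
+//   def MyALU : ProcResource<2>;
+//   def MyLSU : ProcResource<1> { let BufferSize = 0; }
+//   def MyAnyUnit : ProcResGroup<[MyALU, MyLSU]>;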
+
+// A target architecture may define SchedReadWrite types and associate
+// them with instruction operands.
+class SchedReadWrite;
+
+// List the per-operand types that map to the machine model of an
+// instruction. One SchedWrite type must be listed for each explicit
+// def operand in order. Additional SchedWrite types may optionally be
+// listed for implicit def operands. SchedRead types may optionally
+// be listed for use operands in order. The order of defs relative to
+// uses is insignificant. This way, the same SchedReadWrite list may
+// be used for multiple forms of an operation. For example, a
+// two-address instruction could have two tied operands or a single
+// operand that both reads and writes a reg. In both cases we have a
+// single SchedWrite and single SchedRead in any order.
+class Sched<list<SchedReadWrite> schedrw> {
+ list<SchedReadWrite> SchedRW = schedrw;
+}
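+
+// A sketch (WriteALU and ReadALU are hypothetical SchedWrite/SchedRead defs;
+// in a real target the instruction's other fields come from the full
+// Instruction class in Target.td): a three-operand instruction lists one
+// write for its def followed by reads for its two uses.
+//
+//   def MyADD : Instruction, Sched<[WriteALU, ReadALU, ReadALU]>;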
+
+// Define a scheduler resource associated with a def operand.
+class SchedWrite : SchedReadWrite;
+def NoWrite : SchedWrite;
+
+// Define a scheduler resource associated with a use operand.
+class SchedRead : SchedReadWrite;
+
+// Define a SchedWrite that is modeled as a sequence of other
+// SchedWrites with additive latency. This allows a single operand to
+// be mapped to the resources composed from a set of previously defined
+// SchedWrites.
+//
+// If the final write in this sequence is a SchedWriteVariant marked
+// Variadic, then the list of prior writes is distributed across all
+// operands after resolving the predicate for the final write.
+//
+// SchedModel silences warnings but is ignored.
+class WriteSequence<list<SchedWrite> writes, int rep = 1> : SchedWrite {
+ list<SchedWrite> Writes = writes;
+ int Repeat = rep;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define values common to WriteRes and SchedWriteRes.
+//
+// SchedModel ties these resources to a processor.
+class ProcWriteResources<list<ProcResourceKind> resources> {
+ list<ProcResourceKind> ProcResources = resources;
+ list<int> ResourceCycles = [];
+ int Latency = 1;
+ int NumMicroOps = 1;
+ bit BeginGroup = 0;
+ bit EndGroup = 0;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Define the resources and latency of a SchedWrite. This will be used
+// directly by targets that have no itinerary classes. In this case,
+// SchedWrite is defined by the target, while WriteResources is
+// defined by the subtarget, and maps the SchedWrite to processor
+// resources.
+//
+// If a target already has itinerary classes, SchedWriteResources can
+// be used instead to define subtarget specific SchedWrites and map
+// them to processor resources in one place. Then ItinRW can map
+// itinerary classes to the subtarget's SchedWrites.
+//
+// ProcResources indicates the set of resources consumed by the write.
+// Optionally, ResourceCycles indicates the number of cycles the
+// resource is consumed. Each ResourceCycles item is paired with the
+// ProcResource item at the same position in its list. Since
+// ResourceCycles are rarely specialized, the list may be
+// incomplete. By default, resources are consumed for a single cycle,
+// regardless of latency, which models a fully pipelined processing
+// unit. A value of 0 for ResourceCycles means that the resource must
+// be available but is not consumed, which is only relevant for
+// unbuffered resources.
+//
+// By default, each SchedWrite takes one micro-op, which is counted
+// against the processor's IssueWidth limit. If an instruction can
+// write multiple registers with a single micro-op, the subtarget
+// should define one of the writes to be zero micro-ops. If a
+// subtarget requires multiple micro-ops to write a single result, it
+// should either override the write's NumMicroOps to be greater than 1
+// or require additional writes. Extra writes can be required either
+// by defining a WriteSequence, or simply listing extra writes in the
+// instruction's list of writers beyond the number of "def"
+// operands. The scheduler assumes that all micro-ops must be
+// dispatched in the same cycle. These micro-ops may be required to
+// begin or end the current dispatch group.
+class WriteRes<SchedWrite write, list<ProcResourceKind> resources>
+ : ProcWriteResources<resources> {
+ SchedWrite WriteType = write;
+}
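+
+// A sketch (hypothetical names, reusing MyALU from the resource sketch above):
+// on this subtarget, WriteALU occupies one MyALU unit and has a latency of
+// two cycles.
+//
+//   def : WriteRes<WriteALU, [MyALU]> { let Latency = 2; }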
+
+// Directly name a set of WriteResources defining a new SchedWrite
+// type at the same time. This class is unaware of its SchedModel so
+// must be referenced by InstRW or ItinRW.
+class SchedWriteRes<list<ProcResourceKind> resources> : SchedWrite,
+ ProcWriteResources<resources>;
+
+// Define values common to ReadAdvance and SchedReadAdvance.
+//
+// SchedModel ties these resources to a processor.
+class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> {
+ int Cycles = cycles;
+ list<SchedWrite> ValidWrites = writes;
+ // Allow a processor to mark some scheduling classes as unsupported
+ // for stronger verification.
+ bit Unsupported = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A processor may define a ReadAdvance associated with a SchedRead
+// to reduce latency of a prior write by N cycles. A negative advance
+// effectively increases latency, which may be used for cross-domain
+// stalls.
+//
+// A ReadAdvance may be associated with a list of SchedWrites
+// to implement pipeline bypass. The Writes list may be empty to
+// indicate operands that are always read this number of Cycles later
+// than a normal register read, allowing the read's parent instruction
+// to issue earlier relative to the writer.
+class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []>
+ : ProcReadAdvance<cycles, writes> {
+ SchedRead ReadType = read;
+}
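+
+// A sketch (hypothetical names): operands read via ReadMul see the result of
+// a prior WriteMul three cycles early, modeling a forwarding path.
+//
+//   def : ReadAdvance<ReadMul, 3, [WriteMul]>;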
+
+// Directly associate a new SchedRead type with a delay and optional
+// pipeline bypass. For use with InstRW or ItinRW.
+class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead,
+ ProcReadAdvance<cycles, writes>;
+
+// Define SchedRead defaults. Reads seldom need special treatment.
+def ReadDefault : SchedRead;
+def NoReadAdvance : SchedReadAdvance<0>;
+
+// Define shared code that will be in the same scope as all
+// SchedPredicates. Available variables are:
+// (const MachineInstr *MI, const TargetSchedModel *SchedModel)
+class PredicateProlog<code c> {
+ code Code = c;
+}
+
+// Define a predicate to determine which SchedVariant applies to a
+// particular MachineInstr. The code snippet is used as an
+// if-statement's expression. Available variables are MI, SchedModel,
+// and anything defined in a PredicateProlog.
+//
+// SchedModel silences warnings but is ignored.
+class SchedPredicate<code pred> {
+ SchedMachineModel SchedModel = ?;
+ code Predicate = pred;
+}
+def NoSchedPred : SchedPredicate<[{true}]>;
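+
+// A sketch: a predicate distinguishing the register form of an instruction
+// (the MachineInstr API call is real; the def name is hypothetical).
+//
+//   def MyRegForm : SchedPredicate<[{MI->getOperand(2).isReg()}]>;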
+
+// Associate a predicate with a list of SchedReadWrites. By default,
+// the selected SchedReadWrites are still associated with a single
+// operand and assumed to execute sequentially with additive
+// latency. However, if the parent SchedWriteVariant or
+// SchedReadVariant is marked "Variadic", then each Selected
+// SchedReadWrite is mapped in place to the instruction's variadic
+// operands. In this case, latency is not additive. If the current Variant
+// is already part of a Sequence, then that entire chain leading up to
+// the Variant is distributed over the variadic operands.
+class SchedVar<SchedPredicate pred, list<SchedReadWrite> selected> {
+ SchedPredicate Predicate = pred;
+ list<SchedReadWrite> Selected = selected;
+}
+
+// SchedModel silences warnings but is ignored.
+class SchedVariant<list<SchedVar> variants> {
+ list<SchedVar> Variants = variants;
+ bit Variadic = 0;
+ SchedMachineModel SchedModel = ?;
+}
+
+// A SchedWriteVariant is a single SchedWrite type that maps to a list
+// of SchedWrite types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "def" operands. The
+// SchedVariant's Expansion list is then interpreted as one write
+// per-operand instead of the usual sequential writes feeding a single
+// operand.
+class SchedWriteVariant<list<SchedVar> variants> : SchedWrite,
+ SchedVariant<variants> {
+}
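+
+// A sketch (hypothetical names, reusing MyRegForm from the predicate sketch
+// above): select WriteALU for register forms and WriteLoad otherwise.
+//
+//   def MyWriteVar : SchedWriteVariant<[
+//     SchedVar<MyRegForm, [WriteALU]>,
+//     SchedVar<NoSchedPred, [WriteLoad]>
+//   ]>;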
+
+// A SchedReadVariant is a single SchedRead type that maps to a list
+// of SchedRead types under the conditions defined by its predicates.
+//
+// A Variadic write is expanded to cover multiple "readsReg" operands as
+// explained above.
+class SchedReadVariant<list<SchedVar> variants> : SchedRead,
+ SchedVariant<variants> {
+}
+
+// Map a set of opcodes to a list of SchedReadWrite types. This allows
+// the subtarget to easily override specific operations.
+//
+// SchedModel ties this opcode mapping to a processor.
+class InstRW<list<SchedReadWrite> rw, dag instrlist> {
+ list<SchedReadWrite> OperandReadWrites = rw;
+ dag Instrs = instrlist;
+ SchedMachineModel SchedModel = ?;
+}
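+
+// A sketch (hypothetical opcode names): override specific opcodes either by
+// listing them with instrs or by pattern with instregex, both defined above.
+//
+//   def : InstRW<[WriteALU], (instrs ADDrr, SUBrr)>;
+//   def : InstRW<[WriteLoad], (instregex "^LD")>;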
+
+// Map a set of itinerary classes to SchedReadWrite resources. This is
+// used to bootstrap a target (e.g. ARM) when itineraries already
+// exist and changing InstrInfo is undesirable.
+//
+// SchedModel ties this ItineraryClass mapping to a processor.
+class ItinRW<list<SchedReadWrite> rw, list<InstrItinClass> iic> {
+ list<InstrItinClass> MatchedItinClasses = iic;
+ list<SchedReadWrite> OperandReadWrites = rw;
+ SchedMachineModel SchedModel = ?;
+}
+
+// Alias a target-defined SchedReadWrite to a processor specific
+// SchedReadWrite. This allows a subtarget to easily map a
+// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or
+// SchedReadVariant.
+//
+// SchedModel will usually be provided by a surrounding let statement
+// and ties this SchedAlias mapping to a processor.
+class SchedAlias<SchedReadWrite match, SchedReadWrite alias> {
+ SchedReadWrite MatchRW = match;
+ SchedReadWrite AliasRW = alias;
+ SchedMachineModel SchedModel = ?;
+}
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
new file mode 100644
index 0000000..5654736
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -0,0 +1,1150 @@
+//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the target-independent interfaces used by SelectionDAG
+// instruction selection generators.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Type Constraint definitions.
+//
+// Note that the semantics of these constraints are hard coded into tblgen. To
+// modify or add constraints, you have to hack tblgen.
+//
+
+class SDTypeConstraint<int opnum> {
+ int OperandNum = opnum;
+}
+
+// SDTCisVT - The specified operand has exactly this VT.
+class SDTCisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
+ ValueType VT = vt;
+}
+
+class SDTCisPtrTy<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisInt - The specified operand has integer type.
+class SDTCisInt<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisFP - The specified operand has floating-point type.
+class SDTCisFP<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisVec - The specified operand has a vector type.
+class SDTCisVec<int OpNum> : SDTypeConstraint<OpNum>;
+
+// SDTCisSameAs - The two specified operands have identical types.
+class SDTCisSameAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+ int OtherOperandNum = OtherOp;
+}
+
+// SDTCisVTSmallerThanOp - The specified operand is a VT SDNode, and its type is
+// smaller than the 'Other' operand.
+class SDTCisVTSmallerThanOp<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+ int OtherOperandNum = OtherOp;
+}
+
+class SDTCisOpSmallerThanOp<int SmallOp, int BigOp> : SDTypeConstraint<SmallOp>{
+ int BigOperandNum = BigOp;
+}
+
+/// SDTCisEltOfVec - This indicates that ThisOp is a scalar type of the same
+/// type as the element type of OtherOp, which is a vector type.
+class SDTCisEltOfVec<int ThisOp, int OtherOp>
+ : SDTypeConstraint<ThisOp> {
+ int OtherOpNum = OtherOp;
+}
+
+/// SDTCisSubVecOfVec - This indicates that ThisOp is a vector type
+/// with length less than that of OtherOp, which is a vector type.
+class SDTCisSubVecOfVec<int ThisOp, int OtherOp>
+ : SDTypeConstraint<ThisOp> {
+ int OtherOpNum = OtherOp;
+}
+
+// SDTCVecEltisVT - The specified operand is a vector type with element type
+// VT.
+class SDTCVecEltisVT<int OpNum, ValueType vt> : SDTypeConstraint<OpNum> {
+ ValueType VT = vt;
+}
+
+// SDTCisSameNumEltsAs - The two specified operands have identical number
+// of elements.
+class SDTCisSameNumEltsAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+ int OtherOperandNum = OtherOp;
+}
+
+// SDTCisSameSizeAs - The two specified operands have identical size.
+class SDTCisSameSizeAs<int OpNum, int OtherOp> : SDTypeConstraint<OpNum> {
+ int OtherOperandNum = OtherOp;
+}
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Type Profile definitions.
+//
+// These use the constraints defined above to describe the type requirements of
+// the various nodes. These are not hard coded into tblgen, allowing targets to
+// add their own if needed.
+//
+
+// SDTypeProfile - This profile describes the type requirements of a Selection
+// DAG node.
+class SDTypeProfile<int numresults, int numoperands,
+ list<SDTypeConstraint> constraints> {
+ int NumResults = numresults;
+ int NumOperands = numoperands;
+ list<SDTypeConstraint> Constraints = constraints;
+}
+
+// Builtin profiles.
+def SDTIntLeaf: SDTypeProfile<1, 0, [SDTCisInt<0>]>; // for 'imm'.
+def SDTFPLeaf : SDTypeProfile<1, 0, [SDTCisFP<0>]>; // for 'fpimm'.
+def SDTPtrLeaf: SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>; // for '&g' (pointer leaves).
+def SDTOther : SDTypeProfile<1, 0, [SDTCisVT<0, OtherVT>]>; // for 'vt'.
+def SDTUNDEF : SDTypeProfile<1, 0, []>; // for 'undef'.
+def SDTUnaryOp : SDTypeProfile<1, 1, []>; // for bitconvert.
+
+def SDTIntBinOp : SDTypeProfile<1, 2, [ // add, and, or, xor, udiv, etc.
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisInt<0>
+]>;
+def SDTIntShiftOp : SDTypeProfile<1, 2, [ // shl, sra, srl
+ SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisInt<2>
+]>;
+def SDTIntBinHiLoOp : SDTypeProfile<2, 2, [ // mulhi, mullo, sdivrem, udivrem
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>,SDTCisInt<0>
+]>;
+
+def SDTFPBinOp : SDTypeProfile<1, 2, [ // fadd, fmul, etc.
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>
+]>;
+def SDTFPSignOp : SDTypeProfile<1, 2, [ // fcopysign.
+ SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisFP<2>
+]>;
+def SDTFPTernaryOp : SDTypeProfile<1, 3, [ // fmadd, fnmsub, etc.
+ SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisSameAs<0, 3>, SDTCisFP<0>
+]>;
+def SDTIntUnaryOp : SDTypeProfile<1, 1, [ // ctlz
+ SDTCisSameAs<0, 1>, SDTCisInt<0>
+]>;
+def SDTIntExtendOp : SDTypeProfile<1, 1, [ // sext, zext, anyext
+ SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
+]>;
+def SDTIntTruncOp : SDTypeProfile<1, 1, [ // trunc
+ SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<0, 1>
+]>;
+def SDTFPUnaryOp : SDTypeProfile<1, 1, [ // fneg, fsqrt, etc
+ SDTCisSameAs<0, 1>, SDTCisFP<0>
+]>;
+def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround
+ SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<0, 1>
+]>;
+def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend
+ SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>
+]>;
+def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
+ SDTCisFP<0>, SDTCisInt<1>
+]>;
+def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
+ SDTCisInt<0>, SDTCisFP<1>
+]>;
+def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
+ SDTCisSameAs<0, 1>, SDTCisInt<0>, SDTCisVT<2, OtherVT>,
+ SDTCisVTSmallerThanOp<2, 1>
+]>;
+
+def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
+ SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
+]>;
+
+def SDTSelect : SDTypeProfile<1, 3, [ // select
+ SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
+]>;
+
+def SDTVSelect : SDTypeProfile<1, 3, [ // vselect
+ SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
+]>;
+
+def SDTSelectCC : SDTypeProfile<1, 5, [ // select_cc
+ SDTCisSameAs<1, 2>, SDTCisSameAs<3, 4>, SDTCisSameAs<0, 3>,
+ SDTCisVT<5, OtherVT>
+]>;
+
+def SDTBr : SDTypeProfile<0, 1, [ // br
+ SDTCisVT<0, OtherVT>
+]>;
+
+def SDTBrCC : SDTypeProfile<0, 4, [ // brcc
+ SDTCisVT<0, OtherVT>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
+]>;
+
+def SDTBrcond : SDTypeProfile<0, 2, [ // brcond
+ SDTCisInt<0>, SDTCisVT<1, OtherVT>
+]>;
+
+def SDTBrind : SDTypeProfile<0, 1, [ // brind
+ SDTCisPtrTy<0>
+]>;
+
+def SDTCatchret : SDTypeProfile<0, 2, [ // catchret
+ SDTCisVT<0, OtherVT>, SDTCisVT<1, OtherVT>
+]>;
+
+def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap
+
+def SDTLoad : SDTypeProfile<1, 1, [ // load
+ SDTCisPtrTy<1>
+]>;
+
+def SDTStore : SDTypeProfile<0, 2, [ // store
+ SDTCisPtrTy<1>
+]>;
+
+def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
+ SDTCisSameAs<0, 2>, SDTCisPtrTy<0>, SDTCisPtrTy<3>
+]>;
+
+def SDTMaskedStore: SDTypeProfile<0, 3, [ // masked store
+ SDTCisPtrTy<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisSameNumEltsAs<1, 2>
+]>;
+
+def SDTMaskedLoad: SDTypeProfile<1, 3, [ // masked load
+ SDTCisVec<0>, SDTCisPtrTy<1>, SDTCisVec<2>, SDTCisSameAs<0, 3>,
+ SDTCisSameNumEltsAs<0, 2>
+]>;
+
+def SDTMaskedGather: SDTypeProfile<2, 3, [ // masked gather
+ SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<1, 3>,
+ SDTCisPtrTy<4>, SDTCVecEltisVT<1, i1>, SDTCisSameNumEltsAs<0, 1>
+]>;
+
+def SDTMaskedScatter: SDTypeProfile<1, 3, [ // masked scatter
+ SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCisSameNumEltsAs<0, 1>,
+ SDTCVecEltisVT<0, i1>, SDTCisPtrTy<3>
+]>;
+
+def SDTVecShuffle : SDTypeProfile<1, 2, [
+ SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>
+]>;
+def SDTVecExtract : SDTypeProfile<1, 2, [ // vector extract
+ SDTCisEltOfVec<0, 1>, SDTCisPtrTy<2>
+]>;
+def SDTVecInsert : SDTypeProfile<1, 3, [ // vector insert
+ SDTCisEltOfVec<2, 1>, SDTCisSameAs<0, 1>, SDTCisPtrTy<3>
+]>;
+
+def SDTSubVecExtract : SDTypeProfile<1, 2, [// subvector extract
+ SDTCisSubVecOfVec<0,1>, SDTCisInt<2>
+]>;
+def SDTSubVecInsert : SDTypeProfile<1, 3, [ // subvector insert
+ SDTCisSubVecOfVec<2, 1>, SDTCisSameAs<0,1>, SDTCisInt<3>
+]>;
+
+def SDTPrefetch : SDTypeProfile<0, 4, [ // prefetch
+ SDTCisPtrTy<0>, SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>, SDTCisInt<1>
+]>;
+
+def SDTMemBarrier : SDTypeProfile<0, 5, [ // memory barrier
+ SDTCisSameAs<0,1>, SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisSameAs<0,4>,
+ SDTCisInt<0>
+]>;
+def SDTAtomicFence : SDTypeProfile<0, 2, [
+ SDTCisSameAs<0,1>, SDTCisPtrTy<0>
+]>;
+def SDTAtomic3 : SDTypeProfile<1, 3, [
+ SDTCisSameAs<0,2>, SDTCisSameAs<0,3>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def SDTAtomic2 : SDTypeProfile<1, 2, [
+ SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+def SDTAtomicStore : SDTypeProfile<0, 2, [
+ SDTCisPtrTy<0>, SDTCisInt<1>
+]>;
+def SDTAtomicLoad : SDTypeProfile<1, 1, [
+ SDTCisInt<0>, SDTCisPtrTy<1>
+]>;
+
+def SDTConvertOp : SDTypeProfile<1, 5, [ // cvtss, su, us, uu, ff, fs, fu, sf, uf
+ SDTCisVT<2, OtherVT>, SDTCisVT<3, OtherVT>, SDTCisPtrTy<4>, SDTCisPtrTy<5>
+]>;
+
+class SDCallSeqStart<list<SDTypeConstraint> constraints> :
+ SDTypeProfile<0, 1, constraints>;
+class SDCallSeqEnd<list<SDTypeConstraint> constraints> :
+ SDTypeProfile<0, 2, constraints>;
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node Properties.
+//
+// Note: These are hard coded into tblgen.
+//
+class SDNodeProperty;
+def SDNPCommutative : SDNodeProperty; // X op Y == Y op X
+def SDNPAssociative : SDNodeProperty; // (X op Y) op Z == X op (Y op Z)
+def SDNPHasChain : SDNodeProperty; // R/W chain operand and result
+def SDNPOutGlue : SDNodeProperty; // Write a glue result
+def SDNPInGlue : SDNodeProperty; // Read a glue operand
+def SDNPOptInGlue : SDNodeProperty; // Optionally read a glue operand
+def SDNPMayStore : SDNodeProperty; // May write to memory, sets 'mayStore'.
+def SDNPMayLoad : SDNodeProperty; // May read memory, sets 'mayLoad'.
+def SDNPSideEffect : SDNodeProperty; // Sets 'HasUnmodelledSideEffects'.
+def SDNPMemOperand : SDNodeProperty; // Touches memory, has assoc MemOperand
+def SDNPVariadic : SDNodeProperty; // Node has variable arguments.
+def SDNPWantRoot : SDNodeProperty; // ComplexPattern gets the root of match
+def SDNPWantParent : SDNodeProperty; // ComplexPattern gets the parent
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Operations
+class SDPatternOperator;
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node definitions.
+//
+class SDNode<string opcode, SDTypeProfile typeprof,
+ list<SDNodeProperty> props = [], string sdclass = "SDNode">
+ : SDPatternOperator {
+ string Opcode = opcode;
+ string SDClass = sdclass;
+ list<SDNodeProperty> Properties = props;
+ SDTypeProfile TypeProfile = typeprof;
+}
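+
+// As an illustration only (a hypothetical target, not defined in this file),
+// a backend typically pairs a type profile with a node definition like so:
+//
+//   def SDT_MyTgtRet : SDTypeProfile<0, -1, []>;
+//   def mytgt_ret    : SDNode<"MyTgtISD::RET", SDT_MyTgtRet,
+//                             [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;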
+
+// Special TableGen-recognized dag nodes
+def set;
+def implicit;
+def node;
+def srcvalue;
+
+def imm : SDNode<"ISD::Constant" , SDTIntLeaf , [], "ConstantSDNode">;
+def timm : SDNode<"ISD::TargetConstant",SDTIntLeaf, [], "ConstantSDNode">;
+def fpimm : SDNode<"ISD::ConstantFP", SDTFPLeaf , [], "ConstantFPSDNode">;
+def vt : SDNode<"ISD::VALUETYPE" , SDTOther , [], "VTSDNode">;
+def bb : SDNode<"ISD::BasicBlock", SDTOther , [], "BasicBlockSDNode">;
+def cond : SDNode<"ISD::CONDCODE" , SDTOther , [], "CondCodeSDNode">;
+def undef : SDNode<"ISD::UNDEF" , SDTUNDEF , []>;
+def globaladdr : SDNode<"ISD::GlobalAddress", SDTPtrLeaf, [],
+ "GlobalAddressSDNode">;
+def tglobaladdr : SDNode<"ISD::TargetGlobalAddress", SDTPtrLeaf, [],
+ "GlobalAddressSDNode">;
+def globaltlsaddr : SDNode<"ISD::GlobalTLSAddress", SDTPtrLeaf, [],
+ "GlobalAddressSDNode">;
+def tglobaltlsaddr : SDNode<"ISD::TargetGlobalTLSAddress", SDTPtrLeaf, [],
+ "GlobalAddressSDNode">;
+def constpool : SDNode<"ISD::ConstantPool", SDTPtrLeaf, [],
+ "ConstantPoolSDNode">;
+def tconstpool : SDNode<"ISD::TargetConstantPool", SDTPtrLeaf, [],
+ "ConstantPoolSDNode">;
+def jumptable : SDNode<"ISD::JumpTable", SDTPtrLeaf, [],
+ "JumpTableSDNode">;
+def tjumptable : SDNode<"ISD::TargetJumpTable", SDTPtrLeaf, [],
+ "JumpTableSDNode">;
+def frameindex : SDNode<"ISD::FrameIndex", SDTPtrLeaf, [],
+ "FrameIndexSDNode">;
+def tframeindex : SDNode<"ISD::TargetFrameIndex", SDTPtrLeaf, [],
+ "FrameIndexSDNode">;
+def externalsym : SDNode<"ISD::ExternalSymbol", SDTPtrLeaf, [],
+ "ExternalSymbolSDNode">;
+def texternalsym: SDNode<"ISD::TargetExternalSymbol", SDTPtrLeaf, [],
+ "ExternalSymbolSDNode">;
+def mcsym: SDNode<"ISD::MCSymbol", SDTPtrLeaf, [], "MCSymbolSDNode">;
+def blockaddress : SDNode<"ISD::BlockAddress", SDTPtrLeaf, [],
+ "BlockAddressSDNode">;
+def tblockaddress: SDNode<"ISD::TargetBlockAddress", SDTPtrLeaf, [],
+ "BlockAddressSDNode">;
+
+def add : SDNode<"ISD::ADD" , SDTIntBinOp ,
+ [SDNPCommutative, SDNPAssociative]>;
+def sub : SDNode<"ISD::SUB" , SDTIntBinOp>;
+def mul : SDNode<"ISD::MUL" , SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>;
+def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>;
+def smullohi : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
+def umullohi : SDNode<"ISD::UMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
+def sdiv : SDNode<"ISD::SDIV" , SDTIntBinOp>;
+def udiv : SDNode<"ISD::UDIV" , SDTIntBinOp>;
+def srem : SDNode<"ISD::SREM" , SDTIntBinOp>;
+def urem : SDNode<"ISD::UREM" , SDTIntBinOp>;
+def sdivrem : SDNode<"ISD::SDIVREM" , SDTIntBinHiLoOp>;
+def udivrem : SDNode<"ISD::UDIVREM" , SDTIntBinHiLoOp>;
+def srl : SDNode<"ISD::SRL" , SDTIntShiftOp>;
+def sra : SDNode<"ISD::SRA" , SDTIntShiftOp>;
+def shl : SDNode<"ISD::SHL" , SDTIntShiftOp>;
+def rotl : SDNode<"ISD::ROTL" , SDTIntShiftOp>;
+def rotr : SDNode<"ISD::ROTR" , SDTIntShiftOp>;
+def and : SDNode<"ISD::AND" , SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def or : SDNode<"ISD::OR" , SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def xor : SDNode<"ISD::XOR" , SDTIntBinOp,
+ [SDNPCommutative, SDNPAssociative]>;
+def addc : SDNode<"ISD::ADDC" , SDTIntBinOp,
+ [SDNPCommutative, SDNPOutGlue]>;
+def adde : SDNode<"ISD::ADDE" , SDTIntBinOp,
+ [SDNPCommutative, SDNPOutGlue, SDNPInGlue]>;
+def subc : SDNode<"ISD::SUBC" , SDTIntBinOp,
+ [SDNPOutGlue]>;
+def sube : SDNode<"ISD::SUBE" , SDTIntBinOp,
+ [SDNPOutGlue, SDNPInGlue]>;
+def smin : SDNode<"ISD::SMIN" , SDTIntBinOp>;
+def smax : SDNode<"ISD::SMAX" , SDTIntBinOp>;
+def umin : SDNode<"ISD::UMIN" , SDTIntBinOp>;
+def umax : SDNode<"ISD::UMAX" , SDTIntBinOp>;
+
+def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
+def bitreverse : SDNode<"ISD::BITREVERSE" , SDTIntUnaryOp>;
+def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
+def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
+def cttz : SDNode<"ISD::CTTZ" , SDTIntUnaryOp>;
+def ctpop : SDNode<"ISD::CTPOP" , SDTIntUnaryOp>;
+def ctlz_zero_undef : SDNode<"ISD::CTLZ_ZERO_UNDEF", SDTIntUnaryOp>;
+def cttz_zero_undef : SDNode<"ISD::CTTZ_ZERO_UNDEF", SDTIntUnaryOp>;
+def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
+def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
+def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
+def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
+def bitconvert : SDNode<"ISD::BITCAST" , SDTUnaryOp>;
+def addrspacecast : SDNode<"ISD::ADDRSPACECAST", SDTUnaryOp>;
+def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
+def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
+
+def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
+def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
+def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
+def fdiv : SDNode<"ISD::FDIV" , SDTFPBinOp>;
+def frem : SDNode<"ISD::FREM" , SDTFPBinOp>;
+def fma : SDNode<"ISD::FMA" , SDTFPTernaryOp>;
+def fmad : SDNode<"ISD::FMAD" , SDTFPTernaryOp>;
+def fabs : SDNode<"ISD::FABS" , SDTFPUnaryOp>;
+def fminnum : SDNode<"ISD::FMINNUM" , SDTFPBinOp>;
+def fmaxnum : SDNode<"ISD::FMAXNUM" , SDTFPBinOp>;
+def fminnan : SDNode<"ISD::FMINNAN" , SDTFPBinOp>;
+def fmaxnan : SDNode<"ISD::FMAXNAN" , SDTFPBinOp>;
+def fgetsign : SDNode<"ISD::FGETSIGN" , SDTFPToIntOp>;
+def fneg : SDNode<"ISD::FNEG" , SDTFPUnaryOp>;
+def fsqrt : SDNode<"ISD::FSQRT" , SDTFPUnaryOp>;
+def fsin : SDNode<"ISD::FSIN" , SDTFPUnaryOp>;
+def fcos : SDNode<"ISD::FCOS" , SDTFPUnaryOp>;
+def fexp2 : SDNode<"ISD::FEXP2" , SDTFPUnaryOp>;
+def fpow : SDNode<"ISD::FPOW" , SDTFPBinOp>;
+def flog2 : SDNode<"ISD::FLOG2" , SDTFPUnaryOp>;
+def frint : SDNode<"ISD::FRINT" , SDTFPUnaryOp>;
+def ftrunc : SDNode<"ISD::FTRUNC" , SDTFPUnaryOp>;
+def fceil : SDNode<"ISD::FCEIL" , SDTFPUnaryOp>;
+def ffloor : SDNode<"ISD::FFLOOR" , SDTFPUnaryOp>;
+def fnearbyint : SDNode<"ISD::FNEARBYINT" , SDTFPUnaryOp>;
+def frnd : SDNode<"ISD::FROUND" , SDTFPUnaryOp>;
+
+def fround : SDNode<"ISD::FP_ROUND" , SDTFPRoundOp>;
+def fextend : SDNode<"ISD::FP_EXTEND" , SDTFPExtendOp>;
+def fcopysign : SDNode<"ISD::FCOPYSIGN" , SDTFPSignOp>;
+
+def sint_to_fp : SDNode<"ISD::SINT_TO_FP" , SDTIntToFPOp>;
+def uint_to_fp : SDNode<"ISD::UINT_TO_FP" , SDTIntToFPOp>;
+def fp_to_sint : SDNode<"ISD::FP_TO_SINT" , SDTFPToIntOp>;
+def fp_to_uint : SDNode<"ISD::FP_TO_UINT" , SDTFPToIntOp>;
+def f16_to_fp : SDNode<"ISD::FP16_TO_FP" , SDTIntToFPOp>;
+def fp_to_f16 : SDNode<"ISD::FP_TO_FP16" , SDTFPToIntOp>;
+
+def setcc : SDNode<"ISD::SETCC" , SDTSetCC>;
+def select : SDNode<"ISD::SELECT" , SDTSelect>;
+def vselect : SDNode<"ISD::VSELECT" , SDTVSelect>;
+def selectcc : SDNode<"ISD::SELECT_CC" , SDTSelectCC>;
+
+def brcc : SDNode<"ISD::BR_CC" , SDTBrCC, [SDNPHasChain]>;
+def brcond : SDNode<"ISD::BRCOND" , SDTBrcond, [SDNPHasChain]>;
+def brind : SDNode<"ISD::BRIND" , SDTBrind, [SDNPHasChain]>;
+def br : SDNode<"ISD::BR" , SDTBr, [SDNPHasChain]>;
+def catchret : SDNode<"ISD::CATCHRET" , SDTCatchret,
+ [SDNPHasChain, SDNPSideEffect]>;
+def cleanupret : SDNode<"ISD::CLEANUPRET" , SDTNone, [SDNPHasChain]>;
+def catchpad : SDNode<"ISD::CATCHPAD" , SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def trap : SDNode<"ISD::TRAP" , SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
+def debugtrap : SDNode<"ISD::DEBUGTRAP" , SDTNone,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def prefetch : SDNode<"ISD::PREFETCH" , SDTPrefetch,
+ [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
+ SDNPMemOperand]>;
+
+def readcyclecounter : SDNode<"ISD::READCYCLECOUNTER", SDTIntLeaf,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def atomic_fence : SDNode<"ISD::ATOMIC_FENCE" , SDTAtomicFence,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+def atomic_cmp_swap : SDNode<"ISD::ATOMIC_CMP_SWAP" , SDTAtomic3,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_add : SDNode<"ISD::ATOMIC_LOAD_ADD" , SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_swap : SDNode<"ISD::ATOMIC_SWAP", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_sub : SDNode<"ISD::ATOMIC_LOAD_SUB" , SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_and : SDNode<"ISD::ATOMIC_LOAD_AND" , SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_or : SDNode<"ISD::ATOMIC_LOAD_OR" , SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_xor : SDNode<"ISD::ATOMIC_LOAD_XOR" , SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_nand: SDNode<"ISD::ATOMIC_LOAD_NAND", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_min : SDNode<"ISD::ATOMIC_LOAD_MIN", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_max : SDNode<"ISD::ATOMIC_LOAD_MAX", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def masked_store : SDNode<"ISD::MSTORE", SDTMaskedStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def masked_load : SDNode<"ISD::MLOAD", SDTMaskedLoad,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def masked_scatter : SDNode<"ISD::MSCATTER", SDTMaskedScatter,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def masked_gather : SDNode<"ISD::MGATHER", SDTMaskedGather,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+
+// Do not use ld, st directly. Use load, extload, sextload, zextload, store,
+// and truncstore (see below).
+def ld : SDNode<"ISD::LOAD" , SDTLoad,
+ [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
+def st : SDNode<"ISD::STORE" , SDTStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+def ist : SDNode<"ISD::STORE" , SDTIStore,
+ [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
+
+def vector_shuffle : SDNode<"ISD::VECTOR_SHUFFLE", SDTVecShuffle, []>;
+def build_vector : SDNode<"ISD::BUILD_VECTOR", SDTypeProfile<1, -1, []>, []>;
+def scalar_to_vector : SDNode<"ISD::SCALAR_TO_VECTOR", SDTypeProfile<1, 1, []>,
+ []>;
+
+// vector_extract/vector_insert are deprecated. extractelt/insertelt
+// are preferred.
+def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
+ SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
+def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
+ SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
+def concat_vectors : SDNode<"ISD::CONCAT_VECTORS",
+ SDTypeProfile<1, 2, [SDTCisSubVecOfVec<1, 0>, SDTCisSameAs<1, 2>]>,[]>;
+
+// This operator does not do subvector type checking. The ARM
+// backend, at least, needs it.
+def vector_extract_subvec : SDNode<"ISD::EXTRACT_SUBVECTOR",
+ SDTypeProfile<1, 2, [SDTCisInt<2>, SDTCisVec<1>, SDTCisVec<0>]>,
+ []>;
+
+// This operator does subvector type checking.
+def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
+def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
+
+// Nodes for intrinsics. You should use the intrinsic itself and let tblgen
+// use these internally; don't reference these directly.
+def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
+ SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
+ [SDNPHasChain]>;
+def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
+ SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
+ [SDNPHasChain]>;
+def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
+ SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
+
+// Do not use cvt directly. Use the cvt* forms defined below.
+def cvt : SDNode<"ISD::CONVERT_RNDSAT", SDTConvertOp>;
+
+def SDT_assertext : SDTypeProfile<1, 1,
+ [SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
+def assertsext : SDNode<"ISD::AssertSext", SDT_assertext>;
+def assertzext : SDNode<"ISD::AssertZext", SDT_assertext>;
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Condition Codes
+
+class CondCode; // ISD::CondCode enums
+def SETOEQ : CondCode; def SETOGT : CondCode;
+def SETOGE : CondCode; def SETOLT : CondCode; def SETOLE : CondCode;
+def SETONE : CondCode; def SETO : CondCode; def SETUO : CondCode;
+def SETUEQ : CondCode; def SETUGT : CondCode; def SETUGE : CondCode;
+def SETULT : CondCode; def SETULE : CondCode; def SETUNE : CondCode;
+
+def SETEQ : CondCode; def SETGT : CondCode; def SETGE : CondCode;
+def SETLT : CondCode; def SETLE : CondCode; def SETNE : CondCode;
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Node Transformation Functions.
+//
+// This mechanism allows targets to manipulate nodes in the output DAG once a
+// match has been formed. This is typically used to manipulate immediate
+// values.
+//
+class SDNodeXForm<SDNode opc, code xformFunction> {
+ SDNode Opcode = opc;
+ code XFormFunction = xformFunction;
+}
+
+def NOOP_SDNodeXForm : SDNodeXForm<imm, [{}]>;
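+
+// For example (a sketch; a hypothetical target's transform, not defined in
+// this file), an XForm extracting the low 16 bits of an immediate:
+//
+//   def LO16 : SDNodeXForm<imm, [{
+//     return CurDAG->getTargetConstant(N->getZExtValue() & 0xffff, SDLoc(N),
+//                                      MVT::i32);
+//   }]>;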
+
+//===----------------------------------------------------------------------===//
+// PatPred Subclasses.
+//
+// These allow specifying different sorts of predicates that control whether a
+// node is matched.
+//
+class PatPred;
+
+class CodePatPred<code predicate> : PatPred {
+ code PredicateCode = predicate;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Fragments.
+//
+// Pattern fragments are reusable chunks of dags that match specific things.
+// They can take arguments and have C++ predicates that control whether they
+// match. They are intended to make the patterns for common instructions more
+// compact and readable.
+//
+
+/// PatFrag - Represents a pattern fragment. This can match something on the
+/// DAG, from a single node to multiple nested other fragments.
+///
+class PatFrag<dag ops, dag frag, code pred = [{}],
+ SDNodeXForm xform = NOOP_SDNodeXForm> : SDPatternOperator {
+ dag Operands = ops;
+ dag Fragment = frag;
+ code PredicateCode = pred;
+ code ImmediateCode = [{}];
+ SDNodeXForm OperandTransform = xform;
+}
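+
+// For instance (illustrative only), a fragment matching an add of a left
+// shift could be written as:
+//
+//   def add_shl : PatFrag<(ops node:$a, node:$b, node:$amt),
+//                         (add node:$a, (shl node:$b, node:$amt))>;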
+
+// OutPatFrag is a pattern fragment that is used as part of an output pattern
+// (not an input pattern). These do not have predicates or transforms, but are
+// used to avoid repeated subexpressions in output patterns.
+class OutPatFrag<dag ops, dag frag>
+ : PatFrag<ops, frag, [{}], NOOP_SDNodeXForm>;
+
+// A PatLeaf is a pattern fragment that has no operands. This is just a helper
+// for concisely defining immediates and other common leaf patterns.
+class PatLeaf<dag frag, code pred = [{}], SDNodeXForm xform = NOOP_SDNodeXForm>
+ : PatFrag<(ops), frag, pred, xform>;
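+
+// For example (hypothetical), a leaf matching an 8-bit unsigned immediate:
+//
+//   def immZExt8 : PatLeaf<(imm), [{
+//     return isUInt<8>(N->getZExtValue());
+//   }]>;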
+
+
+// ImmLeaf is a pattern fragment with a constraint on the immediate. The
+// constraint is code that is run on the immediate value, which is made
+// available as 'Imm' (always sign extended out to an int64_t). For example:
+//
+// def immSExt8 : ImmLeaf<i16, [{ return (char)Imm == Imm; }]>;
+//
+// This is a more convenient way to match 'imm' nodes than PatLeaf, and is
+// preferred because it allows the code generator to reason about the
+// constraint.
+//
+// If FastIsel should ignore all instructions that have an operand of this type,
+// the FastIselShouldIgnore flag can be set. This is an optimization to reduce
+// the code size of the generated fast instruction selector.
+class ImmLeaf<ValueType vt, code pred, SDNodeXForm xform = NOOP_SDNodeXForm>
+ : PatFrag<(ops), (vt imm), [{}], xform> {
+ let ImmediateCode = pred;
+ bit FastIselShouldIgnore = 0;
+}
+
+
+// Leaf fragments.
+
+def vtInt : PatLeaf<(vt), [{ return N->getVT().isInteger(); }]>;
+def vtFP : PatLeaf<(vt), [{ return N->getVT().isFloatingPoint(); }]>;
+
+def immAllOnesV: PatLeaf<(build_vector), [{
+ return ISD::isBuildVectorAllOnes(N);
+}]>;
+def immAllZerosV: PatLeaf<(build_vector), [{
+ return ISD::isBuildVectorAllZeros(N);
+}]>;
+
+
+
+// Other helper fragments.
+def not : PatFrag<(ops node:$in), (xor node:$in, -1)>;
+def vnot : PatFrag<(ops node:$in), (xor node:$in, immAllOnesV)>;
+def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
+
+// null_frag - The null pattern operator is used in multiclass instantiations
+// which accept an SDPatternOperator for use in matching patterns for internal
+// definitions. When expanding a pattern, if the null fragment is referenced
+// in the expansion, the pattern is discarded and it is as if '[]' had been
+// specified. This allows multiclasses to make their isel patterns optional.
+def null_frag : SDPatternOperator;
+
+// load fragments.
+def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+ return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+}]>;
+def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+}]>;
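+
+// Targets commonly refine these fragments further; e.g. (hypothetical, for
+// illustration) a fragment for naturally aligned loads:
+//
+//   def aligned_load : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+//     LoadSDNode *LD = cast<LoadSDNode>(N);
+//     return LD->getAlignment() >= LD->getMemoryVT().getStoreSize();
+//   }]>;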
+
+// extending load fragments.
+def extload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+}]>;
+def sextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+}]>;
+def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+}]>;
+
+def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
+}]>;
+
+def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def extloadvi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def extloadvi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def extloadvi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def extloadvi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+def extloadvf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f32;
+}]>;
+def extloadvf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::f64;
+}]>;
+
+def sextloadvi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def sextloadvi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def sextloadvi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def sextloadvi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
+def zextloadvi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i1;
+}]>;
+def zextloadvi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+def zextloadvi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+def zextloadvi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+ return cast<LoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
+// store fragments.
+def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
+ (st node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+}]>;
+def store : PatFrag<(ops node:$val, node:$ptr),
+ (unindexedstore node:$val, node:$ptr), [{
+ return !cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+
+// truncstore fragments.
+def truncstore : PatFrag<(ops node:$val, node:$ptr),
+ (unindexedstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
+}]>;
+
+def truncstorevi8 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
+}]>;
+
+def truncstorevi16 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
+}]>;
+
+def truncstorevi32 : PatFrag<(ops node:$val, node:$ptr),
+ (truncstore node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
+}]>;
+
+// indexed store fragments.
+def istore : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (ist node:$val, node:$base, node:$offset), [{
+ return !cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+
+def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (istore node:$val, node:$base, node:$offset), [{
+ ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+ return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
+
+def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (ist node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (itruncstore node:$val, node:$base, node:$offset), [{
+ ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+ return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
+def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (pre_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+
+def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
+ (istore node:$val, node:$ptr, node:$offset), [{
+ ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+ return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
+
+def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (itruncstore node:$val, node:$base, node:$offset), [{
+ ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+ return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
+def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (post_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (post_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (post_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (post_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
+ (post_truncst node:$val, node:$base, node:$offset), [{
+ return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+
+// nontemporal store fragments.
+def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (store node:$val, node:$ptr), [{
+ return cast<StoreSDNode>(N)->isNonTemporal();
+}]>;
+
+def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (nontemporalstore node:$val, node:$ptr), [{
+ StoreSDNode *St = cast<StoreSDNode>(N);
+ return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+}]>;
+
+def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
+ (nontemporalstore node:$val, node:$ptr), [{
+ StoreSDNode *St = cast<StoreSDNode>(N);
+ return St->getAlignment() < St->getMemoryVT().getStoreSize();
+}]>;
+
+// setcc convenience fragments.
+def setoeq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOEQ)>;
+def setogt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOGT)>;
+def setoge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOGE)>;
+def setolt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOLT)>;
+def setole : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETOLE)>;
+def setone : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETONE)>;
+def seto : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETO)>;
+def setuo : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUO)>;
+def setueq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUEQ)>;
+def setugt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUGT)>;
+def setuge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUGE)>;
+def setult : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETULT)>;
+def setule : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETULE)>;
+def setune : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETUNE)>;
+def seteq : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETEQ)>;
+def setgt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETGT)>;
+def setge : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETGE)>;
+def setlt : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETLT)>;
+def setle : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETLE)>;
+def setne : PatFrag<(ops node:$lhs, node:$rhs),
+ (setcc node:$lhs, node:$rhs, SETNE)>;
+
+def atomic_cmp_swap_8 :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def atomic_cmp_swap_16 :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def atomic_cmp_swap_32 :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def atomic_cmp_swap_64 :
+ PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
+ (atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+multiclass binary_atomic_op<SDNode atomic_op> {
+ def _8 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_op node:$ptr, node:$val), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
+ }]>;
+ def _16 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_op node:$ptr, node:$val), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
+ }]>;
+ def _32 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_op node:$ptr, node:$val), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
+ }]>;
+ def _64 : PatFrag<(ops node:$ptr, node:$val),
+ (atomic_op node:$ptr, node:$val), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
+ }]>;
+}
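+
+// Each 'defm' below expands the multiclass into four fragments; e.g.
+// 'defm atomic_load_add' defines atomic_load_add_8, atomic_load_add_16,
+// atomic_load_add_32 and atomic_load_add_64, each constrained to the
+// corresponding memory VT.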
+
+defm atomic_load_add : binary_atomic_op<atomic_load_add>;
+defm atomic_swap : binary_atomic_op<atomic_swap>;
+defm atomic_load_sub : binary_atomic_op<atomic_load_sub>;
+defm atomic_load_and : binary_atomic_op<atomic_load_and>;
+defm atomic_load_or : binary_atomic_op<atomic_load_or>;
+defm atomic_load_xor : binary_atomic_op<atomic_load_xor>;
+defm atomic_load_nand : binary_atomic_op<atomic_load_nand>;
+defm atomic_load_min : binary_atomic_op<atomic_load_min>;
+defm atomic_load_max : binary_atomic_op<atomic_load_max>;
+defm atomic_load_umin : binary_atomic_op<atomic_load_umin>;
+defm atomic_load_umax : binary_atomic_op<atomic_load_umax>;
+defm atomic_store : binary_atomic_op<atomic_store>;
+
+def atomic_load_8 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def atomic_load_16 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def atomic_load_32 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def atomic_load_64 :
+ PatFrag<(ops node:$ptr),
+ (atomic_load node:$ptr), [{
+ return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Selection DAG CONVERT_RNDSAT patterns
+
+def cvtff : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF;
+ }]>;
+
+def cvtss : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS;
+ }]>;
+
+def cvtsu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU;
+ }]>;
+
+def cvtus : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US;
+ }]>;
+
+def cvtuu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU;
+ }]>;
+
+def cvtsf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF;
+ }]>;
+
+def cvtuf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF;
+ }]>;
+
+def cvtfs : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS;
+ }]>;
+
+def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
+ (cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
+ return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU;
+ }]>;
+
+//===----------------------------------------------------------------------===//
+// Selection DAG Pattern Support.
+//
+// Patterns are what the instruction selector actually matches against the
+// target-flavored selection DAG. Instructions defined by the target implicitly
+// define patterns in most cases, but patterns can also be explicitly added when
+// an operation is defined by a sequence of instructions (e.g. loading a large
+// immediate value on RISC targets that do not support immediates as large as
+// their GPRs).
+//
+
+class Pattern<dag patternToMatch, list<dag> resultInstrs> {
+ dag PatternToMatch = patternToMatch;
+ list<dag> ResultInstrs = resultInstrs;
+ list<Predicate> Predicates = []; // See class Instruction in Target.td.
+ int AddedComplexity = 0; // See class Instruction in Target.td.
+}
+
+// Pat - A simple (but common) form of a pattern, which produces a simple result
+// not needing a full list.
+class Pat<dag pattern, dag result> : Pattern<pattern, [result]>;
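+
+// For example (a sketch for a hypothetical RISC-like target), negation could
+// be selected by rewriting it as a subtract from the zero register:
+//
+//   def : Pat<(ineg GPR:$src), (SUB ZERO, GPR:$src)>;
+//
+// where GPR, ZERO and SUB are that target's register class, zero register and
+// subtract instruction.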
+
+//===----------------------------------------------------------------------===//
+// Complex pattern definitions.
+//
+
+// Complex patterns, e.g. the X86 addressing mode, require pattern-matching
+// code in C++. NumOperands is the number of operands returned by the select
+// function; SelectFunc is the name of the function used to pattern match the
+// maximal pattern; RootNodes is the list of possible root nodes of the
+// sub-dags to match, e.g. for the X86 addressing mode:
+// def addr : ComplexPattern<iPTR, 4, "SelectAddr", [add]>;
+//
+class ComplexPattern<ValueType ty, int numops, string fn,
+ list<SDNode> roots = [], list<SDNodeProperty> props = []> {
+ ValueType Ty = ty;
+ int NumOperands = numops;
+ string SelectFunc = fn;
+ list<SDNode> RootNodes = roots;
+ list<SDNodeProperty> Properties = props;
+}
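+
+// A matcher defined this way is then used like any other operand in an isel
+// pattern; e.g. with the 'addr' definition sketched above (hypothetical
+// instruction name):
+//
+//   def : Pat<(i32 (load addr:$src)), (LOAD32rm addr:$src)>;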
diff --git a/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
new file mode 100644
index 0000000..a7143ac
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetSelectionDAGInfo.h
@@ -0,0 +1,158 @@
+//==-- llvm/Target/TargetSelectionDAGInfo.h - SelectionDAG Info --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the TargetSelectionDAGInfo class, which targets can
+// subclass to parameterize the SelectionDAG lowering and instruction
+// selection process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETSELECTIONDAGINFO_H
+#define LLVM_TARGET_TARGETSELECTIONDAGINFO_H
+
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+/// Targets can subclass this to parameterize the
+/// SelectionDAG lowering and instruction selection process.
+///
+class TargetSelectionDAGInfo {
+ TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) = delete;
+ void operator=(const TargetSelectionDAGInfo &) = delete;
+
+public:
+ explicit TargetSelectionDAGInfo() = default;
+ virtual ~TargetSelectionDAGInfo();
+
+ /// Emit target-specific code that performs a memcpy.
+ /// This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple loads/stores and can be
+ /// more efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ ///
+ /// If AlwaysInline is true, the size is constant and the target should not
+ /// emit any calls and is strongly encouraged to attempt to emit inline code
+ /// even if it is beyond the usual threshold because this intrinsic is being
+ /// expanded in a place where calls are not feasible (e.g. within the prologue
+ /// for another call). If the target chooses to decline an AlwaysInline
+ /// request here, legalize will resort to using simple loads and stores.
+ virtual SDValue
+ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ bool AlwaysInline,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
+ return SDValue();
+ }
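+
+ // A target provides custom lowering by overriding this in its subclass,
+ // e.g. (a sketch; 'MyTarget' and the size threshold are hypothetical):
+ //
+ //   SDValue MyTargetSelectionDAGInfo::EmitTargetCodeForMemcpy(
+ //       SelectionDAG &DAG, SDLoc dl, SDValue Chain, SDValue Dst, SDValue Src,
+ //       SDValue Size, unsigned Align, bool isVolatile, bool AlwaysInline,
+ //       MachinePointerInfo DstPtrInfo,
+ //       MachinePointerInfo SrcPtrInfo) const {
+ //     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Size);
+ //     if (!C || C->getZExtValue() > 32)
+ //       return SDValue();  // decline; the generic lowering is used instead
+ //     // ... build target-specific load/store nodes here ...
+ //   }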
+
+ /// Emit target-specific code that performs a memmove.
+ /// This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple loads/stores and can be
+ /// more efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ virtual SDValue
+ EmitTargetCodeForMemmove(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo,
+ MachinePointerInfo SrcPtrInfo) const {
+ return SDValue();
+ }
+
+ /// Emit target-specific code that performs a memset.
+ /// This can be used by targets to provide code sequences for cases
+ /// that don't fit the target's parameters for simple stores and can be more
+ /// efficient than using a library call. This function can return a null
+ /// SDValue if the target declines to use custom code and a different
+ /// lowering strategy should be used.
+ virtual SDValue
+ EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, unsigned Align, bool isVolatile,
+ MachinePointerInfo DstPtrInfo) const {
+ return SDValue();
+ }
+
+ /// Emit target-specific code that performs a memcmp, in cases where that is
+ /// faster than a libcall. The first returned SDValue is the result of the
+ /// memcmp and the second is the chain. Both SDValues can be null if a normal
+ /// libcall should be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemcmp(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ SDValue Op3, MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ /// Emit target-specific code that performs a memchr, in cases where that is
+ /// faster than a libcall. The first returned SDValue is the result of the
+ /// memchr and the second is the chain. Both SDValues can be null if a normal
+ /// libcall should be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForMemchr(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
+ SDValue Src, SDValue Char, SDValue Length,
+ MachinePointerInfo SrcPtrInfo) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ /// Emit target-specific code that performs a strcpy or stpcpy, in cases
+ /// where that is faster than a libcall.
+ /// The first returned SDValue is the result of the copy (the start
+ /// of the destination string for strcpy, a pointer to the null terminator
+ /// for stpcpy) and the second is the chain. Both SDValues can be null
+ /// if a normal libcall should be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcpy(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Dest, SDValue Src,
+ MachinePointerInfo DestPtrInfo,
+ MachinePointerInfo SrcPtrInfo,
+ bool isStpcpy) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ /// Emit target-specific code that performs a strcmp, in cases where that is
+ /// faster than a libcall.
+ /// The first returned SDValue is the result of the strcmp and the second is
+ /// the chain. Both SDValues can be null if a normal libcall should be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrcmp(SelectionDAG &DAG, SDLoc dl,
+ SDValue Chain,
+ SDValue Op1, SDValue Op2,
+ MachinePointerInfo Op1PtrInfo,
+ MachinePointerInfo Op2PtrInfo) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ /// Emit target-specific code that performs a strlen, in cases where that is
+ /// faster than a libcall. The first returned SDValue is the length and the
+ /// second is the chain. Both SDValues can be null if a normal libcall should
+ /// be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, MachinePointerInfo SrcPtrInfo) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+
+ /// Emit target-specific code that performs a strnlen, in cases where that is
+ /// faster than a libcall. The first returned SDValue is the length and the
+ /// second is the chain. Both SDValues can be null if a normal libcall should
+ /// be used.
+ virtual std::pair<SDValue, SDValue>
+ EmitTargetCodeForStrnlen(SelectionDAG &DAG, SDLoc DL, SDValue Chain,
+ SDValue Src, SDValue MaxLength,
+ MachinePointerInfo SrcPtrInfo) const {
+ return std::make_pair(SDValue(), SDValue());
+ }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
new file mode 100644
index 0000000..d50aa49
--- /dev/null
+++ b/contrib/llvm/include/llvm/Target/TargetSubtargetInfo.h
@@ -0,0 +1,197 @@
+//==-- llvm/Target/TargetSubtargetInfo.h - Target Information ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the subtarget options of a Target machine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
+#define LLVM_TARGET_TARGETSUBTARGETINFO_H
+
+#include "llvm/CodeGen/PBQPRAConstraint.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/CodeGen.h"
+
+namespace llvm {
+
+class DataLayout;
+class MachineFunction;
+class MachineInstr;
+class SDep;
+class SUnit;
+class TargetFrameLowering;
+class TargetInstrInfo;
+class TargetLowering;
+class TargetRegisterClass;
+class TargetRegisterInfo;
+class TargetSchedModel;
+class TargetSelectionDAGInfo;
+struct MachineSchedPolicy;
+template <typename T> class SmallVectorImpl;
+
+//===----------------------------------------------------------------------===//
+///
+/// TargetSubtargetInfo - Generic base class for all target subtargets. All
+/// Target-specific options that control code generation and printing should
+/// be exposed through a TargetSubtargetInfo-derived class.
+///
+class TargetSubtargetInfo : public MCSubtargetInfo {
+ TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
+ void operator=(const TargetSubtargetInfo &) = delete;
+ TargetSubtargetInfo() = delete;
+
+protected: // Can only create subclasses...
+ TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
+ ArrayRef<SubtargetFeatureKV> PF,
+ ArrayRef<SubtargetFeatureKV> PD,
+ const SubtargetInfoKV *ProcSched,
+ const MCWriteProcResEntry *WPR,
+ const MCWriteLatencyEntry *WL,
+ const MCReadAdvanceEntry *RA, const InstrStage *IS,
+ const unsigned *OC, const unsigned *FP);
+
+public:
+ // AntiDepBreakMode - Type of anti-dependence breaking that should
+ // be performed before post-RA scheduling.
+ typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
+ typedef SmallVectorImpl<const TargetRegisterClass *> RegClassVector;
+
+ virtual ~TargetSubtargetInfo();
+
+ // Interfaces to the major aspects of target machine information:
+ //
+ // -- Instruction opcode and operand information
+ // -- Pipelines and scheduling information
+ // -- Stack frame information
+ // -- Selection DAG lowering information
+ //
+ // N.B. These objects may change during compilation. It's not safe to cache
+ // them between functions.
+ virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
+ virtual const TargetFrameLowering *getFrameLowering() const {
+ return nullptr;
+ }
+ virtual const TargetLowering *getTargetLowering() const { return nullptr; }
+ virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
+ return nullptr;
+ }
+ /// Targets can override this hook to select a different DAG scheduler.
+ virtual RegisterScheduler::FunctionPassCtor
+ getDAGScheduler(CodeGenOpt::Level) const {
+ return nullptr;
+ }
+
+ /// getRegisterInfo - If register information is available, return it. If
+ /// not, return null. This is kept separate from RegInfo until RegInfo has
+ /// details of graph coloring register allocation removed from it.
+ ///
+ virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
+
+ /// getInstrItineraryData - Returns instruction itinerary data for the target
+ /// or specific subtarget.
+ ///
+ virtual const InstrItineraryData *getInstrItineraryData() const {
+ return nullptr;
+ }
+
+ /// Resolve a SchedClass at runtime, where SchedClass identifies an
+ /// MCSchedClassDesc with the isVariant property. This may return the ID of
+ /// another variant SchedClass, but repeated invocation must quickly terminate
+ /// in a nonvariant SchedClass.
+ virtual unsigned resolveSchedClass(unsigned SchedClass,
+ const MachineInstr *MI,
+ const TargetSchedModel *SchedModel) const {
+ return 0;
+ }
+
+ /// \brief True if the subtarget should run MachineScheduler after aggressive
+ /// coalescing.
+ ///
+ /// This currently replaces the SelectionDAG scheduler with the "source" order
+ /// scheduler (though see below for an option to turn this off and use the
+ /// TargetLowering preference). It does not yet disable the postRA scheduler.
+ virtual bool enableMachineScheduler() const;
+
+ /// \brief True if the machine scheduler should disable the TLI preference
+ /// for preRA scheduling with the source level scheduler.
+ virtual bool enableMachineSchedDefaultSched() const { return true; }
+
+ /// \brief True if the subtarget should enable joining global copies.
+ ///
+ /// By default this is enabled if the machine scheduler is enabled, but
+ /// can be overridden.
+ virtual bool enableJoinGlobalCopies() const;
+
+ /// True if the subtarget should run a scheduler after register allocation.
+ ///
+ /// By default this queries the PostRAScheduling bit in the scheduling model
+ /// which is the preferred way to influence this.
+ virtual bool enablePostRAScheduler() const;
+
+ /// \brief True if the subtarget should run the atomic expansion pass.
+ virtual bool enableAtomicExpand() const;
+
+ /// \brief Override generic scheduling policy within a region.
+ ///
+ /// This is a convenient way for targets that don't provide any custom
+ /// scheduling heuristics (no custom MachineSchedStrategy) to make
+ /// changes to the generic scheduling policy.
+ virtual void overrideSchedPolicy(MachineSchedPolicy &Policy,
+ MachineInstr *begin, MachineInstr *end,
+ unsigned NumRegionInstrs) const {}
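+
+ // For instance (a sketch; 'MyTargetSubtarget' and the threshold are
+ // hypothetical), a target could bias small regions toward bottom-up
+ // scheduling:
+ //
+ //   void MyTargetSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
+ //                                               MachineInstr *begin,
+ //                                               MachineInstr *end,
+ //                                               unsigned NumRegionInstrs)
+ //       const {
+ //     if (NumRegionInstrs < 16)
+ //       Policy.OnlyBottomUp = true;
+ //   }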
+
+ // \brief Perform target specific adjustments to the latency of a schedule
+ // dependency.
+ virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep &dep) const {}
+
+ // For use with PostRAScheduling: get the anti-dependence breaking that should
+ // be performed before post-RA scheduling.
+ virtual AntiDepBreakMode getAntiDepBreakMode() const { return ANTIDEP_NONE; }
+
+ // For use with PostRAScheduling: in CriticalPathRCs, return any register
+ // classes that should only be considered for anti-dependence breaking if they
+ // are on the critical path.
+ virtual void getCriticalPathRCs(RegClassVector &CriticalPathRCs) const {
+ CriticalPathRCs.clear();
+ }
+
+ // For use with PostRAScheduling: get the minimum optimization level needed
+ // to enable post-RA scheduling.
+ virtual CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const {
+ return CodeGenOpt::Default;
+ }
+
+ /// \brief True if the subtarget should run the local reassignment
+ /// heuristic of the register allocator.
+ /// This heuristic may be compile-time intensive; \p OptLevel provides
+ /// a finer grain to tune the register allocator.
+ virtual bool enableRALocalReassignment(CodeGenOpt::Level OptLevel) const;
+
+ /// \brief Enable use of alias analysis during code generation (during MI
+ /// scheduling, DAGCombine, etc.).
+ virtual bool useAA() const;
+
+ /// \brief Enable the use of the early if conversion pass.
+ virtual bool enableEarlyIfConversion() const { return false; }
+
+ /// \brief Return PBQPConstraint(s) for the target.
+ ///
+ /// Override to provide custom PBQP constraints.
+ virtual std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const {
+ return nullptr;
+ }
+
+ /// Enable tracking of subregister liveness in register allocator.
+ virtual bool enableSubRegLiveness() const { return false; }
+};
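+
+// A minimal sketch of how a target might override these hooks. "FooSubtarget"
+// is hypothetical and construction details are elided; see any in-tree
+// *Subtarget.h for a real example.
+//
+//   class FooSubtarget : public TargetSubtargetInfo {
+//   public:
+//     // Opt in to the MachineScheduler and global copy joining.
+//     bool enableMachineScheduler() const override { return true; }
+//     bool enableJoinGlobalCopies() const override { return true; }
+//     // Break anti-dependencies on the critical path only.
+//     AntiDepBreakMode getAntiDepBreakMode() const override {
+//       return ANTIDEP_CRITICAL;
+//     }
+//   };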
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/IPO.h b/contrib/llvm/include/llvm/Transforms/IPO.h
new file mode 100644
index 0000000..0c374a0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO.h
@@ -0,0 +1,229 @@
+//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the IPO transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_H
+#define LLVM_TRANSFORMS_IPO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class FunctionInfoIndex;
+class ModulePass;
+class Pass;
+class Function;
+class BasicBlock;
+class GlobalValue;
+
+//===----------------------------------------------------------------------===//
+//
+// This function creates a pass that removes symbols from functions and
+// modules. If OnlyDebugInfo is true, only debugging information is removed
+// from the module.
+//
+ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false);
+
+//===----------------------------------------------------------------------===//
+//
+// This function creates a pass that strips all symbols from functions and
+// modules except for debugging information.
+//
+ModulePass *createStripNonDebugSymbolsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes llvm.dbg.declare intrinsics.
+ModulePass *createStripDebugDeclarePass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes debug info for unused symbols.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
+/// createConstantMergePass - This function returns a new pass that merges
+/// duplicate global constants together into a single constant that is shared.
+/// This is useful because some passes (e.g., TraceValues) insert a lot of string
+/// constants into the program, regardless of whether or not they duplicate an
+/// existing string.
+///
+ModulePass *createConstantMergePass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalOptimizerPass - This function returns a new pass that optimizes
+/// non-address taken internal globals.
+///
+ModulePass *createGlobalOptimizerPass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalDCEPass - This transform is designed to eliminate unreachable
+/// internal globals (functions or global variables)
+///
+ModulePass *createGlobalDCEPass();
+
+//===----------------------------------------------------------------------===//
+/// This transform is designed to eliminate available external globals
+/// (functions or global variables)
+///
+ModulePass *createEliminateAvailableExternallyPass();
+
+//===----------------------------------------------------------------------===//
+/// createGVExtractionPass - If deleteFn is true, this pass deletes
+/// the specified global values. Otherwise, it deletes as much of the module as
+/// possible, except for the global values specified.
+///
+ModulePass *createGVExtractionPass(std::vector<GlobalValue *> &GVs,
+                                   bool deleteFn = false);
+
+//===----------------------------------------------------------------------===//
+/// This pass performs iterative function importing from other modules.
+Pass *createFunctionImportPass(const FunctionInfoIndex *Index = nullptr);
+
+//===----------------------------------------------------------------------===//
+/// createFunctionInliningPass - Return a new pass object that uses a heuristic
+/// to inline direct function calls to small functions.
+///
+/// The Threshold can be passed directly, or asked to be computed from the
+/// given optimization and size optimization arguments.
+///
+/// The -inline-threshold command line option takes precedence over the
+/// threshold given here.
+Pass *createFunctionInliningPass();
+Pass *createFunctionInliningPass(int Threshold);
+Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel);
+
+//===----------------------------------------------------------------------===//
+/// createAlwaysInlinerPass - Return a new pass object that inlines only
+/// functions that are marked as "always_inline".
+Pass *createAlwaysInlinerPass();
+Pass *createAlwaysInlinerPass(bool InsertLifetime);
+
+//===----------------------------------------------------------------------===//
+/// createPruneEHPass - Return a new pass object which transforms invoke
+/// instructions into calls, if the callee can _not_ unwind the stack.
+///
+Pass *createPruneEHPass();
+
+//===----------------------------------------------------------------------===//
+/// createInternalizePass - This pass loops over all of the functions in the
+/// input module, internalizing all globals (functions and variables) it can.
+///
+/// The symbols in \p ExportList are never internalized.
+///
+/// The symbols in DSOList are internalized if it is safe to drop them from
+/// the symbol table.
+///
+/// Note that the command line options that used to be associated with this
+/// function are no longer used.
+ModulePass *createInternalizePass(ArrayRef<const char *> ExportList);
+/// createInternalizePass - Same as above, but with an empty export list.
+ModulePass *createInternalizePass();
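+
+// A minimal usage sketch; "PM" is an assumed legacy pass manager:
+//
+//   const char *Exports[] = {"main"};
+//   PM.add(createInternalizePass(Exports)); // keep "main" external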
+
+//===----------------------------------------------------------------------===//
+/// createDeadArgEliminationPass - This pass removes arguments from functions
+/// which are not used by the body of the function.
+///
+ModulePass *createDeadArgEliminationPass();
+
+/// DeadArgHacking pass - Same as DAE, but deletes arguments of external
+/// functions as well. This is definitely not safe, and should only be used by
+/// bugpoint.
+ModulePass *createDeadArgHackingPass();
+
+//===----------------------------------------------------------------------===//
+/// createArgumentPromotionPass - This pass promotes "by reference" arguments to
+/// be passed by value if the number of elements passed is smaller than or
+/// equal to maxElements (maxElements == 0 means always promote).
+///
+Pass *createArgumentPromotionPass(unsigned maxElements = 3);
+
+//===----------------------------------------------------------------------===//
+/// createIPConstantPropagationPass - This pass propagates constants from call
+/// sites into the bodies of functions.
+///
+ModulePass *createIPConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+/// createIPSCCPPass - This pass propagates constants from call sites into the
+/// bodies of functions, and keeps track of whether basic blocks are executable
+/// in the process.
+///
+ModulePass *createIPSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+/// createLoopExtractorPass - This pass extracts all natural loops from the
+/// program into a function if it can.
+///
+Pass *createLoopExtractorPass();
+
+/// createSingleLoopExtractorPass - This pass extracts one natural loop from the
+/// program into a function if it can. This is used by bugpoint.
+///
+Pass *createSingleLoopExtractorPass();
+
+/// createBlockExtractorPass - This pass extracts all blocks (except those
+/// specified in the argument list) from the functions in the module.
+///
+ModulePass *createBlockExtractorPass();
+
+/// createStripDeadPrototypesPass - This pass removes any function declarations
+/// (prototypes) that are not used.
+ModulePass *createStripDeadPrototypesPass();
+
+//===----------------------------------------------------------------------===//
+/// createFunctionAttrsPass - This pass discovers functions that do not access
+/// memory, or only read memory, and gives them the readnone/readonly attribute.
+/// It also discovers function arguments that are not captured by the function
+/// and marks them with the nocapture attribute.
+///
+Pass *createFunctionAttrsPass();
+
+//===----------------------------------------------------------------------===//
+/// createMergeFunctionsPass - This pass discovers identical functions and
+/// collapses them.
+///
+ModulePass *createMergeFunctionsPass();
+
+//===----------------------------------------------------------------------===//
+/// createPartialInliningPass - This pass inlines parts of functions.
+///
+ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
+
+/// \brief This pass lowers bitset metadata and the llvm.bitset.test intrinsic
+/// to bitsets.
+ModulePass *createLowerBitSetsPass();
+
+/// \brief This pass exports CFI checks for use by external modules.
+ModulePass *createCrossDSOCFIPass();
+
+//===----------------------------------------------------------------------===//
+// SampleProfilePass - Loads sample profile data from disk and generates
+// IR metadata to reflect the profile.
+ModulePass *createSampleProfileLoaderPass();
+ModulePass *createSampleProfileLoaderPass(StringRef Name);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h b/contrib/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
new file mode 100644
index 0000000..0ff4afe
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
@@ -0,0 +1,35 @@
+//===-- ForceFunctionAttrs.h - Force function attrs for debugging ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Super simple passes to force specific function attrs from the command line
+/// into the IR for debugging purposes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass which forces specific function attributes into the IR, primarily as
+/// a debugging tool.
+class ForceFunctionAttrsPass {
+public:
+ static StringRef name() { return "ForceFunctionAttrsPass"; }
+ PreservedAnalyses run(Module &M);
+};
+
+/// Create a legacy pass manager instance of a pass to force function attrs.
+Pass *createForceFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h
new file mode 100644
index 0000000..d770779
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -0,0 +1,43 @@
+//===- llvm/Transforms/IPO/FunctionImport.h - ThinLTO importing -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUNCTIONIMPORT_H
+#define LLVM_FUNCTIONIMPORT_H
+
+#include "llvm/ADT/StringMap.h"
+#include <functional>
+
+namespace llvm {
+class LLVMContext;
+class Module;
+class FunctionInfoIndex;
+
+/// The function importer automatically imports functions from other modules
+/// based on the provided summary information.
+class FunctionImporter {
+
+ /// The summaries index used to trigger importing.
+ const FunctionInfoIndex &Index;
+
+ /// Factory function to load a Module for a given identifier
+ std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader;
+
+public:
+ /// Create a Function Importer.
+ FunctionImporter(
+ const FunctionInfoIndex &Index,
+ std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader)
+ : Index(Index), ModuleLoader(ModuleLoader) {}
+
+ /// Import functions into Module \p M based on the summary information.
+ bool importFunctions(Module &M);
+};
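+
+// A minimal sketch of wiring up the importer. "loadFile" is a hypothetical
+// helper (e.g. built on parseIRFile) and error handling is elided:
+//
+//   auto Loader = [&](StringRef Identifier) -> std::unique_ptr<Module> {
+//     return loadFile(Identifier, Context);
+//   };
+//   FunctionImporter Importer(Index, Loader);
+//   bool Changed = Importer.importFunctions(M);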
+}
+
+#endif // LLVM_FUNCTIONIMPORT_H
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h b/contrib/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h
new file mode 100644
index 0000000..80afc02
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h
@@ -0,0 +1,38 @@
+//===-- InferFunctionAttrs.h - Infer implicit function attributes ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Interfaces for passes which infer implicit function attributes from the
+/// name and signature of function declarations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass which infers function attributes from the names and signatures of
+/// function declarations in a module.
+class InferFunctionAttrsPass {
+public:
+ static StringRef name() { return "InferFunctionAttrsPass"; }
+ PreservedAnalyses run(Module &M, AnalysisManager<Module> *AM);
+};
+
+/// Create a legacy pass manager instance of a pass to infer function
+/// attributes.
+Pass *createInferFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
new file mode 100644
index 0000000..58ef0cb
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/InlinerPass.h
@@ -0,0 +1,94 @@
+//===- InlinerPass.h - Code common to all inliners --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a simple policy-based bottom-up inliner. This file
+// implements all of the boring mechanics of the bottom-up inlining, while the
+// subclass determines WHAT to inline, which is the much more interesting
+// component.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INLINERPASS_H
+#define LLVM_TRANSFORMS_IPO_INLINERPASS_H
+
+#include "llvm/Analysis/CallGraphSCCPass.h"
+
+namespace llvm {
+class AssumptionCacheTracker;
+class CallSite;
+class DataLayout;
+class InlineCost;
+template <class PtrType, unsigned SmallSize> class SmallPtrSet;
+
+/// Inliner - This class contains all of the helper code which is used to
+/// perform the inlining operations that do not depend on the policy.
+///
+struct Inliner : public CallGraphSCCPass {
+ explicit Inliner(char &ID);
+ explicit Inliner(char &ID, int Threshold, bool InsertLifetime);
+
+ /// getAnalysisUsage - For this class, we declare that we require and preserve
+ /// the call graph. If the derived class implements this method, it should
+ /// always explicitly call the implementation here.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+ // Main run interface method, this implements the interface required by the
+ // Pass class.
+ bool runOnSCC(CallGraphSCC &SCC) override;
+
+ using llvm::Pass::doFinalization;
+ // doFinalization - Remove now-dead linkonce functions at the end of
+ // processing to avoid breaking the SCC traversal.
+ bool doFinalization(CallGraph &CG) override;
+
+ /// This method returns the value of the -inline-threshold command line
+ /// option. This is typically not directly needed.
+ ///
+ unsigned getInlineThreshold() const { return InlineThreshold; }
+
+ /// Calculate the inline threshold for given Caller. This threshold is lower
+ /// if the caller is marked with OptimizeForSize and -inline-threshold is not
+ /// given on the command line. It is higher if the callee is marked with the
+ /// inlinehint attribute.
+ ///
+ unsigned getInlineThreshold(CallSite CS) const;
+
+ /// getInlineCost - This method must be implemented by the subclass to
+ /// determine the cost of inlining the specified call site. If the cost
+ /// returned is greater than the current inline threshold, the call site is
+ /// not inlined.
+ ///
+ virtual InlineCost getInlineCost(CallSite CS) = 0;
+
+ /// removeDeadFunctions - Remove dead functions.
+ ///
+ /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+ /// which restricts it to deleting functions with an 'AlwaysInline'
+ /// attribute. This is useful for the InlineAlways pass that only wants to
+ /// deal with that subset of the functions.
+ bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
+
+private:
+ // InlineThreshold - Cache the value here for easy access.
+ unsigned InlineThreshold;
+
+ // InsertLifetime - Insert @llvm.lifetime intrinsics.
+ bool InsertLifetime;
+
+ /// shouldInline - Return true if the inliner should attempt to
+ /// inline at the given CallSite.
+ bool shouldInline(CallSite CS);
+
+protected:
+ AssumptionCacheTracker *ACT;
+};
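+
+// A sketch of the policy-subclass pattern this file describes; "MyInliner" is
+// illustrative only (InlineCost comes from llvm/Analysis/InlineCost.h):
+//
+//   struct MyInliner : public Inliner {
+//     static char ID;
+//     MyInliner() : Inliner(ID) {}
+//     // The one policy decision a subclass must make.
+//     InlineCost getInlineCost(CallSite CS) override {
+//       return InlineCost::getNever(); // never inline anything
+//     }
+//   };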
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/LowerBitSets.h b/contrib/llvm/include/llvm/Transforms/IPO/LowerBitSets.h
new file mode 100644
index 0000000..e5fb7b9
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/LowerBitSets.h
@@ -0,0 +1,201 @@
+//===- LowerBitSets.h - Bitset lowering pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the bitset lowering pass implementation that may
+// be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+#define LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <stdint.h>
+#include <string.h>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class DataLayout;
+class GlobalObject;
+class Value;
+class raw_ostream;
+
+struct BitSetInfo {
+ // The indices of the set bits in the bitset.
+ std::set<uint64_t> Bits;
+
+ // The byte offset into the combined global represented by the bitset.
+ uint64_t ByteOffset;
+
+ // The size of the bitset in bits.
+ uint64_t BitSize;
+
+ // Log2 alignment of the bit set relative to the combined global.
+ // For example, a log2 alignment of 3 means that bits in the bitset
+ // represent addresses 8 bytes apart.
+ unsigned AlignLog2;
+
+ bool isSingleOffset() const {
+ return Bits.size() == 1;
+ }
+
+ bool isAllOnes() const {
+ return Bits.size() == BitSize;
+ }
+
+ bool containsGlobalOffset(uint64_t Offset) const;
+
+ bool containsValue(const DataLayout &DL,
+ const DenseMap<GlobalObject *, uint64_t> &GlobalLayout,
+ Value *V, uint64_t COffset = 0) const;
+
+ void print(raw_ostream &OS) const;
+};
+
+struct BitSetBuilder {
+ SmallVector<uint64_t, 16> Offsets;
+ uint64_t Min, Max;
+
+ BitSetBuilder() : Min(std::numeric_limits<uint64_t>::max()), Max(0) {}
+
+ void addOffset(uint64_t Offset) {
+ if (Min > Offset)
+ Min = Offset;
+ if (Max < Offset)
+ Max = Offset;
+
+ Offsets.push_back(Offset);
+ }
+
+ BitSetInfo build();
+};
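+
+// For example, offsets {0, 8, 24} share an 8-byte alignment, so (assuming the
+// usual compression performed by build()) the result is ByteOffset == 0,
+// AlignLog2 == 3, BitSize == 4 and Bits == {0, 1, 3}:
+//
+//   BitSetBuilder BSB;
+//   BSB.addOffset(0);
+//   BSB.addOffset(8);
+//   BSB.addOffset(24);
+//   BitSetInfo BSI = BSB.build();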
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual tables
+/// and the bit sets represent their derived classes: in a single inheritance
+/// hierarchy, the optimum layout would involve a depth-first search of the
+/// class hierarchy (and in fact the computed layout ends up looking a lot like
+/// a DFS), but a naive DFS would not work well in the presence of multiple
+/// inheritance. This aspect of the algorithm ends up fitting smaller
+/// hierarchies inside larger ones where that would be beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+/// A B
+/// \ / | \
+/// C D E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB, as it only needs
+/// to cover the 4 objects in its hierarchy, but not for bsA, which needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set passing the object
+/// indices of its referenced globals. It then assembles a layout from the
+/// computed layout in the Fragments field.
+struct GlobalLayoutBuilder {
+ /// The computed layout. Each element of this vector contains a fragment of
+ /// layout (which may be empty) consisting of object indices.
+ std::vector<std::vector<uint64_t>> Fragments;
+
+ /// Mapping from object index to fragment index.
+ std::vector<uint64_t> FragmentMap;
+
+ GlobalLayoutBuilder(uint64_t NumObjects)
+ : Fragments(1), FragmentMap(NumObjects) {}
+
+ /// Add F to the layout while trying to keep its indices contiguous.
+ /// If a previously seen fragment uses any of F's indices, that
+ /// fragment will be laid out inside F.
+ void addFragment(const std::set<uint64_t> &F);
+};
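+
+// Replaying the class-hierarchy example above with object indices A=0, B=1,
+// C=2, D=3, E=4, adding bit sets from smallest to largest:
+//
+//   GlobalLayoutBuilder GLB(5);
+//   GLB.addFragment({2});          // bsC: fragments {{C}}
+//   GLB.addFragment({3});          // bsD: {{C}, {D}}
+//   GLB.addFragment({4});          // bsE: {{C}, {D}, {E}}
+//   GLB.addFragment({0, 2});       // bsA: {{A, C}, {D}, {E}}
+//   GLB.addFragment({1, 2, 3, 4}); // bsB: {{B, A, C, D, E}}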
+
+/// This class is used to build a byte array containing overlapping bit sets. By
+/// loading from indexed offsets into the byte array and applying a mask, a
+/// program can test bits from the bit set with a relatively short instruction
+/// sequence. For example, suppose we have 15 bit sets to lay out:
+///
+/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
+/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
+/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
+///
+/// These bits can be laid out in a 16-byte array like this:
+///
+/// Byte Offset
+/// 0123456789ABCDEF
+/// Bit
+/// 7 HHHHHHHHHIIIIIII
+/// 6 GGGGGGGGGGJJJJJJ
+/// 5 FFFFFFFFFFFKKKKK
+/// 4 EEEEEEEEEEEELLLL
+/// 3 DDDDDDDDDDDDDMMM
+/// 2 CCCCCCCCCCCCCCNN
+/// 1 BBBBBBBBBBBBBBBO
+/// 0 AAAAAAAAAAAAAAAA
+///
+/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
+/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
+/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
+///
+/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
+/// because for one thing it gives us better packing (the more bins there are,
+/// the less evenly they will be filled), and for another, the instruction
+/// sequences can be slightly shorter, both on x86 and ARM.
+struct ByteArrayBuilder {
+ /// The byte array built so far.
+ std::vector<uint8_t> Bytes;
+
+ enum { BitsPerByte = 8 };
+
+ /// The number of bytes allocated so far for each of the bits.
+ uint64_t BitAllocs[BitsPerByte];
+
+ ByteArrayBuilder() {
+ memset(BitAllocs, 0, sizeof(BitAllocs));
+ }
+
+ /// Allocate BitSize bits in the byte array where Bits contains the bits to
+ /// set. AllocByteOffset is set to the offset within the byte array and
+ /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
+ /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
+ /// efficiently; the pass allocates bit sets in decreasing size order.
+ void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
+ uint64_t &AllocByteOffset, uint8_t &AllocMask);
+};
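+
+// A minimal usage sketch: the very first allocation goes into the emptiest
+// bit plane, so it lands at byte offset 0 with mask 0x1:
+//
+//   ByteArrayBuilder BAB;
+//   uint64_t AllocByteOffset;
+//   uint8_t AllocMask;
+//   BAB.allocate({0, 3, 8}, 9, AllocByteOffset, AllocMask);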
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
new file mode 100644
index 0000000..a4e7bce
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -0,0 +1,179 @@
+// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PassManagerBuilder class, which is used to set up a
+// "standard" optimization sequence suitable for languages like C and C++.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+#define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+class FunctionInfoIndex;
+class Pass;
+class TargetLibraryInfoImpl;
+class TargetMachine;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class FunctionPassManager;
+class PassManagerBase;
+}
+
+/// PassManagerBuilder - This class is used to set up a standard optimization
+/// sequence for languages like C and C++, allowing some APIs to customize the
+/// pass sequence in various ways. A simple example of using it would be:
+///
+/// PassManagerBuilder Builder;
+/// Builder.OptLevel = 2;
+/// Builder.populateFunctionPassManager(FPM);
+/// Builder.populateModulePassManager(MPM);
+///
+/// In addition to setting up the basic passes, PassManagerBuilder allows
+/// frontends to vend a plugin API, where plugins are allowed to add extensions
+/// to the default pass manager. They do this by specifying where in the pass
+/// pipeline they want to be added, along with a callback function that adds
+/// the pass(es). For example, a plugin that wanted to add a loop optimization
+/// could do something like this:
+///
+/// static void addMyLoopPass(const PMBuilder &Builder, PassManagerBase &PM) {
+/// if (Builder.getOptLevel() > 2 && Builder.getOptSizeLevel() == 0)
+/// PM.add(createMyAwesomePass());
+/// }
+/// ...
+/// Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
+/// addMyLoopPass);
+/// ...
+class PassManagerBuilder {
+public:
+ /// Extensions are passed the builder itself (so they can see how it is
+ /// configured) as well as the pass manager to add stuff to.
+ typedef void (*ExtensionFn)(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM);
+ enum ExtensionPointTy {
+ /// EP_EarlyAsPossible - This extension point allows adding passes before
+ /// any other transformations, allowing them to see the code as it is coming
+ /// out of the frontend.
+ EP_EarlyAsPossible,
+
+ /// EP_ModuleOptimizerEarly - This extension point allows adding passes
+ /// just before the main module-level optimization passes.
+ EP_ModuleOptimizerEarly,
+
+ /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
+ /// the end of the loop optimizer.
+ EP_LoopOptimizerEnd,
+
+ /// EP_ScalarOptimizerLate - This extension point allows adding optimization
+ /// passes after most of the main optimizations, but before the last
+ /// cleanup-ish optimizations.
+ EP_ScalarOptimizerLate,
+
+ /// EP_OptimizerLast -- This extension point allows adding passes that
+ /// run after everything else.
+ EP_OptimizerLast,
+
+ /// EP_VectorizerStart - This extension point allows adding optimization
+ /// passes before the vectorizer and other highly target specific
+ /// optimization passes are executed.
+ EP_VectorizerStart,
+
+ /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
+ /// should not be disabled by O0 optimization level. The passes will be
+ /// inserted after the inlining pass.
+ EP_EnabledOnOptLevel0,
+
+ /// EP_Peephole - This extension point allows adding passes that perform
+ /// peephole optimizations similar to the instruction combiner. These passes
+ /// will be inserted after each instance of the instruction combiner pass.
+ EP_Peephole,
+ };
+
+ /// The Optimization Level - Specify the basic optimization level.
+ /// 0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
+ unsigned OptLevel;
+
+ /// SizeLevel - How much we're optimizing for size.
+ /// 0 = none, 1 = -Os, 2 = -Oz
+ unsigned SizeLevel;
+
+ /// LibraryInfo - Specifies information about the runtime library for the
+ /// optimizer. If this is non-null, it is added to both the function and
+ /// per-module pass pipeline.
+ TargetLibraryInfoImpl *LibraryInfo;
+
+ /// Inliner - Specifies the inliner to use. If this is non-null, it is
+ /// added to the per-module passes.
+ Pass *Inliner;
+
+ /// The function summary index to use for function importing.
+ const FunctionInfoIndex *FunctionIndex;
+
+ bool DisableTailCalls;
+ bool DisableUnitAtATime;
+ bool DisableUnrollLoops;
+ bool BBVectorize;
+ bool SLPVectorize;
+ bool LoopVectorize;
+ bool RerollLoops;
+ bool LoadCombine;
+ bool DisableGVNLoadPRE;
+ bool VerifyInput;
+ bool VerifyOutput;
+ bool MergeFunctions;
+ bool PrepareForLTO;
+
+private:
+ /// ExtensionList - This is list of all of the extensions that are registered.
+ std::vector<std::pair<ExtensionPointTy, ExtensionFn> > Extensions;
+
+public:
+ PassManagerBuilder();
+ ~PassManagerBuilder();
+ /// Adds an extension that will be used by all PassManagerBuilder instances.
+ /// This is intended to be used by plugins, to register a set of
+ /// optimizations to run automatically.
+ static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+ void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+
+private:
+ void addExtensionsToPM(ExtensionPointTy ETy,
+ legacy::PassManagerBase &PM) const;
+ void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
+ void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
+ void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
+
+public:
+ /// populateFunctionPassManager - This fills in the function pass manager,
+ /// which is expected to be run on each function immediately as it is
+ /// generated. The idea is to reduce the size of the IR in memory.
+ void populateFunctionPassManager(legacy::FunctionPassManager &FPM);
+
+ /// populateModulePassManager - This sets up the primary pass manager.
+ void populateModulePassManager(legacy::PassManagerBase &MPM);
+ void populateLTOPassManager(legacy::PassManagerBase &PM);
+};
+
+/// Registers a function for adding a standard set of passes. This should be
+/// used by optimizer plugins to allow all front ends to transparently use
+/// them. Create a static instance of this class in your plugin, providing a
+/// private function that the PassManagerBuilder can use to add your passes.
+struct RegisterStandardPasses {
+ RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty,
+ PassManagerBuilder::ExtensionFn Fn) {
+ PassManagerBuilder::addGlobalExtension(Ty, Fn);
+ }
+};
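+
+// A minimal plugin sketch; createMyAwesomePass is hypothetical, as in the
+// PassManagerBuilder documentation above:
+//
+//   static void addMyPass(const PassManagerBuilder &,
+//                         legacy::PassManagerBase &PM) {
+//     PM.add(createMyAwesomePass());
+//   }
+//   static RegisterStandardPasses
+//       Reg(PassManagerBuilder::EP_OptimizerLast, addMyPass);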
+
+} // end namespace llvm
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h b/contrib/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h
new file mode 100644
index 0000000..9dddd12
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h
@@ -0,0 +1,34 @@
+//===-- StripDeadPrototypes.h - Remove unused function declarations -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions in the input module, looking for
+// dead declarations and removes them. Dead declarations are declarations of
+// functions for which no implementation is available (i.e., declarations for
+// unused library functions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+#define LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass to remove unused function declarations.
+class StripDeadPrototypesPass {
+public:
+ static StringRef name() { return "StripDeadPrototypesPass"; }
+ PreservedAnalyses run(Module &M);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
diff --git a/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombine.h b/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombine.h
new file mode 100644
index 0000000..f48ec13
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -0,0 +1,46 @@
+//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the instcombine pass. This pass
+/// is suitable for use in the new pass manager. For a pass that works with the
+/// legacy pass manager, please look for \c createInstructionCombiningPass() in
+/// Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+
+namespace llvm {
+
+class InstCombinePass {
+ InstCombineWorklist Worklist;
+
+public:
+ static StringRef name() { return "InstCombinePass"; }
+
+ // Explicitly define constructors for MSVC.
+ InstCombinePass() {}
+ InstCombinePass(InstCombinePass &&Arg) : Worklist(std::move(Arg.Worklist)) {}
+ InstCombinePass &operator=(InstCombinePass &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ return *this;
+ }
+
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
new file mode 100644
index 0000000..5d2b2d0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -0,0 +1,116 @@
+//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+/// InstCombineWorklist - This is the worklist management logic for
+/// InstCombine.
+class InstCombineWorklist {
+ SmallVector<Instruction*, 256> Worklist;
+ DenseMap<Instruction*, unsigned> WorklistMap;
+
+ void operator=(const InstCombineWorklist &RHS) = delete;
+ InstCombineWorklist(const InstCombineWorklist&) = delete;
+public:
+ InstCombineWorklist() {}
+
+ InstCombineWorklist(InstCombineWorklist &&Arg)
+ : Worklist(std::move(Arg.Worklist)),
+ WorklistMap(std::move(Arg.WorklistMap)) {}
+ InstCombineWorklist &operator=(InstCombineWorklist &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ WorklistMap = std::move(RHS.WorklistMap);
+ return *this;
+ }
+
+ bool isEmpty() const { return Worklist.empty(); }
+
+ /// Add - Add the specified instruction to the worklist if it isn't already
+ /// in it.
+ void Add(Instruction *I) {
+ if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
+ DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+ Worklist.push_back(I);
+ }
+ }
+
+ void AddValue(Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ Add(I);
+ }
+
+ /// AddInitialGroup - Add the specified batch of stuff in reverse order,
+ /// which should only be done when the worklist is empty and when the group
+ /// has no duplicates.
+ void AddInitialGroup(ArrayRef<Instruction *> List) {
+ assert(Worklist.empty() && "Worklist must be empty to add initial group");
+ Worklist.reserve(List.size()+16);
+ WorklistMap.resize(List.size());
+ DEBUG(dbgs() << "IC: ADDING: " << List.size() << " instrs to worklist\n");
+ unsigned Idx = 0;
+ for (Instruction *I : reverse(List)) {
+ WorklistMap.insert(std::make_pair(I, Idx++));
+ Worklist.push_back(I);
+ }
+ }
+
+ // Remove - remove I from the worklist if it exists.
+ void Remove(Instruction *I) {
+ DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+ if (It == WorklistMap.end()) return; // Not in worklist.
+
+ // Don't bother moving everything down, just null out the slot.
+ Worklist[It->second] = nullptr;
+
+ WorklistMap.erase(It);
+ }
+
+ Instruction *RemoveOne() {
+ Instruction *I = Worklist.pop_back_val();
+ WorklistMap.erase(I);
+ return I;
+ }
+
+ /// AddUsersToWorkList - When an instruction is simplified, add all users of
+ /// the instruction to the work lists because they might get more simplified
+ /// now.
+ ///
+ void AddUsersToWorkList(Instruction &I) {
+ for (User *U : I.users())
+ Add(cast<Instruction>(U));
+ }
+
+
+ /// Zap - check that the worklist is empty and nuke the backing store for
+ /// the map if it is large.
+ void Zap() {
+ assert(WorklistMap.empty() && "Worklist empty, but map not?");
+
+ // Do an explicit clear, this shrinks the map if needed.
+ WorklistMap.clear();
+ }
+};
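+
+// A sketch of the driver loop InstCombine runs over this worklist; "simplify"
+// stands in for the real visitor logic:
+//
+//   while (!Worklist.isEmpty()) {
+//     Instruction *I = Worklist.RemoveOne();
+//     if (!I) continue; // slot was nulled out by Remove()
+//     if (simplify(I))  // instruction changed or was replaced
+//       Worklist.AddUsersToWorkList(*I);
+//   }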
+
+} // end namespace llvm.
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Instrumentation.h b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
new file mode 100644
index 0000000..38dfeb0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Instrumentation.h
@@ -0,0 +1,177 @@
+//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines constructor functions for instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include <vector>
+
+#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
+inline void *getDFSanArgTLSPtrForJIT() {
+ extern __thread __attribute__((tls_model("initial-exec")))
+ void *__dfsan_arg_tls;
+ return (void *)&__dfsan_arg_tls;
+}
+
+inline void *getDFSanRetValTLSPtrForJIT() {
+ extern __thread __attribute__((tls_model("initial-exec")))
+ void *__dfsan_retval_tls;
+ return (void *)&__dfsan_retval_tls;
+}
+#endif
+
+namespace llvm {
+
+class TargetMachine;
+
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
+ BasicBlock::iterator IP);
+
+class ModulePass;
+class FunctionPass;
+
+// Insert GCOV profiling instrumentation
+struct GCOVOptions {
+ static GCOVOptions getDefault();
+
+ // Specify whether to emit .gcno files.
+ bool EmitNotes;
+
+ // Specify whether to modify the program to emit .gcda files when run.
+ bool EmitData;
+
+ // A four-byte version string. The meaning of a version string is described in
+ // gcc's gcov-io.h
+ char Version[4];
+
+ // Emit a "cfg checksum" that follows the "line number checksum" of a
+ // function. This affects both .gcno and .gcda files.
+ bool UseCfgChecksum;
+
+ // Add the 'noredzone' attribute to added runtime library calls.
+ bool NoRedZone;
+
+ // Emit the name of the function in the .gcda files. This is redundant, as
+ // the function identifier can be used to find the name from the .gcno file.
+ bool FunctionNamesInData;
+
+ // Emit the exit block immediately after the start block, rather than after
+ // all of the function body's blocks.
+ bool ExitBlockBeforeBody;
+};
+ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
+ GCOVOptions::getDefault());
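+
+// A minimal usage sketch; "PM" is an assumed legacy pass manager:
+//
+//   GCOVOptions Opts = GCOVOptions::getDefault();
+//   Opts.EmitNotes = true; // write .gcno files
+//   Opts.EmitData = true;  // instrument the program to write .gcda files
+//   PM.add(createGCOVProfilerPass(Opts));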
+
+// PGO Instrumentation
+ModulePass *createPGOInstrumentationGenPass();
+ModulePass *
+createPGOInstrumentationUsePass(StringRef Filename = StringRef(""));
+
+/// Options for the frontend instrumentation based profiling pass.
+struct InstrProfOptions {
+ InstrProfOptions() : NoRedZone(false) {}
+
+ // Add the 'noredzone' attribute to added runtime library calls.
+ bool NoRedZone;
+
+ // Name of the profile file to use as output
+ std::string InstrProfileOutput;
+};
+
+/// Insert frontend instrumentation based profiling.
+ModulePass *createInstrProfilingPass(
+ const InstrProfOptions &Options = InstrProfOptions());
+
+// Insert AddressSanitizer (address sanity checking) instrumentation
+FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
+ bool Recover = false);
+ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
+ bool Recover = false);
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0);
+
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerPass();
+
+// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
+ModulePass *createDataFlowSanitizerPass(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+ void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
+
+// Options for sanitizer coverage instrumentation.
+struct SanitizerCoverageOptions {
+ SanitizerCoverageOptions()
+ : CoverageType(SCK_None), IndirectCalls(false), TraceBB(false),
+ TraceCmp(false), Use8bitCounters(false) {}
+
+ enum Type {
+ SCK_None = 0,
+ SCK_Function,
+ SCK_BB,
+ SCK_Edge
+ } CoverageType;
+ bool IndirectCalls;
+ bool TraceBB;
+ bool TraceCmp;
+ bool Use8bitCounters;
+};
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createSanitizerCoverageModulePass(
+ const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
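+
+// A minimal usage sketch enabling edge coverage with 8-bit counters; "PM" is
+// an assumed legacy pass manager:
+//
+//   SanitizerCoverageOptions CovOpts;
+//   CovOpts.CoverageType = SanitizerCoverageOptions::SCK_Edge;
+//   CovOpts.Use8bitCounters = true;
+//   PM.add(createSanitizerCoverageModulePass(CovOpts));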
+
+#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
+inline ModulePass *createDataFlowSanitizerPassForJIT(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>()) {
+ return createDataFlowSanitizerPass(ABIListFiles, getDFSanArgTLSPtrForJIT,
+ getDFSanRetValTLSPtrForJIT);
+}
+#endif
+
+// BoundsChecking - This pass instruments the code to perform run-time bounds
+// checking on loads, stores, and other memory intrinsics.
+FunctionPass *createBoundsCheckingPass();
+
+/// \brief This pass splits the stack into a safe stack and an unsafe stack to
+/// protect against stack-based overflow vulnerabilities.
+FunctionPass *createSafeStackPass(const TargetMachine *TM = nullptr);
+
+/// \brief Calculate what to divide by to scale counts.
+///
+/// Given the maximum count, calculate a divisor that will scale all the
+/// weights to strictly less than UINT32_MAX.
+static inline uint64_t calculateCountScale(uint64_t MaxCount) {
+ return MaxCount < UINT32_MAX ? 1 : MaxCount / UINT32_MAX + 1;
+}
+
+/// \brief Scale an individual branch count.
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
+ uint64_t Scaled = Count / Scale;
+ assert(Scaled <= UINT32_MAX && "overflow 32-bits");
+ return Scaled;
+}
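+
+// For example, with MaxCount == 10 * UINT32_MAX the scale is 10 + 1 == 11,
+// so every weight, including MaxCount itself, scales to strictly less than
+// UINT32_MAX:
+//
+//   uint64_t Scale = calculateCountScale(UINT64_C(10) * UINT32_MAX); // 11
+//   uint32_t W = scaleBranchCount(UINT64_C(10) * UINT32_MAX, Scale);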
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/ObjCARC.h b/contrib/llvm/include/llvm/Transforms/ObjCARC.h
new file mode 100644
index 0000000..1897adc
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/ObjCARC.h
@@ -0,0 +1,48 @@
+//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the ObjCARC Scalar Transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_OBJCARC_H
+#define LLVM_TRANSFORMS_OBJCARC_H
+
+namespace llvm {
+
+class Pass;
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCAPElim - ObjC ARC autorelease pool elimination.
+//
+Pass *createObjCARCAPElimPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCExpand - ObjC ARC preliminary simplifications.
+//
+Pass *createObjCARCExpandPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCContract - Late ObjC ARC cleanups.
+//
+Pass *createObjCARCContractPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCOpt - ObjC ARC optimization.
+//
+Pass *createObjCARCOptPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar.h b/contrib/llvm/include/llvm/Transforms/Scalar.h
new file mode 100644
index 0000000..9173de1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar.h
@@ -0,0 +1,491 @@
+//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Scalar transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_H
+#define LLVM_TRANSFORMS_SCALAR_H
+
+#include "llvm/ADT/StringRef.h"
+#include <functional>
+
+namespace llvm {
+
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class ModulePass;
+class Pass;
+class GetElementPtrInst;
+class PassInfo;
+class TerminatorInst;
+class TargetLowering;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantPropagation - A worklist driven constant propagation pass
+//
+FunctionPass *createConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AlignmentFromAssumptions - Use assume intrinsics to set load/store
+// alignments.
+//
+FunctionPass *createAlignmentFromAssumptionsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SCCP - Sparse conditional constant propagation.
+//
+FunctionPass *createSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadInstElimination - This pass quickly removes trivially dead instructions
+// without modifying the CFG of the function. It is a BasicBlockPass, so it
+// runs efficiently when queued next to other BasicBlockPasses.
+//
+Pass *createDeadInstEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadCodeElimination - This pass is more powerful than DeadInstElimination,
+// because it is worklist driven and can potentially revisit instructions when
+// the instructions that use them become dead, eliminating chains of dead
+// computations.
+//
+FunctionPass *createDeadCodeEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadStoreElimination - This pass deletes stores that are post-dominated by
+// must-aliased stores and are not loaded between the stores.
+//
+FunctionPass *createDeadStoreEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. This
+// algorithm assumes instructions are dead until proven otherwise, which makes
+// it more successful at removing non-obviously dead instructions.
+//
+FunctionPass *createAggressiveDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BitTrackingDCE - This pass uses a bit-tracking DCE algorithm in order to
+// remove computations of dead bits.
+//
+FunctionPass *createBitTrackingDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarReplAggregates - Break up allocas of aggregates into multiple allocas
+// if possible.
+//
+FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1,
+ bool UseDomTree = true,
+ signed StructMemberThreshold = -1,
+ signed ArrayElementThreshold = -1,
+ signed ScalarLoadThreshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// InductiveRangeCheckElimination - Transform loops to elide range checks on
+// linear functions of the induction variable.
+//
+Pass *createInductiveRangeCheckEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductionVariableSimplify - Transform induction variables in a program to all
+// use a single canonical induction variable per loop.
+//
+Pass *createIndVarSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionCombining - Combine instructions to form fewer, simple
+// instructions. This pass does not modify the CFG, and has a tendency to make
+// instructions dead, so a subsequent DCE pass is useful.
+//
+// This pass combines things like:
+// %Y = add int 1, %X
+// %Z = add int 1, %Y
+// into:
+// %Z = add int 2, %X
+//
+FunctionPass *createInstructionCombiningPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LICM - This pass is a loop invariant code motion and memory promotion pass.
+//
+Pass *createLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInterchange - This pass interchanges loops to provide more
+// cache-friendly memory access patterns.
+//
+Pass *createLoopInterchangePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopStrengthReduce - This pass strength-reduces GEP instructions that use
+// a loop's canonical induction variable as one of their indices.
+//
+Pass *createLoopStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// GlobalMerge - This pass merges internal (by default) globals into structs
+// to enable reuse of a base pointer by indexed addressing modes.
+// It can also be configured to focus on size optimizations only.
+//
+Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+ bool OnlyOptimizeForSize = false,
+ bool MergeExternalByDefault = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnswitch - This pass is a simple loop unswitching pass.
+//
+Pass *createLoopUnswitchPass(bool OptimizeForSize = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInstSimplify - This pass simplifies instructions in a loop's body.
+//
+Pass *createLoopInstSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnroll - This pass is a simple loop unrolling pass.
+//
+Pass *createLoopUnrollPass(int Threshold = -1, int Count = -1,
+ int AllowPartial = -1, int Runtime = -1);
+// Create an unrolling pass for full unrolling only.
+Pass *createSimpleLoopUnrollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopReroll - This pass is a simple loop rerolling pass.
+//
+Pass *createLoopRerollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopRotate - This pass is a simple loop rotating pass.
+//
+Pass *createLoopRotatePass(int MaxHeaderSize = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopIdiom - This pass recognizes and replaces idioms in loops.
+//
+Pass *createLoopIdiomPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PromoteMemoryToRegister - This pass is used to promote memory references to
+// be register references. A simple example of the transformation performed by
+// this pass is:
+//
+//        FROM CODE                     TO CODE
+//   %X = alloca i32, i32 1             ret i32 42
+//   store i32 42, i32 *%X
+//   %Y = load i32* %X
+//   ret i32 %Y
+//
+FunctionPass *createPromoteMemoryToRegisterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory
+// references. It basically undoes the PromoteMemoryToRegister pass to make CFG
+// hacking easier.
+//
+FunctionPass *createDemoteRegisterToMemoryPass();
+extern char &DemoteRegisterToMemoryID;
+
+//===----------------------------------------------------------------------===//
+//
+// Reassociate - This pass reassociates commutative expressions in an order that
+// is designed to promote better constant propagation, GCSE, LICM, PRE...
+//
+// For example: 4 + (x + 5) -> x + (4 + 5)
+//
+FunctionPass *createReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// JumpThreading - Thread control through multi-pred/multi-succ blocks where
+// some preds always go to some succ. Thresholds other than minus one override
+// the internal BB duplication default threshold.
+//
+FunctionPass *createJumpThreadingPass(int Threshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
+// simplify terminator instructions, etc...
+//
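+// For example (sketch), the optional predicate below can restrict the pass to
+// cold functions; the lambda is illustrative:
+//
+//    createCFGSimplificationPass(-1, [](const Function &F) {
+//      return F.hasFnAttribute(Attribute::Cold);
+//    });
+//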
+FunctionPass *createCFGSimplificationPass(
+ int Threshold = -1, std::function<bool(const Function &)> Ftor = nullptr);
+
+//===----------------------------------------------------------------------===//
+//
+// FlattenCFG - flatten CFG, reduce number of conditional branches by using
+// parallel-and and parallel-or mode, etc...
+//
+FunctionPass *createFlattenCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// CFG Structurization - Remove irreducible control flow
+//
+Pass *createStructurizeCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting
+// a dummy basic block. This pass may be "required" by passes that cannot deal
+// with critical edges. For this usage, a pass must call:
+//
+// AU.addRequiredID(BreakCriticalEdgesID);
+//
+// This pass obviously invalidates the CFG, but can update forward dominator
+// (set, immediate dominators, tree, and frontier) information.
+//
+FunctionPass *createBreakCriticalEdgesPass();
+extern char &BreakCriticalEdgesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplify - Insert pre-header blocks into the CFG for every function in
+// the module. This pass updates dominator information, loop information, and
+// does not add critical edges to the CFG.
+//
+// AU.addRequiredID(LoopSimplifyID);
+//
+Pass *createLoopSimplifyPass();
+extern char &LoopSimplifyID;
+
+//===----------------------------------------------------------------------===//
+//
+// TailCallElimination - This pass eliminates call instructions to the current
+// function which occur immediately before return instructions.
+//
+FunctionPass *createTailCallEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerSwitch - This pass converts SwitchInst instructions into a sequence of
+// chained binary branch instructions.
+//
+FunctionPass *createLowerSwitchPass();
+extern char &LowerSwitchID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerInvoke - This pass removes invoke instructions, converting them to call
+// instructions.
+//
+FunctionPass *createLowerInvokePass();
+extern char &LowerInvokePassID;
+
+//===----------------------------------------------------------------------===//
+//
+// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop
+// optimizations.
+//
+Pass *createLCSSAPass();
+extern char &LCSSAID;
+
+//===----------------------------------------------------------------------===//
+//
+// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
+// tree.
+//
+FunctionPass *createEarlyCSEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
+// are hoisted into the header, while stores sink into the footer.
+//
+FunctionPass *createMergedLoadStoreMotionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// GVN - This pass performs global value numbering and redundant load
+// elimination simultaneously.
+//
+FunctionPass *createGVNPass(bool NoLoads = false);
+
+//===----------------------------------------------------------------------===//
+//
+// MemCpyOpt - This pass performs optimizations related to eliminating memcpy
+// calls and/or combining multiple stores into memsets.
+//
+FunctionPass *createMemCpyOptPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDeletion - This pass performs DCE of non-infinite loops that it
+// can prove are dead.
+//
+Pass *createLoopDeletionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantHoisting - This pass identifies expensive constants, hoists them to
+// a common location, and rewrites related constants relative to that base to
+// reduce the cost of materializing them.
+//
+FunctionPass *createConstantHoistingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
+//
+FunctionPass *createInstructionNamerPass();
+extern char &InstructionNamerID;
+
+//===----------------------------------------------------------------------===//
+//
+// Sink - Code Sinking
+//
+FunctionPass *createSinkingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
+//
+Pass *createLowerAtomicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ValuePropagation - Propagate CFG-derived value information
+//
+Pass *createCorrelatedValuePropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionSimplifier - Remove redundant instructions.
+//
+FunctionPass *createInstructionSimplifierPass();
+extern char &InstructionSimplifierID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
+// "branch_weights" metadata.
+//
+FunctionPass *createLowerExpectIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PartiallyInlineLibCalls - Tries to inline the fast path of library
+// calls such as sqrt.
+//
+FunctionPass *createPartiallyInlineLibCallsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarizerPass - Converts vector operations into scalar operations
+//
+FunctionPass *createScalarizerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AddDiscriminators - Add DWARF path discriminators to the IR.
+//
+FunctionPass *createAddDiscriminatorsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *
+createSeparateConstOffsetFromGEPPass(const TargetMachine *TM = nullptr,
+ bool LowerGEP = false);
+
+//===----------------------------------------------------------------------===//
+//
+// SpeculativeExecution - Aggressively hoist instructions to enable
+// speculative execution on targets where branches are expensive.
+//
+FunctionPass *createSpeculativeExecutionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoadCombine - Combine loads into bigger loads.
+//
+BasicBlockPass *createLoadCombinePass();
+
+//===----------------------------------------------------------------------===//
+//
+// StraightLineStrengthReduce - This pass strength-reduces certain instruction
+// patterns in straight-line code.
+//
+FunctionPass *createStraightLineStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// PlaceSafepoints - Rewrite any IR calls to gc.statepoints and insert any
+// safepoint polls (method entry, backedge) that might be required. This pass
+// does not generate explicit relocation sequences - that's handled by
+// RewriteStatepointsForGC which can be run at an arbitrary point in the pass
+// order following this pass.
+//
+FunctionPass *createPlaceSafepointsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// RewriteStatepointsForGC - Rewrite any gc.statepoints which do not yet have
+// explicit relocations to include explicit relocations.
+//
+ModulePass *createRewriteStatepointsForGCPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Float2Int - Demote floats to ints where possible.
+//
+FunctionPass *createFloat2IntPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NaryReassociate - Simplify n-ary operations by reassociation.
+//
+FunctionPass *createNaryReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDistribute - Distribute loops.
+//
+FunctionPass *createLoopDistributePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopLoadElimination - Perform loop-aware load elimination.
+//
+FunctionPass *createLoopLoadEliminationPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/ADCE.h b/contrib/llvm/include/llvm/Transforms/Scalar/ADCE.h
new file mode 100644
index 0000000..f9bc7b7
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/ADCE.h
@@ -0,0 +1,38 @@
+//===- ADCE.h - Aggressive dead code elimination --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Aggressive Dead Code Elimination
+// pass. This pass optimistically assumes that all instructions are dead until
+// proven otherwise, allowing it to eliminate dead computations that other DCE
+// passes do not catch, particularly involving loop computations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
+#define LLVM_TRANSFORMS_SCALAR_ADCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A DCE pass that assumes instructions are dead until proven otherwise.
+///
+/// This pass eliminates dead code by optimistically assuming that all
+/// instructions are dead until proven otherwise. This allows it to eliminate
+/// dead computations that other DCE passes do not catch, particularly involving
+/// loop computations.
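+///
+/// A minimal new-pass-manager usage sketch (the manager setup is
+/// illustrative):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(ADCEPass());
+///   FPM.run(F);
+/// \endcode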
+class ADCEPass {
+public:
+ static StringRef name() { return "ADCEPass"; }
+ PreservedAnalyses run(Function &F);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h b/contrib/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h
new file mode 100644
index 0000000..e3dd3c0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -0,0 +1,39 @@
+//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for a simple, fast CSE pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+class EarlyCSEPass {
+public:
+ static StringRef name() { return "EarlyCSEPass"; }
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/contrib/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
new file mode 100644
index 0000000..4028320
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -0,0 +1,40 @@
+//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerExpectIntrinsic pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerExpectIntrinsicPass {
+public:
+ static StringRef name() { return "LowerExpectIntrinsicPass"; }
+
+ /// \brief Run the pass over the function.
+ ///
+  /// This will lower all of the expect intrinsic calls in this function into
+ /// branch weight metadata. That metadata will subsequently feed the analysis
+ /// of the probabilities and frequencies of the CFG. After running this pass,
+ /// no more expect intrinsics remain, allowing the rest of the optimizer to
+ /// ignore them.
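+  ///
+  /// For example (IR sketch), a branch guarded by
+  /// \code
+  ///   %e = call i64 @llvm.expect.i64(i64 %v, i64 1)
+  ///   %c = icmp ne i64 %e, 0
+  ///   br i1 %c, label %likely, label %unlikely
+  /// \endcode
+  /// has the expect call removed (%e replaced by %v) and the branch annotated
+  /// with "branch_weights" metadata marking %likely as the probable edge.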
+ PreservedAnalyses run(Function &F);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/SROA.h b/contrib/llvm/include/llvm/Transforms/Scalar/SROA.h
new file mode 100644
index 0000000..f90cc7b
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/SROA.h
@@ -0,0 +1,129 @@
+//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Scalar Replacement of
+/// Aggregates pass. This pass provides both aggregate splitting and the
+/// primary SSA formation used in the compiler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SROA_H
+#define LLVM_TRANSFORMS_SCALAR_SROA_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A private "module" namespace for types and utilities used by SROA. These
+/// are implementation details and should not be used by clients.
+namespace sroa {
+class AllocaSliceRewriter;
+class AllocaSlices;
+class Partition;
+class SROALegacyPass;
+}
+
+/// \brief An optimization pass providing Scalar Replacement of Aggregates.
+///
+/// This pass takes allocations which can be completely analyzed (that is, they
+/// don't escape) and tries to turn them into scalar SSA values. There are
+/// a few steps to this process.
+///
+/// 1) It takes allocations of aggregates and analyzes the ways in which they
+/// are used to try to split them into smaller allocations, ideally of
+/// a single scalar data type. It will split up memcpy and memset accesses
+/// as necessary and try to isolate individual scalar accesses.
+/// 2) It will transform accesses into forms which are suitable for SSA value
+/// promotion. This can be replacing a memset with a scalar store of an
+/// integer value, or it can involve speculating operations on a PHI or
+/// select to be a PHI or select of the results.
+/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
+/// onto insert and extract operations on a vector value, and convert them to
+/// this form. By doing so, it will enable promotion of vector aggregates to
+/// SSA vector values.
+class SROA {
+ LLVMContext *C;
+ DominatorTree *DT;
+ AssumptionCache *AC;
+
+ /// \brief Worklist of alloca instructions to simplify.
+ ///
+ /// Each alloca in the function is added to this. Each new alloca formed gets
+ /// added to it as well to recursively simplify unless that alloca can be
+  /// directly promoted. Finally, each time we rewrite a use of an alloca
+  /// other than the one being actively rewritten, we add it back onto the
+  /// list if not already present to ensure it is re-visited.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> Worklist;
+
+ /// \brief A collection of instructions to delete.
+ /// We try to batch deletions to simplify code and make things a bit more
+ /// efficient.
+ SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts;
+
+ /// \brief Post-promotion worklist.
+ ///
+ /// Sometimes we discover an alloca which has a high probability of becoming
+ /// viable for SROA after a round of promotion takes place. In those cases,
+ /// the alloca is enqueued here for re-processing.
+ ///
+ /// Note that we have to be very careful to clear allocas out of this list in
+ /// the event they are deleted.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> PostPromotionWorklist;
+
+ /// \brief A collection of alloca instructions we can directly promote.
+ std::vector<AllocaInst *> PromotableAllocas;
+
+ /// \brief A worklist of PHIs to speculate prior to promoting allocas.
+ ///
+ /// All of these PHIs have been checked for the safety of speculation and by
+ /// being speculated will allow promoting allocas currently in the promotable
+ /// queue.
+ SetVector<PHINode *, SmallVector<PHINode *, 2>> SpeculatablePHIs;
+
+ /// \brief A worklist of select instructions to speculate prior to promoting
+ /// allocas.
+ ///
+ /// All of these select instructions have been checked for the safety of
+ /// speculation and by being speculated will allow promoting allocas
+ /// currently in the promotable queue.
+ SetVector<SelectInst *, SmallVector<SelectInst *, 2>> SpeculatableSelects;
+
+public:
+ SROA() : C(nullptr), DT(nullptr), AC(nullptr) {}
+
+ static StringRef name() { return "SROA"; }
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+private:
+ friend class sroa::AllocaSliceRewriter;
+ friend class sroa::SROALegacyPass;
+
+ /// Helper used by both the public run method and by the legacy pass.
+ PreservedAnalyses runImpl(Function &F, DominatorTree &RunDT,
+ AssumptionCache &RunAC);
+
+ bool presplitLoadsAndStores(AllocaInst &AI, sroa::AllocaSlices &AS);
+ AllocaInst *rewritePartition(AllocaInst &AI, sroa::AllocaSlices &AS,
+ sroa::Partition &P);
+ bool splitAlloca(AllocaInst &AI, sroa::AllocaSlices &AS);
+ bool runOnAlloca(AllocaInst &AI);
+ void clobberUse(Use &U);
+ void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
+ bool promoteAllocas(Function &F);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h b/contrib/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h
new file mode 100644
index 0000000..ef28e0f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -0,0 +1,46 @@
+//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the pass responsible for both
+/// simplifying and canonicalizing the CFG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A pass to simplify and canonicalize the CFG of a function.
+///
+/// This pass iteratively simplifies the entire CFG of a function, removing
+/// unnecessary control flows and bringing it into the canonical form expected
+/// by the rest of the mid-level optimizer.
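+///
+/// A minimal new-pass-manager sketch using the bonus-threshold constructor
+/// declared below (the threshold value is illustrative):
+/// \code
+///   FunctionPassManager FPM;
+///   FPM.addPass(SimplifyCFGPass(/*BonusInstThreshold=*/2));
+/// \endcode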
+class SimplifyCFGPass {
+ int BonusInstThreshold;
+
+public:
+ static StringRef name() { return "SimplifyCFGPass"; }
+
+ /// \brief Construct a pass with the default thresholds.
+ SimplifyCFGPass();
+
+ /// \brief Construct a pass with a specific bonus threshold.
+ SimplifyCFGPass(int BonusInstThreshold);
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/contrib/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
new file mode 100644
index 0000000..4e4f02c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -0,0 +1,64 @@
+//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class AllocaInst;
+
+// These magic constants should be the same as in asan_internal.h in the
+// ASan runtime in compiler-rt.
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+
+// Input/output data struct for ComputeASanStackFrameLayout.
+struct ASanStackVariableDescription {
+ const char *Name; // Name of the variable that will be displayed by asan
+ // if a stack-related bug is reported.
+ uint64_t Size; // Size of the variable in bytes.
+ size_t Alignment; // Alignment of the variable (power of 2).
+ AllocaInst *AI; // The actual AllocaInst.
+ size_t Offset; // Offset from the beginning of the frame;
+ // set by ComputeASanStackFrameLayout.
+};
+
+// Output data struct for ComputeASanStackFrameLayout.
+struct ASanStackFrameLayout {
+ // Frame description, see DescribeAddressIfStack in ASan runtime.
+ SmallString<64> DescriptionString;
+ // The contents of the shadow memory for the stack frame that we need
+ // to set at function entry.
+ SmallVector<uint8_t, 64> ShadowBytes;
+ size_t FrameAlignment; // Alignment for the entire frame.
+ size_t FrameSize; // Size of the frame in bytes.
+};
+
+void ComputeASanStackFrameLayout(
+ // The array of stack variables. The elements may get reordered and changed.
+ SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
+ size_t Granularity,
+ // The minimal size of the left-most redzone (header).
+ // At least 4 pointer sizes, power of 2, and >= Granularity.
+    // The resulting FrameSize should be a multiple of MinHeaderSize.
+ size_t MinHeaderSize,
+ // The result is put here.
+ ASanStackFrameLayout *Layout);
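+
+// A minimal usage sketch; the variable description values and the alloca `AI`
+// are illustrative:
+//
+//   SmallVector<ASanStackVariableDescription, 4> Vars;
+//   Vars.push_back({/*Name=*/"x", /*Size=*/4, /*Alignment=*/4, /*AI=*/AI,
+//                   /*Offset=*/0});
+//   ASanStackFrameLayout Layout;
+//   ComputeASanStackFrameLayout(Vars, /*Granularity=*/8, /*MinHeaderSize=*/32,
+//                               &Layout);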
+
+} // llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
new file mode 100644
index 0000000..13c856d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -0,0 +1,297 @@
+//===-- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on basic blocks, and
+// instructions contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+
+namespace llvm {
+
+class MemoryDependenceAnalysis;
+class DominatorTree;
+class LoopInfo;
+class Instruction;
+class MDNode;
+class ReturnInst;
+class TargetLibraryInfo;
+class TerminatorInst;
+
+/// DeleteDeadBlock - Delete the specified block, which must have no
+/// predecessors.
+void DeleteDeadBlock(BasicBlock *BB);
+
+/// FoldSingleEntryPHINodes - We know that BB has one predecessor. If there are
+/// any single-entry PHI nodes in it, fold them away. This handles the case
+/// when all entries to the PHI nodes in a block are guaranteed equal, such as
+/// when the block has exactly one predecessor.
+void FoldSingleEntryPHINodes(BasicBlock *BB,
+ MemoryDependenceAnalysis *MemDep = nullptr);
+
+/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
+/// is dead. Also recursively delete any operands that become dead as
+/// a result. This includes tracing the def-use list from the PHI to see if
+/// it is ultimately unused or if it reaches an unused cycle. Return true
+/// if any PHIs were deleted.
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
+
+/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
+/// if possible. The return value indicates success or failure.
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
+
+// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
+// with a value, then remove and delete the original instruction.
+//
+void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Value *V);
+
+// ReplaceInstWithInst - Replace the instruction specified by BI with the
+// instruction specified by I. Copies DebugLoc from BI to I, if I doesn't
+// already have a DebugLoc. The original instruction is deleted and BI is
+// updated to point to the new instruction.
+//
+void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Instruction *I);
+
+// ReplaceInstWithInst - Replace the instruction specified by From with the
+// instruction specified by To. Copies DebugLoc from From to To, if To doesn't
+// already have a DebugLoc.
+//
+//
+void ReplaceInstWithInst(Instruction *From, Instruction *To);
+
+/// \brief Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+ DominatorTree *DT;
+ LoopInfo *LI;
+ bool MergeIdenticalEdges;
+ bool DontDeleteUselessPHIs;
+ bool PreserveLCSSA;
+
+ CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr)
+ : DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+ MergeIdenticalEdges = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+ DontDeleteUselessPHIs = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+ PreserveLCSSA = true;
+ return *this;
+ }
+};
+
+/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
+/// split the critical edge. This will update the analyses passed in through
+/// the option struct. This returns the new block if the edge was split, null
+/// otherwise.
+///
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination. This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst. Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+///
+BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
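+
+/// A usage sketch of the options builder with the entry point above (TI,
+/// SuccNum, DT, and LI assumed available from the enclosing pass):
+/// \code
+///   SplitCriticalEdge(TI, SuccNum,
+///                     CriticalEdgeSplittingOptions(DT, LI)
+///                         .setMergeIdenticalEdges()
+///                         .setPreserveLCSSA());
+/// \endcode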
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+ Options);
+}
+
+/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return
+/// false. Otherwise, split all edges between the two blocks and return true.
+/// This updates all of the same analyses as the other SplitCriticalEdge
+/// function, honoring the same options struct.
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ bool MadeChange = false;
+ TerminatorInst *TI = (*PI)->getTerminator();
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ if (TI->getSuccessor(i) == Succ)
+ MadeChange |= !!SplitCriticalEdge(TI, i, Options);
+ return MadeChange;
+}
+
+/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge
+/// and return the newly created block; otherwise return null. This method
+/// requires that there be an edge between the two blocks. It updates the
+/// analyses passed in through the options struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ TerminatorInst *TI = Src->getTerminator();
+ unsigned i = 0;
+ while (1) {
+ assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
+ if (TI->getSuccessor(i) == Dst)
+ return SplitCriticalEdge(TI, i, Options);
+ ++i;
+ }
+}
+
+// SplitAllCriticalEdges - Loop over all of the edges in the CFG,
+// breaking critical edges as they are found.
+// Returns the number of broken edges.
+unsigned SplitAllCriticalEdges(Function &F,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+/// SplitEdge - Split the edge connecting the specified blocks.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// SplitBlock - Split the specified block at the specified instruction -
+/// everything before SplitPt stays in Old and everything starting with SplitPt
+/// moves to a new block. The two blocks are joined by an unconditional branch
+/// and the loop info is updated.
+///
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// SplitBlockPredecessors - This method introduces at least one new basic block
+/// into the function and moves some of the predecessors of BB to be
+/// predecessors of the new block. The new predecessors are indicated by the
+/// Preds array. The new block is given a suffix of 'Suffix'. Returns new basic
+/// block to which predecessors from Preds are now pointing.
+///
+/// If BB is a landingpad block then additional basicblock might be introduced.
+/// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more
+/// details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
+
+/// SplitLandingPadPredecessors - This method transforms the landing pad,
+/// OrigBB, by introducing two new basic blocks into the function. One of those
+/// new basic blocks gets the predecessors listed in Preds. The other basic
+/// block gets the remaining predecessors of OrigBB. The landingpad instruction
+/// in OrigBB is cloned into both of the new basic blocks. The new blocks are
+/// given the suffixes 'Suffix1' and 'Suffix2' and are returned in NewBBs.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+ ArrayRef<BasicBlock *> Preds,
+ const char *Suffix, const char *Suffix2,
+ SmallVectorImpl<BasicBlock *> &NewBBs,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
+
+/// FoldReturnIntoUncondBranch - This method duplicates the specified return
+/// instruction into a predecessor which ends in an unconditional branch. If
+/// the return instruction returns a value defined by a PHI, propagate the
+/// right value into the return. It returns the new return instruction in the
+/// predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+ BasicBlock *Pred);
+
+/// SplitBlockAndInsertIfThen - Split the containing block at the
+/// specified instruction - everything before and including SplitBefore stays
+/// in the old basic block, and everything after SplitBefore is moved to a
+/// new block. The two blocks are connected by a conditional branch
+/// (with the value of Cond as the condition).
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// SplitBefore
+/// Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT if given.
+TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+ bool Unreachable,
+ MDNode *BranchWeights = nullptr,
+ DominatorTree *DT = nullptr);
+
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// else
+/// ElseBlock
+/// SplitBefore
+/// Tail
+void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+ TerminatorInst **ThenTerm,
+ TerminatorInst **ElseTerm,
+ MDNode *BranchWeights = nullptr);
+
+///
+/// GetIfCondition - Check whether BB is the merge point of an if-region.
+/// If so, return the boolean condition that determines which entry into
+/// BB will be taken. Also, return by references the block that will be
+/// entered from if the condition is true, and the block that will be
+/// entered if the condition is false.
+Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+ BasicBlock *&IfFalse);
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
new file mode 100644
index 0000000..879f295
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -0,0 +1,116 @@
+//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+ class Value;
+ class DataLayout;
+ class TargetLibraryInfo;
+
+ /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
+ Value *CastToCStr(Value *V, IRBuilder<> &B);
+
+ /// EmitStrLen - Emit a call to the strlen function to the builder, for the
+ /// specified pointer. Ptr is required to be some pointer type, and the
+ /// return value has 'intptr_t' type.
+ Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
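+
+  /// For example (sketch), with B positioned at the insertion point and
+  /// DL/TLI taken from the enclosing pass:
+  /// \code
+  ///   Value *Len = EmitStrLen(CastToCStr(Ptr, B), B, DL, TLI);
+  /// \endcode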
+
+ /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
+ /// specified pointer. Ptr is required to be some pointer type, MaxLen must
+ /// be of size_t type, and the return value has 'intptr_t' type.
+ Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitStrChr - Emit a call to the strchr function to the builder, for the
+ /// specified pointer and character. Ptr is required to be some pointer type,
+ /// and the return value has 'i8*' type.
+ Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
+ /// specified pointer arguments.
+ Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
+
+ /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
+ /// specified pointer arguments and length.
+ Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
+
+ /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
+ /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
+ /// are pointers.
+ Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+ IRBuilder<> &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
+ /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
+ Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitMemCmp - Emit a call to the memcmp function.
+ Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
+  /// (e.g. 'floor'). This function is known to take a single argument of type
+  /// matching 'Op' and returns one value of the same type. If 'Op' is a long
+  /// double, an 'l' suffix is added to the name; if 'Op' is a float, an 'f'
+  /// suffix is added.
+ Value *EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+ const AttributeSet &Attrs);
+
+  /// EmitBinaryFloatFnCall - Emit a call to the binary function named 'Name'
+  /// (e.g. 'fmin'). This function is known to take types matching 'Op1' and
+  /// 'Op2' and return one value of the same type. If 'Op1'/'Op2' are long
+  /// double, an 'l' suffix is added to the name; if they are float, an 'f'
+  /// suffix is added.
+ Value *EmitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+ IRBuilder<> &B, const AttributeSet &Attrs);
+
+ /// EmitPutChar - Emit a call to the putchar function. This assumes that Char
+ /// is an integer.
+ Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+ /// EmitPutS - Emit a call to the puts function. This assumes that Str is
+ /// some pointer.
+ Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+ /// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
+ /// an i32, and File is a pointer to FILE.
+ Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+  /// EmitFPutS - Emit a call to the fputs function. Str is required to be a
+  /// pointer and File is a pointer to FILE.
+ Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
+ /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
+ Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 0000000..0d081c0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,34 @@
+//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+/// This optimization identifies DIV instructions that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+bool bypassSlowDivision(Function &F,
+ Function::iterator &I,
+ const DenseMap<unsigned int, unsigned int> &BypassWidth);
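+
+// A usage sketch: request that 32-bit divides be carried out with 8-bit
+// divides when the operands fit (mirroring the Atom example above; the
+// iteration details are illustrative):
+//
+//   DenseMap<unsigned int, unsigned int> BypassWidth;
+//   BypassWidth[32] = 8;
+//   for (Function::iterator I = F.begin(); I != F.end(); ++I)
+//     bypassSlowDivision(F, I, BypassWidth);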
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
new file mode 100644
index 0000000..92a1d52
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -0,0 +1,266 @@
+//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various functions that are used to clone chunks of LLVM
+// code for various purposes. This varies from copying whole modules into new
+// modules, to cloning functions with different arguments, to inlining
+// functions, to copying basic blocks to support loop unrolling or superblock
+// formation, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
+#define LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <functional>
+
+namespace llvm {
+
+class Module;
+class Function;
+class Instruction;
+class Pass;
+class LPPassManager;
+class BasicBlock;
+class Value;
+class CallInst;
+class InvokeInst;
+class ReturnInst;
+class CallSite;
+class Trace;
+class CallGraph;
+class DataLayout;
+class Loop;
+class LoopInfo;
+class AllocaInst;
+class AssumptionCacheTracker;
+class DominatorTree;
+
+/// Return an exact copy of the specified module.
+///
+std::unique_ptr<Module> CloneModule(const Module *M);
+std::unique_ptr<Module> CloneModule(const Module *M, ValueToValueMapTy &VMap);
+
+/// Return a copy of the specified module. The ShouldCloneDefinition function
+/// controls whether a specific GlobalValue's definition is cloned. If the
+/// function returns false, the module copy will contain an external reference
+/// in place of the global definition.
+std::unique_ptr<Module>
+CloneModule(const Module *M, ValueToValueMapTy &VMap,
+ std::function<bool(const GlobalValue *)> ShouldCloneDefinition);
+
+/// ClonedCodeInfo - This struct can be used to capture information about code
+/// being cloned, while it is being cloned.
+struct ClonedCodeInfo {
+ /// ContainsCalls - This is set to true if the cloned code contains a normal
+ /// call instruction.
+ bool ContainsCalls;
+
+ /// ContainsDynamicAllocas - This is set to true if the cloned code contains
+ /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
+  /// the entry block, or are in the entry block but do not have a constant
+  /// size.
+ bool ContainsDynamicAllocas;
+
+ /// All cloned call sites that have operand bundles attached are appended to
+ /// this vector. This vector may contain nulls or undefs if some of the
+ /// originally inserted callsites were DCE'ed after they were cloned.
+ std::vector<WeakVH> OperandBundleCallSites;
+
+ ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {}
+};
+
+/// CloneBasicBlock - Return a copy of the specified basic block, but without
+/// embedding the block into a particular function. The block returned is an
+/// exact copy of the specified basic block, without any remapping having been
+/// performed. Because of this, this is only suitable for applications where
+/// the basic block will be inserted into the same function that it was cloned
+/// from (loop unrolling would use this, for example).
+///
+/// Also, note that this function makes a direct copy of the basic block, and
+/// can thus produce illegal LLVM code. In particular, it will copy any PHI
+/// nodes from the original block, even though there are no predecessors for the
+/// newly cloned block (thus, phi nodes will have to be updated). Also, this
+/// block will branch to the old successors of the original block: these
+/// successors will have to have any PHI nodes updated to account for the new
+/// incoming edges.
+///
+/// The correlation between instructions in the source and result basic blocks
+/// is recorded in the VMap map.
+///
+/// If you have a particular suffix you'd like to use to add to any cloned
+/// names, specify it as the optional third parameter.
+///
+/// If you would like the basic block to be auto-inserted into the end of a
+/// function, you can specify it as the optional fourth parameter.
+///
+/// If you would like to collect additional information about the cloned
+/// function, you can specify a ClonedCodeInfo object with the optional fifth
+/// parameter.
+///
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix = "", Function *F = nullptr,
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// CloneFunction - Return a copy of the specified function, but without
+/// embedding the function into another module. Also, any references specified
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
+/// updated to include mappings from all of the instructions and basicblocks in
+/// the function from their old to new values. The final argument captures
+/// information about the cloned code if non-null.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings, and debug info metadata will not be cloned.
+///
+Function *CloneFunction(const Function *F, ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges,
+ ClonedCodeInfo *CodeInfo = nullptr);
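+
+/// A minimal sketch; at this interface the clone is not inserted into any
+/// module, so the final insertion below is the caller's responsibility:
+/// \code
+///   ValueToValueMapTy VMap;
+///   Function *NewF = CloneFunction(F, VMap, /*ModuleLevelChanges=*/false);
+///   NewF->setName(F->getName() + ".clone");
+///   F->getParent()->getFunctionList().push_back(NewF);
+/// \endcode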
+
+/// Clone OldFunc into NewFunc, transforming the old arguments into references
+/// to VMap values. Note that if NewFunc already has basic blocks, the ones
+/// cloned into it will be added to the end of the function. This function
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+/// A helper class used with CloneAndPruneIntoFromInst to change the default
+/// behavior while instructions are being cloned.
+class CloningDirector {
+public:
+ /// This enumeration describes the way CloneAndPruneIntoFromInst should
+ /// proceed after the CloningDirector has examined an instruction.
+  enum CloningAction {
+    /// Continue cloning the instruction (default behavior).
+    CloneInstruction,
+    /// Skip this instruction but continue cloning the current basic block.
+    SkipInstruction,
+    /// Skip this instruction and stop cloning the current basic block.
+    StopCloningBB,
+    /// Don't clone the terminator but clone the current block's successors.
+    CloneSuccessors
+  };
+
+ virtual ~CloningDirector() {}
+
+ /// Subclasses must override this function to customize cloning behavior.
+ virtual CloningAction handleInstruction(ValueToValueMapTy &VMap,
+ const Instruction *Inst,
+ BasicBlock *NewBB) = 0;
+
+ virtual ValueMapTypeRemapper *getTypeRemapper() { return nullptr; }
+ virtual ValueMaterializer *getValueMaterializer() { return nullptr; }
+};
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+ const Instruction *StartingInst,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ CloningDirector *Director = nullptr);
+
+
+/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
+/// except that it does some simple constant prop and DCE on the fly. The
+/// effect of this is to copy significantly less code in cases where (for
+/// example) a function call with constant arguments is inlined, and those
+/// constant arguments cause a significant amount of code in the callee to be
+/// dead. Since this doesn't produce an exact copy of the input, it can't be
+/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ Instruction *TheCall = nullptr);
+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+ explicit InlineFunctionInfo(CallGraph *cg = nullptr,
+ AssumptionCacheTracker *ACT = nullptr)
+ : CG(cg), ACT(ACT) {}
+
+ /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+ /// changes it makes.
+ CallGraph *CG;
+ AssumptionCacheTracker *ACT;
+
+ /// StaticAllocas - InlineFunction fills this in with all static allocas that
+ /// get copied into the caller.
+ SmallVector<AllocaInst *, 4> StaticAllocas;
+
+ /// InlinedCalls - InlineFunction fills this in with callsites that were
+ /// inlined from the callee. This is only filled in if CG is non-null.
+ SmallVector<WeakVH, 8> InlinedCalls;
+
+ void reset() {
+ StaticAllocas.clear();
+ InlinedCalls.clear();
+ }
+};
+
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller. This returns false if it is not possible to inline
+/// this call. The program is still in a well defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+///
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
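+
+/// A sketch of inlining a single call site (CS is an existing CallSite):
+/// \code
+///   InlineFunctionInfo IFI;
+///   if (InlineFunction(CS, IFI)) {
+///     // Inlined; IFI.StaticAllocas now lists allocas copied into the caller.
+///   }
+/// \endcode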
+
+/// \brief Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
+Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+ Loop *OrigLoop, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix, LoopInfo *LI,
+ DominatorTree *DT,
+ SmallVectorImpl<BasicBlock *> &Blocks);
+
+/// \brief Remaps instructions in \p Blocks using the mapping in \p VMap.
+void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
+ ValueToValueMapTy &VMap);
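+
+/// The two utilities above are typically paired (sketch; Before, DomBB, L, LI,
+/// and DT come from the caller):
+/// \code
+///   SmallVector<BasicBlock *, 8> Blocks;
+///   ValueToValueMapTy VMap;
+///   cloneLoopWithPreheader(Before, DomBB, L, VMap, ".copy", LI, DT, Blocks);
+///   remapInstructionsInBlocks(Blocks, VMap);
+/// \endcode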
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
new file mode 100644
index 0000000..73c15e4
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -0,0 +1,65 @@
+//===-- CmpInstAnalysis.h - Utils to help fold compare insts ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse compare instructions
+// and fold them into constants or other compare instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+#define LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+ class ICmpInst;
+ class Value;
+
+  /// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
+ /// are carefully arranged to allow folding of expressions such as:
+ ///
+ /// (A < B) | (A > B) --> (A != B)
+ ///
+  /// Note that this is only valid if the first and second predicates have the
+  /// same sign. It is illegal to do: (A u< B) | (A s> B)
+ ///
+ /// Three bits are used to represent the condition, as follows:
+ /// 0 A > B
+ /// 1 A == B
+ /// 2 A < B
+ ///
+ /// <=> Value Definition
+ /// 000 0 Always false
+ /// 001 1 A > B
+ /// 010 2 A == B
+ /// 011 3 A >= B
+ /// 100 4 A < B
+ /// 101 5 A != B
+ /// 110 6 A <= B
+ /// 111 7 Always true
+ ///
+ unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
+
+ /// getICmpValue - This is the complement of getICmpCode, which turns an
+ /// opcode and two operands into either a constant true or false, or the
+ /// predicate for a new ICmp instruction. The sign is passed in to determine
+ /// which kind of predicate to use in the new icmp instruction.
+  /// A non-null return value will be a true or false constant.
+  /// A null return means a new ICmp is needed; the predicate to use for it
+  /// is output in NewICmpPred.
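+  ///
+  /// A hedged sketch of how a client might fold an OR of two compares over
+  /// the same operands A and B (LHSCmp/RHSCmp are hypothetical ICmpInsts
+  /// with matching signedness):
+  /// \code
+  ///   unsigned Code = getICmpCode(LHSCmp) | getICmpCode(RHSCmp);
+  ///   CmpInst::Predicate NewPred;
+  ///   Value *V = getICmpValue(/*Sign=*/true, Code, A, B, NewPred);
+  ///   // V is a constant true/false, or null with NewPred set for a new icmp.
+  /// \endcode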
+ Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+ CmpInst::Predicate &NewICmpPred);
+
+ /// PredicatesFoldable - Return true if both predicates match sign or if at
+ /// least one of them is an equality comparison (which is signless).
+ bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 0000000..3a96d95
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,126 @@
+//===-- Transform/Utils/CodeExtractor.h - Code extraction util --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace llvm {
+ class BasicBlock;
+ class DominatorTree;
+ class Function;
+ class Loop;
+ class Module;
+ class RegionNode;
+ class Type;
+ class Value;
+
+ /// \brief Utility class for extracting code into a new function.
+ ///
+ /// This utility provides a simple interface for extracting some sequence of
+ /// code into its own function, replacing it with a call to that function. It
+ /// also provides various methods to query about the nature and result of
+ /// such a transformation.
+ ///
+ /// The rough algorithm used is:
+ /// 1) Find both the inputs and outputs for the extracted region.
+ /// 2) Pass the inputs as arguments, remapping them within the extracted
+ /// function to arguments.
+ /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+ /// as arguments, and inserting stores to the arguments for any scalars.
+ class CodeExtractor {
+ typedef SetVector<Value *> ValueSet;
+
+ // Various bits of state computed on construction.
+ DominatorTree *const DT;
+ const bool AggregateArgs;
+
+ // Bits of intermediate state computed at various phases of extraction.
+ SetVector<BasicBlock *> Blocks;
+ unsigned NumExitBlocks;
+ Type *RetTy;
+
+ public:
+ /// \brief Create a code extractor for a single basic block.
+ ///
+ /// In this formation, we don't require a dominator tree. The given basic
+ /// block is set up for extraction.
+ CodeExtractor(BasicBlock *BB, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a sequence of blocks.
+ ///
+ /// Given a sequence of basic blocks where the first block in the sequence
+ /// dominates the rest, prepare a code extractor object for pulling this
+ /// sequence out into its new function. When a DominatorTree is also given,
+ /// extra checking and transformations are enabled.
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
+ bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a loop body.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the loop.
+ CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a region node.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the region node passed in.
+ CodeExtractor(DominatorTree &DT, const RegionNode &RN,
+ bool AggregateArgs = false);
+
+ /// \brief Perform the extraction, returning the new function.
+ ///
+    /// Returns null when called on a CodeExtractor instance for which
+    /// isEligible returns false.
+ Function *extractCodeRegion();
+
+ /// \brief Test whether this code extractor is eligible.
+ ///
+ /// Based on the blocks used when constructing the code extractor,
+ /// determine whether it is eligible for extraction.
+ bool isEligible() const { return !Blocks.empty(); }
+
+ /// \brief Compute the set of input values and output values for the code.
+ ///
+ /// These can be used either when performing the extraction or to evaluate
+ /// the expected size of a call to the extracted function. Note that this
+ /// work cannot be cached between the two as once we decide to extract
+ /// a code sequence, that sequence is modified, including changing these
+ /// sets, before extraction occurs. These modifications won't have any
+ /// significant impact on the cost however.
+ void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs) const;
+
+ private:
+ void severSplitPHINodes(BasicBlock *&Header);
+ void splitReturnBlocks();
+
+ Function *constructFunction(const ValueSet &inputs,
+ const ValueSet &outputs,
+ BasicBlock *header,
+ BasicBlock *newRootNode, BasicBlock *newHeader,
+ Function *oldFunction, Module *M);
+
+ void moveCodeToFunction(Function *newFunction);
+
+ void emitCallAndSwitchStatement(Function *newFunction,
+ BasicBlock *newHeader,
+ ValueSet &inputs,
+ ValueSet &outputs);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/CtorUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 0000000..63e564d
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,32 @@
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true. Return true if anything changed.
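+///
+/// A hedged sketch of a typical call; the predicate here is illustrative
+/// only (it drops ctor entries whose function is a bare declaration):
+/// \code
+///   bool Changed = optimizeGlobalCtorsList(
+///       M, [](Function *F) { return F && F->isDeclaration(); });
+/// \endcode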
+bool optimizeGlobalCtorsList(Module &M,
+ function_ref<bool(Function *)> ShouldRemove);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/GlobalStatus.h b/contrib/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
new file mode 100644
index 0000000..c366095
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -0,0 +1,82 @@
+//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+class Value;
+class Function;
+
+/// It is safe to destroy a constant iff it is used only by other constants.
+/// Note that constants cannot be cyclic, so this test is pretty easy to
+/// implement recursively.
+///
+bool isSafeToDestroyConstant(const Constant *C);
+
+/// As we analyze each global, keep track of some information about it. If we
+/// find out that the address of the global is taken, none of this info will be
+/// accurate.
+struct GlobalStatus {
+ /// True if the global's address is used in a comparison.
+ bool IsCompared;
+
+ /// True if the global is ever loaded. If the global isn't ever loaded it
+ /// can be deleted.
+ bool IsLoaded;
+
+ /// Keep track of what stores to the global look like.
+ enum StoredType {
+ /// There is no store to this global. It can thus be marked constant.
+ NotStored,
+
+ /// This global is stored to, but the only thing stored is the constant it
+ /// was initialized with. This is only tracked for scalar globals.
+ InitializerStored,
+
+ /// This global is stored to, but only its initializer and one other value
+  /// are ever stored to it. If this global isStoredOnce, we track the value
+ /// stored to it in StoredOnceValue below. This is only tracked for scalar
+ /// globals.
+ StoredOnce,
+
+ /// This global is stored to by multiple values or something else that we
+ /// cannot track.
+ Stored
+ } StoredType;
+
+ /// If only one value (besides the initializer constant) is ever stored to
+ /// this global, keep track of what value it is.
+ Value *StoredOnceValue;
+
+ /// These start out null/false. When the first accessing function is noticed,
+ /// it is recorded. When a second different accessing function is noticed,
+ /// HasMultipleAccessingFunctions is set to true.
+ const Function *AccessingFunction;
+ bool HasMultipleAccessingFunctions;
+
+ /// Set to true if this global has a user that is not an instruction (e.g. a
+ /// constant expr or GV initializer).
+ bool HasNonInstructionUser;
+
+ /// Set to the strongest atomic ordering requirement.
+ AtomicOrdering Ordering;
+
+ /// Look at all uses of the global and fill in the GlobalStatus structure. If
+ /// the global has its address taken, return true to indicate we can't do
+ /// anything with it.
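+  ///
+  /// A hedged sketch ('GV' is a hypothetical global the caller is probing):
+  /// \code
+  ///   GlobalStatus GS;
+  ///   if (!GlobalStatus::analyzeGlobal(GV, GS) && !GS.IsLoaded)
+  ///     ; // GV is never loaded and its address is not taken.
+  /// \endcode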
+ static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
+
+ GlobalStatus();
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h b/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 0000000..0ec3321
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,73 @@
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32-bit and 64-bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+ class BinaryOperator;
+}
+
+namespace llvm {
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+ /// with the generated code. This currently generates code using the udiv
+ /// expansion, but future work includes generating more specialized code,
+  /// e.g. when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+ ///
+ /// @brief Replace Rem with generated code.
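+  ///
+  /// A hedged sketch ('I' is an instruction the caller has already matched):
+  /// \code
+  ///   if (I->getOpcode() == Instruction::URem ||
+  ///       I->getOpcode() == Instruction::SRem)
+  ///     expandRemainder(cast<BinaryOperator>(I));
+  /// \endcode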
+ bool expandRemainder(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+ /// code. This currently generates code similarly to compiler-rt's
+ /// implementations, but future work includes generating more specialized code
+  /// when more information about the operands is known. Implements both
+  /// 32-bit and 64-bit scalar division.
+ ///
+ /// @brief Replace Div with generated code.
+ bool expandDivision(BinaryOperator* Div);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 32-bit Rem, which
+  /// makes it useful for targets with little or no support for arithmetic
+  /// narrower than 32 bits.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainderUpTo32Bits(BinaryOperator *Rem);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 64-bit Rem.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 32-bit Div, which makes it useful for
+  /// targets with little or no support for arithmetic narrower than 32 bits.
+ ///
+  /// @brief Replace Div with generated code.
+ bool expandDivisionUpTo32Bits(BinaryOperator *Div);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 64-bit Div.
+ ///
+  /// @brief Replace Div with generated code.
+ bool expandDivisionUpTo64Bits(BinaryOperator *Div);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/Local.h b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
new file mode 100644
index 0000000..81b376f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/Local.h
@@ -0,0 +1,335 @@
+//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform various local transformations to the
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
+#define LLVM_TRANSFORMS_UTILS_LOCAL_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+
+class User;
+class BasicBlock;
+class Function;
+class BranchInst;
+class Instruction;
+class DbgDeclareInst;
+class StoreInst;
+class LoadInst;
+class Value;
+class PHINode;
+class AllocaInst;
+class AssumptionCache;
+class ConstantExpr;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class DIBuilder;
+class DominatorTree;
+
+template<typename T> class SmallVectorImpl;
+
+//===----------------------------------------------------------------------===//
+// Local constant propagation.
+//
+
+/// ConstantFoldTerminator - If a terminator instruction is predicated on a
+/// constant value, convert it into an unconditional branch to the constant
+/// destination. This is a nontrivial operation because the successors of this
+/// basic block must have their PHI nodes updated.
+/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
+/// conditions and indirectbr addresses this might make dead if
+/// DeleteDeadConditions is true.
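+///
+/// A hedged sketch folding every terminator in a caller-supplied function F:
+/// \code
+///   bool Changed = false;
+///   for (BasicBlock &BB : F)
+///     Changed |= ConstantFoldTerminator(&BB, /*DeleteDeadConditions=*/true);
+/// \endcode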
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+ const TargetLibraryInfo *TLI = nullptr);
+
+//===----------------------------------------------------------------------===//
+// Local dead code elimination.
+//
+
+/// isInstructionTriviallyDead - Return true if the result produced by the
+/// instruction is not used, and the instruction has no side effects.
+///
+bool isInstructionTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
+/// trivially dead instruction, delete it. If that makes any of its operands
+/// trivially dead, delete them too, recursively. Return true if any
+/// instructions were deleted.
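+///
+/// A hedged sketch of the common replace-then-clean-up pattern ('I' and
+/// 'NewV' are values supplied by the caller):
+/// \code
+///   I->replaceAllUsesWith(NewV);
+///   RecursivelyDeleteTriviallyDeadInstructions(I);
+/// \endcode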
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
+/// dead PHI node, due to being a def-use chain of single-use nodes that
+/// either forms a cycle or is terminated by a trivially dead instruction,
+/// delete it. If that makes any of its operands trivially dead, delete them
+/// too, recursively. Return true if a change was made.
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
+/// simplify any instructions in it and recursively delete dead instructions.
+///
+/// This returns true if it changed the code, note that it can delete
+/// instructions in other blocks as well as in this block.
+bool SimplifyInstructionsInBlock(BasicBlock *BB,
+ const TargetLibraryInfo *TLI = nullptr);
+
+//===----------------------------------------------------------------------===//
+// Control Flow Graph Restructuring.
+//
+
+/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
+/// method is called when we're about to delete Pred as a predecessor of BB. If
+/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
+///
+/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
+/// nodes that collapse into identity values. For example, if we have:
+/// x = phi(1, 0, 0, 0)
+/// y = and x, z
+///
+/// .. and delete the predecessor corresponding to the '1', this will attempt to
+/// recursively fold the 'and' to 0.
+void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
+
+/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
+/// predecessor is known to have one successor (BB!). Eliminate the edge
+/// between them, moving the instructions in the predecessor into BB. This
+/// deletes the predecessor block.
+///
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
+
+/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
+/// unconditional branch, and contains no instructions other than PHI nodes,
+/// potential debug intrinsics and the branch. If possible, eliminate BB by
+/// rewriting all the predecessors to branch to the successor block and return
+/// true. If we can't transform, return false.
+bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB);
+
+/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
+/// nodes in this block. This doesn't try to be clever about PHI nodes
+/// which differ only in the order of the incoming values, but instcombine
+/// orders them so it usually won't matter.
+///
+bool EliminateDuplicatePHINodes(BasicBlock *BB);
+
+/// SimplifyCFG - This function is used to do simplification of a CFG. For
+/// example, it adjusts branches to branches to eliminate the extra hop, it
+/// eliminates unreachable basic blocks, and does other "peephole" optimization
+/// of the CFG. It returns true if a modification was made, possibly deleting
+/// the basic block that was pointed to.
+///
+bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+ unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);
+
+/// FlattenCFG - This function is used to flatten a CFG. For
+/// example, it uses parallel-and and parallel-or mode to collapse
+/// if-conditions and merge if-regions with identical statements.
+///
+bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
+
+/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
+/// and if a predecessor branches to us and one of our successors, fold the
+/// setcc into the predecessor and use logical operations to pick the right
+/// destination.
+bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
+
+/// DemoteRegToStack - This function takes a virtual register computed by an
+/// Instruction and replaces it with a slot in the stack frame, allocated via
+/// alloca. This allows the CFG to be changed around without fear of
+/// invalidating the SSA information for the value. It returns the pointer to
+/// the alloca inserted to create a stack slot for X.
+///
+AllocaInst *DemoteRegToStack(Instruction &X,
+ bool VolatileLoads = false,
+ Instruction *AllocaPoint = nullptr);
+
+/// DemotePHIToStack - This function takes a virtual register computed by a phi
+/// node and replaces it with a slot in the stack frame, allocated via alloca.
+/// The phi node is deleted and it returns the pointer to the alloca inserted.
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
+
+/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
+/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
+/// and it is more than the alignment of the ultimate object, see if we can
+/// increase the alignment of the ultimate object, making this check succeed.
+unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+ const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr);
+
+/// getKnownAlignment - Try to infer an alignment for the specified pointer.
+static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr) {
+ return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
+}
+
+/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
+/// code necessary to compute the offset from the base pointer (without adding
+/// in the base pointer). Return the result as a signed integer of intptr size.
+/// When NoAssumptions is true, no assumptions about index computation not
+/// overflowing are made.
+template <typename IRBuilderTy>
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
+ bool NoAssumptions = false) {
+ GEPOperator *GEPOp = cast<GEPOperator>(GEP);
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
+ Value *Result = Constant::getNullValue(IntPtrTy);
+
+ // If the GEP is inbounds, we know that none of the addressing operations will
+ // overflow in an unsigned sense.
+ bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
+
+ // Build a mask for high order bits.
+ unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
+ uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+ ++i, ++GTI) {
+ Value *Op = *i;
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+ if (Constant *OpC = dyn_cast<Constant>(Op)) {
+ if (OpC->isZeroValue())
+ continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (OpC->getType()->isVectorTy())
+ OpC = OpC->getSplatValue();
+
+ uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
+ Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
+
+ if (Size)
+ Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".offs");
+ continue;
+ }
+
+ Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+ Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+ Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+ continue;
+ }
+ // Convert to correct type.
+ if (Op->getType() != IntPtrTy)
+ Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+ if (Size != 1) {
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".idx", isInBounds /*NUW*/);
+ }
+
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+ }
+ return Result;
+}
+
+///===---------------------------------------------------------------------===//
+/// Dbg Intrinsic utilities
+///
+
+/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
+/// that has an associated llvm.dbg.declare intrinsic.
+bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+ StoreInst *SI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
+/// that has an associated llvm.dbg.declare intrinsic.
+bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+ LoadInst *LI, DIBuilder &Builder);
+
+/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
+/// of llvm.dbg.value intrinsics.
+bool LowerDbgDeclare(Function &F);
+
+/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
+/// an alloca, if any.
+DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
+
+/// \brief Replaces llvm.dbg.declare instruction when the address it describes
+/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
+/// prepended to the expression. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the optional Deref). Offset can be
+/// negative.
+bool replaceDbgDeclare(Value *Address, Value *NewAddress,
+ Instruction *InsertBefore, DIBuilder &Builder,
+ bool Deref, int Offset);
+
+/// \brief Replaces llvm.dbg.declare instruction when the alloca it describes
+/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
+/// prepended to the expression. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the optional Deref). Offset can be
+/// negative. New llvm.dbg.declare is inserted immediately before AI.
+bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+ DIBuilder &Builder, bool Deref, int Offset = 0);
+
+/// \brief Insert an unreachable instruction before the specified
+/// instruction, making it and the rest of the code in the block dead.
+void changeToUnreachable(Instruction *I, bool UseLLVMTrap);
+
+/// Replace 'BB's terminator with one that does not have an unwind successor
+/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
+/// successor.
+///
+/// \param BB Block whose terminator will be replaced. Its terminator must
+/// have an unwind successor.
+void removeUnwindEdge(BasicBlock *BB);
+
+/// \brief Remove all blocks that cannot be reached from the function's entry.
+///
+/// Returns true if any basic block was removed.
+bool removeUnreachableBlocks(Function &F);
+
+/// \brief Combine the metadata of two instructions so that K can replace J.
+///
+/// Metadata not listed as known via KnownIDs is removed.
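+///
+/// A hedged sketch; the KnownIDs list here is illustrative only:
+/// \code
+///   unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_range};
+///   combineMetadata(K, J, KnownIDs);
+/// \endcode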
+void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
+
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given edge. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlockEdge &Edge);
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given BasicBlock. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlock *BB);
+
+
+/// \brief Return true if the CallSite CS calls a gc leaf function.
+///
+/// A leaf function is a function that does not safepoint the thread during its
+/// execution. During a call or invoke to such a function, the caller's stack
+/// does not have to be made parseable.
+///
+/// Most passes can and should ignore this information, and it is only used
+/// during lowering by the GC infrastructure.
+bool callsGCLeafFunction(ImmutableCallSite CS);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
new file mode 100644
index 0000000..17aaee0
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -0,0 +1,379 @@
+//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -*- C++ -*-=========//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop transformation utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class AliasSet;
+class AliasSetTracker;
+class AssumptionCache;
+class BasicBlock;
+class DataLayout;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class Pass;
+class PredIteratorCache;
+class ScalarEvolution;
+class TargetLibraryInfo;
+
+/// \brief Captures loop safety information.
+/// It keeps track of whether the loop or its header may throw an exception.
+struct LICMSafetyInfo {
+ bool MayThrow; // The current loop contains an instruction which
+ // may throw.
+ bool HeaderMayThrow; // Same as previous, but specific to loop header
+  LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false) {}
+};
+
+/// The RecurrenceDescriptor is used to identify recurrence variables in a
+/// loop. Reduction is a special case of recurrence that has uses of the
+/// recurrence variable outside the loop. The method isReductionPHI identifies
+/// reductions that are basic recurrences.
+///
+/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
+/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
+/// array[i]; } is a summation of array elements. Basic recurrences are a
+/// special case of chains of recurrences (CR). See ScalarEvolution for CR
+/// references.
+
+/// This struct holds information about recurrence variables.
+class RecurrenceDescriptor {
+
+public:
+ /// This enum represents the kinds of recurrences that we support.
+ enum RecurrenceKind {
+ RK_NoRecurrence, ///< Not a recurrence.
+ RK_IntegerAdd, ///< Sum of integers.
+ RK_IntegerMult, ///< Product of integers.
+ RK_IntegerOr, ///< Bitwise or logical OR of numbers.
+ RK_IntegerAnd, ///< Bitwise or logical AND of numbers.
+ RK_IntegerXor, ///< Bitwise or logical XOR of numbers.
+ RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
+ RK_FloatAdd, ///< Sum of floats.
+ RK_FloatMult, ///< Product of floats.
+ RK_FloatMinMax ///< Min/max implemented in terms of select(cmp()).
+ };
+
+ // This enum represents the kind of minmax recurrence.
+ enum MinMaxRecurrenceKind {
+ MRK_Invalid,
+ MRK_UIntMin,
+ MRK_UIntMax,
+ MRK_SIntMin,
+ MRK_SIntMax,
+ MRK_FloatMin,
+ MRK_FloatMax
+ };
+
+ RecurrenceDescriptor()
+ : StartValue(nullptr), LoopExitInstr(nullptr), Kind(RK_NoRecurrence),
+ MinMaxKind(MRK_Invalid), UnsafeAlgebraInst(nullptr),
+ RecurrenceType(nullptr), IsSigned(false) {}
+
+ RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
+ MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
+ bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+ : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
+ UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+ CastInsts.insert(CI.begin(), CI.end());
+ }
+
+ /// This POD struct holds information about a potential recurrence operation.
+ class InstDesc {
+
+ public:
+ InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
+ : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
+ UnsafeAlgebraInst(UAI) {}
+
+ InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
+ : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
+ UnsafeAlgebraInst(UAI) {}
+
+ bool isRecurrence() { return IsRecurrence; }
+
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
+
+ Instruction *getPatternInst() { return PatternLastInst; }
+
+ private:
+ // Is this instruction a recurrence candidate.
+ bool IsRecurrence;
+ // The last instruction in a min/max pattern (select of the select(icmp())
+ // pattern), or the current recurrence instruction otherwise.
+ Instruction *PatternLastInst;
+ // If this is a min/max pattern the comparison predicate.
+ MinMaxRecurrenceKind MinMaxKind;
+ // Recurrence has unsafe algebra.
+ Instruction *UnsafeAlgebraInst;
+ };
+
+ /// Returns a struct describing if the instruction 'I' can be a recurrence
+ /// variable of type 'Kind'. If the recurrence is a min/max pattern of
+ /// select(icmp()) this function advances the instruction pointer 'I' from the
+ /// compare instruction to the select instruction and stores this pointer in
+ /// 'PatternLastInst' member of the returned struct.
+ static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+ InstDesc &Prev, bool HasFunNoNaNAttr);
+
+  /// Returns true if instruction I has multiple uses in Insts.
+ static bool hasMultipleUsesOf(Instruction *I,
+ SmallPtrSetImpl<Instruction *> &Insts);
+
+  /// Returns true if all uses of the instruction I are within the Set.
+ static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+
+  /// Returns a struct describing whether the instruction is a
+ /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
+ /// or max(X, Y).
+ static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
+
+ /// Returns identity corresponding to the RecurrenceKind.
+ static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
+
+ /// Returns the opcode of binary operation corresponding to the
+ /// RecurrenceKind.
+ static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
+
+ /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+ static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK,
+ Value *Left, Value *Right);
+
+ /// Returns true if Phi is a reduction of type Kind and adds it to the
+ /// RecurrenceDescriptor.
+ static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
+ bool HasFunNoNaNAttr,
+ RecurrenceDescriptor &RedDes);
+
+ /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor is
+ /// returned in RedDes.
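+  ///
+  /// A hedged sketch scanning a loop header's PHI nodes ('Header' is the
+  /// header block of TheLoop, obtained by the caller):
+  /// \code
+  ///   RecurrenceDescriptor RD;
+  ///   for (Instruction &I : *Header) {
+  ///     auto *PN = dyn_cast<PHINode>(&I);
+  ///     if (!PN)
+  ///       break; // PHIs are clustered at the top of the block.
+  ///     if (RecurrenceDescriptor::isReductionPHI(PN, TheLoop, RD))
+  ///       break; // RD now describes the reduction rooted at PN.
+  ///   }
+  /// \endcode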
+ static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
+ RecurrenceDescriptor &RedDes);
+
+ RecurrenceKind getRecurrenceKind() { return Kind; }
+
+ MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
+
+ TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
+
+ Instruction *getLoopExitInstr() { return LoopExitInstr; }
+
+ /// Returns true if the recurrence has unsafe algebra which requires a relaxed
+ /// floating-point model.
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ /// Returns first unsafe algebra instruction in the PHI node's use-chain.
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ /// Returns true if the recurrence kind is an integer kind.
+ static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is a floating point kind.
+ static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is an arithmetic kind.
+ static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
+
+ /// Determines if Phi may have been type-promoted. If Phi has a single user
+ /// that ANDs the Phi with a type mask, return the user. RT is updated to
+ /// account for the narrower bit width represented by the mask, and the AND
+ /// instruction is added to CI.
+ static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
+ SmallPtrSetImpl<Instruction *> &Visited,
+ SmallPtrSetImpl<Instruction *> &CI);
+
+ /// Returns true if all the source operands of a recurrence are either
+ /// SExtInsts or ZExtInsts. This function is intended to be used with
+ /// lookThroughAnd to determine if the recurrence has been type-promoted. The
+ /// source operands are added to CI, and IsSigned is updated to indicate if
+ /// all source operands are SExtInsts.
+ static bool getSourceExtensionKind(Instruction *Start, Instruction *Exit,
+ Type *RT, bool &IsSigned,
+ SmallPtrSetImpl<Instruction *> &Visited,
+ SmallPtrSetImpl<Instruction *> &CI);
+
+ /// Returns the type of the recurrence. This type can be narrower than the
+ /// actual type of the Phi if the recurrence has been type-promoted.
+ Type *getRecurrenceType() { return RecurrenceType; }
+
+ /// Returns a reference to the instructions used for type-promoting the
+ /// recurrence.
+ SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
+
+ /// Returns true if all source operands of the recurrence are SExtInsts.
+ bool isSigned() { return IsSigned; }
+
+private:
+ // The starting value of the recurrence.
+ // It does not have to be zero!
+ TrackingVH<Value> StartValue;
+  // The instruction whose value is used outside the loop.
+ Instruction *LoopExitInstr;
+ // The kind of the recurrence.
+ RecurrenceKind Kind;
+  // If this is a min/max recurrence, the kind of min/max recurrence.
+ MinMaxRecurrenceKind MinMaxKind;
+  // First occurrence of unsafe algebra in the PHI's use-chain.
+ Instruction *UnsafeAlgebraInst;
+ // The type of the recurrence.
+ Type *RecurrenceType;
+ // True if all source operands of the recurrence are SExtInsts.
+ bool IsSigned;
+ // Instructions used for type-promoting the recurrence.
+ SmallPtrSet<Instruction *, 8> CastInsts;
+};
+
+/// A struct for saving information about induction variables.
+class InductionDescriptor {
+public:
+ /// This enum represents the kinds of inductions that we support.
+ enum InductionKind {
+ IK_NoInduction, ///< Not an induction variable.
+ IK_IntInduction, ///< Integer induction variable. Step = C.
+ IK_PtrInduction ///< Pointer induction var. Step = C / sizeof(elem).
+ };
+
+public:
+ /// Default constructor - creates an invalid induction.
+ InductionDescriptor()
+ : StartValue(nullptr), IK(IK_NoInduction), StepValue(nullptr) {}
+
+ /// Get the consecutive direction. Returns:
+ /// 0 - unknown or non-consecutive.
+ /// 1 - consecutive and increasing.
+ /// -1 - consecutive and decreasing.
+ int getConsecutiveDirection() const;
+
+ /// Compute the transformed value of Index at offset StartValue using step
+ /// StepValue.
+ /// For integer induction, returns StartValue + Index * StepValue.
+ /// For pointer induction, returns StartValue[Index * StepValue].
+ /// FIXME: The newly created binary instructions should contain nsw/nuw
+ /// flags, which can be found from the original scalar operations.
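+  ///
+  /// For example (hedged), with an integer induction of StartValue 10 and
+  /// StepValue 2, transform(B, Index) emits code computing 10 + Index * 2.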
+ Value *transform(IRBuilder<> &B, Value *Index) const;
+
+ Value *getStartValue() const { return StartValue; }
+ InductionKind getKind() const { return IK; }
+ ConstantInt *getStepValue() const { return StepValue; }
+
+ static bool isInductionPHI(PHINode *Phi, ScalarEvolution *SE,
+ InductionDescriptor &D);
+
+private:
+ /// Private constructor - used by \c isInductionPHI.
+ InductionDescriptor(Value *Start, InductionKind K, ConstantInt *Step);
+
+ /// Start value.
+ TrackingVH<Value> StartValue;
+ /// Induction kind.
+ InductionKind IK;
+ /// Step value.
+ ConstantInt *StepValue;
+};
+
+BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
+ bool PreserveLCSSA);
+
+/// \brief Simplify each loop in a loop nest recursively.
+///
+/// This takes a potentially un-simplified loop L (and its children) and turns
+/// it into a simplified loop nest with preheaders and single backedges. It will
+/// update \c AliasAnalysis and \c ScalarEvolution analyses if they're non-null.
+bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
+ AssumptionCache *AC, bool PreserveLCSSA);
+
+/// \brief Put loop into LCSSA form.
+///
+/// Looks at all instructions in the loop which have uses outside of the
+/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
+/// the loop are rewritten to use this node.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// \brief Put a loop nest into LCSSA form.
+///
+/// This recursively forms LCSSA for a loop nest.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// TargetLibraryInfo, Loop, and AliasSet information for all
+/// instructions of the loop, plus loop safety information, as arguments.
+/// It returns the changed status.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ TargetLibraryInfo *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree. This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// TargetLibraryInfo, Loop, and AliasSet information for all instructions of
+/// the loop, plus loop safety information, as arguments. It returns the
+/// changed status.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ TargetLibraryInfo *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes AliasSet, Loop exit blocks vector, loop exit blocks
+/// insertion point vector, PredIteratorCache, LoopInfo, DominatorTree, Loop,
+/// AliasSet information for all instructions of the loop and loop safety
+/// information as arguments. It returns changed status.
+bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock*> &,
+ SmallVectorImpl<Instruction*> &,
+ PredIteratorCache &, LoopInfo *,
+ DominatorTree *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Computes safety information for a loop.
+/// Checks the loop body and header for the possibility of a thrown
+/// exception. Takes a LICMSafetyInfo and a loop as arguments, and updates
+/// the safety information in the LICMSafetyInfo argument.
+void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *);
+
+/// \brief Returns the instructions that use values defined in the loop.
+SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/LoopVersioning.h b/contrib/llvm/include/llvm/Transforms/Utils/LoopVersioning.h
new file mode 100644
index 0000000..3b70594
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -0,0 +1,114 @@
+//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a utility class to perform loop versioning. The versioned
+// loop speculates that otherwise may-aliasing memory accesses don't overlap and
+// emits checks to prove this.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+
+namespace llvm {
+
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class ScalarEvolution;
+
+/// \brief This class emits a version of the loop where run-time checks ensure
+/// that may-alias pointers can't overlap.
+///
+/// It currently only supports single-exit loops and assumes that the loop
+/// already has a preheader.
+class LoopVersioning {
+public:
+ /// \brief Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
+  /// It uses the runtime checks provided by the user. If \p UseLAIChecks is
+  /// true, we will retain the default checks made by LAI. Otherwise, the
+  /// object is constructed without checks, and the user is expected to add
+  /// them.
+ LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+ DominatorTree *DT, ScalarEvolution *SE,
+ bool UseLAIChecks = true);
+
+ /// \brief Performs the CFG manipulation part of versioning the loop including
+ /// the DominatorTree and LoopInfo updates.
+ ///
+ /// The loop that was used to construct the class will be the "versioned" loop
+ /// i.e. the loop that will receive control if all the memchecks pass.
+ ///
+ /// This allows the loop transform pass to operate on the same loop regardless
+ /// of whether versioning was necessary or not:
+ ///
+ /// for each loop L:
+ /// analyze L
+ /// if versioning is necessary version L
+ /// transform L
+ void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }
+
+ /// \brief Same but if the client has already precomputed the set of values
+  /// used outside the loop, this API allows passing that set in.
+ void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// \brief Returns the versioned loop. Control flows here if pointers in the
+ /// loop don't alias (i.e. all memchecks passed). (This loop is actually the
+ /// same as the original loop that we got constructed with.)
+ Loop *getVersionedLoop() { return VersionedLoop; }
+
+ /// \brief Returns the fall-back loop. Control flows here if pointers in the
+ /// loop may alias (i.e. one of the memchecks failed).
+ Loop *getNonVersionedLoop() { return NonVersionedLoop; }
+
+ /// \brief Sets the runtime alias checks for versioning the loop.
+ void setAliasChecks(
+ const SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks);
+
+ /// \brief Sets the runtime SCEV checks for versioning the loop.
+ void setSCEVChecks(SCEVUnionPredicate Check);
+
+private:
+ /// \brief Adds the necessary PHI nodes for the versioned loops based on the
+ /// loop-defined values used outside of the loop.
+ ///
+ /// This needs to be called after versionLoop if there are defs in the loop
+ /// that are used outside the loop.
+ void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// \brief The original loop. This becomes the "versioned" one. I.e.,
+ /// control flows here if pointers in the loop don't alias.
+ Loop *VersionedLoop;
+ /// \brief The fall-back loop. I.e. control flows here if pointers in the
+ /// loop may alias (memchecks failed).
+ Loop *NonVersionedLoop;
+
+ /// \brief This maps the instructions from VersionedLoop to their counterpart
+ /// in NonVersionedLoop.
+ ValueToValueMapTy VMap;
+
+ /// \brief The set of alias checks that we are versioning for.
+ SmallVector<RuntimePointerChecking::PointerCheck, 4> AliasChecks;
+
+ /// \brief The set of SCEV checks that we are versioning for.
+ SCEVUnionPredicate Preds;
+
+ /// \brief Analyses used.
+ const LoopAccessInfo &LAI;
+ LoopInfo *LI;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+};
+}
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 0000000..0f23d34
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,64 @@
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+class Module;
+class Function;
+class GlobalValue;
+class GlobalVariable;
+class Constant;
+class StringRef;
+class Value;
+class Type;
+template <class PtrType> class SmallPtrSetImpl;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it along
+/// side other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
+void appendToGlobalCtors(Module &M, Function *F, int Priority);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority);
+
+/// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect
+/// the initializer elements of that global in Set and return the global itself.
+GlobalVariable *collectUsedGlobalVariables(Module &M,
+ SmallPtrSetImpl<GlobalValue *> &Set,
+ bool CompilerUsed);
+
+/// Validate the result of Module::getOrInsertFunction called for an interface
+/// function of the given sanitizer. If the instrumented module defines a
+/// function with the same name, their prototypes must match; otherwise
+/// getOrInsertFunction returns a bitcast.
+Function *checkSanitizerInterfaceFunction(Constant *FuncOrBitcast);
+
+/// \brief Creates sanitizer constructor function, and calls sanitizer's init
+/// function from it.
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
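+///
+/// A hedged sketch; the ctor and init names below are illustrative, not
+/// real sanitizer entry points:
+/// \code
+///   Function *Ctor, *InitFn;
+///   std::tie(Ctor, InitFn) = createSanitizerCtorAndInitFunctions(
+///       M, "mysan.module_ctor", "__mysan_init", /*InitArgTypes=*/{},
+///       /*InitArgs=*/{});
+/// \endcode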
+std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
+ Module &M, StringRef CtorName, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+ StringRef VersionCheckName = StringRef());
+} // End llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
new file mode 100644
index 0000000..d0602bf
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -0,0 +1,50 @@
+//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to promote alloca instructions to SSA
+// registers, by using the SSA construction algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+class AllocaInst;
+class DominatorTree;
+class AliasSetTracker;
+class AssumptionCache;
+
+/// \brief Return true if this alloca is legal for promotion.
+///
+/// This is true if there are only loads, stores, and lifetime markers
+/// (transitively) using this alloca. This also enforces that there is only
+/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
+/// markers.
+bool isAllocaPromotable(const AllocaInst *AI);
+
+/// \brief Promote the specified list of alloca instructions into scalar
+/// registers, inserting PHI nodes as appropriate.
+///
+/// This function makes use of DominanceFrontier information. This function
+/// does not modify the CFG of the function at all. All allocas must be from
+/// the same function.
+///
+/// If AST is specified, the specified tracker is updated to reflect changes
+/// made to the IR.
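+///
+/// A hedged sketch promoting the promotable allocas of F's entry block
+/// ('DT' is a DominatorTree computed by the caller):
+/// \code
+///   std::vector<AllocaInst *> Allocas;
+///   for (Instruction &I : F.getEntryBlock())
+///     if (auto *AI = dyn_cast<AllocaInst>(&I))
+///       if (isAllocaPromotable(AI))
+///         Allocas.push_back(AI);
+///   if (!Allocas.empty())
+///     PromoteMemToReg(Allocas, DT);
+/// \endcode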
+void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
+ AliasSetTracker *AST = nullptr,
+ AssumptionCache *AC = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
new file mode 100644
index 0000000..1c7b2c58
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -0,0 +1,178 @@
+//===-- SSAUpdater.h - Unstructured SSA Update Tool -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class LoadInst;
+ template<typename T> class SmallVectorImpl;
+ template<typename T> class SSAUpdaterTraits;
+ class PHINode;
+ class Type;
+ class Use;
+ class Value;
+
+/// \brief Helper class for SSA formation on a set of values defined in
+/// multiple blocks.
+///
+/// This is used when code duplication or another unstructured
+/// transformation wants to rewrite a set of uses of one value with uses of a
+/// set of values.
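+///
+/// A hedged sketch of typical use; BB1/BB2, V1/V2 and the use list are
+/// supplied by the client:
+/// \code
+///   SSAUpdater SSA;
+///   SSA.Initialize(V1->getType(), "rewritten");
+///   SSA.AddAvailableValue(BB1, V1);
+///   SSA.AddAvailableValue(BB2, V2);
+///   for (Use *U : UsesToRewrite)
+///     SSA.RewriteUse(*U);
+/// \endcode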
+class SSAUpdater {
+ friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
+ /// This keeps track of which value to use on a per-block basis. When we
+ /// insert PHI nodes, we keep track of them here.
+ //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
+ void *AV;
+
+ /// ProtoType holds the type of the values being rewritten.
+ Type *ProtoType;
+
+ /// PHI nodes are given a name based on ProtoName.
+ std::string ProtoName;
+
+ /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
+ /// the vector.
+ SmallVectorImpl<PHINode*> *InsertedPHIs;
+
+public:
+ /// If InsertedPHIs is specified, it will be filled
+ /// in with all PHI Nodes created by rewriting.
+ explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = nullptr);
+ ~SSAUpdater();
+
+ /// \brief Reset this object to get ready for a new set of SSA updates with
+ /// type 'Ty'.
+ ///
+ /// PHI nodes get a name based on 'Name'.
+ void Initialize(Type *Ty, StringRef Name);
+
+ /// \brief Indicate that a rewritten value is available in the specified block
+ /// with the specified value.
+ void AddAvailableValue(BasicBlock *BB, Value *V);
+
+ /// \brief Return true if the SSAUpdater already has a value for the specified
+ /// block.
+ bool HasValueForBlock(BasicBlock *BB) const;
+
+ /// \brief Construct SSA form, materializing a value that is live at the end
+ /// of the specified block.
+ Value *GetValueAtEndOfBlock(BasicBlock *BB);
+
+ /// \brief Construct SSA form, materializing a value that is live in the
+ /// middle of the specified block.
+ ///
+ /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
+ /// in one important case: if there is a definition of the rewritten value
+ /// after the 'use' in BB. Consider code like this:
+ ///
+ /// \code
+ /// X1 = ...
+ /// SomeBB:
+ /// use(X)
+ /// X2 = ...
+ /// br Cond, SomeBB, OutBB
+ /// \endcode
+ ///
+ /// In this case, there are two values (X1 and X2) added to the AvailableVals
+ /// set by the client of the rewriter, and those values are both live out of
+ /// their respective blocks. However, the use of X happens in the *middle* of
+ /// a block. Because of this, we need to insert a new PHI node in SomeBB to
+ /// merge the appropriate values, and this value isn't live out of the block.
+ Value *GetValueInMiddleOfBlock(BasicBlock *BB);
+
+ /// \brief Rewrite a use of the symbolic value.
+ ///
+ /// This handles PHI nodes, which use their value in the corresponding
+ /// predecessor. Note that this will not work if the use is supposed to be
+ /// rewritten to a value defined in the same block as the use, but above it.
+ /// Any 'AddAvailableValue's added for the use's block will be considered to
+ /// be below it.
+ void RewriteUse(Use &U);
+
+ /// \brief Rewrite a use like \c RewriteUse but handling in-block definitions.
+ ///
+ /// This version of the method can rewrite uses in the same block as
+ /// a definition, because it assumes that all uses of a value are below any
+ /// inserted values.
+ void RewriteUseAfterInsertions(Use &U);
+
+private:
+ Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+
+ void operator=(const SSAUpdater&) = delete;
+ SSAUpdater(const SSAUpdater&) = delete;
+};
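// Editorial sketch (not part of this patch): typical use of the API above on
// the X1/X2 example. EntryBB (the block defining X1), SomeBB, and the use U
// are assumed names supplied by the client.
SmallVector<PHINode *, 8> NewPHIs;
SSAUpdater Updater(&NewPHIs);
Updater.Initialize(X1->getType(), "x");
Updater.AddAvailableValue(EntryBB, X1); // X1 is live out of its block
Updater.AddAvailableValue(SomeBB, X2);  // X2 is live out of SomeBB
Updater.RewriteUse(U); // may insert a PHI in SomeBB; new PHIs land in NewPHIs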
+
+/// \brief Helper class for promoting a collection of loads and stores into SSA
+/// Form using the SSAUpdater.
+///
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
+class LoadAndStorePromoter {
+protected:
+ SSAUpdater &SSA;
+
+public:
+ LoadAndStorePromoter(ArrayRef<const Instruction*> Insts,
+ SSAUpdater &S, StringRef Name = StringRef());
+ virtual ~LoadAndStorePromoter() {}
+
+ /// \brief This does the promotion.
+ ///
+ /// Insts is a list of loads and stores to promote, and Name is the basename
+ /// for the PHIs to insert. After this is complete, the loads and stores are
+ /// removed from the code.
+ void run(const SmallVectorImpl<Instruction*> &Insts) const;
+
+  /// \brief Return true if the specified instruction is in the Insts list.
+ ///
+ /// The Insts list is the one passed into the constructor. Clients should
+ /// implement this with a more efficient version if possible.
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const;
+
+ /// \brief This hook is invoked after all the stores are found and inserted as
+ /// available values.
+ virtual void doExtraRewritesBeforeFinalDeletion() const {
+ }
+
+ /// \brief Clients can choose to implement this to get notified right before
+  /// a load is RAUW'd with another value.
+ virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
+ }
+
+ /// \brief Called before each instruction is deleted.
+ virtual void instructionDeleted(Instruction *I) const {
+ }
+
+ /// \brief Called to update debug info associated with the instruction.
+ virtual void updateDebugInfo(Instruction *I) const {
+ }
+};
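// Editorial sketch (not part of this patch): a minimal client of the class
// above. Insts is an assumed list of loads and stores of a single location;
// only hooks declared in this header are used.
struct MyPromoter : LoadAndStorePromoter {
  MyPromoter(ArrayRef<const Instruction *> Insts, SSAUpdater &S)
      : LoadAndStorePromoter(Insts, S, "promoted") {}
  void instructionDeleted(Instruction *I) const override {
    // Hook: drop I from any client-side tables before it is erased.
  }
};

void promoteAll(SmallVectorImpl<Instruction *> &Insts) { // assumed helper
  SSAUpdater SSA;
  MyPromoter(Insts, SSA).run(Insts); // removes the loads/stores when done
}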
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 0000000..425ecd3
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,460 @@
+//===-- SSAUpdaterImpl.h - SSA Updater Implementation -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+#define DEBUG_TYPE "ssaupdater"
+
+class CastInst;
+class PHINode;
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+ UpdaterT *Updater;
+
+ typedef SSAUpdaterTraits<UpdaterT> Traits;
+ typedef typename Traits::BlkT BlkT;
+ typedef typename Traits::ValT ValT;
+ typedef typename Traits::PhiT PhiT;
+
+ /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+ /// The predecessors of each block are cached here since pred_iterator is
+ /// slow and we need to iterate over the blocks at least a few times.
+ class BBInfo {
+ public:
+ BlkT *BB; // Back-pointer to the corresponding block.
+ ValT AvailableVal; // Value to use in this block.
+ BBInfo *DefBB; // Block that defines the available value.
+ int BlkNum; // Postorder number.
+ BBInfo *IDom; // Immediate dominator.
+ unsigned NumPreds; // Number of predecessor blocks.
+ BBInfo **Preds; // Array[NumPreds] of predecessor blocks.
+ PhiT *PHITag; // Marker for existing PHIs that match.
+
+ BBInfo(BlkT *ThisBB, ValT V)
+ : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr), BlkNum(0),
+ IDom(nullptr), NumPreds(0), Preds(nullptr), PHITag(nullptr) {}
+ };
+
+ typedef DenseMap<BlkT*, ValT> AvailableValsTy;
+ AvailableValsTy *AvailableVals;
+
+ SmallVectorImpl<PhiT*> *InsertedPHIs;
+
+ typedef SmallVectorImpl<BBInfo*> BlockListTy;
+ typedef DenseMap<BlkT*, BBInfo*> BBMapTy;
+ BBMapTy BBMap;
+ BumpPtrAllocator Allocator;
+
+public:
+ explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+ SmallVectorImpl<PhiT*> *Ins) :
+ Updater(U), AvailableVals(A), InsertedPHIs(Ins) { }
+
+ /// GetValue - Check to see if AvailableVals has an entry for the specified
+ /// BB and if so, return it. If not, construct SSA form by first
+ /// calculating the required placement of PHIs and then inserting new PHIs
+ /// where needed.
+ ValT GetValue(BlkT *BB) {
+ SmallVector<BBInfo*, 100> BlockList;
+ BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+ // Special case: bail out if BB is unreachable.
+ if (BlockList.size() == 0) {
+ ValT V = Traits::GetUndefVal(BB, Updater);
+ (*AvailableVals)[BB] = V;
+ return V;
+ }
+
+ FindDominators(&BlockList, PseudoEntry);
+ FindPHIPlacement(&BlockList);
+ FindAvailableVals(&BlockList);
+
+ return BBMap[BB]->DefBB->AvailableVal;
+ }
+
+ /// BuildBlockList - Starting from the specified basic block, traverse back
+ /// through its predecessors until reaching blocks with known values.
+ /// Create BBInfo structures for the blocks and append them to the block
+ /// list.
+ BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+ SmallVector<BBInfo*, 10> RootList;
+ SmallVector<BBInfo*, 64> WorkList;
+
+ BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+ BBMap[BB] = Info;
+ WorkList.push_back(Info);
+
+ // Search backward from BB, creating BBInfos along the way and stopping
+ // when reaching blocks that define the value. Record those defining
+ // blocks on the RootList.
+ SmallVector<BlkT*, 10> Preds;
+ while (!WorkList.empty()) {
+ Info = WorkList.pop_back_val();
+ Preds.clear();
+ Traits::FindPredecessorBlocks(Info->BB, &Preds);
+ Info->NumPreds = Preds.size();
+ if (Info->NumPreds == 0)
+ Info->Preds = nullptr;
+ else
+ Info->Preds = static_cast<BBInfo**>
+ (Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
+ AlignOf<BBInfo*>::Alignment));
+
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BlkT *Pred = Preds[p];
+ // Check if BBMap already has a BBInfo for the predecessor block.
+ typename BBMapTy::value_type &BBMapBucket =
+ BBMap.FindAndConstruct(Pred);
+ if (BBMapBucket.second) {
+ Info->Preds[p] = BBMapBucket.second;
+ continue;
+ }
+
+ // Create a new BBInfo for the predecessor.
+ ValT PredVal = AvailableVals->lookup(Pred);
+ BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+ BBMapBucket.second = PredInfo;
+ Info->Preds[p] = PredInfo;
+
+ if (PredInfo->AvailableVal) {
+ RootList.push_back(PredInfo);
+ continue;
+ }
+ WorkList.push_back(PredInfo);
+ }
+ }
+
+ // Now that we know what blocks are backwards-reachable from the starting
+ // block, do a forward depth-first traversal to assign postorder numbers
+ // to those blocks.
+ BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
+ unsigned BlkNum = 1;
+
+ // Initialize the worklist with the roots from the backward traversal.
+ while (!RootList.empty()) {
+ Info = RootList.pop_back_val();
+ Info->IDom = PseudoEntry;
+ Info->BlkNum = -1;
+ WorkList.push_back(Info);
+ }
+
+ while (!WorkList.empty()) {
+ Info = WorkList.back();
+
+ if (Info->BlkNum == -2) {
+ // All the successors have been handled; assign the postorder number.
+ Info->BlkNum = BlkNum++;
+ // If not a root, put it on the BlockList.
+ if (!Info->AvailableVal)
+ BlockList->push_back(Info);
+ WorkList.pop_back();
+ continue;
+ }
+
+      // Leave this entry on the worklist, but set its BlkNum to mark that its
+      // successors have been put on the worklist. When it returns to the top
+      // of the list, after handling its successors, it will be assigned a
+      // number.
+ Info->BlkNum = -2;
+
+ // Add unvisited successors to the work list.
+ for (typename Traits::BlkSucc_iterator SI =
+ Traits::BlkSucc_begin(Info->BB),
+ E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+ BBInfo *SuccInfo = BBMap[*SI];
+ if (!SuccInfo || SuccInfo->BlkNum)
+ continue;
+ SuccInfo->BlkNum = -1;
+ WorkList.push_back(SuccInfo);
+ }
+ }
+ PseudoEntry->BlkNum = BlkNum;
+ return PseudoEntry;
+ }
+
+ /// IntersectDominators - This is the dataflow lattice "meet" operation for
+ /// finding dominators. Given two basic blocks, it walks up the dominator
+ /// tree until it finds a common dominator of both. It uses the postorder
+ /// number of the blocks to determine how to do that.
+ BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+ while (Blk1 != Blk2) {
+ while (Blk1->BlkNum < Blk2->BlkNum) {
+ Blk1 = Blk1->IDom;
+ if (!Blk1)
+ return Blk2;
+ }
+ while (Blk2->BlkNum < Blk1->BlkNum) {
+ Blk2 = Blk2->IDom;
+ if (!Blk2)
+ return Blk1;
+ }
+ }
+ return Blk1;
+ }
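// Editorial note (not part of this patch): a worked example of the meet,
// under assumed postorder numbers. With blocks C(2) and B(3), Entry(4), and
// both C->IDom and B->IDom pointing at Entry, IntersectDominators(B, C) walks
// whichever block currently has the smaller postorder number up its IDom
// chain: C(2) -> Entry(4), then B(3) -> Entry(4). The chains meet at Entry,
// which is returned as the common dominator.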
+
+ /// FindDominators - Calculate the dominator tree for the subset of the CFG
+ /// corresponding to the basic blocks on the BlockList. This uses the
+ /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+  /// and Kennedy, published in Software: Practice and Experience, 2001,
+ /// 4:1-10. Because the CFG subset does not include any edges leading into
+ /// blocks that define the value, the results are not the usual dominator
+ /// tree. The CFG subset has a single pseudo-entry node with edges to a set
+ /// of root nodes for blocks that define the value. The dominators for this
+ /// subset CFG are not the standard dominators but they are adequate for
+ /// placing PHIs within the subset CFG.
+ void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+ BBInfo *NewIDom = nullptr;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *Pred = Info->Preds[p];
+
+ // Treat an unreachable predecessor as a definition with 'undef'.
+ if (Pred->BlkNum == 0) {
+ Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+ (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+ Pred->DefBB = Pred;
+ Pred->BlkNum = PseudoEntry->BlkNum;
+ PseudoEntry->BlkNum++;
+ }
+
+ if (!NewIDom)
+ NewIDom = Pred;
+ else
+ NewIDom = IntersectDominators(NewIDom, Pred);
+ }
+
+ // Check if the IDom value has changed.
+ if (NewIDom && NewIDom != Info->IDom) {
+ Info->IDom = NewIDom;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+ /// any blocks containing definitions of the value. If one is found, then
+ /// the successor of Pred is in the dominance frontier for the definition,
+ /// and this function returns true.
+ bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+ for (; Pred != IDom; Pred = Pred->IDom) {
+ if (Pred->DefBB == Pred)
+ return true;
+ }
+ return false;
+ }
+
+ /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+ /// of the known definitions. Iteratively add PHIs in the dom frontiers
+ /// until nothing changes. Along the way, keep track of the nearest
+ /// dominating definitions for non-PHI blocks.
+ void FindPHIPlacement(BlockListTy *BlockList) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ // If this block already needs a PHI, there is nothing to do here.
+ if (Info->DefBB == Info)
+ continue;
+
+ // Default to use the same def as the immediate dominator.
+ BBInfo *NewDefBB = Info->IDom->DefBB;
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+ // Need a PHI here.
+ NewDefBB = Info;
+ break;
+ }
+ }
+
+ // Check if anything changed.
+ if (NewDefBB != Info->DefBB) {
+ Info->DefBB = NewDefBB;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// FindAvailableVal - If this block requires a PHI, first check if an
+ /// existing PHI matches the PHI placement and reaching definitions computed
+ /// earlier, and if not, create a new PHI. Visit all the block's
+ /// predecessors to calculate the available value for each one and fill in
+ /// the incoming values for a new PHI.
+ void FindAvailableVals(BlockListTy *BlockList) {
+ // Go through the worklist in forward order (i.e., backward through the CFG)
+ // and check if existing PHIs can be used. If not, create empty PHIs where
+ // they are needed.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I) {
+ BBInfo *Info = *I;
+ // Check if there needs to be a PHI in BB.
+ if (Info->DefBB != Info)
+ continue;
+
+ // Look for an existing PHI.
+ FindExistingPHI(Info->BB, BlockList);
+ if (Info->AvailableVal)
+ continue;
+
+ ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+ Info->AvailableVal = PHI;
+ (*AvailableVals)[Info->BB] = PHI;
+ }
+
+ // Now go back through the worklist in reverse order to fill in the
+ // arguments for any new PHIs added in the forward traversal.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ if (Info->DefBB != Info) {
+ // Record the available value at join nodes to speed up subsequent
+ // uses of this SSAUpdater for the same value.
+ if (Info->NumPreds > 1)
+ (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+ continue;
+ }
+
+ // Check if this block contains a newly added PHI.
+ PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+ if (!PHI)
+ continue;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *PredInfo = Info->Preds[p];
+ BlkT *Pred = PredInfo->BB;
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+ Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+ }
+
+ DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
+
+ // If the client wants to know about all new instructions, tell it.
+ if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+ }
+ }
+
+ /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+ /// them match what is needed.
+ void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+ for (typename BlkT::iterator BBI = BB->begin(), BBE = BB->end();
+ BBI != BBE; ++BBI) {
+ PhiT *SomePHI = Traits::InstrIsPHI(&*BBI);
+ if (!SomePHI)
+ break;
+ if (CheckIfPHIMatches(SomePHI)) {
+ RecordMatchingPHIs(BlockList);
+ break;
+ }
+ // Match failed: clear all the PHITag values.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ (*I)->PHITag = nullptr;
+ }
+ }
+
+ /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+ /// in the BBMap.
+ bool CheckIfPHIMatches(PhiT *PHI) {
+ SmallVector<PhiT*, 20> WorkList;
+ WorkList.push_back(PHI);
+
+ // Mark that the block containing this PHI has been visited.
+ BBMap[PHI->getParent()]->PHITag = PHI;
+
+ while (!WorkList.empty()) {
+ PHI = WorkList.pop_back_val();
+
+ // Iterate through the PHI's incoming values.
+ for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+ E = Traits::PHI_end(PHI); I != E; ++I) {
+ ValT IncomingVal = I.getIncomingValue();
+ BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+
+ // Check if it matches the expected value.
+ if (PredInfo->AvailableVal) {
+ if (IncomingVal == PredInfo->AvailableVal)
+ continue;
+ return false;
+ }
+
+ // Check if the value is a PHI in the correct block.
+ PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+ if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+ return false;
+
+ // If this block has already been visited, check if this PHI matches.
+ if (PredInfo->PHITag) {
+ if (IncomingPHIVal == PredInfo->PHITag)
+ continue;
+ return false;
+ }
+ PredInfo->PHITag = IncomingPHIVal;
+
+ WorkList.push_back(IncomingPHIVal);
+ }
+ }
+ return true;
+ }
+
+ /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+ /// the BBMap and the AvailableVals mapping.
+ void RecordMatchingPHIs(BlockListTy *BlockList) {
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ if (PhiT *PHI = (*I)->PHITag) {
+ BlkT *BB = PHI->getParent();
+ ValT PHIVal = Traits::GetPHIValue(PHI);
+ (*AvailableVals)[BB] = PHIVal;
+ BBMap[BB]->AvailableVal = PHIVal;
+ }
+ }
+};
+
+#undef DEBUG_TYPE // "ssaupdater"
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
new file mode 100644
index 0000000..3c55e64
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -0,0 +1,71 @@
+//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for induction variable simplification. It does
+// not define any actual pass or policy, but provides a single function to
+// simplify a loop's induction variables based on ScalarEvolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+class CastInst;
+class DominatorTree;
+class IVUsers;
+class Loop;
+class LoopInfo;
+class PHINode;
+class ScalarEvolution;
+
+/// Interface for visiting interesting IV users that are recognized but not
+/// simplified by this utility.
+class IVVisitor {
+protected:
+ const DominatorTree *DT;
+ bool ShouldSplitOverflowIntrinsics;
+
+ virtual void anchor();
+
+public:
+ IVVisitor(): DT(nullptr), ShouldSplitOverflowIntrinsics(false) {}
+ virtual ~IVVisitor() {}
+
+ const DominatorTree *getDomTree() const { return DT; }
+
+  bool shouldSplitOverflowIntrinsics() const {
+ return ShouldSplitOverflowIntrinsics;
+ }
+ void setSplitOverflowIntrinsics() {
+ ShouldSplitOverflowIntrinsics = true;
+ assert(DT && "Splitting overflow intrinsics requires a DomTree.");
+ }
+
+ virtual void visitCast(CastInst *Cast) = 0;
+};
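// Editorial sketch (not part of this patch): a minimal visitor that records
// the casts recognized (but not simplified) by this utility; it plugs into
// simplifyUsersOfIV, declared below, via the optional IVVisitor argument.
struct CollectCasts : IVVisitor {
  SmallVector<CastInst *, 4> Casts; // assumed client-side storage
  void visitCast(CastInst *Cast) override { Casts.push_back(Cast); }
};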
+
+/// simplifyUsersOfIV - Simplify instructions that use this induction variable
+/// by using ScalarEvolution to analyze the IV's recurrence.
+bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+ IVVisitor *V = nullptr);
+
+/// simplifyLoopIVs - Simplify users of induction variables within this
+/// loop. This does not actually change or add IVs.
+bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead);
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 0000000..410a075
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,174 @@
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class Value;
+class CallInst;
+class DataLayout;
+class Instruction;
+class TargetLibraryInfo;
+class BasicBlock;
+class Function;
+
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk),
+/// replacing them, when possible, with their non-checking counterparts.
+/// Other optimizations can also be done, but passing true for
+/// OnlyLowerUnknownSize disables them, so that only needless uses of the
+/// checking versions (where the object size is unknown) are simplified.
+class FortifiedLibCallSimplifier {
+private:
+ const TargetLibraryInfo *TLI;
+ bool OnlyLowerUnknownSize;
+
+public:
+ FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
+ bool OnlyLowerUnknownSize = false);
+
+ /// \brief Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+ // Str/Stp cpy are similar enough to be handled in the same functions.
+ Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+ Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+
+ /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+ /// to the non-fortified version.
+ bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+ unsigned SizeOp, bool isString);
+};
+
+/// LibCallSimplifier - This class implements a collection of optimizations
+/// that replace well-formed calls to library functions with a more optimal
+/// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
+class LibCallSimplifier {
+private:
+ FortifiedLibCallSimplifier FortifiedSimplifier;
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ bool UnsafeFPShrink;
+ function_ref<void(Instruction *, Value *)> Replacer;
+
+ /// \brief Internal wrapper for RAUW that is the default implementation.
+ ///
+ /// Other users may provide an alternate function with this signature instead
+ /// of this one.
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With);
+
+ /// \brief Replace an instruction's uses with a value using our replacer.
+ void replaceAllUsesWith(Instruction *I, Value *With);
+
+public:
+ LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault);
+
+ /// optimizeCall - Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found. Note that the returned value may
+ /// be equal to the instruction being optimized. In this case all
+ /// other instructions that use the given instruction were modified
+ /// and the given instruction is dead.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ // String and Memory Library Call Optimizations
+ Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+ // Wrapper for all String/Memory Library Call Optimizations
+ Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
+
+ // Math Library Optimizations
+ Value *optimizeUnaryDoubleFP(CallInst *CI, IRBuilder<> &B, bool CheckRetType);
+ Value *optimizeBinaryDoubleFP(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
+ Value *optimizePow(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFabs(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeLog(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+
+ // Integer Library Call Optimizations
+ Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);
+
+ // Formatting and IO Library Call Optimizations
+ Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
+ int StreamArg = -1);
+ Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
+ Value *optimizePuts(CallInst *CI, IRBuilder<> &B);
+
+ // Helper methods
+ Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B);
+ void classifyArgUse(Value *Val, BasicBlock *BB, bool IsFloat,
+ SmallVectorImpl<CallInst *> &SinCalls,
+ SmallVectorImpl<CallInst *> &CosCalls,
+ SmallVectorImpl<CallInst *> &SinCosCalls);
+ void replaceTrigInsts(SmallVectorImpl<CallInst *> &Calls, Value *Res);
+ Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
+
+ /// hasFloatVersion - Checks if there is a float version of the specified
+ /// function by checking for an existing function with name FuncName + f
+ bool hasFloatVersion(StringRef FuncName);
+};
+} // End llvm namespace
+
+#endif
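// Editorial sketch (not part of this patch): driving the simplifier over a
// call instruction CI; F (the enclosing function) and TLI are assumed to be
// supplied by the caller.
const DataLayout &DL = F.getParent()->getDataLayout();
LibCallSimplifier Simplifier(DL, &TLI);
if (Value *With = Simplifier.optimizeCall(CI)) {
  if (With != CI)
    CI->replaceAllUsesWith(With); // a better form was found; redirect users
  CI->eraseFromParent(); // per the contract above, CI is dead either way
}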
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SplitModule.h b/contrib/llvm/include/llvm/Transforms/Utils/SplitModule.h
new file mode 100644
index 0000000..7d896d1
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SplitModule.h
@@ -0,0 +1,43 @@
+//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function llvm::SplitModule, which splits a module
+// into multiple linkable partitions. It can be used to implement parallel code
+// generation for link-time optimization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+
+class Module;
+class StringRef;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+///
+/// FIXME: This function does not deal with the somewhat subtle symbol
+/// visibility issues around module splitting, including (but not limited to):
+///
+/// - Internal symbols should not collide with symbols defined outside the
+/// module.
+/// - Internal symbols defined in module-level inline asm should be visible to
+/// each partition.
+void SplitModule(
+ std::unique_ptr<Module> M, unsigned N,
+ std::function<void(std::unique_ptr<Module> MPart)> ModuleCallback);
+
+} // End llvm namespace
+
+#endif
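// Editorial sketch (not part of this patch): splitting an owned module M into
// four partitions; the consumer lambda is an assumed stand-in for, e.g., a
// per-partition code generation queue.
SplitModule(std::move(M), 4, [&](std::unique_ptr<Module> MPart) {
  PartitionQueue.push(std::move(MPart)); // assumed consumer
});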
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h b/contrib/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h
new file mode 100644
index 0000000..5ccee98
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -0,0 +1,152 @@
+//===-- SymbolRewriter.h - Symbol Rewriting Pass ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the prototypes and definitions related to the Symbol
+// Rewriter pass.
+//
+// The Symbol Rewriter pass takes a set of rewrite descriptors which define
+// transformations for symbol names. These can be either simple name-to-name
+// mappings or broader regular-expression-based transformations.
+//
+// All the functions are re-written at the IR level. The Symbol Rewriter itself
+// is exposed as a module level pass. All symbols at the module level are
+// iterated. For any matching symbol, the requested transformation is applied,
+// updating references to it as well (a la RAUW). The resulting binary will
+// only contain the rewritten symbols.
+//
+// By performing this operation in the compiler, we are able to catch symbols
+// that would otherwise be impossible to intercept (e.g. inlined symbols).
+//
+// This makes it possible to cleanly transform symbols without resorting to
+// overly-complex macro tricks and the pre-processor. An example of where this
+// is useful is the sanitizers where we would like to intercept a well-defined
+// set of functions across the module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+class MemoryBuffer;
+
+namespace yaml {
+class KeyValueNode;
+class MappingNode;
+class ScalarNode;
+class Stream;
+}
+
+namespace SymbolRewriter {
+/// The basic entity representing a rewrite operation. It serves as the base
+/// class for any rewrite descriptor. It has a certain set of specializations
+/// which describe a particular rewrite.
+///
+/// The RewriteMapParser can be used to parse a mapping file that provides the
+/// mapping for rewriting the symbols. The descriptors individually describe
+/// whether to rewrite a function, global variable, or global alias. Each of
+/// these can be selected either by explicitly providing a name for the ones to
+/// be rewritten or providing a (posix compatible) regular expression that will
+/// select the symbols to rewrite. This descriptor list is passed to the
+/// SymbolRewriter pass.
+class RewriteDescriptor : public ilist_node<RewriteDescriptor> {
+ RewriteDescriptor(const RewriteDescriptor &) = delete;
+
+ const RewriteDescriptor &
+ operator=(const RewriteDescriptor &) = delete;
+
+public:
+ enum class Type {
+ Invalid, /// invalid
+ Function, /// function - descriptor rewrites a function
+ GlobalVariable, /// global variable - descriptor rewrites a global variable
+ NamedAlias, /// named alias - descriptor rewrites a global alias
+ };
+
+ virtual ~RewriteDescriptor() {}
+
+ Type getType() const { return Kind; }
+
+ virtual bool performOnModule(Module &M) = 0;
+
+protected:
+ explicit RewriteDescriptor(Type T) : Kind(T) {}
+
+private:
+ const Type Kind;
+};
+
+typedef iplist<RewriteDescriptor> RewriteDescriptorList;
+
+class RewriteMapParser {
+public:
+ bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);
+
+private:
+ bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
+ bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
+ RewriteDescriptorList *DL);
+ bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
+ yaml::MappingNode *V,
+ RewriteDescriptorList *DL);
+};
+}
+
+template <>
+struct ilist_traits<SymbolRewriter::RewriteDescriptor>
+ : public ilist_default_traits<SymbolRewriter::RewriteDescriptor> {
+ mutable ilist_half_node<SymbolRewriter::RewriteDescriptor> Sentinel;
+
+public:
+ // createSentinel is used to get a reference to a node marking the end of
+ // the list. Because the sentinel is relative to this instance, use a
+ // non-static method.
+ SymbolRewriter::RewriteDescriptor *createSentinel() const {
+ // since i[p] lists always publicly derive from the corresponding
+ // traits, placing a data member in this class will augment the
+ // i[p]list. Since the NodeTy is expected to publicly derive from
+ // ilist_node<NodeTy>, there is a legal viable downcast from it to
+ // NodeTy. We use this trick to superpose i[p]list with a "ghostly"
+ // NodeTy, which becomes the sentinel. Dereferencing the sentinel is
+ // forbidden (save the ilist_node<NodeTy>) so no one will ever notice
+ // the superposition.
+ return static_cast<SymbolRewriter::RewriteDescriptor *>(&Sentinel);
+ }
+ void destroySentinel(SymbolRewriter::RewriteDescriptor *) {}
+
+ SymbolRewriter::RewriteDescriptor *provideInitialHead() const {
+ return createSentinel();
+ }
+
+ SymbolRewriter::RewriteDescriptor *
+ ensureHead(SymbolRewriter::RewriteDescriptor *&) const {
+ return createSentinel();
+ }
+
+ static void noteHead(SymbolRewriter::RewriteDescriptor *,
+ SymbolRewriter::RewriteDescriptor *) {}
+};
+
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+}
+
+#endif
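// Editorial sketch (not part of this patch): a plausible rewrite-map entry
// and the driver calls. The YAML shape shown is an assumption inferred from
// the parser methods above; only declared APIs are used.
//
//   function: { source: foo, target: bar }   # rename function foo to bar
//
SymbolRewriter::RewriteDescriptorList Descriptors;
SymbolRewriter::RewriteMapParser Parser;
if (Parser.parse("rewrite.map", &Descriptors)) // assumed: true on success
  PM.add(createRewriteSymbolsPass(Descriptors)); // PM: an assumed legacy PassManager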
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
new file mode 100644
index 0000000..550292f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -0,0 +1,52 @@
+//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to ensure that functions have at most one return and one
+// unwind instruction in them. Additionally, it keeps track of which node is
+// the new exit node of the CFG. If there are no return or unwind instructions
+// in the function, the getReturnBlock/getUnwindBlock methods will return a null
+// pointer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+struct UnifyFunctionExitNodes : public FunctionPass {
+ BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+  UnifyFunctionExitNodes() : FunctionPass(ID), ReturnBlock(nullptr),
+                             UnwindBlock(nullptr), UnreachableBlock(nullptr) {
+ initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+ }
+
+ // We can preserve non-critical-edgeness when we unify function exit nodes
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent)
+  // return, unwind, or unreachable basic block in the CFG.
+ //
+ BasicBlock *getReturnBlock() const { return ReturnBlock; }
+ BasicBlock *getUnwindBlock() const { return UnwindBlock; }
+ BasicBlock *getUnreachableBlock() const { return UnreachableBlock; }
+
+ bool runOnFunction(Function &F) override;
+};
+
+Pass *createUnifyFunctionExitNodesPass();
+
+} // End llvm namespace
+
+#endif
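// Editorial sketch (not part of this patch): running the pass through an
// assumed legacy FunctionPassManager FPM, then reading back the unified
// return block.
auto *Unify = new UnifyFunctionExitNodes();
FPM.add(Unify);
FPM.run(F);
if (BasicBlock *Ret = Unify->getReturnBlock()) {
  // Every return in F now funnels through Ret.
  (void)Ret;
}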
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
new file mode 100644
index 0000000..710817c
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -0,0 +1,45 @@
+//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop unrolling utilities. It does not define any
+// actual pass or policy, but provides a single function to perform loop
+// unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class LPPassManager;
+class MDNode;
+class Pass;
+class ScalarEvolution;
+
+bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
+ bool AllowExpensiveTripCount, unsigned TripMultiple,
+ LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
+ AssumptionCache *AC, bool PreserveLCSSA);
+
+bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
+ bool AllowExpensiveTripCount, LoopInfo *LI,
+ ScalarEvolution *SE, DominatorTree *DT,
+ bool PreserveLCSSA);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
+}
+
+#endif
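// Editorial sketch (not part of this patch): fully unrolling a loop whose
// trip count is known to be 4; L and the analyses (LI, SE, DT, AC) are
// assumed to be available from the enclosing pass.
bool Unrolled = UnrollLoop(L, /*Count=*/4, /*TripCount=*/4,
                           /*AllowRuntime=*/false,
                           /*AllowExpensiveTripCount=*/false,
                           /*TripMultiple=*/1, LI, SE, DT, AC,
                           /*PreserveLCSSA=*/true);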
diff --git a/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
new file mode 100644
index 0000000..469022f
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -0,0 +1,135 @@
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MapValue interface which is used by various parts of
+// the Transforms/Utils library to implement cloning and linking facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+ class Value;
+ class Instruction;
+ typedef ValueMap<const Value *, WeakVH> ValueToValueMapTy;
+
+ /// ValueMapTypeRemapper - This is a class that can be implemented by clients
+ /// to remap types when cloning constants and instructions.
+ class ValueMapTypeRemapper {
+ virtual void anchor(); // Out of line method.
+ public:
+ virtual ~ValueMapTypeRemapper() {}
+
+ /// remapType - The client should implement this method if they want to
+ /// remap types while mapping values.
+ virtual Type *remapType(Type *SrcTy) = 0;
+ };
+
+ /// ValueMaterializer - This is a class that can be implemented by clients
+ /// to materialize Values on demand.
+ class ValueMaterializer {
+ virtual void anchor(); // Out of line method.
+
+ protected:
+ ~ValueMaterializer() = default;
+ ValueMaterializer() = default;
+ ValueMaterializer(const ValueMaterializer&) = default;
+ ValueMaterializer &operator=(const ValueMaterializer&) = default;
+
+ public:
+ /// The client should implement this method if they want to generate a
+    /// mapped Value on demand, for example when linking lazily.
+ virtual Value *materializeDeclFor(Value *V) = 0;
+
+ /// If the data being mapped is recursive, the above function can map
+ /// just the declaration and this is called to compute the initializer.
+ /// It is called after the mapping is recorded, so it doesn't need to worry
+ /// about recursion.
+ virtual void materializeInitFor(GlobalValue *New, GlobalValue *Old);
+
+ /// If the client needs to handle temporary metadata it must implement
+ /// these methods.
+ virtual Metadata *mapTemporaryMetadata(Metadata *MD) { return nullptr; }
+ virtual void replaceTemporaryMetadata(const Metadata *OrigMD,
+ Metadata *NewMD) {}
+
+ /// The client should implement this method if some metadata need
+ /// not be mapped, for example DISubprogram metadata for functions not
+ /// linked into the destination module.
+ virtual bool isMetadataNeeded(Metadata *MD) { return true; }
+ };
+
+ /// RemapFlags - These are flags that the value mapping APIs allow.
+ enum RemapFlags {
+ RF_None = 0,
+
+ /// RF_NoModuleLevelChanges - If this flag is set, the remapper knows that
+ /// only local values within a function (such as an instruction or argument)
+ /// are mapped, not global values like functions and global metadata.
+ RF_NoModuleLevelChanges = 1,
+
+ /// RF_IgnoreMissingEntries - If this flag is set, the remapper ignores
+ /// entries that are not in the value map. If it is unset, it aborts if an
+ /// operand is asked to be remapped which doesn't exist in the mapping.
+ RF_IgnoreMissingEntries = 2,
+
+ /// Instruct the remapper to move distinct metadata instead of duplicating
+ /// it when there are module-level changes.
+ RF_MoveDistinctMDs = 4,
+
+ /// Any global values not in value map are mapped to null instead of
+ /// mapping to self. Illegal if RF_IgnoreMissingEntries is also set.
+ RF_NullMapMissingGlobalValues = 8,
+
+ /// Set when there is still temporary metadata that must be handled,
+ /// such as when we are doing function importing and will materialize
+ /// and link metadata as a postpass.
+ RF_HaveUnmaterializedMetadata = 16,
+ };
+
+ static inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+ return RemapFlags(unsigned(LHS)|unsigned(RHS));
+ }
+
+ Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapMetadata - provide versions that preserve type safety for MDNodes.
+ MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapValue - provide versions that preserve type safety for Constants.
+ inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return cast<Constant>(MapValue((const Value*)V, VM, Flags, TypeMapper,
+ Materializer));
+ }
+
+} // End llvm namespace
+
+#endif
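// Editorial sketch (not part of this patch): remapping a cloned instruction's
// operands through a value map, as a cloning utility might. NewI is an
// assumed clone whose operands still point at values of the original
// function; OldArg/NewArg are assumed names.
ValueToValueMapTy VM;
VM[OldArg] = NewArg; // map each original value to its replacement
RemapInstruction(NewI, VM, RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);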
diff --git a/contrib/llvm/include/llvm/Transforms/Vectorize.h b/contrib/llvm/include/llvm/Transforms/Vectorize.h
new file mode 100644
index 0000000..aec3993
--- /dev/null
+++ b/contrib/llvm/include/llvm/Transforms/Vectorize.h
@@ -0,0 +1,144 @@
+//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Vectorize transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_H
+
+namespace llvm {
+class BasicBlock;
+class BasicBlockPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize configuration.
+struct VectorizeConfig {
+ //===--------------------------------------------------------------------===//
+ // Target architecture related parameters
+
+ /// @brief The size of the native vector registers.
+ unsigned VectorBits;
+
+ /// @brief Vectorize boolean values.
+ bool VectorizeBools;
+
+ /// @brief Vectorize integer values.
+ bool VectorizeInts;
+
+ /// @brief Vectorize floating-point values.
+ bool VectorizeFloats;
+
+ /// @brief Vectorize pointer values.
+ bool VectorizePointers;
+
+ /// @brief Vectorize casting (conversion) operations.
+ bool VectorizeCasts;
+
+ /// @brief Vectorize floating-point math intrinsics.
+ bool VectorizeMath;
+
+ /// @brief Vectorize bit intrinsics.
+ bool VectorizeBitManipulations;
+
+ /// @brief Vectorize the fused-multiply-add intrinsic.
+ bool VectorizeFMA;
+
+ /// @brief Vectorize select instructions.
+ bool VectorizeSelect;
+
+ /// @brief Vectorize comparison instructions.
+ bool VectorizeCmp;
+
+ /// @brief Vectorize getelementptr instructions.
+ bool VectorizeGEP;
+
+ /// @brief Vectorize loads and stores.
+ bool VectorizeMemOps;
+
+ /// @brief Only generate aligned loads and stores.
+ bool AlignedOnly;
+
+ //===--------------------------------------------------------------------===//
+ // Misc parameters
+
+ /// @brief The required chain depth for vectorization.
+ unsigned ReqChainDepth;
+
+ /// @brief The maximum search distance for instruction pairs.
+ unsigned SearchLimit;
+
+ /// @brief The maximum number of candidate pairs with which to use a full
+ /// cycle check.
+ unsigned MaxCandPairsForCycleCheck;
+
+ /// @brief Replicating one element to a pair breaks the chain.
+ bool SplatBreaksChain;
+
+ /// @brief The maximum number of pairable instructions per group.
+ unsigned MaxInsts;
+
+ /// @brief The maximum number of candidate instruction pairs per group.
+ unsigned MaxPairs;
+
+ /// @brief The maximum number of pairing iterations.
+ unsigned MaxIter;
+
+ /// @brief Don't try to form odd-length vectors.
+ bool Pow2LenOnly;
+
+ /// @brief Don't boost the chain-depth contribution of loads and stores.
+ bool NoMemOpBoost;
+
+ /// @brief Use a fast instruction dependency analysis.
+ bool FastDep;
+
+ /// @brief Initialize the VectorizeConfig from command line options.
+ VectorizeConfig();
+};
+
+//===----------------------------------------------------------------------===//
+//
+// BBVectorize - A basic-block vectorization pass.
+//
+BasicBlockPass *
+createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass *createLoopVectorizePass(bool NoUnrolling = false,
+ bool AlwaysVectorize = true);
+
+//===----------------------------------------------------------------------===//
+//
+// SLPVectorizer - Create a bottom-up SLP vectorizer pass.
+//
+Pass *createSLPVectorizerPass();
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize the BasicBlock.
+///
+/// @param BB The BasicBlock to be vectorized
+/// @param P The current running pass, should require AliasAnalysis and
+/// ScalarEvolution. After the vectorization, AliasAnalysis,
+/// ScalarEvolution and CFG are preserved.
+///
+/// @return True if the BB is changed, false otherwise.
+///
+bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
+ const VectorizeConfig &C = VectorizeConfig());
+
+} // End llvm namespace
+
+#endif
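// Editorial sketch (not part of this patch): registering the vectorizers with
// an assumed legacy PassManager PM, using only the factories declared above.
PM.add(createLoopVectorizePass(/*NoUnrolling=*/false, /*AlwaysVectorize=*/true));
PM.add(createSLPVectorizerPass());

VectorizeConfig C;  // defaults are initialized from command-line options
C.VectorBits = 256; // e.g. a target with 256-bit vector registers
PM.add(createBBVectorizePass(C));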
diff --git a/contrib/llvm/include/llvm/module.modulemap b/contrib/llvm/include/llvm/module.modulemap
new file mode 100644
index 0000000..0adce0c
--- /dev/null
+++ b/contrib/llvm/include/llvm/module.modulemap
@@ -0,0 +1,231 @@
+module LLVM_Analysis {
+ requires cplusplus
+ umbrella "Analysis"
+ module * { export * }
+
+ // FIXME: Why is this excluded?
+ exclude header "Analysis/BlockFrequencyInfoImpl.h"
+
+ // This is intended for (repeated) textual inclusion.
+ textual header "Analysis/TargetLibraryInfo.def"
+}
+
+module LLVM_AsmParser { requires cplusplus umbrella "AsmParser" module * { export * } }
+
+// A module covering CodeGen/ and Target/. These are intertwined
+// and codependent, and thus notionally form a single module.
+module LLVM_Backend {
+ requires cplusplus
+
+ module CodeGen {
+ umbrella "CodeGen"
+ module * { export * }
+
+ // FIXME: Why is this excluded?
+ exclude header "CodeGen/MachineValueType.h"
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "CodeGen/CommandFlags.h"
+ exclude header "CodeGen/LinkAllAsmWriterComponents.h"
+ exclude header "CodeGen/LinkAllCodegenComponents.h"
+
+ // These are intended for (repeated) textual inclusion.
+ textual header "CodeGen/DIEValue.def"
+ }
+
+ module Target {
+ umbrella "Target"
+ module * { export * }
+ }
+
+ // FIXME: Where should this go?
+ module Analysis_BlockFrequencyInfoImpl {
+ header "Analysis/BlockFrequencyInfoImpl.h"
+ export *
+ }
+}
+
+module LLVM_Bitcode { requires cplusplus umbrella "Bitcode" module * { export * } }
+module LLVM_Config { requires cplusplus umbrella "Config" module * { export * } }
+
+module LLVM_DebugInfo {
+ requires cplusplus
+ module DIContext { header "DebugInfo/DIContext.h" export * }
+}
+
+module LLVM_DebugInfo_DWARF {
+ requires cplusplus
+
+ umbrella "DebugInfo/DWARF"
+ module * { export * }
+}
+
+module LLVM_DebugInfo_PDB {
+ requires cplusplus
+
+ umbrella "DebugInfo/PDB"
+ module * { export * }
+
+ // Separate out this subdirectory; it's an optional component that depends on
+ // a separate library which might not be available.
+ //
+ // FIXME: There should be a better way to specify this.
+ exclude header "DebugInfo/PDB/DIA/DIADataStream.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumDebugStreams.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumLineNumbers.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumSourceFiles.h"
+ exclude header "DebugInfo/PDB/DIA/DIAEnumSymbols.h"
+ exclude header "DebugInfo/PDB/DIA/DIALineNumber.h"
+ exclude header "DebugInfo/PDB/DIA/DIARawSymbol.h"
+ exclude header "DebugInfo/PDB/DIA/DIASession.h"
+ exclude header "DebugInfo/PDB/DIA/DIASourceFile.h"
+ exclude header "DebugInfo/PDB/DIA/DIASupport.h"
+}
+
+module LLVM_DebugInfo_PDB_DIA {
+ requires cplusplus
+
+ umbrella "DebugInfo/PDB/DIA"
+ module * { export * }
+}
+
+module LLVM_ExecutionEngine {
+ requires cplusplus
+
+ umbrella "ExecutionEngine"
+ module * { export * }
+
+ // Exclude this; it's an optional component of the ExecutionEngine.
+ exclude header "ExecutionEngine/OProfileWrapper.h"
+
+ // Exclude these; they're intended to be included into only a single
+ // translation unit (or none) and aren't part of this module.
+ exclude header "ExecutionEngine/JIT.h"
+ exclude header "ExecutionEngine/MCJIT.h"
+ exclude header "ExecutionEngine/Interpreter.h"
+ exclude header "ExecutionEngine/OrcMCJITReplacement.h"
+}
+
+module LLVM_IR {
+ requires cplusplus
+
+ // FIXME: Is this the right place for these?
+ module Pass { header "Pass.h" export * }
+ module PassSupport { header "PassSupport.h" export * }
+ module PassAnalysisSupport { header "PassAnalysisSupport.h" export * }
+ module PassRegistry { header "PassRegistry.h" export * }
+ module InitializePasses { header "InitializePasses.h" export * }
+
+ umbrella "IR"
+ module * { export * }
+
+ // These are intended for (repeated) textual inclusion.
+ textual header "IR/DebugInfoFlags.def"
+ textual header "IR/Instruction.def"
+ textual header "IR/Metadata.def"
+ textual header "IR/Value.def"
+}
+
+module LLVM_IRReader { requires cplusplus umbrella "IRReader" module * { export * } }
+module LLVM_LineEditor { requires cplusplus umbrella "LineEditor" module * { export * } }
+module LLVM_LTO { requires cplusplus umbrella "LTO" module * { export * } }
+
+module LLVM_MC {
+ requires cplusplus
+
+ // FIXME: Mislayered?
+ module Support_TargetRegistry {
+ header "Support/TargetRegistry.h"
+ export *
+ }
+
+ umbrella "MC"
+ module * { export * }
+
+ // Exclude this; it's fundamentally non-modular.
+ exclude header "MC/MCTargetOptionsCommandFlags.h"
+}
+
+module LLVM_Object {
+ requires cplusplus
+ umbrella "Object"
+ module * { export * }
+}
+
+module LLVM_Option { requires cplusplus umbrella "Option" module * { export * } }
+module LLVM_TableGen { requires cplusplus umbrella "TableGen" module * { export * } }
+
+module LLVM_Transforms {
+ requires cplusplus
+ umbrella "Transforms"
+ module * { export * }
+
+ // FIXME: Excluded because it does bad things with the legacy pass manager.
+ exclude header "Transforms/IPO/PassManagerBuilder.h"
+}
+
+// A module covering ADT/ and Support/. These are intertwined and
+// codependent, and notionally form a single module.
+module LLVM_Utils {
+ module ADT {
+ requires cplusplus
+
+ umbrella "ADT"
+ module * { export * }
+ }
+
+ module Support {
+ requires cplusplus
+
+ umbrella "Support"
+ module * { export * }
+
+ // Exclude this; it's only included on Solaris.
+ exclude header "Support/Solaris.h"
+
+ // Exclude this; it's only included on AIX and fundamentally non-modular.
+ exclude header "Support/AIXDataTypesFix.h"
+
+ // Exclude this; it's fundamentally non-modular.
+ exclude header "Support/PluginLoader.h"
+
+ // FIXME: Mislayered?
+ exclude header "Support/TargetRegistry.h"
+
+ // These are intended for textual inclusion.
+ textual header "Support/ARMTargetParser.def"
+ textual header "Support/Dwarf.def"
+ textual header "Support/ELFRelocs/AArch64.def"
+ textual header "Support/ELFRelocs/ARM.def"
+ textual header "Support/ELFRelocs/AVR.def"
+ textual header "Support/ELFRelocs/Hexagon.def"
+ textual header "Support/ELFRelocs/i386.def"
+ textual header "Support/ELFRelocs/Mips.def"
+ textual header "Support/ELFRelocs/PowerPC64.def"
+ textual header "Support/ELFRelocs/PowerPC.def"
+ textual header "Support/ELFRelocs/Sparc.def"
+ textual header "Support/ELFRelocs/SystemZ.def"
+ textual header "Support/ELFRelocs/x86_64.def"
+ }
+
+ // This part of the module is usable from both C and C++ code.
+ module ConvertUTF {
+ header "Support/ConvertUTF.h"
+ export *
+ }
+}
+
+module LLVM_CodeGen_MachineValueType {
+ requires cplusplus
+ header "CodeGen/MachineValueType.h"
+ export *
+}
+
+// This is used for a $src == $build compilation. Otherwise we use
+// LLVM_Support_DataTypes_Build, defined in a module map that is
+// copied into the build area.
+module LLVM_Support_DataTypes_Src {
+ header "llvm/Support/DataTypes.h"
+ export *
+}
diff --git a/contrib/llvm/include/llvm/module.modulemap.build b/contrib/llvm/include/llvm/module.modulemap.build
new file mode 100644
index 0000000..7150fe9
--- /dev/null
+++ b/contrib/llvm/include/llvm/module.modulemap.build
@@ -0,0 +1,5 @@
+// This is copied into the build area for a $src != $build compilation.
+module LLVM_Support_DataTypes {
+ header "Support/DataTypes.h"
+ export *
+}